diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__info__.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/__info__.py new file mode 100644 index 0000000000000000000000000000000000000000..ecb75d49504fab7a43ef9bb21cf637dd7a7cd5ef --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/__info__.py @@ -0,0 +1,221 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2024 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE +''' +----------------------------------------------------------------- +multiprocess: better multiprocessing and multithreading in Python +----------------------------------------------------------------- + +About Multiprocess +================== + +``multiprocess`` is a fork of ``multiprocessing``. ``multiprocess`` extends ``multiprocessing`` to provide enhanced serialization, using `dill`. ``multiprocess`` leverages ``multiprocessing`` to support the spawning of processes using the API of the Python standard library's ``threading`` module. ``multiprocessing`` has been distributed as part of the standard library since Python 2.6. + +``multiprocess`` is part of ``pathos``, a Python framework for heterogeneous computing. +``multiprocess`` is in active development, so any user feedback, bug reports, comments, +or suggestions are highly appreciated. A list of issues is located at https://github.com/uqfoundation/multiprocess/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query. + + +Major Features +============== + +``multiprocess`` enables: + + - objects to be transferred between processes using pipes or multi-producer/multi-consumer queues + - objects to be shared between processes using a server process or (for simple data) shared memory + +``multiprocess`` provides: + + - equivalents of all the synchronization primitives in ``threading`` + - a ``Pool`` class to facilitate submitting tasks to worker processes + - enhanced serialization, using ``dill`` + + +Current Release +=============== + +The latest released version of ``multiprocess`` is available from: + + https://pypi.org/project/multiprocess + +``multiprocess`` is distributed under a 3-clause BSD license, and is a fork of ``multiprocessing``. + + +Development Version +=================== + +You can get the latest development version with all the shiny new features at: + + https://github.com/uqfoundation + +If you have a new contribution, please submit a pull request. + + +Installation +============ + +``multiprocess`` can be installed with ``pip``:: + + $ pip install multiprocess + +For Python 2, a C compiler is required to build the included extension module from source. Python 3 and binary installs do not require a C compiler. + + +Requirements +============ + +``multiprocess`` requires: + + - ``python`` (or ``pypy``), **>=3.8** + - ``setuptools``, **>=42** + - ``dill``, **>=0.3.8** + + +Basic Usage +=========== + +The ``multiprocess.Process`` class follows the API of ``threading.Thread``. 
+For example :: + + from multiprocess import Process, Queue + + def f(q): + q.put('hello world') + + if __name__ == '__main__': + q = Queue() + p = Process(target=f, args=[q]) + p.start() + print (q.get()) + p.join() + +Synchronization primitives like locks, semaphores and conditions are +available, for example :: + + >>> from multiprocess import Condition + >>> c = Condition() + >>> print (c) + ), 0> + >>> c.acquire() + True + >>> print (c) + ), 0> + +One can also use a manager to create shared objects either in shared +memory or in a server process, for example :: + + >>> from multiprocess import Manager + >>> manager = Manager() + >>> l = manager.list(range(10)) + >>> l.reverse() + >>> print (l) + [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + >>> print (repr(l)) + + +Tasks can be offloaded to a pool of worker processes in various ways, +for example :: + + >>> from multiprocess import Pool + >>> def f(x): return x*x + ... + >>> p = Pool(4) + >>> result = p.map_async(f, range(10)) + >>> print (result.get(timeout=1)) + [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] + +When ``dill`` is installed, serialization is extended to most objects, +for example :: + + >>> from multiprocess import Pool + >>> p = Pool(4) + >>> print (p.map(lambda x: (lambda y:y**2)(x) + x, xrange(10))) + [0, 2, 6, 12, 20, 30, 42, 56, 72, 90] + + +More Information +================ + +Probably the best way to get started is to look at the documentation at +http://multiprocess.rtfd.io. Also see ``multiprocess.tests`` for scripts that +demonstrate how ``multiprocess`` can be used to leverge multiple processes +to execute Python in parallel. You can run the test suite with +``python -m multiprocess.tests``. As ``multiprocess`` conforms to the +``multiprocessing`` interface, the examples and documentation found at +http://docs.python.org/library/multiprocessing.html also apply to +``multiprocess`` if one will ``import multiprocessing as multiprocess``. +See https://github.com/uqfoundation/multiprocess/tree/master/py3.12/examples +for a set of examples that demonstrate some basic use cases and benchmarking +for running Python code in parallel. Please feel free to submit a ticket on +github, or ask a question on stackoverflow (**@Mike McKerns**). If you would +like to share how you use ``multiprocess`` in your work, please send an email +(to **mmckerns at uqfoundation dot org**). + + +Citation +======== + +If you use ``multiprocess`` to do research that leads to publication, we ask that you +acknowledge use of ``multiprocess`` by citing the following in your publication:: + + M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis, + "Building a framework for predictive science", Proceedings of + the 10th Python in Science Conference, 2011; + http://arxiv.org/pdf/1202.1056 + + Michael McKerns and Michael Aivazis, + "pathos: a framework for heterogeneous computing", 2010- ; + https://uqfoundation.github.io/project/pathos + +Please see https://uqfoundation.github.io/project/pathos or +http://arxiv.org/pdf/1202.1056 for further information. + +''' + +__all__ = [] +__version__ = '0.70.16' +__author__ = 'Mike McKerns' + +__license__ = ''' +Copyright (c) 2008-2016 California Institute of Technology. +Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. +All rights reserved. + +This software forks the python package "multiprocessing". Licence and +copyright information for multiprocessing can be found in "COPYING". + +This software is available subject to the conditions and terms laid +out below. 
By downloading and using this software you are agreeing +to the following conditions. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + - Neither the names of the copyright holders nor the names of any of + the contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +''' diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/__info__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/__info__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a5e516cede929d73890688ca5c22c9b82c9759e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/__info__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4f84483d0061dbff308e3377941213b3374eee8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/connection.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/connection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5fe6ef3e4756f418774639c6994a99cfcba088e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/connection.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/context.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37fe37dae979ca344221ecb2f09b8bd2ade5a030 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/context.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/forkserver.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/forkserver.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..ca35b5e02fbfa92113a51d63bd5794f74c394386 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/forkserver.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/heap.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/heap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3571c91a967fa0a8a135c01982c12120b73f5e10 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/heap.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/managers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/managers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a3f1215d7602c61cf1f8d6c94dd11e2e8df6405 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/managers.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/pool.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/pool.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4562841da6ef313387170101f065ce551d2cc071 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/pool.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_fork.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_fork.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32f3884c00442a1affe2a33c4af2f0439d320787 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_fork.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_forkserver.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_forkserver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57cafc45d214aa8836337e6711b3df7cb8054470 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_forkserver.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_posix.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_posix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3c3376427e1bca8e34c725ad839ae07a4b0c2e7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_posix.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_win32.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_win32.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..067e36989e3d6ca14775eede6c3d245ad91c4442 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_win32.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/process.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/process.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..47a1a005cd7aecde485cdb260a6bfcf9c667b93d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/process.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/queues.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/queues.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7693dd5b8f17ff6fca4e5442ba21b357c174e55a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/queues.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/reduction.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a4f2d75b9c1a849d9d0e8130152da95b59d3a25 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/reduction.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/resource_sharer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/resource_sharer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a723083ac942fc4b669ae6338e28a6ea0894ba06 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/resource_sharer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/resource_tracker.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/resource_tracker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..107d50a8dd2c24853e3538e1f5497dda9f789fac Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/resource_tracker.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/shared_memory.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/shared_memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7b208a9ac7ba8fc2a5c4e1f7a2e6cce5a9fd264 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/shared_memory.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/sharedctypes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/sharedctypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d727c0a1fa4b81d00dbc1574148ee00be3fee761 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/sharedctypes.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/spawn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/spawn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d948fdc4e75fde6e5f34754da6ddd2b92b7f517a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/spawn.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/synchronize.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/synchronize.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..fa32d8912ec8799a10cb46e3e6c66c55bcfeb38d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/synchronize.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dca527ab3995b44d0773a4e0342866345721a301 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/__pycache__/util.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/context.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/context.py new file mode 100644 index 0000000000000000000000000000000000000000..e8f29c65fe98b4c16598cdd3c93be195e1a84676 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/context.py @@ -0,0 +1,376 @@ +import os +import sys +import threading + +from . import process +from . import reduction + +__all__ = () + +# +# Exceptions +# + +class ProcessError(Exception): + pass + +class BufferTooShort(ProcessError): + pass + +class TimeoutError(ProcessError): + pass + +class AuthenticationError(ProcessError): + pass + +# +# Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py +# + +class BaseContext(object): + + ProcessError = ProcessError + BufferTooShort = BufferTooShort + TimeoutError = TimeoutError + AuthenticationError = AuthenticationError + + current_process = staticmethod(process.current_process) + parent_process = staticmethod(process.parent_process) + active_children = staticmethod(process.active_children) + + def cpu_count(self): + '''Returns the number of CPUs in the system''' + num = os.cpu_count() + if num is None: + raise NotImplementedError('cannot determine number of cpus') + else: + return num + + def Manager(self): + '''Returns a manager associated with a running server process + + The managers methods such as `Lock()`, `Condition()` and `Queue()` + can be used to create shared objects. 
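+
+        A minimal usage sketch (for illustration only; it mirrors the examples
+        in the package-level docstring)::
+
+            from multiprocess import Manager
+
+            with Manager() as manager:
+                shared = manager.list([1, 2, 3])
+                shared.append(4)
+                print(list(shared))   # [1, 2, 3, 4]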
+ ''' + from .managers import SyncManager + m = SyncManager(ctx=self.get_context()) + m.start() + return m + + def Pipe(self, duplex=True): + '''Returns two connection object connected by a pipe''' + from .connection import Pipe + return Pipe(duplex) + + def Lock(self): + '''Returns a non-recursive lock object''' + from .synchronize import Lock + return Lock(ctx=self.get_context()) + + def RLock(self): + '''Returns a recursive lock object''' + from .synchronize import RLock + return RLock(ctx=self.get_context()) + + def Condition(self, lock=None): + '''Returns a condition object''' + from .synchronize import Condition + return Condition(lock, ctx=self.get_context()) + + def Semaphore(self, value=1): + '''Returns a semaphore object''' + from .synchronize import Semaphore + return Semaphore(value, ctx=self.get_context()) + + def BoundedSemaphore(self, value=1): + '''Returns a bounded semaphore object''' + from .synchronize import BoundedSemaphore + return BoundedSemaphore(value, ctx=self.get_context()) + + def Event(self): + '''Returns an event object''' + from .synchronize import Event + return Event(ctx=self.get_context()) + + def Barrier(self, parties, action=None, timeout=None): + '''Returns a barrier object''' + from .synchronize import Barrier + return Barrier(parties, action, timeout, ctx=self.get_context()) + + def Queue(self, maxsize=0): + '''Returns a queue object''' + from .queues import Queue + return Queue(maxsize, ctx=self.get_context()) + + def JoinableQueue(self, maxsize=0): + '''Returns a queue object''' + from .queues import JoinableQueue + return JoinableQueue(maxsize, ctx=self.get_context()) + + def SimpleQueue(self): + '''Returns a queue object''' + from .queues import SimpleQueue + return SimpleQueue(ctx=self.get_context()) + + def Pool(self, processes=None, initializer=None, initargs=(), + maxtasksperchild=None): + '''Returns a process pool object''' + from .pool import Pool + return Pool(processes, initializer, initargs, maxtasksperchild, + context=self.get_context()) + + def RawValue(self, typecode_or_type, *args): + '''Returns a shared object''' + from .sharedctypes import RawValue + return RawValue(typecode_or_type, *args) + + def RawArray(self, typecode_or_type, size_or_initializer): + '''Returns a shared array''' + from .sharedctypes import RawArray + return RawArray(typecode_or_type, size_or_initializer) + + def Value(self, typecode_or_type, *args, lock=True): + '''Returns a synchronized shared object''' + from .sharedctypes import Value + return Value(typecode_or_type, *args, lock=lock, + ctx=self.get_context()) + + def Array(self, typecode_or_type, size_or_initializer, *, lock=True): + '''Returns a synchronized shared array''' + from .sharedctypes import Array + return Array(typecode_or_type, size_or_initializer, lock=lock, + ctx=self.get_context()) + + def freeze_support(self): + '''Check whether this is a fake forked process in a frozen executable. + If so then run code specified by commandline and exit. + ''' + if sys.platform == 'win32' and getattr(sys, 'frozen', False): + from .spawn import freeze_support + freeze_support() + + def get_logger(self): + '''Return package logger -- if it does not already exist then + it is created. 
+ ''' + from .util import get_logger + return get_logger() + + def log_to_stderr(self, level=None): + '''Turn on logging and add a handler which prints to stderr''' + from .util import log_to_stderr + return log_to_stderr(level) + + def allow_connection_pickling(self): + '''Install support for sending connections and sockets + between processes + ''' + # This is undocumented. In previous versions of multiprocessing + # its only effect was to make socket objects inheritable on Windows. + from . import connection + + def set_executable(self, executable): + '''Sets the path to a python.exe or pythonw.exe binary used to run + child processes instead of sys.executable when using the 'spawn' + start method. Useful for people embedding Python. + ''' + from .spawn import set_executable + set_executable(executable) + + def set_forkserver_preload(self, module_names): + '''Set list of module names to try to load in forkserver process. + This is really just a hint. + ''' + from .forkserver import set_forkserver_preload + set_forkserver_preload(module_names) + + def get_context(self, method=None): + if method is None: + return self + try: + ctx = _concrete_contexts[method] + except KeyError: + raise ValueError('cannot find context for %r' % method) from None + ctx._check_available() + return ctx + + def get_start_method(self, allow_none=False): + return self._name + + def set_start_method(self, method, force=False): + raise ValueError('cannot set start method of concrete context') + + @property + def reducer(self): + '''Controls how objects will be reduced to a form that can be + shared with other processes.''' + return globals().get('reduction') + + @reducer.setter + def reducer(self, reduction): + globals()['reduction'] = reduction + + def _check_available(self): + pass + +# +# Type of default context -- underlying context can be set at most once +# + +class Process(process.BaseProcess): + _start_method = None + @staticmethod + def _Popen(process_obj): + return _default_context.get_context().Process._Popen(process_obj) + + @staticmethod + def _after_fork(): + return _default_context.get_context().Process._after_fork() + +class DefaultContext(BaseContext): + Process = Process + + def __init__(self, context): + self._default_context = context + self._actual_context = None + + def get_context(self, method=None): + if method is None: + if self._actual_context is None: + self._actual_context = self._default_context + return self._actual_context + else: + return super().get_context(method) + + def set_start_method(self, method, force=False): + if self._actual_context is not None and not force: + raise RuntimeError('context has already been set') + if method is None and force: + self._actual_context = None + return + self._actual_context = self.get_context(method) + + def get_start_method(self, allow_none=False): + if self._actual_context is None: + if allow_none: + return None + self._actual_context = self._default_context + return self._actual_context._name + + def get_all_start_methods(self): + if sys.platform == 'win32': + return ['spawn'] + else: + methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] + if reduction.HAVE_SEND_HANDLE: + methods.append('forkserver') + return methods + + +# +# Context types for fixed start method +# + +if sys.platform != 'win32': + + class ForkProcess(process.BaseProcess): + _start_method = 'fork' + @staticmethod + def _Popen(process_obj): + from .popen_fork import Popen + return Popen(process_obj) + + class SpawnProcess(process.BaseProcess): + 
_start_method = 'spawn' + @staticmethod + def _Popen(process_obj): + from .popen_spawn_posix import Popen + return Popen(process_obj) + + @staticmethod + def _after_fork(): + # process is spawned, nothing to do + pass + + class ForkServerProcess(process.BaseProcess): + _start_method = 'forkserver' + @staticmethod + def _Popen(process_obj): + from .popen_forkserver import Popen + return Popen(process_obj) + + class ForkContext(BaseContext): + _name = 'fork' + Process = ForkProcess + + class SpawnContext(BaseContext): + _name = 'spawn' + Process = SpawnProcess + + class ForkServerContext(BaseContext): + _name = 'forkserver' + Process = ForkServerProcess + def _check_available(self): + if not reduction.HAVE_SEND_HANDLE: + raise ValueError('forkserver start method not available') + + _concrete_contexts = { + 'fork': ForkContext(), + 'spawn': SpawnContext(), + 'forkserver': ForkServerContext(), + } + if sys.platform == 'darwin': + # bpo-33725: running arbitrary code after fork() is no longer reliable + # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. + _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn + else: + _default_context = DefaultContext(_concrete_contexts['fork']) + +else: + + class SpawnProcess(process.BaseProcess): + _start_method = 'spawn' + @staticmethod + def _Popen(process_obj): + from .popen_spawn_win32 import Popen + return Popen(process_obj) + + @staticmethod + def _after_fork(): + # process is spawned, nothing to do + pass + + class SpawnContext(BaseContext): + _name = 'spawn' + Process = SpawnProcess + + _concrete_contexts = { + 'spawn': SpawnContext(), + } + _default_context = DefaultContext(_concrete_contexts['spawn']) + +# +# Force the start method +# + +def _force_start_method(method): + _default_context._actual_context = _concrete_contexts[method] + +# +# Check that the current thread is spawning a child process +# + +_tls = threading.local() + +def get_spawning_popen(): + return getattr(_tls, 'spawning_popen', None) + +def set_spawning_popen(popen): + _tls.spawning_popen = popen + +def assert_spawning(obj): + if get_spawning_popen() is None: + raise RuntimeError( + '%s objects should only be shared between processes' + ' through inheritance' % type(obj).__name__ + ) diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/__init__.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6a1468609e347b3a0b9281e5c9e6ec311fcb37e5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/__init__.py @@ -0,0 +1,126 @@ +# +# Support for the API of the multiprocessing package using threads +# +# multiprocessing/dummy/__init__.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
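+#
+# This module re-implements the multiprocess API on top of threads, so the
+# objects it exports are thread-backed stand-ins for their process-based
+# counterparts.  A minimal sketch of intended use (illustrative only):
+#
+#     from multiprocess.dummy import Pool
+#
+#     with Pool(4) as pool:
+#         print(pool.map(len, ['a', 'bb', 'ccc']))   # [1, 2, 3]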
+# + +__all__ = [ + 'Process', 'current_process', 'active_children', 'freeze_support', + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', + 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' + ] + +# +# Imports +# + +import threading +import sys +import weakref +import array + +from .connection import Pipe +from threading import Lock, RLock, Semaphore, BoundedSemaphore +from threading import Event, Condition, Barrier +from queue import Queue + +# +# +# + +class DummyProcess(threading.Thread): + + def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): + threading.Thread.__init__(self, group, target, name, args, kwargs) + self._pid = None + self._children = weakref.WeakKeyDictionary() + self._start_called = False + self._parent = current_process() + + def start(self): + if self._parent is not current_process(): + raise RuntimeError( + "Parent is {0!r} but current_process is {1!r}".format( + self._parent, current_process())) + self._start_called = True + if hasattr(self._parent, '_children'): + self._parent._children[self] = None + threading.Thread.start(self) + + @property + def exitcode(self): + if self._start_called and not self.is_alive(): + return 0 + else: + return None + +# +# +# + +Process = DummyProcess +current_process = threading.current_thread +current_process()._children = weakref.WeakKeyDictionary() + +def active_children(): + children = current_process()._children + for p in list(children): + if not p.is_alive(): + children.pop(p, None) + return list(children) + +def freeze_support(): + pass + +# +# +# + +class Namespace(object): + def __init__(self, /, **kwds): + self.__dict__.update(kwds) + def __repr__(self): + items = list(self.__dict__.items()) + temp = [] + for name, value in items: + if not name.startswith('_'): + temp.append('%s=%r' % (name, value)) + temp.sort() + return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) + +dict = dict +list = list + +def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + +class Value(object): + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + + @property + def value(self): + return self._value + + @value.setter + def value(self, value): + self._value = value + + def __repr__(self): + return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) + +def Manager(): + return sys.modules[__name__] + +def shutdown(): + pass + +def Pool(processes=None, initializer=None, initargs=()): + from ..pool import ThreadPool + return ThreadPool(processes, initializer, initargs) + +JoinableQueue = Queue diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a418d841c6630ed4bb19d6800c4c7b0ba6119980 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/connection.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/connection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d377963ccb0403930d7b5b71aa8abc06fcd450bc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/connection.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/connection.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..f0ce320fcf514083f3a6477e87abf40e9719285a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/dummy/connection.py @@ -0,0 +1,75 @@ +# +# Analogue of `multiprocessing.connection` which uses queues instead of sockets +# +# multiprocessing/dummy/connection.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ 'Client', 'Listener', 'Pipe' ] + +from queue import Queue + + +families = [None] + + +class Listener(object): + + def __init__(self, address=None, family=None, backlog=1): + self._backlog_queue = Queue(backlog) + + def accept(self): + return Connection(*self._backlog_queue.get()) + + def close(self): + self._backlog_queue = None + + @property + def address(self): + return self._backlog_queue + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +def Client(address): + _in, _out = Queue(), Queue() + address.put((_out, _in)) + return Connection(_in, _out) + + +def Pipe(duplex=True): + a, b = Queue(), Queue() + return Connection(a, b), Connection(b, a) + + +class Connection(object): + + def __init__(self, _in, _out): + self._out = _out + self._in = _in + self.send = self.send_bytes = _out.put + self.recv = self.recv_bytes = _in.get + + def poll(self, timeout=0.0): + if self._in.qsize() > 0: + return True + if timeout <= 0.0: + return False + with self._in.not_empty: + self._in.not_empty.wait(timeout) + return self._in.qsize() > 0 + + def close(self): + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/forkserver.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/forkserver.py new file mode 100644 index 0000000000000000000000000000000000000000..f988506e1a6f30f04d5680f62bf9ae965f993254 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/forkserver.py @@ -0,0 +1,347 @@ +import errno +import os +import selectors +import signal +import socket +import struct +import sys +import threading +import warnings + +from . import connection +from . import process +from .context import reduction +from . import resource_tracker +from . import spawn +from . 
import util + +__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', + 'set_forkserver_preload'] + +# +# +# + +MAXFDS_TO_SEND = 256 +SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t + +# +# Forkserver class +# + +class ForkServer(object): + + def __init__(self): + self._forkserver_address = None + self._forkserver_alive_fd = None + self._forkserver_pid = None + self._inherited_fds = None + self._lock = threading.Lock() + self._preload_modules = ['__main__'] + + def _stop(self): + # Method used by unit tests to stop the server + with self._lock: + self._stop_unlocked() + + def _stop_unlocked(self): + if self._forkserver_pid is None: + return + + # close the "alive" file descriptor asks the server to stop + os.close(self._forkserver_alive_fd) + self._forkserver_alive_fd = None + + os.waitpid(self._forkserver_pid, 0) + self._forkserver_pid = None + + if not util.is_abstract_socket_namespace(self._forkserver_address): + os.unlink(self._forkserver_address) + self._forkserver_address = None + + def set_forkserver_preload(self, modules_names): + '''Set list of module names to try to load in forkserver process.''' + if not all(type(mod) is str for mod in self._preload_modules): + raise TypeError('module_names must be a list of strings') + self._preload_modules = modules_names + + def get_inherited_fds(self): + '''Return list of fds inherited from parent process. + + This returns None if the current process was not started by fork + server. + ''' + return self._inherited_fds + + def connect_to_new_process(self, fds): + '''Request forkserver to create a child process. + + Returns a pair of fds (status_r, data_w). The calling process can read + the child process's pid and (eventually) its returncode from status_r. + The calling process should write to data_w the pickled preparation and + process data. + ''' + self.ensure_running() + if len(fds) + 4 >= MAXFDS_TO_SEND: + raise ValueError('too many fds') + with socket.socket(socket.AF_UNIX) as client: + client.connect(self._forkserver_address) + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + allfds = [child_r, child_w, self._forkserver_alive_fd, + resource_tracker.getfd()] + allfds += fds + try: + reduction.sendfds(client, allfds) + return parent_r, parent_w + except: + os.close(parent_r) + os.close(parent_w) + raise + finally: + os.close(child_r) + os.close(child_w) + + def ensure_running(self): + '''Make sure that a fork server is running. + + This can be called from any process. Note that usually a child + process will just reuse the forkserver started by its parent, so + ensure_running() will do nothing. + ''' + with self._lock: + resource_tracker.ensure_running() + if self._forkserver_pid is not None: + # forkserver was launched before, is it still running? 
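+                # (With os.WNOHANG, os.waitpid returns (0, 0) while the child
+                # is still running, so pid == 0 below means the previously
+                # launched forkserver process is still alive.)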
+ pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) + if not pid: + # still alive + return + # dead, launch it again + os.close(self._forkserver_alive_fd) + self._forkserver_address = None + self._forkserver_alive_fd = None + self._forkserver_pid = None + + cmd = ('from multiprocess.forkserver import main; ' + + 'main(%d, %d, %r, **%r)') + + if self._preload_modules: + desired_keys = {'main_path', 'sys_path'} + data = spawn.get_preparation_data('ignore') + data = {x: y for x, y in data.items() if x in desired_keys} + else: + data = {} + + with socket.socket(socket.AF_UNIX) as listener: + address = connection.arbitrary_address('AF_UNIX') + listener.bind(address) + if not util.is_abstract_socket_namespace(address): + os.chmod(address, 0o600) + listener.listen() + + # all client processes own the write end of the "alive" pipe; + # when they all terminate the read end becomes ready. + alive_r, alive_w = os.pipe() + try: + fds_to_pass = [listener.fileno(), alive_r] + cmd %= (listener.fileno(), alive_r, self._preload_modules, + data) + exe = spawn.get_executable() + args = [exe] + util._args_from_interpreter_flags() + args += ['-c', cmd] + pid = util.spawnv_passfds(exe, args, fds_to_pass) + except: + os.close(alive_w) + raise + finally: + os.close(alive_r) + self._forkserver_address = address + self._forkserver_alive_fd = alive_w + self._forkserver_pid = pid + +# +# +# + +def main(listener_fd, alive_r, preload, main_path=None, sys_path=None): + '''Run forkserver.''' + if preload: + if '__main__' in preload and main_path is not None: + process.current_process()._inheriting = True + try: + spawn.import_main_path(main_path) + finally: + del process.current_process()._inheriting + for modname in preload: + try: + __import__(modname) + except ImportError: + pass + + util._close_stdin() + + sig_r, sig_w = os.pipe() + os.set_blocking(sig_r, False) + os.set_blocking(sig_w, False) + + def sigchld_handler(*_unused): + # Dummy signal handler, doesn't do anything + pass + + handlers = { + # unblocking SIGCHLD allows the wakeup fd to notify our event loop + signal.SIGCHLD: sigchld_handler, + # protect the process from ^C + signal.SIGINT: signal.SIG_IGN, + } + old_handlers = {sig: signal.signal(sig, val) + for (sig, val) in handlers.items()} + + # calling os.write() in the Python signal handler is racy + signal.set_wakeup_fd(sig_w) + + # map child pids to client fds + pid_to_fd = {} + + with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ + selectors.DefaultSelector() as selector: + _forkserver._forkserver_address = listener.getsockname() + + selector.register(listener, selectors.EVENT_READ) + selector.register(alive_r, selectors.EVENT_READ) + selector.register(sig_r, selectors.EVENT_READ) + + while True: + try: + while True: + rfds = [key.fileobj for (key, events) in selector.select()] + if rfds: + break + + if alive_r in rfds: + # EOF because no more client processes left + assert os.read(alive_r, 1) == b'', "Not at EOF?" 
+ raise SystemExit + + if sig_r in rfds: + # Got SIGCHLD + os.read(sig_r, 65536) # exhaust + while True: + # Scan for child processes + try: + pid, sts = os.waitpid(-1, os.WNOHANG) + except ChildProcessError: + break + if pid == 0: + break + child_w = pid_to_fd.pop(pid, None) + if child_w is not None: + returncode = os.waitstatus_to_exitcode(sts) + # Send exit code to client process + try: + write_signed(child_w, returncode) + except BrokenPipeError: + # client vanished + pass + os.close(child_w) + else: + # This shouldn't happen really + warnings.warn('forkserver: waitpid returned ' + 'unexpected pid %d' % pid) + + if listener in rfds: + # Incoming fork request + with listener.accept()[0] as s: + # Receive fds from client + fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) + if len(fds) > MAXFDS_TO_SEND: + raise RuntimeError( + "Too many ({0:n}) fds to send".format( + len(fds))) + child_r, child_w, *fds = fds + s.close() + pid = os.fork() + if pid == 0: + # Child + code = 1 + try: + listener.close() + selector.close() + unused_fds = [alive_r, child_w, sig_r, sig_w] + unused_fds.extend(pid_to_fd.values()) + code = _serve_one(child_r, fds, + unused_fds, + old_handlers) + except Exception: + sys.excepthook(*sys.exc_info()) + sys.stderr.flush() + finally: + os._exit(code) + else: + # Send pid to client process + try: + write_signed(child_w, pid) + except BrokenPipeError: + # client vanished + pass + pid_to_fd[pid] = child_w + os.close(child_r) + for fd in fds: + os.close(fd) + + except OSError as e: + if e.errno != errno.ECONNABORTED: + raise + + +def _serve_one(child_r, fds, unused_fds, handlers): + # close unnecessary stuff and reset signal handlers + signal.set_wakeup_fd(-1) + for sig, val in handlers.items(): + signal.signal(sig, val) + for fd in unused_fds: + os.close(fd) + + (_forkserver._forkserver_alive_fd, + resource_tracker._resource_tracker._fd, + *_forkserver._inherited_fds) = fds + + # Run process object received over pipe + parent_sentinel = os.dup(child_r) + code = spawn._main(child_r, parent_sentinel) + + return code + + +# +# Read and write signed numbers +# + +def read_signed(fd): + data = b'' + length = SIGNED_STRUCT.size + while len(data) < length: + s = os.read(fd, length - len(data)) + if not s: + raise EOFError('unexpected EOF') + data += s + return SIGNED_STRUCT.unpack(data)[0] + +def write_signed(fd, n): + msg = SIGNED_STRUCT.pack(n) + while msg: + nbytes = os.write(fd, msg) + if nbytes == 0: + raise RuntimeError('should not get here') + msg = msg[nbytes:] + +# +# +# + +_forkserver = ForkServer() +ensure_running = _forkserver.ensure_running +get_inherited_fds = _forkserver.get_inherited_fds +connect_to_new_process = _forkserver.connect_to_new_process +set_forkserver_preload = _forkserver.set_forkserver_preload diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/managers.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/managers.py new file mode 100644 index 0000000000000000000000000000000000000000..50989cbe0a841dd91924e02df4eaaf50c9b5b2fe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/managers.py @@ -0,0 +1,1378 @@ +# +# Module providing manager classes for dealing +# with shared objects +# +# multiprocessing/managers.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
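+#
+# A minimal sketch of the typical custom-manager pattern (illustrative only;
+# the subclass and typeid names below are hypothetical):
+#
+#     from multiprocess.managers import BaseManager
+#
+#     class MyManager(BaseManager):
+#         pass
+#
+#     MyManager.register('shared_list', callable=list)
+#
+#     with MyManager() as m:
+#         proxy = m.shared_list()   # AutoProxy wrapping a list in the server
+#         proxy.append(1)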
+# + +__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] + +# +# Imports +# + +import sys +import threading +import signal +import array +import queue +import time +import types +import os +from os import getpid + +from traceback import format_exc + +from . import connection +from .context import reduction, get_spawning_popen, ProcessError +from . import pool +from . import process +from . import util +from . import get_context +try: + from . import shared_memory +except ImportError: + HAS_SHMEM = False +else: + HAS_SHMEM = True + __all__.append('SharedMemoryManager') + +# +# Register some things for pickling +# + +def reduce_array(a): + return array.array, (a.typecode, a.tobytes()) +reduction.register(array.array, reduce_array) + +view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] +if view_types[0] is not list: # only needed in Py3.0 + def rebuild_as_list(obj): + return list, (list(obj),) + for view_type in view_types: + reduction.register(view_type, rebuild_as_list) + +# +# Type for identifying shared objects +# + +class Token(object): + ''' + Type to uniquely identify a shared object + ''' + __slots__ = ('typeid', 'address', 'id') + + def __init__(self, typeid, address, id): + (self.typeid, self.address, self.id) = (typeid, address, id) + + def __getstate__(self): + return (self.typeid, self.address, self.id) + + def __setstate__(self, state): + (self.typeid, self.address, self.id) = state + + def __repr__(self): + return '%s(typeid=%r, address=%r, id=%r)' % \ + (self.__class__.__name__, self.typeid, self.address, self.id) + +# +# Function for communication with a manager's server process +# + +def dispatch(c, id, methodname, args=(), kwds={}): + ''' + Send a message to manager using connection `c` and return response + ''' + c.send((id, methodname, args, kwds)) + kind, result = c.recv() + if kind == '#RETURN': + return result + raise convert_to_error(kind, result) + +def convert_to_error(kind, result): + if kind == '#ERROR': + return result + elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): + if not isinstance(result, str): + raise TypeError( + "Result {0!r} (kind '{1}') type is {2}, not str".format( + result, kind, type(result))) + if kind == '#UNSERIALIZABLE': + return RemoteError('Unserializable message: %s\n' % result) + else: + return RemoteError(result) + else: + return ValueError('Unrecognized message type {!r}'.format(kind)) + +class RemoteError(Exception): + def __str__(self): + return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) + +# +# Functions for finding the method names of an object +# + +def all_methods(obj): + ''' + Return a list of names of methods of `obj` + ''' + temp = [] + for name in dir(obj): + func = getattr(obj, name) + if callable(func): + temp.append(name) + return temp + +def public_methods(obj): + ''' + Return a list of names of methods of `obj` which do not start with '_' + ''' + return [name for name in all_methods(obj) if name[0] != '_'] + +# +# Server which is run in a process controlled by a manager +# + +class Server(object): + ''' + Server class which runs in a process controlled by a manager object + ''' + public = ['shutdown', 'create', 'accept_connection', 'get_methods', + 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] + + def __init__(self, registry, address, authkey, serializer): + if not isinstance(authkey, bytes): + raise TypeError( + "Authkey {0!r} is type {1!s}, not bytes".format( + authkey, type(authkey))) + self.registry = registry + self.authkey = 
process.AuthenticationString(authkey) + Listener, Client = listener_client[serializer] + + # do authentication later + self.listener = Listener(address=address, backlog=16) + self.address = self.listener.address + + self.id_to_obj = {'0': (None, ())} + self.id_to_refcount = {} + self.id_to_local_proxy_obj = {} + self.mutex = threading.Lock() + + def serve_forever(self): + ''' + Run the server forever + ''' + self.stop_event = threading.Event() + process.current_process()._manager_server = self + try: + accepter = threading.Thread(target=self.accepter) + accepter.daemon = True + accepter.start() + try: + while not self.stop_event.is_set(): + self.stop_event.wait(1) + except (KeyboardInterrupt, SystemExit): + pass + finally: + if sys.stdout != sys.__stdout__: # what about stderr? + util.debug('resetting stdout, stderr') + sys.stdout = sys.__stdout__ + sys.stderr = sys.__stderr__ + sys.exit(0) + + def accepter(self): + while True: + try: + c = self.listener.accept() + except OSError: + continue + t = threading.Thread(target=self.handle_request, args=(c,)) + t.daemon = True + t.start() + + def _handle_request(self, c): + request = None + try: + connection.deliver_challenge(c, self.authkey) + connection.answer_challenge(c, self.authkey) + request = c.recv() + ignore, funcname, args, kwds = request + assert funcname in self.public, '%r unrecognized' % funcname + func = getattr(self, funcname) + except Exception: + msg = ('#TRACEBACK', format_exc()) + else: + try: + result = func(c, *args, **kwds) + except Exception: + msg = ('#TRACEBACK', format_exc()) + else: + msg = ('#RETURN', result) + + try: + c.send(msg) + except Exception as e: + try: + c.send(('#TRACEBACK', format_exc())) + except Exception: + pass + util.info('Failure to send message: %r', msg) + util.info(' ... request was %r', request) + util.info(' ... 
exception was %r', e) + + def handle_request(self, conn): + ''' + Handle a new connection + ''' + try: + self._handle_request(conn) + except SystemExit: + # Server.serve_client() calls sys.exit(0) on EOF + pass + finally: + conn.close() + + def serve_client(self, conn): + ''' + Handle requests from the proxies in a particular process/thread + ''' + util.debug('starting server thread to service %r', + threading.current_thread().name) + + recv = conn.recv + send = conn.send + id_to_obj = self.id_to_obj + + while not self.stop_event.is_set(): + + try: + methodname = obj = None + request = recv() + ident, methodname, args, kwds = request + try: + obj, exposed, gettypeid = id_to_obj[ident] + except KeyError as ke: + try: + obj, exposed, gettypeid = \ + self.id_to_local_proxy_obj[ident] + except KeyError: + raise ke + + if methodname not in exposed: + raise AttributeError( + 'method %r of %r object is not in exposed=%r' % + (methodname, type(obj), exposed) + ) + + function = getattr(obj, methodname) + + try: + res = function(*args, **kwds) + except Exception as e: + msg = ('#ERROR', e) + else: + typeid = gettypeid and gettypeid.get(methodname, None) + if typeid: + rident, rexposed = self.create(conn, typeid, res) + token = Token(typeid, self.address, rident) + msg = ('#PROXY', (rexposed, token)) + else: + msg = ('#RETURN', res) + + except AttributeError: + if methodname is None: + msg = ('#TRACEBACK', format_exc()) + else: + try: + fallback_func = self.fallback_mapping[methodname] + result = fallback_func( + self, conn, ident, obj, *args, **kwds + ) + msg = ('#RETURN', result) + except Exception: + msg = ('#TRACEBACK', format_exc()) + + except EOFError: + util.debug('got EOF -- exiting thread serving %r', + threading.current_thread().name) + sys.exit(0) + + except Exception: + msg = ('#TRACEBACK', format_exc()) + + try: + try: + send(msg) + except Exception: + send(('#UNSERIALIZABLE', format_exc())) + except Exception as e: + util.info('exception in thread serving %r', + threading.current_thread().name) + util.info(' ... message was %r', msg) + util.info(' ... exception was %r', e) + conn.close() + sys.exit(1) + + def fallback_getvalue(self, conn, ident, obj): + return obj + + def fallback_str(self, conn, ident, obj): + return str(obj) + + def fallback_repr(self, conn, ident, obj): + return repr(obj) + + fallback_mapping = { + '__str__':fallback_str, + '__repr__':fallback_repr, + '#GETVALUE':fallback_getvalue + } + + def dummy(self, c): + pass + + def debug_info(self, c): + ''' + Return some info --- useful to spot problems with refcounting + ''' + # Perhaps include debug info about 'c'? 
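+        # Build a human-readable report: one entry per tracked ident with its
+        # refcount and a truncated str() of the underlying object.  Ident '0'
+        # is the server's own placeholder entry and is skipped.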
+ with self.mutex: + result = [] + keys = list(self.id_to_refcount.keys()) + keys.sort() + for ident in keys: + if ident != '0': + result.append(' %s: refcount=%s\n %s' % + (ident, self.id_to_refcount[ident], + str(self.id_to_obj[ident][0])[:75])) + return '\n'.join(result) + + def number_of_objects(self, c): + ''' + Number of shared objects + ''' + # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' + return len(self.id_to_refcount) + + def shutdown(self, c): + ''' + Shutdown this process + ''' + try: + util.debug('manager received shutdown message') + c.send(('#RETURN', None)) + except: + import traceback + traceback.print_exc() + finally: + self.stop_event.set() + + def create(self, c, typeid, /, *args, **kwds): + ''' + Create a new shared object and return its id + ''' + with self.mutex: + callable, exposed, method_to_typeid, proxytype = \ + self.registry[typeid] + + if callable is None: + if kwds or (len(args) != 1): + raise ValueError( + "Without callable, must have one non-keyword argument") + obj = args[0] + else: + obj = callable(*args, **kwds) + + if exposed is None: + exposed = public_methods(obj) + if method_to_typeid is not None: + if not isinstance(method_to_typeid, dict): + raise TypeError( + "Method_to_typeid {0!r}: type {1!s}, not dict".format( + method_to_typeid, type(method_to_typeid))) + exposed = list(exposed) + list(method_to_typeid) + + ident = '%x' % id(obj) # convert to string because xmlrpclib + # only has 32 bit signed integers + util.debug('%r callable returned object with id %r', typeid, ident) + + self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) + if ident not in self.id_to_refcount: + self.id_to_refcount[ident] = 0 + + self.incref(c, ident) + return ident, tuple(exposed) + + def get_methods(self, c, token): + ''' + Return the methods of the shared object indicated by token + ''' + return tuple(self.id_to_obj[token.id][1]) + + def accept_connection(self, c, name): + ''' + Spawn a new thread to serve this connection + ''' + threading.current_thread().name = name + c.send(('#RETURN', None)) + self.serve_client(c) + + def incref(self, c, ident): + with self.mutex: + try: + self.id_to_refcount[ident] += 1 + except KeyError as ke: + # If no external references exist but an internal (to the + # manager) still does and a new external reference is created + # from it, restore the manager's tracking of it from the + # previously stashed internal ref. + if ident in self.id_to_local_proxy_obj: + self.id_to_refcount[ident] = 1 + self.id_to_obj[ident] = \ + self.id_to_local_proxy_obj[ident] + obj, exposed, gettypeid = self.id_to_obj[ident] + util.debug('Server re-enabled tracking & INCREF %r', ident) + else: + raise ke + + def decref(self, c, ident): + if ident not in self.id_to_refcount and \ + ident in self.id_to_local_proxy_obj: + util.debug('Server DECREF skipping %r', ident) + return + + with self.mutex: + if self.id_to_refcount[ident] <= 0: + raise AssertionError( + "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( + ident, self.id_to_obj[ident], + self.id_to_refcount[ident])) + self.id_to_refcount[ident] -= 1 + if self.id_to_refcount[ident] == 0: + del self.id_to_refcount[ident] + + if ident not in self.id_to_refcount: + # Two-step process in case the object turns out to contain other + # proxy objects (e.g. a managed list of managed lists). 
+ # Otherwise, deleting self.id_to_obj[ident] would trigger the + # deleting of the stored value (another managed object) which would + # in turn attempt to acquire the mutex that is already held here. + self.id_to_obj[ident] = (None, (), None) # thread-safe + util.debug('disposing of obj with id %r', ident) + with self.mutex: + del self.id_to_obj[ident] + + +# +# Class to represent state of a manager +# + +class State(object): + __slots__ = ['value'] + INITIAL = 0 + STARTED = 1 + SHUTDOWN = 2 + +# +# Mapping from serializer name to Listener and Client types +# + +listener_client = { #XXX: register dill? + 'pickle' : (connection.Listener, connection.Client), + 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) + } + +# +# Definition of BaseManager +# + +class BaseManager(object): + ''' + Base class for managers + ''' + _registry = {} + _Server = Server + + def __init__(self, address=None, authkey=None, serializer='pickle', + ctx=None): + if authkey is None: + authkey = process.current_process().authkey + self._address = address # XXX not final address if eg ('', 0) + self._authkey = process.AuthenticationString(authkey) + self._state = State() + self._state.value = State.INITIAL + self._serializer = serializer + self._Listener, self._Client = listener_client[serializer] + self._ctx = ctx or get_context() + + def get_server(self): + ''' + Return server object with serve_forever() method and address attribute + ''' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return Server(self._registry, self._address, + self._authkey, self._serializer) + + def connect(self): + ''' + Connect manager object to the server process + ''' + Listener, Client = listener_client[self._serializer] + conn = Client(self._address, authkey=self._authkey) + dispatch(conn, None, 'dummy') + self._state.value = State.STARTED + + def start(self, initializer=None, initargs=()): + ''' + Spawn a server process for this manager object + ''' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + + if initializer is not None and not callable(initializer): + raise TypeError('initializer must be a callable') + + # pipe over which we will retrieve address of server + reader, writer = connection.Pipe(duplex=False) + + # spawn process which runs a server + self._process = self._ctx.Process( + target=type(self)._run_server, + args=(self._registry, self._address, self._authkey, + self._serializer, writer, initializer, initargs), + ) + ident = ':'.join(str(i) for i in self._process._identity) + self._process.name = type(self).__name__ + '-' + ident + self._process.start() + + # get address of server + writer.close() + self._address = reader.recv() + reader.close() + + # register a finalizer + self._state.value = State.STARTED + self.shutdown = util.Finalize( + self, type(self)._finalize_manager, + args=(self._process, self._address, self._authkey, + self._state, self._Client), + exitpriority=0 + ) + + @classmethod + def _run_server(cls, registry, address, authkey, serializer, writer, + initializer=None, initargs=()): + ''' + Create a 
server, report its address and run it + ''' + # bpo-36368: protect server process from KeyboardInterrupt signals + signal.signal(signal.SIGINT, signal.SIG_IGN) + + if initializer is not None: + initializer(*initargs) + + # create server + server = cls._Server(registry, address, authkey, serializer) + + # inform parent process of the server's address + writer.send(server.address) + writer.close() + + # run the manager + util.info('manager serving at %r', server.address) + server.serve_forever() + + def _create(self, typeid, /, *args, **kwds): + ''' + Create a new shared object; return the token and exposed tuple + ''' + assert self._state.value == State.STARTED, 'server not yet started' + conn = self._Client(self._address, authkey=self._authkey) + try: + id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) + finally: + conn.close() + return Token(typeid, self._address, id), exposed + + def join(self, timeout=None): + ''' + Join the manager process (if it has been spawned) + ''' + if self._process is not None: + self._process.join(timeout) + if not self._process.is_alive(): + self._process = None + + def _debug_info(self): + ''' + Return some info about the servers shared objects and connections + ''' + conn = self._Client(self._address, authkey=self._authkey) + try: + return dispatch(conn, None, 'debug_info') + finally: + conn.close() + + def _number_of_objects(self): + ''' + Return the number of shared objects + ''' + conn = self._Client(self._address, authkey=self._authkey) + try: + return dispatch(conn, None, 'number_of_objects') + finally: + conn.close() + + def __enter__(self): + if self._state.value == State.INITIAL: + self.start() + if self._state.value != State.STARTED: + if self._state.value == State.INITIAL: + raise ProcessError("Unable to start server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown() + + @staticmethod + def _finalize_manager(process, address, authkey, state, _Client): + ''' + Shutdown the manager process; will be registered as a finalizer + ''' + if process.is_alive(): + util.info('sending shutdown message to manager') + try: + conn = _Client(address, authkey=authkey) + try: + dispatch(conn, None, 'shutdown') + finally: + conn.close() + except Exception: + pass + + process.join(timeout=1.0) + if process.is_alive(): + util.info('manager still alive') + if hasattr(process, 'terminate'): + util.info('trying to `terminate()` manager process') + process.terminate() + process.join(timeout=1.0) + if process.is_alive(): + util.info('manager still alive after terminate') + + state.value = State.SHUTDOWN + try: + del BaseProxy._address_to_local[address] + except KeyError: + pass + + @property + def address(self): + return self._address + + @classmethod + def register(cls, typeid, callable=None, proxytype=None, exposed=None, + method_to_typeid=None, create_method=True): + ''' + Register a typeid with the manager type + ''' + if '_registry' not in cls.__dict__: + cls._registry = cls._registry.copy() + + if proxytype is None: + proxytype = AutoProxy + + exposed = exposed or getattr(proxytype, '_exposed_', None) + + method_to_typeid = method_to_typeid or \ + getattr(proxytype, '_method_to_typeid_', None) + + if method_to_typeid: + for key, value in list(method_to_typeid.items()): # isinstance? 
+ assert type(key) is str, '%r is not a string' % key + assert type(value) is str, '%r is not a string' % value + + cls._registry[typeid] = ( + callable, exposed, method_to_typeid, proxytype + ) + + if create_method: + def temp(self, /, *args, **kwds): + util.debug('requesting creation of a shared %r object', typeid) + token, exp = self._create(typeid, *args, **kwds) + proxy = proxytype( + token, self._serializer, manager=self, + authkey=self._authkey, exposed=exp + ) + conn = self._Client(token.address, authkey=self._authkey) + dispatch(conn, None, 'decref', (token.id,)) + return proxy + temp.__name__ = typeid + setattr(cls, typeid, temp) + +# +# Subclass of set which get cleared after a fork +# + +class ProcessLocalSet(set): + def __init__(self): + util.register_after_fork(self, lambda obj: obj.clear()) + def __reduce__(self): + return type(self), () + +# +# Definition of BaseProxy +# + +class BaseProxy(object): + ''' + A base for proxies of shared objects + ''' + _address_to_local = {} + _mutex = util.ForkAwareThreadLock() + + def __init__(self, token, serializer, manager=None, + authkey=None, exposed=None, incref=True, manager_owned=False): + with BaseProxy._mutex: + tls_idset = BaseProxy._address_to_local.get(token.address, None) + if tls_idset is None: + tls_idset = util.ForkAwareLocal(), ProcessLocalSet() + BaseProxy._address_to_local[token.address] = tls_idset + + # self._tls is used to record the connection used by this + # thread to communicate with the manager at token.address + self._tls = tls_idset[0] + + # self._idset is used to record the identities of all shared + # objects for which the current process owns references and + # which are in the manager at token.address + self._idset = tls_idset[1] + + self._token = token + self._id = self._token.id + self._manager = manager + self._serializer = serializer + self._Client = listener_client[serializer][1] + + # Should be set to True only when a proxy object is being created + # on the manager server; primary use case: nested proxy objects. + # RebuildProxy detects when a proxy is being created on the manager + # and sets this value appropriately. 
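+ # When manager_owned is True, _incref() below skips sending INCREF to the
+ # server, which already tracks the referent in its id_to_local_proxy_obj map.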
+ self._owned_by_manager = manager_owned + + if authkey is not None: + self._authkey = process.AuthenticationString(authkey) + elif self._manager is not None: + self._authkey = self._manager._authkey + else: + self._authkey = process.current_process().authkey + + if incref: + self._incref() + + util.register_after_fork(self, BaseProxy._after_fork) + + def _connect(self): + util.debug('making connection to manager') + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + conn = self._Client(self._token.address, authkey=self._authkey) + dispatch(conn, None, 'accept_connection', (name,)) + self._tls.connection = conn + + def _callmethod(self, methodname, args=(), kwds={}): + ''' + Try to call a method of the referent and return a copy of the result + ''' + try: + conn = self._tls.connection + except AttributeError: + util.debug('thread %r does not own a connection', + threading.current_thread().name) + self._connect() + conn = self._tls.connection + + conn.send((self._id, methodname, args, kwds)) + kind, result = conn.recv() + + if kind == '#RETURN': + return result + elif kind == '#PROXY': + exposed, token = result + proxytype = self._manager._registry[token.typeid][-1] + token.address = self._token.address + proxy = proxytype( + token, self._serializer, manager=self._manager, + authkey=self._authkey, exposed=exposed + ) + conn = self._Client(token.address, authkey=self._authkey) + dispatch(conn, None, 'decref', (token.id,)) + return proxy + raise convert_to_error(kind, result) + + def _getvalue(self): + ''' + Get a copy of the value of the referent + ''' + return self._callmethod('#GETVALUE') + + def _incref(self): + if self._owned_by_manager: + util.debug('owned_by_manager skipped INCREF of %r', self._token.id) + return + + conn = self._Client(self._token.address, authkey=self._authkey) + dispatch(conn, None, 'incref', (self._id,)) + util.debug('INCREF %r', self._token.id) + + self._idset.add(self._id) + + state = self._manager and self._manager._state + + self._close = util.Finalize( + self, BaseProxy._decref, + args=(self._token, self._authkey, state, + self._tls, self._idset, self._Client), + exitpriority=10 + ) + + @staticmethod + def _decref(token, authkey, state, tls, idset, _Client): + idset.discard(token.id) + + # check whether manager is still alive + if state is None or state.value == State.STARTED: + # tell manager this process no longer cares about referent + try: + util.debug('DECREF %r', token.id) + conn = _Client(token.address, authkey=authkey) + dispatch(conn, None, 'decref', (token.id,)) + except Exception as e: + util.debug('... 
decref failed %s', e) + + else: + util.debug('DECREF %r -- manager already shutdown', token.id) + + # check whether we can close this thread's connection because + # the process owns no more references to objects for this manager + if not idset and hasattr(tls, 'connection'): + util.debug('thread %r has no more proxies so closing conn', + threading.current_thread().name) + tls.connection.close() + del tls.connection + + def _after_fork(self): + self._manager = None + try: + self._incref() + except Exception as e: + # the proxy may just be for a manager which has shutdown + util.info('incref failed: %s' % e) + + def __reduce__(self): + kwds = {} + if get_spawning_popen() is not None: + kwds['authkey'] = self._authkey + + if getattr(self, '_isauto', False): + kwds['exposed'] = self._exposed_ + return (RebuildProxy, + (AutoProxy, self._token, self._serializer, kwds)) + else: + return (RebuildProxy, + (type(self), self._token, self._serializer, kwds)) + + def __deepcopy__(self, memo): + return self._getvalue() + + def __repr__(self): + return '<%s object, typeid %r at %#x>' % \ + (type(self).__name__, self._token.typeid, id(self)) + + def __str__(self): + ''' + Return representation of the referent (or a fall-back if that fails) + ''' + try: + return self._callmethod('__repr__') + except Exception: + return repr(self)[:-1] + "; '__str__()' failed>" + +# +# Function used for unpickling +# + +def RebuildProxy(func, token, serializer, kwds): + ''' + Function used for unpickling proxy objects. + ''' + server = getattr(process.current_process(), '_manager_server', None) + if server and server.address == token.address: + util.debug('Rebuild a proxy owned by manager, token=%r', token) + kwds['manager_owned'] = True + if token.id not in server.id_to_local_proxy_obj: + server.id_to_local_proxy_obj[token.id] = \ + server.id_to_obj[token.id] + incref = ( + kwds.pop('incref', True) and + not getattr(process.current_process(), '_inheriting', False) + ) + return func(token, serializer, incref=incref, **kwds) + +# +# Functions to create proxies and proxy types +# + +def MakeProxyType(name, exposed, _cache={}): + ''' + Return a proxy type whose methods are given by `exposed` + ''' + exposed = tuple(exposed) + try: + return _cache[(name, exposed)] + except KeyError: + pass + + dic = {} + + for meth in exposed: + exec('''def %s(self, /, *args, **kwds): + return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) + + ProxyType = type(name, (BaseProxy,), dic) + ProxyType._exposed_ = exposed + _cache[(name, exposed)] = ProxyType + return ProxyType + + +def AutoProxy(token, serializer, manager=None, authkey=None, + exposed=None, incref=True, manager_owned=False): + ''' + Return an auto-proxy for `token` + ''' + _Client = listener_client[serializer][1] + + if exposed is None: + conn = _Client(token.address, authkey=authkey) + try: + exposed = dispatch(conn, None, 'get_methods', (token,)) + finally: + conn.close() + + if authkey is None and manager is not None: + authkey = manager._authkey + if authkey is None: + authkey = process.current_process().authkey + + ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) + proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, + incref=incref, manager_owned=manager_owned) + proxy._isauto = True + return proxy + +# +# Types/callables which we will register with SyncManager +# + +class Namespace(object): + def __init__(self, /, **kwds): + self.__dict__.update(kwds) + def __repr__(self): + items = list(self.__dict__.items()) + temp = [] + for 
name, value in items: + if not name.startswith('_'): + temp.append('%s=%r' % (name, value)) + temp.sort() + return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) + +class Value(object): + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + def get(self): + return self._value + def set(self, value): + self._value = value + def __repr__(self): + return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) + value = property(get, set) + +def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + +# +# Proxy types used by SyncManager +# + +class IteratorProxy(BaseProxy): + _exposed_ = ('__next__', 'send', 'throw', 'close') + def __iter__(self): + return self + def __next__(self, *args): + return self._callmethod('__next__', args) + def send(self, *args): + return self._callmethod('send', args) + def throw(self, *args): + return self._callmethod('throw', args) + def close(self, *args): + return self._callmethod('close', args) + + +class AcquirerProxy(BaseProxy): + _exposed_ = ('acquire', 'release') + def acquire(self, blocking=True, timeout=None): + args = (blocking,) if timeout is None else (blocking, timeout) + return self._callmethod('acquire', args) + def release(self): + return self._callmethod('release') + def __enter__(self): + return self._callmethod('acquire') + def __exit__(self, exc_type, exc_val, exc_tb): + return self._callmethod('release') + + +class ConditionProxy(AcquirerProxy): + _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + def notify(self, n=1): + return self._callmethod('notify', (n,)) + def notify_all(self): + return self._callmethod('notify_all') + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = getattr(time,'monotonic',time.time)() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - getattr(time,'monotonic',time.time)() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + + +class EventProxy(BaseProxy): + _exposed_ = ('is_set', 'set', 'clear', 'wait') + def is_set(self): + return self._callmethod('is_set') + def set(self): + return self._callmethod('set') + def clear(self): + return self._callmethod('clear') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + + +class BarrierProxy(BaseProxy): + _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + def abort(self): + return self._callmethod('abort') + def reset(self): + return self._callmethod('reset') + @property + def parties(self): + return self._callmethod('__getattribute__', ('parties',)) + @property + def n_waiting(self): + return self._callmethod('__getattribute__', ('n_waiting',)) + @property + def broken(self): + return self._callmethod('__getattribute__', ('broken',)) + + +class NamespaceProxy(BaseProxy): + _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') + def __getattr__(self, key): + if key[0] == '_': + return object.__getattribute__(self, key) + callmethod = object.__getattribute__(self, '_callmethod') + return callmethod('__getattribute__', (key,)) + def __setattr__(self, key, value): + if key[0] == '_': + return object.__setattr__(self, key, value) + callmethod = object.__getattribute__(self, 
'_callmethod') + return callmethod('__setattr__', (key, value)) + def __delattr__(self, key): + if key[0] == '_': + return object.__delattr__(self, key) + callmethod = object.__getattribute__(self, '_callmethod') + return callmethod('__delattr__', (key,)) + + +class ValueProxy(BaseProxy): + _exposed_ = ('get', 'set') + def get(self): + return self._callmethod('get') + def set(self, value): + return self._callmethod('set', (value,)) + value = property(get, set) + + __class_getitem__ = classmethod(types.GenericAlias) + + +BaseListProxy = MakeProxyType('BaseListProxy', ( + '__add__', '__contains__', '__delitem__', '__getitem__', '__len__', + '__mul__', '__reversed__', '__rmul__', '__setitem__', + 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', + 'reverse', 'sort', '__imul__' + )) +class ListProxy(BaseListProxy): + def __iadd__(self, value): + self._callmethod('extend', (value,)) + return self + def __imul__(self, value): + self._callmethod('__imul__', (value,)) + return self + + +DictProxy = MakeProxyType('DictProxy', ( + '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', + '__setitem__', 'clear', 'copy', 'get', 'items', + 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' + )) +DictProxy._method_to_typeid_ = { + '__iter__': 'Iterator', + } + + +ArrayProxy = MakeProxyType('ArrayProxy', ( + '__len__', '__getitem__', '__setitem__' + )) + + +BasePoolProxy = MakeProxyType('PoolProxy', ( + 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', + 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', + )) +BasePoolProxy._method_to_typeid_ = { + 'apply_async': 'AsyncResult', + 'map_async': 'AsyncResult', + 'starmap_async': 'AsyncResult', + 'imap': 'Iterator', + 'imap_unordered': 'Iterator' + } +class PoolProxy(BasePoolProxy): + def __enter__(self): + return self + def __exit__(self, exc_type, exc_val, exc_tb): + self.terminate() + +# +# Definition of SyncManager +# + +class SyncManager(BaseManager): + ''' + Subclass of `BaseManager` which supports a number of shared object types. + + The types registered are those intended for the synchronization + of threads, plus `dict`, `list` and `Namespace`. + + The `multiprocess.Manager()` function creates started instances of + this class. + ''' + +SyncManager.register('Queue', queue.Queue) +SyncManager.register('JoinableQueue', queue.Queue) +SyncManager.register('Event', threading.Event, EventProxy) +SyncManager.register('Lock', threading.Lock, AcquirerProxy) +SyncManager.register('RLock', threading.RLock, AcquirerProxy) +SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) +SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, + AcquirerProxy) +SyncManager.register('Condition', threading.Condition, ConditionProxy) +SyncManager.register('Barrier', threading.Barrier, BarrierProxy) +SyncManager.register('Pool', pool.Pool, PoolProxy) +SyncManager.register('list', list, ListProxy) +SyncManager.register('dict', dict, DictProxy) +SyncManager.register('Value', Value, ValueProxy) +SyncManager.register('Array', Array, ArrayProxy) +SyncManager.register('Namespace', Namespace, NamespaceProxy) + +# types returned by methods of PoolProxy +SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) +SyncManager.register('AsyncResult', create_method=False) + +# +# Definition of SharedMemoryManager and SharedMemoryServer +# + +if HAS_SHMEM: + class _SharedMemoryTracker: + "Manages one or more shared memory segments." 
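+ # Note: the tracker is picklable -- __getstate__/__setstate__ below rebuild
+ # it from the context name and the list of registered segment names.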
+ + def __init__(self, name, segment_names=[]): + self.shared_memory_context_name = name + self.segment_names = segment_names + + def register_segment(self, segment_name): + "Adds the supplied shared memory block name to tracker." + util.debug(f"Register segment {segment_name!r} in pid {getpid()}") + self.segment_names.append(segment_name) + + def destroy_segment(self, segment_name): + """Calls unlink() on the shared memory block with the supplied name + and removes it from the list of blocks being tracked.""" + util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") + self.segment_names.remove(segment_name) + segment = shared_memory.SharedMemory(segment_name) + segment.close() + segment.unlink() + + def unlink(self): + "Calls destroy_segment() on all tracked shared memory blocks." + for segment_name in self.segment_names[:]: + self.destroy_segment(segment_name) + + def __del__(self): + util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}") + self.unlink() + + def __getstate__(self): + return (self.shared_memory_context_name, self.segment_names) + + def __setstate__(self, state): + self.__init__(*state) + + + class SharedMemoryServer(Server): + + public = Server.public + \ + ['track_segment', 'release_segment', 'list_segments'] + + def __init__(self, *args, **kwargs): + Server.__init__(self, *args, **kwargs) + address = self.address + # The address of Linux abstract namespaces can be bytes + if isinstance(address, bytes): + address = os.fsdecode(address) + self.shared_memory_context = \ + _SharedMemoryTracker(f"shm_{address}_{getpid()}") + util.debug(f"SharedMemoryServer started by pid {getpid()}") + + def create(self, c, typeid, /, *args, **kwargs): + """Create a new distributed-shared object (not backed by a shared + memory block) and return its id to be used in a Proxy Object.""" + # Unless set up as a shared proxy, don't make shared_memory_context + # a standard part of kwargs. This makes things easier for supplying + # simple functions. + if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"): + kwargs['shared_memory_context'] = self.shared_memory_context + return Server.create(self, c, typeid, *args, **kwargs) + + def shutdown(self, c): + "Call unlink() on all tracked shared memory, terminate the Server." + self.shared_memory_context.unlink() + return Server.shutdown(self, c) + + def track_segment(self, c, segment_name): + "Adds the supplied shared memory block name to Server's tracker." + self.shared_memory_context.register_segment(segment_name) + + def release_segment(self, c, segment_name): + """Calls unlink() on the shared memory block with the supplied name + and removes it from the tracker instance inside the Server.""" + self.shared_memory_context.destroy_segment(segment_name) + + def list_segments(self, c): + """Returns a list of names of shared memory blocks that the Server + is currently tracking.""" + return self.shared_memory_context.segment_names + + + class SharedMemoryManager(BaseManager): + """Like SyncManager but uses SharedMemoryServer instead of Server. + + It provides methods for creating and returning SharedMemory instances + and for creating a list-like object (ShareableList) backed by shared + memory. It also provides methods that create and return Proxy Objects + that support synchronization across processes (i.e. multi-process-safe + locks and semaphores). 
+ """ + + _Server = SharedMemoryServer + + def __init__(self, *args, **kwargs): + if os.name == "posix": + # bpo-36867: Ensure the resource_tracker is running before + # launching the manager process, so that concurrent + # shared_memory manipulation both in the manager and in the + # current process does not create two resource_tracker + # processes. + from . import resource_tracker + resource_tracker.ensure_running() + BaseManager.__init__(self, *args, **kwargs) + util.debug(f"{self.__class__.__name__} created by pid {getpid()}") + + def __del__(self): + util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") + pass + + def get_server(self): + 'Better than monkeypatching for now; merge into Server ultimately' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started SharedMemoryServer") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("SharedMemoryManager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return self._Server(self._registry, self._address, + self._authkey, self._serializer) + + def SharedMemory(self, size): + """Returns a new SharedMemory instance with the specified size in + bytes, to be tracked by the manager.""" + with self._Client(self._address, authkey=self._authkey) as conn: + sms = shared_memory.SharedMemory(None, create=True, size=size) + try: + dispatch(conn, None, 'track_segment', (sms.name,)) + except BaseException as e: + sms.unlink() + raise e + return sms + + def ShareableList(self, sequence): + """Returns a new ShareableList instance populated with the values + from the input sequence, to be tracked by the manager.""" + with self._Client(self._address, authkey=self._authkey) as conn: + sl = shared_memory.ShareableList(sequence) + try: + dispatch(conn, None, 'track_segment', (sl.shm.name,)) + except BaseException as e: + sl.shm.unlink() + raise e + return sl diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/pool.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/pool.py new file mode 100644 index 0000000000000000000000000000000000000000..961d7e5991847af1ea8034a5a37d9159cf4614ae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/pool.py @@ -0,0 +1,957 @@ +# +# Module providing the `Pool` class for managing a process pool +# +# multiprocessing/pool.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = ['Pool', 'ThreadPool'] + +# +# Imports +# + +import collections +import itertools +import os +import queue +import threading +import time +import traceback +import types +import warnings + +# If threading is available then ThreadPool should be provided. Therefore +# we avoid top-level imports which are liable to fail on some systems. +from . import util +from . 
import get_context, TimeoutError +from .connection import wait + +# +# Constants representing the state of a pool +# + +INIT = "INIT" +RUN = "RUN" +CLOSE = "CLOSE" +TERMINATE = "TERMINATE" + +# +# Miscellaneous +# + +job_counter = itertools.count() + +def mapstar(args): + return list(map(*args)) + +def starmapstar(args): + return list(itertools.starmap(args[0], args[1])) + +# +# Hack to embed stringification of remote traceback in local traceback +# + +class RemoteTraceback(Exception): + def __init__(self, tb): + self.tb = tb + def __str__(self): + return self.tb + +class ExceptionWithTraceback: + def __init__(self, exc, tb): + tb = traceback.format_exception(type(exc), exc, tb) + tb = ''.join(tb) + self.exc = exc + self.tb = '\n"""\n%s"""' % tb + def __reduce__(self): + return rebuild_exc, (self.exc, self.tb) + +def rebuild_exc(exc, tb): + exc.__cause__ = RemoteTraceback(tb) + return exc + +# +# Code run by worker processes +# + +class MaybeEncodingError(Exception): + """Wraps possible unpickleable errors, so they can be + safely sent through the socket.""" + + def __init__(self, exc, value): + self.exc = repr(exc) + self.value = repr(value) + super(MaybeEncodingError, self).__init__(self.exc, self.value) + + def __str__(self): + return "Error sending result: '%s'. Reason: '%s'" % (self.value, + self.exc) + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self) + + +def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, + wrap_exception=False): + if (maxtasks is not None) and not (isinstance(maxtasks, int) + and maxtasks >= 1): + raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks)) + put = outqueue.put + get = inqueue.get + if hasattr(inqueue, '_writer'): + inqueue._writer.close() + outqueue._reader.close() + + if initializer is not None: + initializer(*initargs) + + completed = 0 + while maxtasks is None or (maxtasks and completed < maxtasks): + try: + task = get() + except (EOFError, OSError): + util.debug('worker got EOFError or OSError -- exiting') + break + + if task is None: + util.debug('worker got sentinel -- exiting') + break + + job, i, func, args, kwds = task + try: + result = (True, func(*args, **kwds)) + except Exception as e: + if wrap_exception and func is not _helper_reraises_exception: + e = ExceptionWithTraceback(e, e.__traceback__) + result = (False, e) + try: + put((job, i, result)) + except Exception as e: + wrapped = MaybeEncodingError(e, result[1]) + util.debug("Possible encoding error while sending result: %s" % ( + wrapped)) + put((job, i, (False, wrapped))) + + task = job = result = func = args = kwds = None + completed += 1 + util.debug('worker exiting after %d tasks' % completed) + +def _helper_reraises_exception(ex): + 'Pickle-able helper function for use by _guarded_task_generation.' + raise ex + +# +# Class representing a process pool +# + +class _PoolCache(dict): + """ + Class that implements a cache for the Pool class that will notify + the pool management threads every time the cache is emptied. The + notification is done by the use of a queue that is provided when + instantiating the cache. + """ + def __init__(self, /, *args, notifier=None, **kwds): + self.notifier = notifier + super().__init__(*args, **kwds) + + def __delitem__(self, item): + super().__delitem__(item) + + # Notify that the cache is empty. This is important because the + # pool keeps maintaining workers until the cache gets drained. 
This + # eliminates a race condition in which a task is finished after the + # the pool's _handle_workers method has enter another iteration of the + # loop. In this situation, the only event that can wake up the pool + # is the cache to be emptied (no more tasks available). + if not self: + self.notifier.put(None) + +class Pool(object): + ''' + Class which supports an async version of applying functions to arguments. + ''' + _wrap_exception = True + + @staticmethod + def Process(ctx, *args, **kwds): + return ctx.Process(*args, **kwds) + + def __init__(self, processes=None, initializer=None, initargs=(), + maxtasksperchild=None, context=None): + # Attributes initialized early to make sure that they exist in + # __del__() if __init__() raises an exception + self._pool = [] + self._state = INIT + + self._ctx = context or get_context() + self._setup_queues() + self._taskqueue = queue.SimpleQueue() + # The _change_notifier queue exist to wake up self._handle_workers() + # when the cache (self._cache) is empty or when there is a change in + # the _state variable of the thread that runs _handle_workers. + self._change_notifier = self._ctx.SimpleQueue() + self._cache = _PoolCache(notifier=self._change_notifier) + self._maxtasksperchild = maxtasksperchild + self._initializer = initializer + self._initargs = initargs + + if processes is None: + processes = os.cpu_count() or 1 + if processes < 1: + raise ValueError("Number of processes must be at least 1") + if maxtasksperchild is not None: + if not isinstance(maxtasksperchild, int) or maxtasksperchild <= 0: + raise ValueError("maxtasksperchild must be a positive int or None") + + if initializer is not None and not callable(initializer): + raise TypeError('initializer must be a callable') + + self._processes = processes + try: + self._repopulate_pool() + except Exception: + for p in self._pool: + if p.exitcode is None: + p.terminate() + for p in self._pool: + p.join() + raise + + sentinels = self._get_sentinels() + + self._worker_handler = threading.Thread( + target=Pool._handle_workers, + args=(self._cache, self._taskqueue, self._ctx, self.Process, + self._processes, self._pool, self._inqueue, self._outqueue, + self._initializer, self._initargs, self._maxtasksperchild, + self._wrap_exception, sentinels, self._change_notifier) + ) + self._worker_handler.daemon = True + self._worker_handler._state = RUN + self._worker_handler.start() + + + self._task_handler = threading.Thread( + target=Pool._handle_tasks, + args=(self._taskqueue, self._quick_put, self._outqueue, + self._pool, self._cache) + ) + self._task_handler.daemon = True + self._task_handler._state = RUN + self._task_handler.start() + + self._result_handler = threading.Thread( + target=Pool._handle_results, + args=(self._outqueue, self._quick_get, self._cache) + ) + self._result_handler.daemon = True + self._result_handler._state = RUN + self._result_handler.start() + + self._terminate = util.Finalize( + self, self._terminate_pool, + args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, + self._change_notifier, self._worker_handler, self._task_handler, + self._result_handler, self._cache), + exitpriority=15 + ) + self._state = RUN + + # Copy globals as function locals to make sure that they are available + # during Python shutdown when the Pool is destroyed. 
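+ # Passing _warn and RUN as default arguments binds them at definition time,
+ # so __del__ can still use them if module globals are gone at shutdown.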
+ def __del__(self, _warn=warnings.warn, RUN=RUN): + if self._state == RUN: + _warn(f"unclosed running multiprocessing pool {self!r}", + ResourceWarning, source=self) + if getattr(self, '_change_notifier', None) is not None: + self._change_notifier.put(None) + + def __repr__(self): + cls = self.__class__ + return (f'<{cls.__module__}.{cls.__qualname__} ' + f'state={self._state} ' + f'pool_size={len(self._pool)}>') + + def _get_sentinels(self): + task_queue_sentinels = [self._outqueue._reader] + self_notifier_sentinels = [self._change_notifier._reader] + return [*task_queue_sentinels, *self_notifier_sentinels] + + @staticmethod + def _get_worker_sentinels(workers): + return [worker.sentinel for worker in + workers if hasattr(worker, "sentinel")] + + @staticmethod + def _join_exited_workers(pool): + """Cleanup after any worker processes which have exited due to reaching + their specified lifetime. Returns True if any workers were cleaned up. + """ + cleaned = False + for i in reversed(range(len(pool))): + worker = pool[i] + if worker.exitcode is not None: + # worker exited + util.debug('cleaning up worker %d' % i) + worker.join() + cleaned = True + del pool[i] + return cleaned + + def _repopulate_pool(self): + return self._repopulate_pool_static(self._ctx, self.Process, + self._processes, + self._pool, self._inqueue, + self._outqueue, self._initializer, + self._initargs, + self._maxtasksperchild, + self._wrap_exception) + + @staticmethod + def _repopulate_pool_static(ctx, Process, processes, pool, inqueue, + outqueue, initializer, initargs, + maxtasksperchild, wrap_exception): + """Bring the number of pool processes up to the specified number, + for use after reaping workers which have exited. + """ + for i in range(processes - len(pool)): + w = Process(ctx, target=worker, + args=(inqueue, outqueue, + initializer, + initargs, maxtasksperchild, + wrap_exception)) + w.name = w.name.replace('Process', 'PoolWorker') + w.daemon = True + w.start() + pool.append(w) + util.debug('added worker') + + @staticmethod + def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, + initializer, initargs, maxtasksperchild, + wrap_exception): + """Clean up any exited workers and start replacements for them. + """ + if Pool._join_exited_workers(pool): + Pool._repopulate_pool_static(ctx, Process, processes, pool, + inqueue, outqueue, initializer, + initargs, maxtasksperchild, + wrap_exception) + + def _setup_queues(self): + self._inqueue = self._ctx.SimpleQueue() + self._outqueue = self._ctx.SimpleQueue() + self._quick_put = self._inqueue._writer.send + self._quick_get = self._outqueue._reader.recv + + def _check_running(self): + if self._state != RUN: + raise ValueError("Pool not running") + + def apply(self, func, args=(), kwds={}): + ''' + Equivalent of `func(*args, **kwds)`. + Pool must be running. + ''' + return self.apply_async(func, args, kwds).get() + + def map(self, func, iterable, chunksize=None): + ''' + Apply `func` to each element in `iterable`, collecting the results + in a list that is returned. + ''' + return self._map_async(func, iterable, mapstar, chunksize).get() + + def starmap(self, func, iterable, chunksize=None): + ''' + Like `map()` method but the elements of the `iterable` are expected to + be iterables as well and will be unpacked as arguments. Hence + `func` and (a, b) becomes func(a, b). 
+ ''' + return self._map_async(func, iterable, starmapstar, chunksize).get() + + def starmap_async(self, func, iterable, chunksize=None, callback=None, + error_callback=None): + ''' + Asynchronous version of `starmap()` method. + ''' + return self._map_async(func, iterable, starmapstar, chunksize, + callback, error_callback) + + def _guarded_task_generation(self, result_job, func, iterable): + '''Provides a generator of tasks for imap and imap_unordered with + appropriate handling for iterables which throw exceptions during + iteration.''' + try: + i = -1 + for i, x in enumerate(iterable): + yield (result_job, i, func, (x,), {}) + except Exception as e: + yield (result_job, i+1, _helper_reraises_exception, (e,), {}) + + def imap(self, func, iterable, chunksize=1): + ''' + Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. + ''' + self._check_running() + if chunksize == 1: + result = IMapIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, func, iterable), + result._set_length + )) + return result + else: + if chunksize < 1: + raise ValueError( + "Chunksize must be 1+, not {0:n}".format( + chunksize)) + task_batches = Pool._get_tasks(func, iterable, chunksize) + result = IMapIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, + mapstar, + task_batches), + result._set_length + )) + return (item for chunk in result for item in chunk) + + def imap_unordered(self, func, iterable, chunksize=1): + ''' + Like `imap()` method but ordering of results is arbitrary. + ''' + self._check_running() + if chunksize == 1: + result = IMapUnorderedIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, func, iterable), + result._set_length + )) + return result + else: + if chunksize < 1: + raise ValueError( + "Chunksize must be 1+, not {0!r}".format(chunksize)) + task_batches = Pool._get_tasks(func, iterable, chunksize) + result = IMapUnorderedIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, + mapstar, + task_batches), + result._set_length + )) + return (item for chunk in result for item in chunk) + + def apply_async(self, func, args=(), kwds={}, callback=None, + error_callback=None): + ''' + Asynchronous version of `apply()` method. + ''' + self._check_running() + result = ApplyResult(self, callback, error_callback) + self._taskqueue.put(([(result._job, 0, func, args, kwds)], None)) + return result + + def map_async(self, func, iterable, chunksize=None, callback=None, + error_callback=None): + ''' + Asynchronous version of `map()` method. + ''' + return self._map_async(func, iterable, mapstar, chunksize, callback, + error_callback) + + def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, + error_callback=None): + ''' + Helper function to implement map, starmap and their async counterparts. 
+ ''' + self._check_running() + if not hasattr(iterable, '__len__'): + iterable = list(iterable) + + if chunksize is None: + chunksize, extra = divmod(len(iterable), len(self._pool) * 4) + if extra: + chunksize += 1 + if len(iterable) == 0: + chunksize = 0 + + task_batches = Pool._get_tasks(func, iterable, chunksize) + result = MapResult(self, chunksize, len(iterable), callback, + error_callback=error_callback) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, + mapper, + task_batches), + None + ) + ) + return result + + @staticmethod + def _wait_for_updates(sentinels, change_notifier, timeout=None): + wait(sentinels, timeout=timeout) + while not change_notifier.empty(): + change_notifier.get() + + @classmethod + def _handle_workers(cls, cache, taskqueue, ctx, Process, processes, + pool, inqueue, outqueue, initializer, initargs, + maxtasksperchild, wrap_exception, sentinels, + change_notifier): + thread = threading.current_thread() + + # Keep maintaining workers until the cache gets drained, unless the pool + # is terminated. + while thread._state == RUN or (cache and thread._state != TERMINATE): + cls._maintain_pool(ctx, Process, processes, pool, inqueue, + outqueue, initializer, initargs, + maxtasksperchild, wrap_exception) + + current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels] + + cls._wait_for_updates(current_sentinels, change_notifier) + # send sentinel to stop workers + taskqueue.put(None) + util.debug('worker handler exiting') + + @staticmethod + def _handle_tasks(taskqueue, put, outqueue, pool, cache): + thread = threading.current_thread() + + for taskseq, set_length in iter(taskqueue.get, None): + task = None + try: + # iterating taskseq cannot fail + for task in taskseq: + if thread._state != RUN: + util.debug('task handler found thread._state != RUN') + break + try: + put(task) + except Exception as e: + job, idx = task[:2] + try: + cache[job]._set(idx, (False, e)) + except KeyError: + pass + else: + if set_length: + util.debug('doing set_length()') + idx = task[1] if task else -1 + set_length(idx + 1) + continue + break + finally: + task = taskseq = job = None + else: + util.debug('task handler got sentinel') + + try: + # tell result handler to finish when cache is empty + util.debug('task handler sending sentinel to result handler') + outqueue.put(None) + + # tell workers there is no more work + util.debug('task handler sending sentinel to workers') + for p in pool: + put(None) + except OSError: + util.debug('task handler got OSError when sending sentinels') + + util.debug('task handler exiting') + + @staticmethod + def _handle_results(outqueue, get, cache): + thread = threading.current_thread() + + while 1: + try: + task = get() + except (OSError, EOFError): + util.debug('result handler got EOFError/OSError -- exiting') + return + + if thread._state != RUN: + assert thread._state == TERMINATE, "Thread not in TERMINATE" + util.debug('result handler found thread._state=TERMINATE') + break + + if task is None: + util.debug('result handler got sentinel') + break + + job, i, obj = task + try: + cache[job]._set(i, obj) + except KeyError: + pass + task = job = obj = None + + while cache and thread._state != TERMINATE: + try: + task = get() + except (OSError, EOFError): + util.debug('result handler got EOFError/OSError -- exiting') + return + + if task is None: + util.debug('result handler ignoring extra sentinel') + continue + job, i, obj = task + try: + cache[job]._set(i, obj) + except KeyError: + pass + task = job = obj = None + + if 
hasattr(outqueue, '_reader'): + util.debug('ensuring that outqueue is not full') + # If we don't make room available in outqueue then + # attempts to add the sentinel (None) to outqueue may + # block. There is guaranteed to be no more than 2 sentinels. + try: + for i in range(10): + if not outqueue._reader.poll(): + break + get() + except (OSError, EOFError): + pass + + util.debug('result handler exiting: len(cache)=%s, thread._state=%s', + len(cache), thread._state) + + @staticmethod + def _get_tasks(func, it, size): + it = iter(it) + while 1: + x = tuple(itertools.islice(it, size)) + if not x: + return + yield (func, x) + + def __reduce__(self): + raise NotImplementedError( + 'pool objects cannot be passed between processes or pickled' + ) + + def close(self): + util.debug('closing pool') + if self._state == RUN: + self._state = CLOSE + self._worker_handler._state = CLOSE + self._change_notifier.put(None) + + def terminate(self): + util.debug('terminating pool') + self._state = TERMINATE + self._terminate() + + def join(self): + util.debug('joining pool') + if self._state == RUN: + raise ValueError("Pool is still running") + elif self._state not in (CLOSE, TERMINATE): + raise ValueError("In unknown state") + self._worker_handler.join() + self._task_handler.join() + self._result_handler.join() + for p in self._pool: + p.join() + + @staticmethod + def _help_stuff_finish(inqueue, task_handler, size): + # task_handler may be blocked trying to put items on inqueue + util.debug('removing tasks from inqueue until task handler finished') + inqueue._rlock.acquire() + while task_handler.is_alive() and inqueue._reader.poll(): + inqueue._reader.recv() + time.sleep(0) + + @classmethod + def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier, + worker_handler, task_handler, result_handler, cache): + # this is guaranteed to only be called once + util.debug('finalizing pool') + + # Notify that the worker_handler state has been changed so the + # _handle_workers loop can be unblocked (and exited) in order to + # send the finalization sentinel all the workers. + worker_handler._state = TERMINATE + change_notifier.put(None) + + task_handler._state = TERMINATE + + util.debug('helping task handler/workers to finish') + cls._help_stuff_finish(inqueue, task_handler, len(pool)) + + if (not result_handler.is_alive()) and (len(cache) != 0): + raise AssertionError( + "Cannot have cache with result_hander not alive") + + result_handler._state = TERMINATE + change_notifier.put(None) + outqueue.put(None) # sentinel + + # We must wait for the worker handler to exit before terminating + # workers because we don't want workers to be restarted behind our back. + util.debug('joining worker handler') + if threading.current_thread() is not worker_handler: + worker_handler.join() + + # Terminate workers which haven't already finished. 
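+ # ThreadPool workers are plain threads with no terminate() method, hence
+ # the hasattr() check before attempting termination.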
+ if pool and hasattr(pool[0], 'terminate'): + util.debug('terminating workers') + for p in pool: + if p.exitcode is None: + p.terminate() + + util.debug('joining task handler') + if threading.current_thread() is not task_handler: + task_handler.join() + + util.debug('joining result handler') + if threading.current_thread() is not result_handler: + result_handler.join() + + if pool and hasattr(pool[0], 'terminate'): + util.debug('joining pool workers') + for p in pool: + if p.is_alive(): + # worker has not yet exited + util.debug('cleaning up worker %d' % p.pid) + p.join() + + def __enter__(self): + self._check_running() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.terminate() + +# +# Class whose instances are returned by `Pool.apply_async()` +# + +class ApplyResult(object): + + def __init__(self, pool, callback, error_callback): + self._pool = pool + self._event = threading.Event() + self._job = next(job_counter) + self._cache = pool._cache + self._callback = callback + self._error_callback = error_callback + self._cache[self._job] = self + + def ready(self): + return self._event.is_set() + + def successful(self): + if not self.ready(): + raise ValueError("{0!r} not ready".format(self)) + return self._success + + def wait(self, timeout=None): + self._event.wait(timeout) + + def get(self, timeout=None): + self.wait(timeout) + if not self.ready(): + raise TimeoutError + if self._success: + return self._value + else: + raise self._value + + def _set(self, i, obj): + self._success, self._value = obj + if self._callback and self._success: + self._callback(self._value) + if self._error_callback and not self._success: + self._error_callback(self._value) + self._event.set() + del self._cache[self._job] + self._pool = None + + __class_getitem__ = classmethod(types.GenericAlias) + +AsyncResult = ApplyResult # create alias -- see #17805 + +# +# Class whose instances are returned by `Pool.map_async()` +# + +class MapResult(ApplyResult): + + def __init__(self, pool, chunksize, length, callback, error_callback): + ApplyResult.__init__(self, pool, callback, + error_callback=error_callback) + self._success = True + self._value = [None] * length + self._chunksize = chunksize + if chunksize <= 0: + self._number_left = 0 + self._event.set() + del self._cache[self._job] + else: + self._number_left = length//chunksize + bool(length % chunksize) + + def _set(self, i, success_result): + self._number_left -= 1 + success, result = success_result + if success and self._success: + self._value[i*self._chunksize:(i+1)*self._chunksize] = result + if self._number_left == 0: + if self._callback: + self._callback(self._value) + del self._cache[self._job] + self._event.set() + self._pool = None + else: + if not success and self._success: + # only store first exception + self._success = False + self._value = result + if self._number_left == 0: + # only consider the result ready once all jobs are done + if self._error_callback: + self._error_callback(self._value) + del self._cache[self._job] + self._event.set() + self._pool = None + +# +# Class whose instances are returned by `Pool.imap()` +# + +class IMapIterator(object): + + def __init__(self, pool): + self._pool = pool + self._cond = threading.Condition(threading.Lock()) + self._job = next(job_counter) + self._cache = pool._cache + self._items = collections.deque() + self._index = 0 + self._length = None + self._unsorted = {} + self._cache[self._job] = self + + def __iter__(self): + return self + + def next(self, timeout=None): + with 
self._cond: + try: + item = self._items.popleft() + except IndexError: + if self._index == self._length: + self._pool = None + raise StopIteration from None + self._cond.wait(timeout) + try: + item = self._items.popleft() + except IndexError: + if self._index == self._length: + self._pool = None + raise StopIteration from None + raise TimeoutError from None + + success, value = item + if success: + return value + raise value + + __next__ = next # XXX + + def _set(self, i, obj): + with self._cond: + if self._index == i: + self._items.append(obj) + self._index += 1 + while self._index in self._unsorted: + obj = self._unsorted.pop(self._index) + self._items.append(obj) + self._index += 1 + self._cond.notify() + else: + self._unsorted[i] = obj + + if self._index == self._length: + del self._cache[self._job] + self._pool = None + + def _set_length(self, length): + with self._cond: + self._length = length + if self._index == self._length: + self._cond.notify() + del self._cache[self._job] + self._pool = None + +# +# Class whose instances are returned by `Pool.imap_unordered()` +# + +class IMapUnorderedIterator(IMapIterator): + + def _set(self, i, obj): + with self._cond: + self._items.append(obj) + self._index += 1 + self._cond.notify() + if self._index == self._length: + del self._cache[self._job] + self._pool = None + +# +# +# + +class ThreadPool(Pool): + _wrap_exception = False + + @staticmethod + def Process(ctx, *args, **kwds): + from .dummy import Process + return Process(*args, **kwds) + + def __init__(self, processes=None, initializer=None, initargs=()): + Pool.__init__(self, processes, initializer, initargs) + + def _setup_queues(self): + self._inqueue = queue.SimpleQueue() + self._outqueue = queue.SimpleQueue() + self._quick_put = self._inqueue.put + self._quick_get = self._outqueue.get + + def _get_sentinels(self): + return [self._change_notifier._reader] + + @staticmethod + def _get_worker_sentinels(workers): + return [] + + @staticmethod + def _help_stuff_finish(inqueue, task_handler, size): + # drain inqueue, and put sentinels at its head to make workers finish + try: + while True: + inqueue.get(block=False) + except queue.Empty: + pass + for i in range(size): + inqueue.put(None) + + def _wait_for_updates(self, sentinels, change_notifier, timeout): + time.sleep(timeout) diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/popen_fork.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/popen_fork.py new file mode 100644 index 0000000000000000000000000000000000000000..fa7c52d589fea22aad4b2ff4ba969db442a9ec1c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/popen_fork.py @@ -0,0 +1,83 @@ +import os +import signal + +from . import util + +__all__ = ['Popen'] + +# +# Start child process using fork +# + +class Popen(object): + method = 'fork' + + def __init__(self, process_obj): + util._flush_std_streams() + self.returncode = None + self.finalizer = None + self._launch(process_obj) + + def duplicate_for_child(self, fd): + return fd + + def poll(self, flag=os.WNOHANG): + if self.returncode is None: + try: + pid, sts = os.waitpid(self.pid, flag) + except OSError: + # Child process not yet created. 
See #1731717 + # e.errno == errno.ECHILD == 10 + return None + if pid == self.pid: + self.returncode = os.waitstatus_to_exitcode(sts) + return self.returncode + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is not None: + from multiprocess.connection import wait + if not wait([self.sentinel], timeout): + return None + # This shouldn't block if wait() returned successfully. + return self.poll(os.WNOHANG if timeout == 0.0 else 0) + return self.returncode + + def _send_signal(self, sig): + if self.returncode is None: + try: + os.kill(self.pid, sig) + except ProcessLookupError: + pass + except OSError: + if self.wait(timeout=0.1) is None: + raise + + def terminate(self): + self._send_signal(signal.SIGTERM) + + def kill(self): + self._send_signal(signal.SIGKILL) + + def _launch(self, process_obj): + code = 1 + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + self.pid = os.fork() + if self.pid == 0: + try: + os.close(parent_r) + os.close(parent_w) + code = process_obj._bootstrap(parent_sentinel=child_r) + finally: + os._exit(code) + else: + os.close(child_w) + os.close(child_r) + self.finalizer = util.Finalize(self, util.close_fds, + (parent_r, parent_w,)) + self.sentinel = parent_r + + def close(self): + if self.finalizer is not None: + self.finalizer() diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/popen_forkserver.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/popen_forkserver.py new file mode 100644 index 0000000000000000000000000000000000000000..2cea1fc7a4602172b87df391fdc831c9d40ac49a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/popen_forkserver.py @@ -0,0 +1,74 @@ +import io +import os + +from .context import reduction, set_spawning_popen +if not reduction.HAVE_SEND_HANDLE: + raise ImportError('No support for sending fds between processes') +from . import forkserver +from . import popen_fork +from . import spawn +from . import util + + +__all__ = ['Popen'] + +# +# Wrapper for an fd used while launching a process +# + +class _DupFd(object): + def __init__(self, ind): + self.ind = ind + def detach(self): + return forkserver.get_inherited_fds()[self.ind] + +# +# Start child process using a server process +# + +class Popen(popen_fork.Popen): + method = 'forkserver' + DupFd = _DupFd + + def __init__(self, process_obj): + self._fds = [] + super().__init__(process_obj) + + def duplicate_for_child(self, fd): + self._fds.append(fd) + return len(self._fds) - 1 + + def _launch(self, process_obj): + prep_data = spawn.get_preparation_data(process_obj._name) + buf = io.BytesIO() + set_spawning_popen(self) + try: + reduction.dump(prep_data, buf) + reduction.dump(process_obj, buf) + finally: + set_spawning_popen(None) + + self.sentinel, w = forkserver.connect_to_new_process(self._fds) + # Keep a duplicate of the data pipe's write end as a sentinel of the + # parent process used by the child process. 
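+ # Both the duplicated write end and self.sentinel are closed by the
+ # Finalize callback registered just below.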
+ _parent_w = os.dup(w) + self.finalizer = util.Finalize(self, util.close_fds, + (_parent_w, self.sentinel)) + with open(w, 'wb', closefd=True) as f: + f.write(buf.getbuffer()) + self.pid = forkserver.read_signed(self.sentinel) + + def poll(self, flag=os.WNOHANG): + if self.returncode is None: + from multiprocess.connection import wait + timeout = 0 if flag == os.WNOHANG else None + if not wait([self.sentinel], timeout): + return None + try: + self.returncode = forkserver.read_signed(self.sentinel) + except (OSError, EOFError): + # This should not happen usually, but perhaps the forkserver + # process itself got killed + self.returncode = 255 + + return self.returncode diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/popen_spawn_win32.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/popen_spawn_win32.py new file mode 100644 index 0000000000000000000000000000000000000000..9c4098d0fa4f1e6e3ec94ecc8e596dd3857d741f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/popen_spawn_win32.py @@ -0,0 +1,131 @@ +import os +import msvcrt +import signal +import sys +import _winapi + +from .context import reduction, get_spawning_popen, set_spawning_popen +from . import spawn +from . import util + +__all__ = ['Popen'] + +# +# +# + +TERMINATE = 0x10000 +WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) +WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") + + +def _path_eq(p1, p2): + return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2) + +WINENV = not _path_eq(sys.executable, sys._base_executable) + + +def _close_handles(*handles): + for handle in handles: + _winapi.CloseHandle(handle) + + +# +# We define a Popen class similar to the one from subprocess, but +# whose constructor takes a process object as its argument. +# + +class Popen(object): + ''' + Start a subprocess to run the code of a process object + ''' + method = 'spawn' + + def __init__(self, process_obj): + prep_data = spawn.get_preparation_data(process_obj._name) + + # read end of pipe will be duplicated by the child process + # -- see spawn_main() in spawn.py. + # + # bpo-33929: Previously, the read end of pipe was "stolen" by the child + # process, but it leaked a handle if the child process had been + # terminated before it could steal the handle from the parent process. + rhandle, whandle = _winapi.CreatePipe(None, 0) + wfd = msvcrt.open_osfhandle(whandle, 0) + cmd = spawn.get_command_line(parent_pid=os.getpid(), + pipe_handle=rhandle) + cmd = ' '.join('"%s"' % x for x in cmd) + + python_exe = spawn.get_executable() + + # bpo-35797: When running in a venv, we bypass the redirect + # executor and launch our base Python. 
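+ # In that case the original venv interpreter path is handed to the child
+ # via the __PYVENV_LAUNCHER__ environment variable set below.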
+ if WINENV and _path_eq(python_exe, sys.executable): + python_exe = sys._base_executable + env = os.environ.copy() + env["__PYVENV_LAUNCHER__"] = sys.executable + else: + env = None + + with open(wfd, 'wb', closefd=True) as to_child: + # start process + try: + hp, ht, pid, tid = _winapi.CreateProcess( + python_exe, cmd, + None, None, False, 0, env, None, None) + _winapi.CloseHandle(ht) + except: + _winapi.CloseHandle(rhandle) + raise + + # set attributes of self + self.pid = pid + self.returncode = None + self._handle = hp + self.sentinel = int(hp) + self.finalizer = util.Finalize(self, _close_handles, + (self.sentinel, int(rhandle))) + + # send information to child + set_spawning_popen(self) + try: + reduction.dump(prep_data, to_child) + reduction.dump(process_obj, to_child) + finally: + set_spawning_popen(None) + + def duplicate_for_child(self, handle): + assert self is get_spawning_popen() + return reduction.duplicate(handle, self.sentinel) + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is None: + msecs = _winapi.INFINITE + else: + msecs = max(0, int(timeout * 1000 + 0.5)) + + res = _winapi.WaitForSingleObject(int(self._handle), msecs) + if res == _winapi.WAIT_OBJECT_0: + code = _winapi.GetExitCodeProcess(self._handle) + if code == TERMINATE: + code = -signal.SIGTERM + self.returncode = code + + return self.returncode + + def poll(self): + return self.wait(timeout=0) + + def terminate(self): + if self.returncode is None: + try: + _winapi.TerminateProcess(int(self._handle), TERMINATE) + except OSError: + if self.wait(timeout=1.0) is None: + raise + + kill = terminate + + def close(self): + self.finalizer() diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/resource_sharer.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/resource_sharer.py new file mode 100644 index 0000000000000000000000000000000000000000..66076509a1202e7a1b4d8a481f64621a4bfbbf3e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/resource_sharer.py @@ -0,0 +1,154 @@ +# +# We use a background thread for sharing fds on Unix, and for sharing sockets on +# Windows. +# +# A client which wants to pickle a resource registers it with the resource +# sharer and gets an identifier in return. The unpickling process will connect +# to the resource sharer, sends the identifier and its pid, and then receives +# the resource. +# + +import os +import signal +import socket +import sys +import threading + +from . import process +from .context import reduction +from . import util + +__all__ = ['stop'] + + +if sys.platform == 'win32': + __all__ += ['DupSocket'] + + class DupSocket(object): + '''Picklable wrapper for a socket.''' + def __init__(self, sock): + new_sock = sock.dup() + def send(conn, pid): + share = new_sock.share(pid) + conn.send_bytes(share) + self._id = _resource_sharer.register(send, new_sock.close) + + def detach(self): + '''Get the socket. This should only be called once.''' + with _resource_sharer.get_connection(self._id) as conn: + share = conn.recv_bytes() + return socket.fromshare(share) + +else: + __all__ += ['DupFd'] + + class DupFd(object): + '''Wrapper for fd which can be used at any time.''' + def __init__(self, fd): + new_fd = os.dup(fd) + def send(conn, pid): + reduction.send_handle(conn, new_fd, pid) + def close(): + os.close(new_fd) + self._id = _resource_sharer.register(send, close) + + def detach(self): + '''Get the fd. 
This should only be called once.''' + with _resource_sharer.get_connection(self._id) as conn: + return reduction.recv_handle(conn) + + +class _ResourceSharer(object): + '''Manager for resources using background thread.''' + def __init__(self): + self._key = 0 + self._cache = {} + self._lock = threading.Lock() + self._listener = None + self._address = None + self._thread = None + util.register_after_fork(self, _ResourceSharer._afterfork) + + def register(self, send, close): + '''Register resource, returning an identifier.''' + with self._lock: + if self._address is None: + self._start() + self._key += 1 + self._cache[self._key] = (send, close) + return (self._address, self._key) + + @staticmethod + def get_connection(ident): + '''Return connection from which to receive identified resource.''' + from .connection import Client + address, key = ident + c = Client(address, authkey=process.current_process().authkey) + c.send((key, os.getpid())) + return c + + def stop(self, timeout=None): + '''Stop the background thread and clear registered resources.''' + from .connection import Client + with self._lock: + if self._address is not None: + c = Client(self._address, + authkey=process.current_process().authkey) + c.send(None) + c.close() + self._thread.join(timeout) + if self._thread.is_alive(): + util.sub_warning('_ResourceSharer thread did ' + 'not stop when asked') + self._listener.close() + self._thread = None + self._address = None + self._listener = None + for key, (send, close) in self._cache.items(): + close() + self._cache.clear() + + def _afterfork(self): + for key, (send, close) in self._cache.items(): + close() + self._cache.clear() + self._lock._at_fork_reinit() + if self._listener is not None: + self._listener.close() + self._listener = None + self._address = None + self._thread = None + + def _start(self): + from .connection import Listener + assert self._listener is None, "Already have Listener" + util.debug('starting listener and thread for sending handles') + self._listener = Listener(authkey=process.current_process().authkey) + self._address = self._listener.address + t = threading.Thread(target=self._serve) + t.daemon = True + t.start() + self._thread = t + + def _serve(self): + if hasattr(signal, 'pthread_sigmask'): + signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) + while 1: + try: + with self._listener.accept() as conn: + msg = conn.recv() + if msg is None: + break + key, destination_pid = msg + send, close = self._cache.pop(key) + try: + send(conn, destination_pid) + finally: + close() + except: + if not util.is_exiting(): + sys.excepthook(*sys.exc_info()) + + +_resource_sharer = _ResourceSharer() +stop = _resource_sharer.stop diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bce8d2df435cbdd9cd0e44dc5ceced4f4cf1daf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__main__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8968e3aa05a00ddf5ac66436bafd84fbd9fafae Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__main__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_fork_bomb.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_fork_bomb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1522525532e5f7bceb64043c5d5b5e12f1abad22 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_fork_bomb.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..974131ab19c0fa7f059d6d9255067fc9e22ccc42 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cce98813ee8451b3cb8aae063200d1dd040f884a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_forkserver.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_forkserver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9744febcd6222812a7609cd898315a912753e31f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_forkserver.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab3701038c2312b3008e4c962761229c459202e7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..350160013ea5abd58b3b579c021908f71c31153b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_fork.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_fork.py new file mode 100644 index 0000000000000000000000000000000000000000..f331e381613626c6b406f782bb2e4afb850c658c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_fork.py @@ -0,0 +1,19 @@ +import unittest +from 
multiprocess.tests import install_tests_in_module_dict + +import sys +from test import support + +if support.PGO: + raise unittest.SkipTest("test is not helpful for PGO") + +if sys.platform == "win32": + raise unittest.SkipTest("fork is not available on Windows") + +if sys.platform == 'darwin': + raise unittest.SkipTest("test may crash on macOS (bpo-33725)") + +install_tests_in_module_dict(globals(), 'fork') + +if __name__ == '__main__': + unittest.main() diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/util.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/util.py new file mode 100644 index 0000000000000000000000000000000000000000..1f248d0957c10b69da4767b37bafa8bb92419d10 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/util.py @@ -0,0 +1,489 @@ +# +# Module providing various facilities to other parts of the package +# +# multiprocessing/util.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import os +import itertools +import sys +import weakref +import atexit +import threading # we want threading to install it's + # cleanup function before multiprocessing does +from subprocess import _args_from_interpreter_flags + +from . import process + +__all__ = [ + 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', + 'log_to_stderr', 'get_temp_dir', 'register_after_fork', + 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', + 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', + ] + +# +# Logging +# + +NOTSET = 0 +SUBDEBUG = 5 +DEBUG = 10 +INFO = 20 +SUBWARNING = 25 + +LOGGER_NAME = 'multiprocess' +DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' + +_logger = None +_log_to_stderr = False + +def sub_debug(msg, *args): + if _logger: + _logger.log(SUBDEBUG, msg, *args) + +def debug(msg, *args): + if _logger: + _logger.log(DEBUG, msg, *args) + +def info(msg, *args): + if _logger: + _logger.log(INFO, msg, *args) + +def sub_warning(msg, *args): + if _logger: + _logger.log(SUBWARNING, msg, *args) + +def get_logger(): + ''' + Returns logger used by multiprocess + ''' + global _logger + import logging + + logging._acquireLock() + try: + if not _logger: + + _logger = logging.getLogger(LOGGER_NAME) + _logger.propagate = 0 + + # XXX multiprocessing should cleanup before logging + if hasattr(atexit, 'unregister'): + atexit.unregister(_exit_function) + atexit.register(_exit_function) + else: + atexit._exithandlers.remove((_exit_function, (), {})) + atexit._exithandlers.append((_exit_function, (), {})) + + finally: + logging._releaseLock() + + return _logger + +def log_to_stderr(level=None): + ''' + Turn on logging and add a handler which prints to stderr + ''' + global _log_to_stderr + import logging + + logger = get_logger() + formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) + handler = logging.StreamHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + + if level: + logger.setLevel(level) + _log_to_stderr = True + return _logger + + +# Abstract socket support + +def _platform_supports_abstract_sockets(): + if sys.platform == "linux": + return True + if hasattr(sys, 'getandroidapilevel'): + return True + return False + + +def is_abstract_socket_namespace(address): + if not address: + return False + if isinstance(address, bytes): + return address[0] == 0 + elif isinstance(address, str): + return address[0] == "\0" + raise TypeError(f'address type of {address!r} unrecognized') + + +abstract_sockets_supported = 
_platform_supports_abstract_sockets() + +# +# Function returning a temp directory which will be removed on exit +# + +def _remove_temp_dir(rmtree, tempdir): + rmtree(tempdir) + + current_process = process.current_process() + # current_process() can be None if the finalizer is called + # late during Python finalization + if current_process is not None: + current_process._config['tempdir'] = None + +def get_temp_dir(): + # get name of a temp directory which will be automatically cleaned up + tempdir = process.current_process()._config.get('tempdir') + if tempdir is None: + import shutil, tempfile + tempdir = tempfile.mkdtemp(prefix='pymp-') + info('created temp directory %s', tempdir) + # keep a strong reference to shutil.rmtree(), since the finalizer + # can be called late during Python shutdown + Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), + exitpriority=-100) + process.current_process()._config['tempdir'] = tempdir + return tempdir + +# +# Support for reinitialization of objects when bootstrapping a child process +# + +_afterfork_registry = weakref.WeakValueDictionary() +_afterfork_counter = itertools.count() + +def _run_after_forkers(): + items = list(_afterfork_registry.items()) + items.sort() + for (index, ident, func), obj in items: + try: + func(obj) + except Exception as e: + info('after forker raised exception %s', e) + +def register_after_fork(obj, func): + _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj + +# +# Finalization using weakrefs +# + +_finalizer_registry = {} +_finalizer_counter = itertools.count() + + +class Finalize(object): + ''' + Class which supports object finalization using weakrefs + ''' + def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): + if (exitpriority is not None) and not isinstance(exitpriority,int): + raise TypeError( + "Exitpriority ({0!r}) must be None or int, not {1!s}".format( + exitpriority, type(exitpriority))) + + if obj is not None: + self._weakref = weakref.ref(obj, self) + elif exitpriority is None: + raise ValueError("Without object, exitpriority cannot be None") + + self._callback = callback + self._args = args + self._kwargs = kwargs or {} + self._key = (exitpriority, next(_finalizer_counter)) + self._pid = os.getpid() + + _finalizer_registry[self._key] = self + + def __call__(self, wr=None, + # Need to bind these locally because the globals can have + # been cleared at shutdown + _finalizer_registry=_finalizer_registry, + sub_debug=sub_debug, getpid=os.getpid): + ''' + Run the callback unless it has already been called or cancelled + ''' + try: + del _finalizer_registry[self._key] + except KeyError: + sub_debug('finalizer no longer registered') + else: + if self._pid != getpid(): + sub_debug('finalizer ignored because different process') + res = None + else: + sub_debug('finalizer calling %s with args %s and kwargs %s', + self._callback, self._args, self._kwargs) + res = self._callback(*self._args, **self._kwargs) + self._weakref = self._callback = self._args = \ + self._kwargs = self._key = None + return res + + def cancel(self): + ''' + Cancel finalization of the object + ''' + try: + del _finalizer_registry[self._key] + except KeyError: + pass + else: + self._weakref = self._callback = self._args = \ + self._kwargs = self._key = None + + def still_active(self): + ''' + Return whether this finalizer is still waiting to invoke callback + ''' + return self._key in _finalizer_registry + + def __repr__(self): + try: + obj = self._weakref() + except (AttributeError, TypeError): 
+ obj = None + + if obj is None: + return '<%s object, dead>' % self.__class__.__name__ + + x = '<%s object, callback=%s' % ( + self.__class__.__name__, + getattr(self._callback, '__name__', self._callback)) + if self._args: + x += ', args=' + str(self._args) + if self._kwargs: + x += ', kwargs=' + str(self._kwargs) + if self._key[0] is not None: + x += ', exitpriority=' + str(self._key[0]) + return x + '>' + + +def _run_finalizers(minpriority=None): + ''' + Run all finalizers whose exit priority is not None and at least minpriority + + Finalizers with highest priority are called first; finalizers with + the same priority will be called in reverse order of creation. + ''' + if _finalizer_registry is None: + # This function may be called after this module's globals are + # destroyed. See the _exit_function function in this module for more + # notes. + return + + if minpriority is None: + f = lambda p : p[0] is not None + else: + f = lambda p : p[0] is not None and p[0] >= minpriority + + # Careful: _finalizer_registry may be mutated while this function + # is running (either by a GC run or by another thread). + + # list(_finalizer_registry) should be atomic, while + # list(_finalizer_registry.items()) is not. + keys = [key for key in list(_finalizer_registry) if f(key)] + keys.sort(reverse=True) + + for key in keys: + finalizer = _finalizer_registry.get(key) + # key may have been removed from the registry + if finalizer is not None: + sub_debug('calling %s', finalizer) + try: + finalizer() + except Exception: + import traceback + traceback.print_exc() + + if minpriority is None: + _finalizer_registry.clear() + +# +# Clean up on exit +# + +def is_exiting(): + ''' + Returns true if the process is shutting down + ''' + return _exiting or _exiting is None + +_exiting = False + +def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, + active_children=process.active_children, + current_process=process.current_process): + # We hold on to references to functions in the arglist due to the + # situation described below, where this function is called after this + # module's globals are destroyed. + + global _exiting + + if not _exiting: + _exiting = True + + info('process shutting down') + debug('running all "atexit" finalizers with priority >= 0') + _run_finalizers(0) + + if current_process() is not None: + # We check if the current process is None here because if + # it's None, any call to ``active_children()`` will raise + # an AttributeError (active_children winds up trying to + # get attributes from util._current_process). One + # situation where this can happen is if someone has + # manipulated sys.modules, causing this module to be + # garbage collected. The destructor for the module type + # then replaces all values in the module dict with None. + # For instance, after setuptools runs a test it replaces + # sys.modules with a copy created earlier. See issues + # #9775 and #15881. Also related: #4106, #9205, and + # #9207. 
+ + for p in active_children(): + if p.daemon: + info('calling terminate() for daemon %s', p.name) + p._popen.terminate() + + for p in active_children(): + info('calling join() for process %s', p.name) + p.join() + + debug('running the remaining "atexit" finalizers') + _run_finalizers() + +atexit.register(_exit_function) + +# +# Some fork aware types +# + +class ForkAwareThreadLock(object): + def __init__(self): + self._lock = threading.Lock() + self.acquire = self._lock.acquire + self.release = self._lock.release + register_after_fork(self, ForkAwareThreadLock._at_fork_reinit) + + def _at_fork_reinit(self): + self._lock._at_fork_reinit() + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + +class ForkAwareLocal(threading.local): + def __init__(self): + register_after_fork(self, lambda obj : obj.__dict__.clear()) + def __reduce__(self): + return type(self), () + +# +# Close fds except those specified +# + +try: + MAXFD = os.sysconf("SC_OPEN_MAX") +except Exception: + MAXFD = 256 + +def close_all_fds_except(fds): + fds = list(fds) + [-1, MAXFD] + fds.sort() + assert fds[-1] == MAXFD, 'fd too large' + for i in range(len(fds) - 1): + os.closerange(fds[i]+1, fds[i+1]) +# +# Close sys.stdin and replace stdin with os.devnull +# + +def _close_stdin(): + if sys.stdin is None: + return + + try: + sys.stdin.close() + except (OSError, ValueError): + pass + + try: + fd = os.open(os.devnull, os.O_RDONLY) + try: + sys.stdin = open(fd, encoding="utf-8", closefd=False) + except: + os.close(fd) + raise + except (OSError, ValueError): + pass + +# +# Flush standard streams, if any +# + +def _flush_std_streams(): + try: + sys.stdout.flush() + except (AttributeError, ValueError): + pass + try: + sys.stderr.flush() + except (AttributeError, ValueError): + pass + +# +# Start a program with only specified fds kept open +# + +def spawnv_passfds(path, args, passfds): + import _posixsubprocess + passfds = tuple(sorted(map(int, passfds))) + errpipe_read, errpipe_write = os.pipe() + try: + return _posixsubprocess.fork_exec( + args, [os.fsencode(path)], True, passfds, None, None, + -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, + False, False, None, None, None, -1, None) + finally: + os.close(errpipe_read) + os.close(errpipe_write) + + +def close_fds(*fds): + """Close each file descriptor given as an argument""" + for fd in fds: + os.close(fd) + + +def _cleanup_tests(): + """Cleanup multiprocessing resources when multiprocessing tests + completed.""" + + from test import support + + # cleanup multiprocessing + process._cleanup() + + # Stop the ForkServer process if it's running + from multiprocess import forkserver + forkserver._forkserver._stop() + + # Stop the ResourceTracker process if it's running + from multiprocess import resource_tracker + resource_tracker._resource_tracker._stop() + + # bpo-37421: Explicitly call _run_finalizers() to remove immediately + # temporary directories created by multiprocessing.util.get_temp_dir(). 
+ _run_finalizers() + support.gc_collect() + + support.reap_children() diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt b/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt new file mode 100644 index 0000000000000000000000000000000000000000..b491c70e0aef319022ded661e111ddbd45b8a17f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt @@ -0,0 +1,1568 @@ +End User License Agreement +-------------------------- + + +Preface +------- + +The Software License Agreement in Chapter 1 and the Supplement +in Chapter 2 contain license terms and conditions that govern +the use of NVIDIA software. By accepting this agreement, you +agree to comply with all the terms and conditions applicable +to the product(s) included herein. + + +NVIDIA Driver + + +Description + +This package contains the operating system driver and +fundamental system software components for NVIDIA GPUs. + + +NVIDIA CUDA Toolkit + + +Description + +The NVIDIA CUDA Toolkit provides command-line and graphical +tools for building, debugging and optimizing the performance +of applications accelerated by NVIDIA GPUs, runtime and math +libraries, and documentation including programming guides, +user manuals, and API references. + + +Default Install Location of CUDA Toolkit + +Windows platform: + +%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.# + +Linux platform: + +/usr/local/cuda-#.# + +Mac platform: + +/Developer/NVIDIA/CUDA-#.# + + +NVIDIA CUDA Samples + + +Description + +This package includes over 100+ CUDA examples that demonstrate +various CUDA programming principles, and efficient CUDA +implementation of algorithms in specific application domains. + + +Default Install Location of CUDA Samples + +Windows platform: + +%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.# + +Linux platform: + +/usr/local/cuda-#.#/samples + +and + +$HOME/NVIDIA_CUDA-#.#_Samples + +Mac platform: + +/Developer/NVIDIA/CUDA-#.#/samples + + +NVIDIA Nsight Visual Studio Edition (Windows only) + + +Description + +NVIDIA Nsight Development Platform, Visual Studio Edition is a +development environment integrated into Microsoft Visual +Studio that provides tools for debugging, profiling, analyzing +and optimizing your GPU computing and graphics applications. + + +Default Install Location of Nsight Visual Studio Edition + +Windows platform: + +%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.# + + +1. License Agreement for NVIDIA Software Development Kits +--------------------------------------------------------- + + +Release Date: July 26, 2018 +--------------------------- + + +Important NoticeRead before downloading, installing, +copying or using the licensed software: +------------------------------------------------------- + +This license agreement, including exhibits attached +("Agreement”) is a legal agreement between you and NVIDIA +Corporation ("NVIDIA") and governs your use of a NVIDIA +software development kit (“SDK”). 
+ +Each SDK has its own set of software and materials, but here +is a description of the types of items that may be included in +a SDK: source code, header files, APIs, data sets and assets +(examples include images, textures, models, scenes, videos, +native API input/output files), binary software, sample code, +libraries, utility programs, programming code and +documentation. + +This Agreement can be accepted only by an adult of legal age +of majority in the country in which the SDK is used. + +If you are entering into this Agreement on behalf of a company +or other legal entity, you represent that you have the legal +authority to bind the entity to this Agreement, in which case +“you” will mean the entity you represent. + +If you don’t have the required age or authority to accept +this Agreement, or if you don’t accept all the terms and +conditions of this Agreement, do not download, install or use +the SDK. + +You agree to use the SDK only for purposes that are permitted +by (a) this Agreement, and (b) any applicable law, regulation +or generally accepted practices or guidelines in the relevant +jurisdictions. + + +1.1. License + + +1.1.1. License Grant + +Subject to the terms of this Agreement, NVIDIA hereby grants +you a non-exclusive, non-transferable license, without the +right to sublicense (except as expressly provided in this +Agreement) to: + + 1. Install and use the SDK, + + 2. Modify and create derivative works of sample source code + delivered in the SDK, and + + 3. Distribute those portions of the SDK that are identified + in this Agreement as distributable, as incorporated in + object code format into a software application that meets + the distribution requirements indicated in this Agreement. + + +1.1.2. Distribution Requirements + +These are the distribution requirements for you to exercise +the distribution grant: + + 1. Your application must have material additional + functionality, beyond the included portions of the SDK. + + 2. The distributable portions of the SDK shall only be + accessed by your application. + + 3. The following notice shall be included in modifications + and derivative works of sample source code distributed: + “This software contains source code provided by NVIDIA + Corporation.” + + 4. Unless a developer tool is identified in this Agreement + as distributable, it is delivered for your internal use + only. + + 5. The terms under which you distribute your application + must be consistent with the terms of this Agreement, + including (without limitation) terms relating to the + license grant and license restrictions and protection of + NVIDIA’s intellectual property rights. Additionally, you + agree that you will protect the privacy, security and + legal rights of your application users. + + 6. You agree to notify NVIDIA in writing of any known or + suspected distribution or use of the SDK not in compliance + with the requirements of this Agreement, and to enforce + the terms of your agreements with respect to distributed + SDK. + + +1.1.3. Authorized Users + +You may allow employees and contractors of your entity or of +your subsidiary(ies) to access and use the SDK from your +secure network to perform work on your behalf. + +If you are an academic institution you may allow users +enrolled or employed by the academic institution to access and +use the SDK from your secure network. + +You are responsible for the compliance with the terms of this +Agreement by your authorized users. 
If you become aware that +your authorized users didn’t follow the terms of this +Agreement, you agree to take reasonable steps to resolve the +non-compliance and prevent new occurrences. + + +1.1.4. Pre-Release SDK + +The SDK versions identified as alpha, beta, preview or +otherwise as pre-release, may not be fully functional, may +contain errors or design flaws, and may have reduced or +different security, privacy, accessibility, availability, and +reliability standards relative to commercial versions of +NVIDIA software and materials. Use of a pre-release SDK may +result in unexpected results, loss of data, project delays or +other unpredictable damage or loss. + +You may use a pre-release SDK at your own risk, understanding +that pre-release SDKs are not intended for use in production +or business-critical systems. + +NVIDIA may choose not to make available a commercial version +of any pre-release SDK. NVIDIA may also choose to abandon +development and terminate the availability of a pre-release +SDK at any time without liability. + + +1.1.5. Updates + +NVIDIA may, at its option, make available patches, workarounds +or other updates to this SDK. Unless the updates are provided +with their separate governing terms, they are deemed part of +the SDK licensed to you as provided in this Agreement. You +agree that the form and content of the SDK that NVIDIA +provides may change without prior notice to you. While NVIDIA +generally maintains compatibility between versions, NVIDIA may +in some cases make changes that introduce incompatibilities in +future versions of the SDK. + + +1.1.6. Third Party Licenses + +The SDK may come bundled with, or otherwise include or be +distributed with, third party software licensed by a NVIDIA +supplier and/or open source software provided under an open +source license. Use of third party software is subject to the +third-party license terms, or in the absence of third party +terms, the terms of this Agreement. Copyright to third party +software is held by the copyright holders indicated in the +third-party software or license. + + +1.1.7. Reservation of Rights + +NVIDIA reserves all rights, title, and interest in and to the +SDK, not expressly granted to you under this Agreement. + + +1.2. Limitations + +The following license limitations apply to your use of the +SDK: + + 1. You may not reverse engineer, decompile or disassemble, + or remove copyright or other proprietary notices from any + portion of the SDK or copies of the SDK. + + 2. Except as expressly provided in this Agreement, you may + not copy, sell, rent, sublicense, transfer, distribute, + modify, or create derivative works of any portion of the + SDK. For clarity, you may not distribute or sublicense the + SDK as a stand-alone product. + + 3. Unless you have an agreement with NVIDIA for this + purpose, you may not indicate that an application created + with the SDK is sponsored or endorsed by NVIDIA. + + 4. You may not bypass, disable, or circumvent any + encryption, security, digital rights management or + authentication mechanism in the SDK. + + 5. You may not use the SDK in any manner that would cause it + to become subject to an open source software license. As + examples, licenses that require as a condition of use, + modification, and/or distribution that the SDK be: + + a. Disclosed or distributed in source code form; + + b. Licensed for the purpose of making derivative works; + or + + c. Redistributable at no charge. + + 6. 
Unless you have an agreement with NVIDIA for this + purpose, you may not use the SDK with any system or + application where the use or failure of the system or + application can reasonably be expected to threaten or + result in personal injury, death, or catastrophic loss. + Examples include use in avionics, navigation, military, + medical, life support or other life critical applications. + NVIDIA does not design, test or manufacture the SDK for + these critical uses and NVIDIA shall not be liable to you + or any third party, in whole or in part, for any claims or + damages arising from such uses. + + 7. You agree to defend, indemnify and hold harmless NVIDIA + and its affiliates, and their respective employees, + contractors, agents, officers and directors, from and + against any and all claims, damages, obligations, losses, + liabilities, costs or debt, fines, restitutions and + expenses (including but not limited to attorney’s fees + and costs incident to establishing the right of + indemnification) arising out of or related to your use of + the SDK outside of the scope of this Agreement, or not in + compliance with its terms. + + +1.3. Ownership + + 1. NVIDIA or its licensors hold all rights, title and + interest in and to the SDK and its modifications and + derivative works, including their respective intellectual + property rights, subject to your rights described in this + section. This SDK may include software and materials from + NVIDIA’s licensors, and these licensors are intended + third party beneficiaries that may enforce this Agreement + with respect to their intellectual property rights. + + 2. You hold all rights, title and interest in and to your + applications and your derivative works of the sample + source code delivered in the SDK, including their + respective intellectual property rights, subject to + NVIDIA’s rights described in this section. + + 3. You may, but don’t have to, provide to NVIDIA + suggestions, feature requests or other feedback regarding + the SDK, including possible enhancements or modifications + to the SDK. For any feedback that you voluntarily provide, + you hereby grant NVIDIA and its affiliates a perpetual, + non-exclusive, worldwide, irrevocable license to use, + reproduce, modify, license, sublicense (through multiple + tiers of sublicensees), and distribute (through multiple + tiers of distributors) it without the payment of any + royalties or fees to you. NVIDIA will use feedback at its + choice. NVIDIA is constantly looking for ways to improve + its products, so you may send feedback to NVIDIA through + the developer portal at https://developer.nvidia.com. + + +1.4. No Warranties + +THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL +FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND +ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND +OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, +BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE +ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO +WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF +DEALING OR COURSE OF TRADE. + + +1.5. 
Limitation of Liability + +TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS +AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, +PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS +OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF +PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION +WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK, +WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH +OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), +PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF +LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES +TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS +AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE +NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS +LIMIT. + +These exclusions and limitations of liability shall apply +regardless if NVIDIA or its affiliates have been advised of +the possibility of such damages, and regardless of whether a +remedy fails its essential purpose. These exclusions and +limitations of liability form an essential basis of the +bargain between the parties, and, absent any of these +exclusions or limitations of liability, the provisions of this +Agreement, including, without limitation, the economic terms, +would be substantially different. + + +1.6. Termination + + 1. This Agreement will continue to apply until terminated by + either you or NVIDIA as described below. + + 2. If you want to terminate this Agreement, you may do so by + stopping to use the SDK. + + 3. NVIDIA may, at any time, terminate this Agreement if: + + a. (i) you fail to comply with any term of this + Agreement and the non-compliance is not fixed within + thirty (30) days following notice from NVIDIA (or + immediately if you violate NVIDIA’s intellectual + property rights); + + b. (ii) you commence or participate in any legal + proceeding against NVIDIA with respect to the SDK; or + + c. (iii) NVIDIA decides to no longer provide the SDK in + a country or, in NVIDIA’s sole discretion, the + continued use of it is no longer commercially viable. + + 4. Upon any termination of this Agreement, you agree to + promptly discontinue use of the SDK and destroy all copies + in your possession or control. Your prior distributions in + accordance with this Agreement are not affected by the + termination of this Agreement. Upon written request, you + will certify in writing that you have complied with your + commitments under this section. Upon any termination of + this Agreement all provisions survive except for the + license grant provisions. + + +1.7. General + +If you wish to assign this Agreement or your rights and +obligations, including by merger, consolidation, dissolution +or operation of law, contact NVIDIA to ask for permission. Any +attempted assignment not approved by NVIDIA in writing shall +be void and of no effect. NVIDIA may assign, delegate or +transfer this Agreement and its rights and obligations, and if +to a non-affiliate you will be notified. + +You agree to cooperate with NVIDIA and provide reasonably +requested information to verify your compliance with this +Agreement. + +This Agreement will be governed in all respects by the laws of +the United States and of the State of Delaware as those laws +are applied to contracts entered into and performed entirely +within Delaware by Delaware residents, without regard to the +conflicts of laws principles. 
The United Nations Convention on +Contracts for the International Sale of Goods is specifically +disclaimed. You agree to all terms of this Agreement in the +English language. + +The state or federal courts residing in Santa Clara County, +California shall have exclusive jurisdiction over any dispute +or claim arising out of this Agreement. Notwithstanding this, +you agree that NVIDIA shall still be allowed to apply for +injunctive remedies or an equivalent type of urgent legal +relief in any jurisdiction. + +If any court of competent jurisdiction determines that any +provision of this Agreement is illegal, invalid or +unenforceable, such provision will be construed as limited to +the extent necessary to be consistent with and fully +enforceable under the law and the remaining provisions will +remain in full force and effect. Unless otherwise specified, +remedies are cumulative. + +Each party acknowledges and agrees that the other is an +independent contractor in the performance of this Agreement. + +The SDK has been developed entirely at private expense and is +“commercial items” consisting of “commercial computer +software” and “commercial computer software +documentation” provided with RESTRICTED RIGHTS. Use, +duplication or disclosure by the U.S. Government or a U.S. +Government subcontractor is subject to the restrictions in +this Agreement pursuant to DFARS 227.7202-3(a) or as set forth +in subparagraphs (c)(1) and (2) of the Commercial Computer +Software - Restricted Rights clause at FAR 52.227-19, as +applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas +Expressway, Santa Clara, CA 95051. + +The SDK is subject to United States export laws and +regulations. You agree that you will not ship, transfer or +export the SDK into any country, or use the SDK in any manner, +prohibited by the United States Bureau of Industry and +Security or economic sanctions regulations administered by the +U.S. Department of Treasury’s Office of Foreign Assets +Control (OFAC), or any applicable export laws, restrictions or +regulations. These laws include restrictions on destinations, +end users and end use. By accepting this Agreement, you +confirm that you are not a resident or citizen of any country +currently embargoed by the U.S. and that you are not otherwise +prohibited from receiving the SDK. + +Any notice delivered by NVIDIA to you under this Agreement +will be delivered via mail, email or fax. You agree that any +notices that NVIDIA sends you electronically will satisfy any +legal communication requirements. Please direct your legal +notices or other correspondence to NVIDIA Corporation, 2788 +San Tomas Expressway, Santa Clara, California 95051, United +States of America, Attention: Legal Department. + +This Agreement and any exhibits incorporated into this +Agreement constitute the entire agreement of the parties with +respect to the subject matter of this Agreement and supersede +all prior negotiations or documentation exchanged between the +parties relating to this SDK license. Any additional and/or +conflicting terms on documents issued by you are null, void, +and invalid. Any amendment or waiver under this Agreement +shall be in writing and signed by representatives of both +parties. + + +2. 
CUDA Toolkit Supplement to Software License Agreement for +NVIDIA Software Development Kits +------------------------------------------------------------ + + +Release date: August 16, 2018 +----------------------------- + +The terms in this supplement govern your use of the NVIDIA +CUDA Toolkit SDK under the terms of your license agreement +(“Agreement”) as modified by this supplement. Capitalized +terms used but not defined below have the meaning assigned to +them in the Agreement. + +This supplement is an exhibit to the Agreement and is +incorporated as an integral part of the Agreement. In the +event of conflict between the terms in this supplement and the +terms in the Agreement, the terms in this supplement govern. + + +2.1. License Scope + +The SDK is licensed for you to develop applications only for +use in systems with NVIDIA GPUs. + + +2.2. Distribution + +The portions of the SDK that are distributable under the +Agreement are listed in Attachment A. + + +2.3. Operating Systems + +Those portions of the SDK designed exclusively for use on the +Linux or FreeBSD operating systems, or other operating systems +derived from the source code to these operating systems, may +be copied and redistributed for use in accordance with this +Agreement, provided that the object code files are not +modified in any way (except for unzipping of compressed +files). + + +2.4. Audio and Video Encoders and Decoders + +You acknowledge and agree that it is your sole responsibility +to obtain any additional third-party licenses required to +make, have made, use, have used, sell, import, and offer for +sale your products or services that include or incorporate any +third-party software and content relating to audio and/or +video encoders and decoders from, including but not limited +to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A., +MPEG-LA, and Coding Technologies. NVIDIA does not grant to you +under this Agreement any necessary patent or other rights with +respect to any audio and/or video encoders and decoders. + + +2.5. Licensing + +If the distribution terms in this Agreement are not suitable +for your organization, or for any questions regarding this +Agreement, please contact NVIDIA at +nvidia-compute-license-questions@nvidia.com. + + +2.6. 
Attachment A + +The following portions of the SDK are distributable under the +Agreement: + +Component + +CUDA Runtime + +Windows + +cudart.dll, cudart_static.lib, cudadevrt.lib + +Mac OSX + +libcudart.dylib, libcudart_static.a, libcudadevrt.a + +Linux + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Android + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Component + +CUDA FFT Library + +Windows + +cufft.dll, cufftw.dll, cufft.lib, cufftw.lib + +Mac OSX + +libcufft.dylib, libcufft_static.a, libcufftw.dylib, +libcufftw_static.a + +Linux + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Android + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Component + +CUDA BLAS Library + +Windows + +cublas.dll, cublasLt.dll + +Mac OSX + +libcublas.dylib, libcublasLt.dylib, libcublas_static.a, +libcublasLt_static.a + +Linux + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Android + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Component + +NVIDIA "Drop-in" BLAS Library + +Windows + +nvblas.dll + +Mac OSX + +libnvblas.dylib + +Linux + +libnvblas.so + +Component + +CUDA Sparse Matrix Library + +Windows + +cusparse.dll, cusparse.lib + +Mac OSX + +libcusparse.dylib, libcusparse_static.a + +Linux + +libcusparse.so, libcusparse_static.a + +Android + +libcusparse.so, libcusparse_static.a + +Component + +CUDA Linear Solver Library + +Windows + +cusolver.dll, cusolver.lib + +Mac OSX + +libcusolver.dylib, libcusolver_static.a + +Linux + +libcusolver.so, libcusolver_static.a + +Android + +libcusolver.so, libcusolver_static.a + +Component + +CUDA Random Number Generation Library + +Windows + +curand.dll, curand.lib + +Mac OSX + +libcurand.dylib, libcurand_static.a + +Linux + +libcurand.so, libcurand_static.a + +Android + +libcurand.so, libcurand_static.a + +Component + +CUDA Accelerated Graph Library + +Component + +NVIDIA Performance Primitives Library + +Windows + +nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll, +nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll, +nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib, +nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll, +nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib + +Mac OSX + +libnppc.dylib, libnppc_static.a, libnppial.dylib, +libnppial_static.a, libnppicc.dylib, libnppicc_static.a, +libnppicom.dylib, libnppicom_static.a, libnppidei.dylib, +libnppidei_static.a, libnppif.dylib, libnppif_static.a, +libnppig.dylib, libnppig_static.a, libnppim.dylib, +libnppisu_static.a, libnppitc.dylib, libnppitc_static.a, +libnpps.dylib, libnpps_static.a + +Linux + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Android + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Component + +NVIDIA JPEG Library + +Linux + +libnvjpeg.so, 
libnvjpeg_static.a + +Component + +Internal common library required for statically linking to +cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP + +Mac OSX + +libculibos.a + +Linux + +libculibos.a + +Component + +NVIDIA Runtime Compilation Library and Header + +All + +nvrtc.h + +Windows + +nvrtc.dll, nvrtc-builtins.dll + +Mac OSX + +libnvrtc.dylib, libnvrtc-builtins.dylib + +Linux + +libnvrtc.so, libnvrtc-builtins.so + +Component + +NVIDIA Optimizing Compiler Library + +Windows + +nvvm.dll + +Mac OSX + +libnvvm.dylib + +Linux + +libnvvm.so + +Component + +NVIDIA Common Device Math Functions Library + +Windows + +libdevice.10.bc + +Mac OSX + +libdevice.10.bc + +Linux + +libdevice.10.bc + +Component + +CUDA Occupancy Calculation Header Library + +All + +cuda_occupancy.h + +Component + +CUDA Half Precision Headers + +All + +cuda_fp16.h, cuda_fp16.hpp + +Component + +CUDA Profiling Tools Interface (CUPTI) Library + +Windows + +cupti.dll + +Mac OSX + +libcupti.dylib + +Linux + +libcupti.so + +Component + +NVIDIA Tools Extension Library + +Windows + +nvToolsExt.dll, nvToolsExt.lib + +Mac OSX + +libnvToolsExt.dylib + +Linux + +libnvToolsExt.so + +Component + +NVIDIA CUDA Driver Libraries + +Linux + +libcuda.so, libnvidia-fatbinaryloader.so, +libnvidia-ptxjitcompiler.so + +The NVIDIA CUDA Driver Libraries are only distributable in +applications that meet this criteria: + + 1. The application was developed starting from a NVIDIA CUDA + container obtained from Docker Hub or the NVIDIA GPU + Cloud, and + + 2. The resulting application is packaged as a Docker + container and distributed to users on Docker Hub or the + NVIDIA GPU Cloud only. + + +2.7. Attachment B + + +Additional Licensing Obligations + +The following third party components included in the SOFTWARE +are licensed to Licensee pursuant to the following terms and +conditions: + + 1. Licensee's use of the GDB third party component is + subject to the terms and conditions of GNU GPL v3: + + This product includes copyrighted third-party software licensed + under the terms of the GNU General Public License v3 ("GPL v3"). + All third-party software packages are copyright by their respective + authors. GPL v3 terms and conditions are hereby incorporated into + the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt + + Consistent with these licensing requirements, the software + listed below is provided under the terms of the specified + open source software licenses. To obtain source code for + software provided under licenses that require + redistribution of source code, including the GNU General + Public License (GPL) and GNU Lesser General Public License + (LGPL), contact oss-requests@nvidia.com. This offer is + valid for a period of three (3) years from the date of the + distribution of this product by NVIDIA CORPORATION. + + Component License + CUDA-GDB GPL v3 + + 2. Licensee represents and warrants that any and all third + party licensing and/or royalty payment obligations in + connection with Licensee's use of the H.264 video codecs + are solely the responsibility of Licensee. + + 3. Licensee's use of the Thrust library is subject to the + terms and conditions of the Apache License Version 2.0. + All third-party software packages are copyright by their + respective authors. Apache License Version 2.0 terms and + conditions are hereby incorporated into the Agreement by + this reference. 
+ http://www.apache.org/licenses/LICENSE-2.0.html + + In addition, Licensee acknowledges the following notice: + Thrust includes source code from the Boost Iterator, + Tuple, System, and Random Number libraries. + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 4. Licensee's use of the LLVM third party component is + subject to the following terms and conditions: + + ====================================================== + LLVM Release License + ====================================================== + University of Illinois/NCSA + Open Source License + + Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign. + All rights reserved. + + Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal with the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at Urbana- + Champaign, nor the names of its contributors may be used to endorse or + promote products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS WITH THE SOFTWARE. + + 5. Licensee's use (e.g. nvprof) of the PCRE third party + component is subject to the following terms and + conditions: + + ------------ + PCRE LICENCE + ------------ + PCRE is a library of functions to support regular expressions whose syntax + and semantics are as close as possible to those of the Perl 5 language. + Release 8 of PCRE is distributed under the terms of the "BSD" licence, as + specified below. The documentation for PCRE, supplied in the "doc" + directory, is distributed under the same terms as the software itself. The + basic library functions are written in C and are freestanding. Also + included in the distribution is a set of C++ wrapper functions, and a just- + in-time compiler that can be used to optimize pattern matching. These are + both optional features that can be omitted when the library is built. + + THE BASIC LIBRARY FUNCTIONS + --------------------------- + Written by: Philip Hazel + Email local part: ph10 + Email domain: cam.ac.uk + University of Cambridge Computing Service, + Cambridge, England. + Copyright (c) 1997-2012 University of Cambridge + All rights reserved. + + PCRE JUST-IN-TIME COMPILATION SUPPORT + ------------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2010-2012 Zoltan Herczeg + All rights reserved. + + STACK-LESS JUST-IN-TIME COMPILER + -------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2009-2012 Zoltan Herczeg + All rights reserved. + + THE C++ WRAPPER FUNCTIONS + ------------------------- + Contributed by: Google Inc. + Copyright (c) 2007-2012, Google Inc. + All rights reserved. + + THE "BSD" LICENCE + ----------------- + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of the University of Cambridge nor the name of Google + Inc. nor the names of their contributors may be used to endorse or + promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 6. 
Some of the cuBLAS library routines were written by or + derived from code written by Vasily Volkov and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2007-2009, Regents of the University of California + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the University of California, Berkeley nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 7. Some of the cuBLAS library routines were written by or + derived from code written by Davide Barbieri and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 8. 
Some of the cuBLAS library routines were derived from + code developed by the University of Tennessee and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2010 The University of Tennessee. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer listed in this license in the documentation and/or + other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 9. Some of the cuBLAS library routines were written by or + derived from code written by Jonathan Hogg and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2012, The Science and Technology Facilities Council (STFC). + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the STFC nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 10. 
Some of the cuBLAS library routines were written by or + derived from code written by Ahmad M. Abdelfattah, David + Keyes, and Hatem Ltaief, and are subject to the Apache + License, Version 2.0, as follows: + + -- (C) Copyright 2013 King Abdullah University of Science and Technology + Authors: + Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa) + David Keyes (david.keyes@kaust.edu.sa) + Hatem Ltaief (hatem.ltaief@kaust.edu.sa) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the King Abdullah University of Science and + Technology nor the names of its contributors may be used to endorse + or promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE + + 11. Some of the cuSPARSE library routines were written by or + derived from code written by Li-Wen Chang and are subject + to the NCSA Open Source License as follows: + + Copyright (c) 2012, University of Illinois. + + All rights reserved. + + Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal with the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimers in the documentation and/or other materials provided + with the distribution. + * Neither the names of IMPACT Group, University of Illinois, nor + the names of its contributors may be used to endorse or promote + products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. + + 12. Some of the cuRAND library routines were written by or + derived from code written by Mutsuo Saito and Makoto + Matsumoto and are subject to the following license: + + Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima + University. All rights reserved. + + Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima + University and University of Tokyo. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the Hiroshima University nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 13. Some of the cuRAND library routines were derived from + code developed by D. E. Shaw Research and are subject to + the following license: + + Copyright 2010-2011, D. E. Shaw Research. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions, and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of D. E. Shaw Research nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 14. Some of the Math library routines were written by or + derived from code developed by Norbert Juffa and are + subject to the following license: + + Copyright (c) 2015-2017, Norbert Juffa + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 15. Licensee's use of the lz4 third party component is + subject to the following terms and conditions: + + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 16. 
The NPP library uses code from the Boost Math Toolkit, + and is subject to the following license: + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 17. Portions of the Nsight Eclipse Edition are subject to the + following license: + + The Eclipse Foundation makes available all content in this plug-in + ("Content"). Unless otherwise indicated below, the Content is provided + to you under the terms and conditions of the Eclipse Public License + Version 1.0 ("EPL"). A copy of the EPL is available at http:// + www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program" + will mean the Content. + + If you did not receive this Content directly from the Eclipse + Foundation, the Content is being redistributed by another party + ("Redistributor") and different terms and conditions may apply to your + use of any object code in the Content. Check the Redistributor's + license that was provided with the Content. If no such license exists, + contact the Redistributor. Unless otherwise indicated below, the terms + and conditions of the EPL still apply to any source code in the + Content and such source code may be obtained at http://www.eclipse.org. + + 18. Some of the cuBLAS library routines use code from + OpenAI, which is subject to the following license: + + License URL + https://github.com/openai/openai-gemm/blob/master/LICENSE + + License Text + The MIT License + + Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software.
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + 19. Licensee's use of the Visual Studio Setup Configuration + Samples is subject to the following license: + + The MIT License (MIT) + Copyright (C) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of the Software, + and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + 20. Licensee's use of linmath.h header for CPU functions for + GL vector/matrix operations from lunarG is subject to the + Apache License Version 2.0. + + 21. The DX12-CUDA sample uses the d3dx12.h header, which is + subject to the MIT license . 
+ +----------------- diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..f860ce3222e8728a2fd007db3e1efd5b5884b737 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA @@ -0,0 +1,38 @@ +Metadata-Version: 2.1 +Name: nvidia-cusolver-cu12 +Version: 11.4.5.107 +Summary: CUDA solver native runtime libraries +Home-page: https://developer.nvidia.com/cuda-zone +Author: Nvidia CUDA Installer Team +Author-email: cuda_installer@nvidia.com +License: NVIDIA Proprietary Software +Keywords: cuda,nvidia,runtime,machine learning,deep learning +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: Other/Proprietary License +Classifier: Natural Language :: English +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: Libraries +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX :: Linux +Requires-Python: >=3 +License-File: License.txt +Requires-Dist: nvidia-cublas-cu12 +Requires-Dist: nvidia-nvjitlink-cu12 +Requires-Dist: nvidia-cusparse-cu12 + +CUDA solver native runtime libraries diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..e637b3d9bc07904b5e4eec34cec9742227e41559 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD @@ -0,0 +1,22 @@ +nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusolver/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusolver/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusolver/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusolver/include/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusolver/include/cusolverDn.h,sha256=8KUcqUxWPr8jpz3ZVpTB6I3IXMme1ok7E7vi9XXKRzk,147406 +nvidia/cusolver/include/cusolverMg.h,sha256=N8989nnS2BleeMyuftbQgBDJ4sMAkLPSnmy_S_7fxng,11549 +nvidia/cusolver/include/cusolverRf.h,sha256=7BZfWeuMJ8w1Pz4iZeGmwvDZbDNNq0ivG5MHtiATtls,14292 +nvidia/cusolver/include/cusolverSp.h,sha256=8fev0XawDBd0xrOxUlQ3WhclKlUuVAT64zKxwnP8iT0,32561 +nvidia/cusolver/include/cusolverSp_LOWLEVEL_PREVIEW.h,sha256=rTuS0rxwGV3bAz50ua59WVPQ9SvlijORj732oPejoCk,37495 
+nvidia/cusolver/include/cusolver_common.h,sha256=8SMCLEPkMN9Ni_KANkvPSHCieV1jrTARuS-Mhmuq5H8,8826 +nvidia/cusolver/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusolver/lib/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusolver/lib/libcusolver.so.11,sha256=ECh6vHzpxfx-fBY3YVZrWZ6uGzYsR-EACRHRmEQ9bVI,114481816 +nvidia/cusolver/lib/libcusolverMg.so.11,sha256=0f3uK8NQhMAFtQ5r76UCApP7coB7wWG2pQOMh1RMmwY,79763496 +nvidia_cusolver_cu12-11.4.5.107.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +nvidia_cusolver_cu12-11.4.5.107.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262 +nvidia_cusolver_cu12-11.4.5.107.dist-info/METADATA,sha256=b8Zxnx3ZVIwttTKBnzgVXjXu8-_pRL6wBkYMTV7i6gA,1626 +nvidia_cusolver_cu12-11.4.5.107.dist-info/RECORD,, +nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106 +nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7 diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..06e355fe0e3ed7077903f119ae6928a17da8eb6f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-manylinux1_x86_64 + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/top_level.txt @@ -0,0 +1 @@ +nvidia diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.emptyFile.jsn.gz b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.emptyFile.jsn.gz new file mode 100644 index 0000000000000000000000000000000000000000..6bfec839e4d42402efb1845ad1ee9d23be951177 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.emptyFile.jsn.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4b8c05ddfb7b1cc770c2c9e02c9b14cedddbf5723f4a46f628a4a7b9ad03622 +size 50 diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.test1.jsn.gz b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.test1.jsn.gz new file mode 100644 index 0000000000000000000000000000000000000000..14b961bd7fd75717fe055182f368148e2b327f39 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.test1.jsn.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90bc66c0c547b5fce91ea073b458df63f3d30a5a1a5e97c7abd0c30eccdbf169 +size 323 diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.testDate1900.jsn.gz b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.testDate1900.jsn.gz new file mode 100644 index 0000000000000000000000000000000000000000..d743adc079ea1c06583e20e8e1dba371af4d260a --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.testDate1900.jsn.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a167fb78147766d380f75393bdd790258a2cd5a9f9ea6b06b09c2118e6a7de5a +size 182453 diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.all-named-index.parquet b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.all-named-index.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9ac3cea6476f0799521059c755ca806751ded8ab --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.all-named-index.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60f1945edc3e4ec38f6e234389e647a1b369de8afb9c7840c491a39880c0caa1 +size 3948 diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..44d40c61f53b3c3d1ed9e8480b5be8b0db555b85 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/__init__.py @@ -0,0 +1,9214 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# When adding a new object to this init, remember to add it twice: once inside the `_import_structure` dictionary and +# once inside the `if TYPE_CHECKING` branch. The `TYPE_CHECKING` should have import statements as usual, but they are +# only there for type checking. The `_import_structure` is a dictionary submodule to list of object names, and is used +# to defer the actual importing for when the objects are requested. This way `import transformers` provides the names +# in the namespace without actually importing anything (and especially none of the backends). + +__version__ = "4.39.3" + +from typing import TYPE_CHECKING + +# Check the dependencies satisfy the minimal versions required. +from . 
import dependency_versions_check +from .utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_bitsandbytes_available, + is_essentia_available, + is_flax_available, + is_g2p_en_available, + is_keras_nlp_available, + is_librosa_available, + is_pretty_midi_available, + is_scipy_available, + is_sentencepiece_available, + is_speech_available, + is_tensorflow_text_available, + is_tf_available, + is_timm_available, + is_tokenizers_available, + is_torch_available, + is_torchaudio_available, + is_torchvision_available, + is_vision_available, + logging, +) + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Base objects, independent of any specific backend +_import_structure = { + "audio_utils": [], + "benchmark": [], + "commands": [], + "configuration_utils": ["PretrainedConfig"], + "convert_graph_to_onnx": [], + "convert_slow_tokenizers_checkpoints_to_fast": [], + "convert_tf_hub_seq_to_seq_bert_to_pytorch": [], + "data": [ + "DataProcessor", + "InputExample", + "InputFeatures", + "SingleSentenceClassificationProcessor", + "SquadExample", + "SquadFeatures", + "SquadV1Processor", + "SquadV2Processor", + "glue_compute_metrics", + "glue_convert_examples_to_features", + "glue_output_modes", + "glue_processors", + "glue_tasks_num_labels", + "squad_convert_examples_to_features", + "xnli_compute_metrics", + "xnli_output_modes", + "xnli_processors", + "xnli_tasks_num_labels", + ], + "data.data_collator": [ + "DataCollator", + "DataCollatorForLanguageModeling", + "DataCollatorForPermutationLanguageModeling", + "DataCollatorForSeq2Seq", + "DataCollatorForSOP", + "DataCollatorForTokenClassification", + "DataCollatorForWholeWordMask", + "DataCollatorWithPadding", + "DefaultDataCollator", + "default_data_collator", + ], + "data.metrics": [], + "data.processors": [], + "debug_utils": [], + "deepspeed": [], + "dependency_versions_check": [], + "dependency_versions_table": [], + "dynamic_module_utils": [], + "feature_extraction_sequence_utils": ["SequenceFeatureExtractor"], + "feature_extraction_utils": ["BatchFeature", "FeatureExtractionMixin"], + "file_utils": [], + "generation": ["GenerationConfig", "TextIteratorStreamer", "TextStreamer"], + "hf_argparser": ["HfArgumentParser"], + "hyperparameter_search": [], + "image_transforms": [], + "integrations": [ + "is_clearml_available", + "is_comet_available", + "is_dvclive_available", + "is_neptune_available", + "is_optuna_available", + "is_ray_available", + "is_ray_tune_available", + "is_sigopt_available", + "is_tensorboard_available", + "is_wandb_available", + ], + "modelcard": ["ModelCard"], + "modeling_tf_pytorch_utils": [ + "convert_tf_weight_name_to_pt_weight_name", + "load_pytorch_checkpoint_in_tf2_model", + "load_pytorch_model_in_tf2_model", + "load_pytorch_weights_in_tf2_model", + "load_tf2_checkpoint_in_pytorch_model", + "load_tf2_model_in_pytorch_model", + "load_tf2_weights_in_pytorch_model", + ], + "models": [], + # Models + "models.albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig"], + "models.align": [ + "ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP", + "AlignConfig", + "AlignProcessor", + "AlignTextConfig", + "AlignVisionConfig", + ], + "models.altclip": [ + "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "AltCLIPConfig", + "AltCLIPProcessor", + "AltCLIPTextConfig", + "AltCLIPVisionConfig", + ], + "models.audio_spectrogram_transformer": [ + "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ASTConfig", + "ASTFeatureExtractor", + ], + "models.auto": [ + "ALL_PRETRAINED_CONFIG_ARCHIVE_MAP", + 
"CONFIG_MAPPING", + "FEATURE_EXTRACTOR_MAPPING", + "IMAGE_PROCESSOR_MAPPING", + "MODEL_NAMES_MAPPING", + "PROCESSOR_MAPPING", + "TOKENIZER_MAPPING", + "AutoConfig", + "AutoFeatureExtractor", + "AutoImageProcessor", + "AutoProcessor", + "AutoTokenizer", + ], + "models.autoformer": [ + "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "AutoformerConfig", + ], + "models.bark": [ + "BarkCoarseConfig", + "BarkConfig", + "BarkFineConfig", + "BarkProcessor", + "BarkSemanticConfig", + ], + "models.bart": ["BartConfig", "BartTokenizer"], + "models.barthez": [], + "models.bartpho": [], + "models.beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig"], + "models.bert": [ + "BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "BasicTokenizer", + "BertConfig", + "BertTokenizer", + "WordpieceTokenizer", + ], + "models.bert_generation": ["BertGenerationConfig"], + "models.bert_japanese": [ + "BertJapaneseTokenizer", + "CharacterTokenizer", + "MecabTokenizer", + ], + "models.bertweet": ["BertweetTokenizer"], + "models.big_bird": ["BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP", "BigBirdConfig"], + "models.bigbird_pegasus": [ + "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP", + "BigBirdPegasusConfig", + ], + "models.biogpt": [ + "BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "BioGptConfig", + "BioGptTokenizer", + ], + "models.bit": ["BIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BitConfig"], + "models.blenderbot": [ + "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "BlenderbotConfig", + "BlenderbotTokenizer", + ], + "models.blenderbot_small": [ + "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP", + "BlenderbotSmallConfig", + "BlenderbotSmallTokenizer", + ], + "models.blip": [ + "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "BlipConfig", + "BlipProcessor", + "BlipTextConfig", + "BlipVisionConfig", + ], + "models.blip_2": [ + "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP", + "Blip2Config", + "Blip2Processor", + "Blip2QFormerConfig", + "Blip2VisionConfig", + ], + "models.bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig"], + "models.bridgetower": [ + "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "BridgeTowerConfig", + "BridgeTowerProcessor", + "BridgeTowerTextConfig", + "BridgeTowerVisionConfig", + ], + "models.bros": [ + "BROS_PRETRAINED_CONFIG_ARCHIVE_MAP", + "BrosConfig", + "BrosProcessor", + ], + "models.byt5": ["ByT5Tokenizer"], + "models.camembert": ["CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CamembertConfig"], + "models.canine": [ + "CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", + "CanineConfig", + "CanineTokenizer", + ], + "models.chinese_clip": [ + "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ChineseCLIPConfig", + "ChineseCLIPProcessor", + "ChineseCLIPTextConfig", + "ChineseCLIPVisionConfig", + ], + "models.clap": [ + "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", + "ClapAudioConfig", + "ClapConfig", + "ClapProcessor", + "ClapTextConfig", + ], + "models.clip": [ + "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "CLIPConfig", + "CLIPProcessor", + "CLIPTextConfig", + "CLIPTokenizer", + "CLIPVisionConfig", + ], + "models.clipseg": [ + "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP", + "CLIPSegConfig", + "CLIPSegProcessor", + "CLIPSegTextConfig", + "CLIPSegVisionConfig", + ], + "models.clvp": [ + "CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ClvpConfig", + "ClvpDecoderConfig", + "ClvpEncoderConfig", + "ClvpFeatureExtractor", + "ClvpProcessor", + "ClvpTokenizer", + ], + "models.code_llama": [], + "models.codegen": [ + "CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP", + "CodeGenConfig", + "CodeGenTokenizer", + ], + "models.cohere": ["COHERE_PRETRAINED_CONFIG_ARCHIVE_MAP", 
"CohereConfig"], + "models.conditional_detr": [ + "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ConditionalDetrConfig", + ], + "models.convbert": [ + "CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ConvBertConfig", + "ConvBertTokenizer", + ], + "models.convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig"], + "models.convnextv2": [ + "CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ConvNextV2Config", + ], + "models.cpm": [], + "models.cpmant": [ + "CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "CpmAntConfig", + "CpmAntTokenizer", + ], + "models.ctrl": [ + "CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", + "CTRLConfig", + "CTRLTokenizer", + ], + "models.cvt": ["CVT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CvtConfig"], + "models.data2vec": [ + "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP", + "Data2VecAudioConfig", + "Data2VecTextConfig", + "Data2VecVisionConfig", + ], + "models.deberta": [ + "DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", + "DebertaConfig", + "DebertaTokenizer", + ], + "models.deberta_v2": [ + "DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", + "DebertaV2Config", + ], + "models.decision_transformer": [ + "DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "DecisionTransformerConfig", + ], + "models.deformable_detr": [ + "DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", + "DeformableDetrConfig", + ], + "models.deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig"], + "models.deprecated": [], + "models.deprecated.bort": [], + "models.deprecated.mctct": [ + "MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "MCTCTConfig", + "MCTCTFeatureExtractor", + "MCTCTProcessor", + ], + "models.deprecated.mmbt": ["MMBTConfig"], + "models.deprecated.open_llama": [ + "OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", + "OpenLlamaConfig", + ], + "models.deprecated.retribert": [ + "RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "RetriBertConfig", + "RetriBertTokenizer", + ], + "models.deprecated.tapex": ["TapexTokenizer"], + "models.deprecated.trajectory_transformer": [ + "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "TrajectoryTransformerConfig", + ], + "models.deprecated.transfo_xl": [ + "TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", + "TransfoXLConfig", + "TransfoXLCorpus", + "TransfoXLTokenizer", + ], + "models.deprecated.van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"], + "models.depth_anything": ["DEPTH_ANYTHING_PRETRAINED_CONFIG_ARCHIVE_MAP", "DepthAnythingConfig"], + "models.deta": ["DETA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetaConfig"], + "models.detr": ["DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetrConfig"], + "models.dialogpt": [], + "models.dinat": ["DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DinatConfig"], + "models.dinov2": ["DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Dinov2Config"], + "models.distilbert": [ + "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "DistilBertConfig", + "DistilBertTokenizer", + ], + "models.dit": [], + "models.donut": [ + "DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", + "DonutProcessor", + "DonutSwinConfig", + ], + "models.dpr": [ + "DPR_PRETRAINED_CONFIG_ARCHIVE_MAP", + "DPRConfig", + "DPRContextEncoderTokenizer", + "DPRQuestionEncoderTokenizer", + "DPRReaderOutput", + "DPRReaderTokenizer", + ], + "models.dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"], + "models.efficientformer": [ + "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "EfficientFormerConfig", + ], + "models.efficientnet": [ + "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP", + "EfficientNetConfig", + ], + "models.electra": [ + 
"ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ElectraConfig", + "ElectraTokenizer", + ], + "models.encodec": [ + "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", + "EncodecConfig", + "EncodecFeatureExtractor", + ], + "models.encoder_decoder": ["EncoderDecoderConfig"], + "models.ernie": [ + "ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ErnieConfig", + ], + "models.ernie_m": ["ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieMConfig"], + "models.esm": ["ESM_PRETRAINED_CONFIG_ARCHIVE_MAP", "EsmConfig", "EsmTokenizer"], + "models.falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"], + "models.fastspeech2_conformer": [ + "FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP", + "FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP", + "FastSpeech2ConformerConfig", + "FastSpeech2ConformerHifiGanConfig", + "FastSpeech2ConformerTokenizer", + "FastSpeech2ConformerWithHifiGanConfig", + ], + "models.flaubert": ["FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlaubertConfig", "FlaubertTokenizer"], + "models.flava": [ + "FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP", + "FlavaConfig", + "FlavaImageCodebookConfig", + "FlavaImageConfig", + "FlavaMultimodalConfig", + "FlavaTextConfig", + ], + "models.fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"], + "models.focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"], + "models.fsmt": [ + "FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "FSMTConfig", + "FSMTTokenizer", + ], + "models.funnel": [ + "FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", + "FunnelConfig", + "FunnelTokenizer", + ], + "models.fuyu": ["FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP", "FuyuConfig"], + "models.gemma": ["GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "GemmaConfig"], + "models.git": [ + "GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "GitConfig", + "GitProcessor", + "GitVisionConfig", + ], + "models.glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"], + "models.gpt2": [ + "GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", + "GPT2Config", + "GPT2Tokenizer", + ], + "models.gpt_bigcode": [ + "GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", + "GPTBigCodeConfig", + ], + "models.gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig"], + "models.gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"], + "models.gpt_neox_japanese": [ + "GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", + "GPTNeoXJapaneseConfig", + ], + "models.gpt_sw3": [], + "models.gptj": ["GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTJConfig"], + "models.gptsan_japanese": [ + "GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", + "GPTSanJapaneseConfig", + "GPTSanJapaneseTokenizer", + ], + "models.graphormer": [ + "GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "GraphormerConfig", + ], + "models.groupvit": [ + "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "GroupViTConfig", + "GroupViTTextConfig", + "GroupViTVisionConfig", + ], + "models.herbert": ["HerbertTokenizer"], + "models.hubert": ["HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "HubertConfig"], + "models.ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig"], + "models.idefics": [ + "IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP", + "IdeficsConfig", + ], + "models.imagegpt": ["IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ImageGPTConfig"], + "models.informer": ["INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "InformerConfig"], + "models.instructblip": [ + "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "InstructBlipConfig", + "InstructBlipProcessor", + "InstructBlipQFormerConfig", + "InstructBlipVisionConfig", + ], + 
"models.jukebox": [ + "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP", + "JukeboxConfig", + "JukeboxPriorConfig", + "JukeboxTokenizer", + "JukeboxVQVAEConfig", + ], + "models.kosmos2": [ + "KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP", + "Kosmos2Config", + "Kosmos2Processor", + ], + "models.layoutlm": [ + "LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP", + "LayoutLMConfig", + "LayoutLMTokenizer", + ], + "models.layoutlmv2": [ + "LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", + "LayoutLMv2Config", + "LayoutLMv2FeatureExtractor", + "LayoutLMv2ImageProcessor", + "LayoutLMv2Processor", + "LayoutLMv2Tokenizer", + ], + "models.layoutlmv3": [ + "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP", + "LayoutLMv3Config", + "LayoutLMv3FeatureExtractor", + "LayoutLMv3ImageProcessor", + "LayoutLMv3Processor", + "LayoutLMv3Tokenizer", + ], + "models.layoutxlm": ["LayoutXLMProcessor"], + "models.led": ["LED_PRETRAINED_CONFIG_ARCHIVE_MAP", "LEDConfig", "LEDTokenizer"], + "models.levit": ["LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LevitConfig"], + "models.lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"], + "models.llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"], + "models.llava": [ + "LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP", + "LlavaConfig", + "LlavaProcessor", + ], + "models.llava_next": [ + "LLAVA_NEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "LlavaNextConfig", + "LlavaNextProcessor", + ], + "models.longformer": [ + "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "LongformerConfig", + "LongformerTokenizer", + ], + "models.longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config"], + "models.luke": [ + "LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", + "LukeConfig", + "LukeTokenizer", + ], + "models.lxmert": [ + "LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "LxmertConfig", + "LxmertTokenizer", + ], + "models.m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config"], + "models.mamba": ["MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MambaConfig"], + "models.marian": ["MarianConfig"], + "models.markuplm": [ + "MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP", + "MarkupLMConfig", + "MarkupLMFeatureExtractor", + "MarkupLMProcessor", + "MarkupLMTokenizer", + ], + "models.mask2former": [ + "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "Mask2FormerConfig", + ], + "models.maskformer": [ + "MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "MaskFormerConfig", + "MaskFormerSwinConfig", + ], + "models.mbart": ["MBartConfig"], + "models.mbart50": [], + "models.mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig"], + "models.megatron_bert": [ + "MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "MegatronBertConfig", + ], + "models.megatron_gpt2": [], + "models.mgp_str": [ + "MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", + "MgpstrConfig", + "MgpstrProcessor", + "MgpstrTokenizer", + ], + "models.mistral": ["MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP", "MistralConfig"], + "models.mixtral": ["MIXTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP", "MixtralConfig"], + "models.mluke": [], + "models.mobilebert": [ + "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "MobileBertConfig", + "MobileBertTokenizer", + ], + "models.mobilenet_v1": [ + "MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP", + "MobileNetV1Config", + ], + "models.mobilenet_v2": [ + "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", + "MobileNetV2Config", + ], + "models.mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig"], + "models.mobilevitv2": [ + "MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP", + "MobileViTV2Config", + ], + "models.mpnet": [ + "MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP", + "MPNetConfig", + 
"MPNetTokenizer", + ], + "models.mpt": ["MPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MptConfig"], + "models.mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"], + "models.mt5": ["MT5Config"], + "models.musicgen": [ + "MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP", + "MusicgenConfig", + "MusicgenDecoderConfig", + ], + "models.musicgen_melody": [ + "MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST", + "MusicgenMelodyConfig", + "MusicgenMelodyDecoderConfig", + ], + "models.mvp": ["MvpConfig", "MvpTokenizer"], + "models.nat": ["NAT_PRETRAINED_CONFIG_ARCHIVE_MAP", "NatConfig"], + "models.nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"], + "models.nllb": [], + "models.nllb_moe": ["NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", "NllbMoeConfig"], + "models.nougat": ["NougatProcessor"], + "models.nystromformer": [ + "NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "NystromformerConfig", + ], + "models.oneformer": [ + "ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "OneFormerConfig", + "OneFormerProcessor", + ], + "models.openai": [ + "OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "OpenAIGPTConfig", + "OpenAIGPTTokenizer", + ], + "models.opt": ["OPTConfig"], + "models.owlv2": [ + "OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP", + "Owlv2Config", + "Owlv2Processor", + "Owlv2TextConfig", + "Owlv2VisionConfig", + ], + "models.owlvit": [ + "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "OwlViTConfig", + "OwlViTProcessor", + "OwlViTTextConfig", + "OwlViTVisionConfig", + ], + "models.patchtsmixer": [ + "PATCHTSMIXER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "PatchTSMixerConfig", + ], + "models.patchtst": ["PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP", "PatchTSTConfig"], + "models.pegasus": [ + "PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP", + "PegasusConfig", + "PegasusTokenizer", + ], + "models.pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"], + "models.perceiver": [ + "PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "PerceiverConfig", + "PerceiverTokenizer", + ], + "models.persimmon": ["PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP", "PersimmonConfig"], + "models.phi": ["PHI_PRETRAINED_CONFIG_ARCHIVE_MAP", "PhiConfig"], + "models.phobert": ["PhobertTokenizer"], + "models.pix2struct": [ + "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "Pix2StructConfig", + "Pix2StructProcessor", + "Pix2StructTextConfig", + "Pix2StructVisionConfig", + ], + "models.plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"], + "models.poolformer": [ + "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "PoolFormerConfig", + ], + "models.pop2piano": [ + "POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP", + "Pop2PianoConfig", + ], + "models.prophetnet": [ + "PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ProphetNetConfig", + "ProphetNetTokenizer", + ], + "models.pvt": ["PVT_PRETRAINED_CONFIG_ARCHIVE_MAP", "PvtConfig"], + "models.pvt_v2": ["PVT_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "PvtV2Config"], + "models.qdqbert": ["QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "QDQBertConfig"], + "models.qwen2": [ + "QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP", + "Qwen2Config", + "Qwen2Tokenizer", + ], + "models.rag": ["RagConfig", "RagRetriever", "RagTokenizer"], + "models.realm": [ + "REALM_PRETRAINED_CONFIG_ARCHIVE_MAP", + "RealmConfig", + "RealmTokenizer", + ], + "models.reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"], + "models.regnet": ["REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "RegNetConfig"], + "models.rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig"], + "models.resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig"], + 
"models.roberta": [ + "ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", + "RobertaConfig", + "RobertaTokenizer", + ], + "models.roberta_prelayernorm": [ + "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP", + "RobertaPreLayerNormConfig", + ], + "models.roc_bert": [ + "ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "RoCBertConfig", + "RoCBertTokenizer", + ], + "models.roformer": [ + "ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "RoFormerConfig", + "RoFormerTokenizer", + ], + "models.rwkv": ["RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP", "RwkvConfig"], + "models.sam": [ + "SAM_PRETRAINED_CONFIG_ARCHIVE_MAP", + "SamConfig", + "SamMaskDecoderConfig", + "SamProcessor", + "SamPromptEncoderConfig", + "SamVisionConfig", + ], + "models.seamless_m4t": [ + "SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP", + "SeamlessM4TConfig", + "SeamlessM4TFeatureExtractor", + "SeamlessM4TProcessor", + ], + "models.seamless_m4t_v2": [ + "SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", + "SeamlessM4Tv2Config", + ], + "models.segformer": ["SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SegformerConfig"], + "models.seggpt": ["SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SegGptConfig"], + "models.sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"], + "models.sew_d": ["SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWDConfig"], + "models.siglip": [ + "SIGLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "SiglipConfig", + "SiglipProcessor", + "SiglipTextConfig", + "SiglipVisionConfig", + ], + "models.speech_encoder_decoder": ["SpeechEncoderDecoderConfig"], + "models.speech_to_text": [ + "SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "Speech2TextConfig", + "Speech2TextFeatureExtractor", + "Speech2TextProcessor", + ], + "models.speech_to_text_2": [ + "SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP", + "Speech2Text2Config", + "Speech2Text2Processor", + "Speech2Text2Tokenizer", + ], + "models.speecht5": [ + "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP", + "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP", + "SpeechT5Config", + "SpeechT5FeatureExtractor", + "SpeechT5HifiGanConfig", + "SpeechT5Processor", + ], + "models.splinter": [ + "SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "SplinterConfig", + "SplinterTokenizer", + ], + "models.squeezebert": [ + "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "SqueezeBertConfig", + "SqueezeBertTokenizer", + ], + "models.stablelm": ["STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP", "StableLmConfig"], + "models.starcoder2": ["STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Starcoder2Config"], + "models.superpoint": ["SUPERPOINT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SuperPointConfig"], + "models.swiftformer": [ + "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "SwiftFormerConfig", + ], + "models.swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig"], + "models.swin2sr": ["SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swin2SRConfig"], + "models.swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"], + "models.switch_transformers": [ + "SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP", + "SwitchTransformersConfig", + ], + "models.t5": ["T5_PRETRAINED_CONFIG_ARCHIVE_MAP", "T5Config"], + "models.table_transformer": [ + "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "TableTransformerConfig", + ], + "models.tapas": [ + "TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", + "TapasConfig", + "TapasTokenizer", + ], + "models.time_series_transformer": [ + "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "TimeSeriesTransformerConfig", + ], + "models.timesformer": [ + "TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "TimesformerConfig", + ], + 
"models.timm_backbone": ["TimmBackboneConfig"], + "models.trocr": [ + "TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", + "TrOCRConfig", + "TrOCRProcessor", + ], + "models.tvlt": [ + "TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "TvltConfig", + "TvltFeatureExtractor", + "TvltProcessor", + ], + "models.tvp": [ + "TVP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "TvpConfig", + "TvpProcessor", + ], + "models.udop": [ + "UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "UdopConfig", + "UdopProcessor", + ], + "models.umt5": ["UMT5Config"], + "models.unispeech": [ + "UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", + "UniSpeechConfig", + ], + "models.unispeech_sat": [ + "UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "UniSpeechSatConfig", + ], + "models.univnet": [ + "UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP", + "UnivNetConfig", + "UnivNetFeatureExtractor", + ], + "models.upernet": ["UperNetConfig"], + "models.videomae": ["VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "VideoMAEConfig"], + "models.vilt": [ + "VILT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ViltConfig", + "ViltFeatureExtractor", + "ViltImageProcessor", + "ViltProcessor", + ], + "models.vipllava": [ + "VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP", + "VipLlavaConfig", + ], + "models.vision_encoder_decoder": ["VisionEncoderDecoderConfig"], + "models.vision_text_dual_encoder": [ + "VisionTextDualEncoderConfig", + "VisionTextDualEncoderProcessor", + ], + "models.visual_bert": [ + "VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "VisualBertConfig", + ], + "models.vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig"], + "models.vit_hybrid": [ + "VIT_HYBRID_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ViTHybridConfig", + ], + "models.vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"], + "models.vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"], + "models.vitdet": ["VITDET_PRETRAINED_CONFIG_ARCHIVE_MAP", "VitDetConfig"], + "models.vitmatte": ["VITMATTE_PRETRAINED_CONFIG_ARCHIVE_MAP", "VitMatteConfig"], + "models.vits": [ + "VITS_PRETRAINED_CONFIG_ARCHIVE_MAP", + "VitsConfig", + "VitsTokenizer", + ], + "models.vivit": [ + "VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "VivitConfig", + ], + "models.wav2vec2": [ + "WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", + "Wav2Vec2Config", + "Wav2Vec2CTCTokenizer", + "Wav2Vec2FeatureExtractor", + "Wav2Vec2Processor", + "Wav2Vec2Tokenizer", + ], + "models.wav2vec2_bert": [ + "WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "Wav2Vec2BertConfig", + "Wav2Vec2BertProcessor", + ], + "models.wav2vec2_conformer": [ + "WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "Wav2Vec2ConformerConfig", + ], + "models.wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"], + "models.wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"], + "models.wavlm": [ + "WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", + "WavLMConfig", + ], + "models.whisper": [ + "WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "WhisperConfig", + "WhisperFeatureExtractor", + "WhisperProcessor", + "WhisperTokenizer", + ], + "models.x_clip": [ + "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "XCLIPConfig", + "XCLIPProcessor", + "XCLIPTextConfig", + "XCLIPVisionConfig", + ], + "models.xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"], + "models.xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMTokenizer"], + "models.xlm_prophetnet": [ + "XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", + "XLMProphetNetConfig", + ], + "models.xlm_roberta": [ + "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", + "XLMRobertaConfig", + ], + "models.xlm_roberta_xl": [ + "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", + 
"XLMRobertaXLConfig", + ], + "models.xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"], + "models.xmod": ["XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP", "XmodConfig"], + "models.yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig"], + "models.yoso": ["YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP", "YosoConfig"], + "onnx": [], + "pipelines": [ + "AudioClassificationPipeline", + "AutomaticSpeechRecognitionPipeline", + "Conversation", + "ConversationalPipeline", + "CsvPipelineDataFormat", + "DepthEstimationPipeline", + "DocumentQuestionAnsweringPipeline", + "FeatureExtractionPipeline", + "FillMaskPipeline", + "ImageClassificationPipeline", + "ImageFeatureExtractionPipeline", + "ImageSegmentationPipeline", + "ImageToImagePipeline", + "ImageToTextPipeline", + "JsonPipelineDataFormat", + "MaskGenerationPipeline", + "NerPipeline", + "ObjectDetectionPipeline", + "PipedPipelineDataFormat", + "Pipeline", + "PipelineDataFormat", + "QuestionAnsweringPipeline", + "SummarizationPipeline", + "TableQuestionAnsweringPipeline", + "Text2TextGenerationPipeline", + "TextClassificationPipeline", + "TextGenerationPipeline", + "TextToAudioPipeline", + "TokenClassificationPipeline", + "TranslationPipeline", + "VideoClassificationPipeline", + "VisualQuestionAnsweringPipeline", + "ZeroShotAudioClassificationPipeline", + "ZeroShotClassificationPipeline", + "ZeroShotImageClassificationPipeline", + "ZeroShotObjectDetectionPipeline", + "pipeline", + ], + "processing_utils": ["ProcessorMixin"], + "quantizers": [], + "testing_utils": [], + "tokenization_utils": ["PreTrainedTokenizer"], + "tokenization_utils_base": [ + "AddedToken", + "BatchEncoding", + "CharSpan", + "PreTrainedTokenizerBase", + "SpecialTokensMixin", + "TokenSpan", + ], + "tools": [ + "Agent", + "AzureOpenAiAgent", + "HfAgent", + "LocalAgent", + "OpenAiAgent", + "PipelineTool", + "RemoteTool", + "Tool", + "launch_gradio_demo", + "load_tool", + ], + "trainer_callback": [ + "DefaultFlowCallback", + "EarlyStoppingCallback", + "PrinterCallback", + "ProgressCallback", + "TrainerCallback", + "TrainerControl", + "TrainerState", + ], + "trainer_utils": [ + "EvalPrediction", + "IntervalStrategy", + "SchedulerType", + "enable_full_determinism", + "set_seed", + ], + "training_args": ["TrainingArguments"], + "training_args_seq2seq": ["Seq2SeqTrainingArguments"], + "training_args_tf": ["TFTrainingArguments"], + "utils": [ + "CONFIG_NAME", + "MODEL_CARD_NAME", + "PYTORCH_PRETRAINED_BERT_CACHE", + "PYTORCH_TRANSFORMERS_CACHE", + "SPIECE_UNDERLINE", + "TF2_WEIGHTS_NAME", + "TF_WEIGHTS_NAME", + "TRANSFORMERS_CACHE", + "WEIGHTS_NAME", + "TensorType", + "add_end_docstrings", + "add_start_docstrings", + "is_apex_available", + "is_bitsandbytes_available", + "is_datasets_available", + "is_decord_available", + "is_faiss_available", + "is_flax_available", + "is_keras_nlp_available", + "is_phonemizer_available", + "is_psutil_available", + "is_py3nvml_available", + "is_pyctcdecode_available", + "is_sacremoses_available", + "is_safetensors_available", + "is_scipy_available", + "is_sentencepiece_available", + "is_sklearn_available", + "is_speech_available", + "is_tensorflow_text_available", + "is_tf_available", + "is_timm_available", + "is_tokenizers_available", + "is_torch_available", + "is_torch_neuroncore_available", + "is_torch_npu_available", + "is_torch_tpu_available", + "is_torchvision_available", + "is_torch_xla_available", + "is_torch_xpu_available", + "is_vision_available", + "logging", + ], + "utils.quantization_config": ["AqlmConfig", "AwqConfig", "BitsAndBytesConfig", 
"GPTQConfig", "QuantoConfig"], +} + +# sentencepiece-backed objects +try: + if not is_sentencepiece_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_sentencepiece_objects + + _import_structure["utils.dummy_sentencepiece_objects"] = [ + name for name in dir(dummy_sentencepiece_objects) if not name.startswith("_") + ] +else: + _import_structure["models.albert"].append("AlbertTokenizer") + _import_structure["models.barthez"].append("BarthezTokenizer") + _import_structure["models.bartpho"].append("BartphoTokenizer") + _import_structure["models.bert_generation"].append("BertGenerationTokenizer") + _import_structure["models.big_bird"].append("BigBirdTokenizer") + _import_structure["models.camembert"].append("CamembertTokenizer") + _import_structure["models.code_llama"].append("CodeLlamaTokenizer") + _import_structure["models.cpm"].append("CpmTokenizer") + _import_structure["models.deberta_v2"].append("DebertaV2Tokenizer") + _import_structure["models.ernie_m"].append("ErnieMTokenizer") + _import_structure["models.fnet"].append("FNetTokenizer") + _import_structure["models.gemma"].append("GemmaTokenizer") + _import_structure["models.gpt_sw3"].append("GPTSw3Tokenizer") + _import_structure["models.layoutxlm"].append("LayoutXLMTokenizer") + _import_structure["models.llama"].append("LlamaTokenizer") + _import_structure["models.m2m_100"].append("M2M100Tokenizer") + _import_structure["models.marian"].append("MarianTokenizer") + _import_structure["models.mbart"].append("MBartTokenizer") + _import_structure["models.mbart50"].append("MBart50Tokenizer") + _import_structure["models.mluke"].append("MLukeTokenizer") + _import_structure["models.mt5"].append("MT5Tokenizer") + _import_structure["models.nllb"].append("NllbTokenizer") + _import_structure["models.pegasus"].append("PegasusTokenizer") + _import_structure["models.plbart"].append("PLBartTokenizer") + _import_structure["models.reformer"].append("ReformerTokenizer") + _import_structure["models.rembert"].append("RemBertTokenizer") + _import_structure["models.seamless_m4t"].append("SeamlessM4TTokenizer") + _import_structure["models.siglip"].append("SiglipTokenizer") + _import_structure["models.speech_to_text"].append("Speech2TextTokenizer") + _import_structure["models.speecht5"].append("SpeechT5Tokenizer") + _import_structure["models.t5"].append("T5Tokenizer") + _import_structure["models.udop"].append("UdopTokenizer") + _import_structure["models.xglm"].append("XGLMTokenizer") + _import_structure["models.xlm_prophetnet"].append("XLMProphetNetTokenizer") + _import_structure["models.xlm_roberta"].append("XLMRobertaTokenizer") + _import_structure["models.xlnet"].append("XLNetTokenizer") + +# tokenizers-backed objects +try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_tokenizers_objects + + _import_structure["utils.dummy_tokenizers_objects"] = [ + name for name in dir(dummy_tokenizers_objects) if not name.startswith("_") + ] +else: + # Fast tokenizers structure + _import_structure["models.albert"].append("AlbertTokenizerFast") + _import_structure["models.bart"].append("BartTokenizerFast") + _import_structure["models.barthez"].append("BarthezTokenizerFast") + _import_structure["models.bert"].append("BertTokenizerFast") + _import_structure["models.big_bird"].append("BigBirdTokenizerFast") + _import_structure["models.blenderbot"].append("BlenderbotTokenizerFast") + 
_import_structure["models.blenderbot_small"].append("BlenderbotSmallTokenizerFast") + _import_structure["models.bloom"].append("BloomTokenizerFast") + _import_structure["models.camembert"].append("CamembertTokenizerFast") + _import_structure["models.clip"].append("CLIPTokenizerFast") + _import_structure["models.code_llama"].append("CodeLlamaTokenizerFast") + _import_structure["models.codegen"].append("CodeGenTokenizerFast") + _import_structure["models.cohere"].append("CohereTokenizerFast") + _import_structure["models.convbert"].append("ConvBertTokenizerFast") + _import_structure["models.cpm"].append("CpmTokenizerFast") + _import_structure["models.deberta"].append("DebertaTokenizerFast") + _import_structure["models.deberta_v2"].append("DebertaV2TokenizerFast") + _import_structure["models.deprecated.retribert"].append("RetriBertTokenizerFast") + _import_structure["models.distilbert"].append("DistilBertTokenizerFast") + _import_structure["models.dpr"].extend( + [ + "DPRContextEncoderTokenizerFast", + "DPRQuestionEncoderTokenizerFast", + "DPRReaderTokenizerFast", + ] + ) + _import_structure["models.electra"].append("ElectraTokenizerFast") + _import_structure["models.fnet"].append("FNetTokenizerFast") + _import_structure["models.funnel"].append("FunnelTokenizerFast") + _import_structure["models.gemma"].append("GemmaTokenizerFast") + _import_structure["models.gpt2"].append("GPT2TokenizerFast") + _import_structure["models.gpt_neox"].append("GPTNeoXTokenizerFast") + _import_structure["models.gpt_neox_japanese"].append("GPTNeoXJapaneseTokenizer") + _import_structure["models.herbert"].append("HerbertTokenizerFast") + _import_structure["models.layoutlm"].append("LayoutLMTokenizerFast") + _import_structure["models.layoutlmv2"].append("LayoutLMv2TokenizerFast") + _import_structure["models.layoutlmv3"].append("LayoutLMv3TokenizerFast") + _import_structure["models.layoutxlm"].append("LayoutXLMTokenizerFast") + _import_structure["models.led"].append("LEDTokenizerFast") + _import_structure["models.llama"].append("LlamaTokenizerFast") + _import_structure["models.longformer"].append("LongformerTokenizerFast") + _import_structure["models.lxmert"].append("LxmertTokenizerFast") + _import_structure["models.markuplm"].append("MarkupLMTokenizerFast") + _import_structure["models.mbart"].append("MBartTokenizerFast") + _import_structure["models.mbart50"].append("MBart50TokenizerFast") + _import_structure["models.mobilebert"].append("MobileBertTokenizerFast") + _import_structure["models.mpnet"].append("MPNetTokenizerFast") + _import_structure["models.mt5"].append("MT5TokenizerFast") + _import_structure["models.mvp"].append("MvpTokenizerFast") + _import_structure["models.nllb"].append("NllbTokenizerFast") + _import_structure["models.nougat"].append("NougatTokenizerFast") + _import_structure["models.openai"].append("OpenAIGPTTokenizerFast") + _import_structure["models.pegasus"].append("PegasusTokenizerFast") + _import_structure["models.qwen2"].append("Qwen2TokenizerFast") + _import_structure["models.realm"].append("RealmTokenizerFast") + _import_structure["models.reformer"].append("ReformerTokenizerFast") + _import_structure["models.rembert"].append("RemBertTokenizerFast") + _import_structure["models.roberta"].append("RobertaTokenizerFast") + _import_structure["models.roformer"].append("RoFormerTokenizerFast") + _import_structure["models.seamless_m4t"].append("SeamlessM4TTokenizerFast") + _import_structure["models.splinter"].append("SplinterTokenizerFast") + 
_import_structure["models.squeezebert"].append("SqueezeBertTokenizerFast") + _import_structure["models.t5"].append("T5TokenizerFast") + _import_structure["models.udop"].append("UdopTokenizerFast") + _import_structure["models.whisper"].append("WhisperTokenizerFast") + _import_structure["models.xglm"].append("XGLMTokenizerFast") + _import_structure["models.xlm_roberta"].append("XLMRobertaTokenizerFast") + _import_structure["models.xlnet"].append("XLNetTokenizerFast") + _import_structure["tokenization_utils_fast"] = ["PreTrainedTokenizerFast"] + + +try: + if not (is_sentencepiece_available() and is_tokenizers_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_sentencepiece_and_tokenizers_objects + + _import_structure["utils.dummy_sentencepiece_and_tokenizers_objects"] = [ + name for name in dir(dummy_sentencepiece_and_tokenizers_objects) if not name.startswith("_") + ] +else: + _import_structure["convert_slow_tokenizer"] = [ + "SLOW_TO_FAST_CONVERTERS", + "convert_slow_tokenizer", + ] + +# Tensorflow-text-specific objects +try: + if not is_tensorflow_text_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_tensorflow_text_objects + + _import_structure["utils.dummy_tensorflow_text_objects"] = [ + name for name in dir(dummy_tensorflow_text_objects) if not name.startswith("_") + ] +else: + _import_structure["models.bert"].append("TFBertTokenizer") + +# keras-nlp-specific objects +try: + if not is_keras_nlp_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_keras_nlp_objects + + _import_structure["utils.dummy_keras_nlp_objects"] = [ + name for name in dir(dummy_keras_nlp_objects) if not name.startswith("_") + ] +else: + _import_structure["models.gpt2"].append("TFGPT2Tokenizer") + +# Vision-specific objects +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_vision_objects + + _import_structure["utils.dummy_vision_objects"] = [ + name for name in dir(dummy_vision_objects) if not name.startswith("_") + ] +else: + _import_structure["image_processing_utils"] = ["ImageProcessingMixin"] + _import_structure["image_utils"] = ["ImageFeatureExtractionMixin"] + _import_structure["models.beit"].extend(["BeitFeatureExtractor", "BeitImageProcessor"]) + _import_structure["models.bit"].extend(["BitImageProcessor"]) + _import_structure["models.blip"].extend(["BlipImageProcessor"]) + _import_structure["models.bridgetower"].append("BridgeTowerImageProcessor") + _import_structure["models.chinese_clip"].extend(["ChineseCLIPFeatureExtractor", "ChineseCLIPImageProcessor"]) + _import_structure["models.clip"].extend(["CLIPFeatureExtractor", "CLIPImageProcessor"]) + _import_structure["models.conditional_detr"].extend( + ["ConditionalDetrFeatureExtractor", "ConditionalDetrImageProcessor"] + ) + _import_structure["models.convnext"].extend(["ConvNextFeatureExtractor", "ConvNextImageProcessor"]) + _import_structure["models.deformable_detr"].extend( + ["DeformableDetrFeatureExtractor", "DeformableDetrImageProcessor"] + ) + _import_structure["models.deit"].extend(["DeiTFeatureExtractor", "DeiTImageProcessor"]) + _import_structure["models.deta"].append("DetaImageProcessor") + _import_structure["models.detr"].extend(["DetrFeatureExtractor", "DetrImageProcessor"]) + _import_structure["models.donut"].extend(["DonutFeatureExtractor", 
"DonutImageProcessor"]) + _import_structure["models.dpt"].extend(["DPTFeatureExtractor", "DPTImageProcessor"]) + _import_structure["models.efficientformer"].append("EfficientFormerImageProcessor") + _import_structure["models.efficientnet"].append("EfficientNetImageProcessor") + _import_structure["models.flava"].extend(["FlavaFeatureExtractor", "FlavaImageProcessor", "FlavaProcessor"]) + _import_structure["models.fuyu"].extend(["FuyuImageProcessor", "FuyuProcessor"]) + _import_structure["models.glpn"].extend(["GLPNFeatureExtractor", "GLPNImageProcessor"]) + _import_structure["models.idefics"].extend(["IdeficsImageProcessor"]) + _import_structure["models.imagegpt"].extend(["ImageGPTFeatureExtractor", "ImageGPTImageProcessor"]) + _import_structure["models.layoutlmv2"].extend(["LayoutLMv2FeatureExtractor", "LayoutLMv2ImageProcessor"]) + _import_structure["models.layoutlmv3"].extend(["LayoutLMv3FeatureExtractor", "LayoutLMv3ImageProcessor"]) + _import_structure["models.levit"].extend(["LevitFeatureExtractor", "LevitImageProcessor"]) + _import_structure["models.llava_next"].append("LlavaNextImageProcessor") + _import_structure["models.mask2former"].append("Mask2FormerImageProcessor") + _import_structure["models.maskformer"].extend(["MaskFormerFeatureExtractor", "MaskFormerImageProcessor"]) + _import_structure["models.mobilenet_v1"].extend(["MobileNetV1FeatureExtractor", "MobileNetV1ImageProcessor"]) + _import_structure["models.mobilenet_v2"].extend(["MobileNetV2FeatureExtractor", "MobileNetV2ImageProcessor"]) + _import_structure["models.mobilevit"].extend(["MobileViTFeatureExtractor", "MobileViTImageProcessor"]) + _import_structure["models.nougat"].append("NougatImageProcessor") + _import_structure["models.oneformer"].extend(["OneFormerImageProcessor"]) + _import_structure["models.owlv2"].append("Owlv2ImageProcessor") + _import_structure["models.owlvit"].extend(["OwlViTFeatureExtractor", "OwlViTImageProcessor"]) + _import_structure["models.perceiver"].extend(["PerceiverFeatureExtractor", "PerceiverImageProcessor"]) + _import_structure["models.pix2struct"].extend(["Pix2StructImageProcessor"]) + _import_structure["models.poolformer"].extend(["PoolFormerFeatureExtractor", "PoolFormerImageProcessor"]) + _import_structure["models.pvt"].extend(["PvtImageProcessor"]) + _import_structure["models.sam"].extend(["SamImageProcessor"]) + _import_structure["models.segformer"].extend(["SegformerFeatureExtractor", "SegformerImageProcessor"]) + _import_structure["models.seggpt"].extend(["SegGptImageProcessor"]) + _import_structure["models.siglip"].append("SiglipImageProcessor") + _import_structure["models.superpoint"].extend(["SuperPointImageProcessor"]) + _import_structure["models.swin2sr"].append("Swin2SRImageProcessor") + _import_structure["models.tvlt"].append("TvltImageProcessor") + _import_structure["models.tvp"].append("TvpImageProcessor") + _import_structure["models.videomae"].extend(["VideoMAEFeatureExtractor", "VideoMAEImageProcessor"]) + _import_structure["models.vilt"].extend(["ViltFeatureExtractor", "ViltImageProcessor", "ViltProcessor"]) + _import_structure["models.vit"].extend(["ViTFeatureExtractor", "ViTImageProcessor"]) + _import_structure["models.vit_hybrid"].extend(["ViTHybridImageProcessor"]) + _import_structure["models.vitmatte"].append("VitMatteImageProcessor") + _import_structure["models.vivit"].append("VivitImageProcessor") + _import_structure["models.yolos"].extend(["YolosFeatureExtractor", "YolosImageProcessor"]) + + +# PyTorch-backed objects +try: + if not is_torch_available(): + raise 
OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_pt_objects + + _import_structure["utils.dummy_pt_objects"] = [name for name in dir(dummy_pt_objects) if not name.startswith("_")] +else: + _import_structure["activations"] = [] + _import_structure["benchmark.benchmark"] = ["PyTorchBenchmark"] + _import_structure["benchmark.benchmark_args"] = ["PyTorchBenchmarkArguments"] + _import_structure["cache_utils"] = ["Cache", "DynamicCache", "SinkCache", "StaticCache"] + _import_structure["data.datasets"] = [ + "GlueDataset", + "GlueDataTrainingArguments", + "LineByLineTextDataset", + "LineByLineWithRefDataset", + "LineByLineWithSOPTextDataset", + "SquadDataset", + "SquadDataTrainingArguments", + "TextDataset", + "TextDatasetForNextSentencePrediction", + ] + _import_structure["generation"].extend( + [ + "AlternatingCodebooksLogitsProcessor", + "BeamScorer", + "BeamSearchScorer", + "ClassifierFreeGuidanceLogitsProcessor", + "ConstrainedBeamSearchScorer", + "Constraint", + "ConstraintListState", + "DisjunctiveConstraint", + "EncoderNoRepeatNGramLogitsProcessor", + "EncoderRepetitionPenaltyLogitsProcessor", + "EpsilonLogitsWarper", + "EtaLogitsWarper", + "ExponentialDecayLengthPenalty", + "ForcedBOSTokenLogitsProcessor", + "ForcedEOSTokenLogitsProcessor", + "ForceTokensLogitsProcessor", + "GenerationMixin", + "HammingDiversityLogitsProcessor", + "InfNanRemoveLogitsProcessor", + "LogitNormalization", + "LogitsProcessor", + "LogitsProcessorList", + "LogitsWarper", + "MaxLengthCriteria", + "MaxTimeCriteria", + "MinLengthLogitsProcessor", + "MinNewTokensLengthLogitsProcessor", + "NoBadWordsLogitsProcessor", + "NoRepeatNGramLogitsProcessor", + "PhrasalConstraint", + "PrefixConstrainedLogitsProcessor", + "RepetitionPenaltyLogitsProcessor", + "SequenceBiasLogitsProcessor", + "StoppingCriteria", + "StoppingCriteriaList", + "SuppressTokensAtBeginLogitsProcessor", + "SuppressTokensLogitsProcessor", + "TemperatureLogitsWarper", + "TopKLogitsWarper", + "TopPLogitsWarper", + "TypicalLogitsWarper", + "UnbatchedClassifierFreeGuidanceLogitsProcessor", + "WhisperTimeStampLogitsProcessor", + ] + ) + _import_structure["generation_utils"] = [] + _import_structure["modeling_outputs"] = [] + _import_structure["modeling_utils"] = ["PreTrainedModel"] + + # PyTorch models structure + + _import_structure["models.albert"].extend( + [ + "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "AlbertForMaskedLM", + "AlbertForMultipleChoice", + "AlbertForPreTraining", + "AlbertForQuestionAnswering", + "AlbertForSequenceClassification", + "AlbertForTokenClassification", + "AlbertModel", + "AlbertPreTrainedModel", + "load_tf_weights_in_albert", + ] + ) + + _import_structure["models.align"].extend( + [ + "ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST", + "AlignModel", + "AlignPreTrainedModel", + "AlignTextModel", + "AlignVisionModel", + ] + ) + _import_structure["models.altclip"].extend( + [ + "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "AltCLIPModel", + "AltCLIPPreTrainedModel", + "AltCLIPTextModel", + "AltCLIPVisionModel", + ] + ) + _import_structure["models.audio_spectrogram_transformer"].extend( + [ + "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "ASTForAudioClassification", + "ASTModel", + "ASTPreTrainedModel", + ] + ) + _import_structure["models.auto"].extend( + [ + "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING", + "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING", + "MODEL_FOR_AUDIO_XVECTOR_MAPPING", + "MODEL_FOR_BACKBONE_MAPPING", + "MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING", + 
"MODEL_FOR_CAUSAL_LM_MAPPING", + "MODEL_FOR_CTC_MAPPING", + "MODEL_FOR_DEPTH_ESTIMATION_MAPPING", + "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING", + "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING", + "MODEL_FOR_IMAGE_MAPPING", + "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING", + "MODEL_FOR_IMAGE_TO_IMAGE_MAPPING", + "MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING", + "MODEL_FOR_KEYPOINT_DETECTION_MAPPING", + "MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING", + "MODEL_FOR_MASKED_LM_MAPPING", + "MODEL_FOR_MASK_GENERATION_MAPPING", + "MODEL_FOR_MULTIPLE_CHOICE_MAPPING", + "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", + "MODEL_FOR_OBJECT_DETECTION_MAPPING", + "MODEL_FOR_PRETRAINING_MAPPING", + "MODEL_FOR_QUESTION_ANSWERING_MAPPING", + "MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING", + "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", + "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING", + "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING", + "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING", + "MODEL_FOR_TEXT_ENCODING_MAPPING", + "MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING", + "MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING", + "MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING", + "MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING", + "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING", + "MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING", + "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING", + "MODEL_FOR_VISION_2_SEQ_MAPPING", + "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING", + "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING", + "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING", + "MODEL_MAPPING", + "MODEL_WITH_LM_HEAD_MAPPING", + "AutoBackbone", + "AutoModel", + "AutoModelForAudioClassification", + "AutoModelForAudioFrameClassification", + "AutoModelForAudioXVector", + "AutoModelForCausalLM", + "AutoModelForCTC", + "AutoModelForDepthEstimation", + "AutoModelForDocumentQuestionAnswering", + "AutoModelForImageClassification", + "AutoModelForImageSegmentation", + "AutoModelForImageToImage", + "AutoModelForInstanceSegmentation", + "AutoModelForKeypointDetection", + "AutoModelForMaskedImageModeling", + "AutoModelForMaskedLM", + "AutoModelForMaskGeneration", + "AutoModelForMultipleChoice", + "AutoModelForNextSentencePrediction", + "AutoModelForObjectDetection", + "AutoModelForPreTraining", + "AutoModelForQuestionAnswering", + "AutoModelForSemanticSegmentation", + "AutoModelForSeq2SeqLM", + "AutoModelForSequenceClassification", + "AutoModelForSpeechSeq2Seq", + "AutoModelForTableQuestionAnswering", + "AutoModelForTextEncoding", + "AutoModelForTextToSpectrogram", + "AutoModelForTextToWaveform", + "AutoModelForTokenClassification", + "AutoModelForUniversalSegmentation", + "AutoModelForVideoClassification", + "AutoModelForVision2Seq", + "AutoModelForVisualQuestionAnswering", + "AutoModelForZeroShotImageClassification", + "AutoModelForZeroShotObjectDetection", + "AutoModelWithLMHead", + ] + ) + _import_structure["models.autoformer"].extend( + [ + "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "AutoformerForPrediction", + "AutoformerModel", + "AutoformerPreTrainedModel", + ] + ) + _import_structure["models.bark"].extend( + [ + "BARK_PRETRAINED_MODEL_ARCHIVE_LIST", + "BarkCausalModel", + "BarkCoarseModel", + "BarkFineModel", + "BarkModel", + "BarkPreTrainedModel", + "BarkSemanticModel", + ] + ) + _import_structure["models.bart"].extend( + [ + "BART_PRETRAINED_MODEL_ARCHIVE_LIST", + "BartForCausalLM", + "BartForConditionalGeneration", + "BartForQuestionAnswering", + "BartForSequenceClassification", + "BartModel", + "BartPretrainedModel", + "BartPreTrainedModel", + "PretrainedBartModel", + ] + ) + 
_import_structure["models.beit"].extend( + [ + "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "BeitBackbone", + "BeitForImageClassification", + "BeitForMaskedImageModeling", + "BeitForSemanticSegmentation", + "BeitModel", + "BeitPreTrainedModel", + ] + ) + _import_structure["models.bert"].extend( + [ + "BERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "BertForMaskedLM", + "BertForMultipleChoice", + "BertForNextSentencePrediction", + "BertForPreTraining", + "BertForQuestionAnswering", + "BertForSequenceClassification", + "BertForTokenClassification", + "BertLayer", + "BertLMHeadModel", + "BertModel", + "BertPreTrainedModel", + "load_tf_weights_in_bert", + ] + ) + _import_structure["models.bert_generation"].extend( + [ + "BertGenerationDecoder", + "BertGenerationEncoder", + "BertGenerationPreTrainedModel", + "load_tf_weights_in_bert_generation", + ] + ) + _import_structure["models.big_bird"].extend( + [ + "BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST", + "BigBirdForCausalLM", + "BigBirdForMaskedLM", + "BigBirdForMultipleChoice", + "BigBirdForPreTraining", + "BigBirdForQuestionAnswering", + "BigBirdForSequenceClassification", + "BigBirdForTokenClassification", + "BigBirdLayer", + "BigBirdModel", + "BigBirdPreTrainedModel", + "load_tf_weights_in_big_bird", + ] + ) + _import_structure["models.bigbird_pegasus"].extend( + [ + "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST", + "BigBirdPegasusForCausalLM", + "BigBirdPegasusForConditionalGeneration", + "BigBirdPegasusForQuestionAnswering", + "BigBirdPegasusForSequenceClassification", + "BigBirdPegasusModel", + "BigBirdPegasusPreTrainedModel", + ] + ) + _import_structure["models.biogpt"].extend( + [ + "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST", + "BioGptForCausalLM", + "BioGptForSequenceClassification", + "BioGptForTokenClassification", + "BioGptModel", + "BioGptPreTrainedModel", + ] + ) + _import_structure["models.bit"].extend( + [ + "BIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "BitBackbone", + "BitForImageClassification", + "BitModel", + "BitPreTrainedModel", + ] + ) + _import_structure["models.blenderbot"].extend( + [ + "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", + "BlenderbotForCausalLM", + "BlenderbotForConditionalGeneration", + "BlenderbotModel", + "BlenderbotPreTrainedModel", + ] + ) + _import_structure["models.blenderbot_small"].extend( + [ + "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST", + "BlenderbotSmallForCausalLM", + "BlenderbotSmallForConditionalGeneration", + "BlenderbotSmallModel", + "BlenderbotSmallPreTrainedModel", + ] + ) + _import_structure["models.blip"].extend( + [ + "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "BlipForConditionalGeneration", + "BlipForImageTextRetrieval", + "BlipForQuestionAnswering", + "BlipModel", + "BlipPreTrainedModel", + "BlipTextModel", + "BlipVisionModel", + ] + ) + _import_structure["models.blip_2"].extend( + [ + "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST", + "Blip2ForConditionalGeneration", + "Blip2Model", + "Blip2PreTrainedModel", + "Blip2QFormerModel", + "Blip2VisionModel", + ] + ) + _import_structure["models.bloom"].extend( + [ + "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST", + "BloomForCausalLM", + "BloomForQuestionAnswering", + "BloomForSequenceClassification", + "BloomForTokenClassification", + "BloomModel", + "BloomPreTrainedModel", + ] + ) + _import_structure["models.bridgetower"].extend( + [ + "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST", + "BridgeTowerForContrastiveLearning", + "BridgeTowerForImageAndTextRetrieval", + "BridgeTowerForMaskedLM", + "BridgeTowerModel", + "BridgeTowerPreTrainedModel", + ] + ) + 
_import_structure["models.bros"].extend( + [ + "BROS_PRETRAINED_MODEL_ARCHIVE_LIST", + "BrosForTokenClassification", + "BrosModel", + "BrosPreTrainedModel", + "BrosProcessor", + "BrosSpadeEEForTokenClassification", + "BrosSpadeELForTokenClassification", + ] + ) + _import_structure["models.camembert"].extend( + [ + "CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "CamembertForCausalLM", + "CamembertForMaskedLM", + "CamembertForMultipleChoice", + "CamembertForQuestionAnswering", + "CamembertForSequenceClassification", + "CamembertForTokenClassification", + "CamembertModel", + "CamembertPreTrainedModel", + ] + ) + _import_structure["models.canine"].extend( + [ + "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST", + "CanineForMultipleChoice", + "CanineForQuestionAnswering", + "CanineForSequenceClassification", + "CanineForTokenClassification", + "CanineLayer", + "CanineModel", + "CaninePreTrainedModel", + "load_tf_weights_in_canine", + ] + ) + _import_structure["models.chinese_clip"].extend( + [ + "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "ChineseCLIPModel", + "ChineseCLIPPreTrainedModel", + "ChineseCLIPTextModel", + "ChineseCLIPVisionModel", + ] + ) + _import_structure["models.clap"].extend( + [ + "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", + "ClapAudioModel", + "ClapAudioModelWithProjection", + "ClapFeatureExtractor", + "ClapModel", + "ClapPreTrainedModel", + "ClapTextModel", + "ClapTextModelWithProjection", + ] + ) + _import_structure["models.clip"].extend( + [ + "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "CLIPForImageClassification", + "CLIPModel", + "CLIPPreTrainedModel", + "CLIPTextModel", + "CLIPTextModelWithProjection", + "CLIPVisionModel", + "CLIPVisionModelWithProjection", + ] + ) + _import_structure["models.clipseg"].extend( + [ + "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST", + "CLIPSegForImageSegmentation", + "CLIPSegModel", + "CLIPSegPreTrainedModel", + "CLIPSegTextModel", + "CLIPSegVisionModel", + ] + ) + _import_structure["models.clvp"].extend( + [ + "CLVP_PRETRAINED_MODEL_ARCHIVE_LIST", + "ClvpDecoder", + "ClvpEncoder", + "ClvpForCausalLM", + "ClvpModel", + "ClvpModelForConditionalGeneration", + "ClvpPreTrainedModel", + ] + ) + _import_structure["models.codegen"].extend( + [ + "CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST", + "CodeGenForCausalLM", + "CodeGenModel", + "CodeGenPreTrainedModel", + ] + ) + _import_structure["models.cohere"].extend(["CohereForCausalLM", "CohereModel", "CoherePreTrainedModel"]) + _import_structure["models.conditional_detr"].extend( + [ + "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", + "ConditionalDetrForObjectDetection", + "ConditionalDetrForSegmentation", + "ConditionalDetrModel", + "ConditionalDetrPreTrainedModel", + ] + ) + _import_structure["models.convbert"].extend( + [ + "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "ConvBertForMaskedLM", + "ConvBertForMultipleChoice", + "ConvBertForQuestionAnswering", + "ConvBertForSequenceClassification", + "ConvBertForTokenClassification", + "ConvBertLayer", + "ConvBertModel", + "ConvBertPreTrainedModel", + "load_tf_weights_in_convbert", + ] + ) + _import_structure["models.convnext"].extend( + [ + "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST", + "ConvNextBackbone", + "ConvNextForImageClassification", + "ConvNextModel", + "ConvNextPreTrainedModel", + ] + ) + _import_structure["models.convnextv2"].extend( + [ + "CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST", + "ConvNextV2Backbone", + "ConvNextV2ForImageClassification", + "ConvNextV2Model", + "ConvNextV2PreTrainedModel", + ] + ) + _import_structure["models.cpmant"].extend( + [ + 
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST", + "CpmAntForCausalLM", + "CpmAntModel", + "CpmAntPreTrainedModel", + ] + ) + _import_structure["models.ctrl"].extend( + [ + "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", + "CTRLForSequenceClassification", + "CTRLLMHeadModel", + "CTRLModel", + "CTRLPreTrainedModel", + ] + ) + _import_structure["models.cvt"].extend( + [ + "CVT_PRETRAINED_MODEL_ARCHIVE_LIST", + "CvtForImageClassification", + "CvtModel", + "CvtPreTrainedModel", + ] + ) + _import_structure["models.data2vec"].extend( + [ + "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST", + "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", + "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST", + "Data2VecAudioForAudioFrameClassification", + "Data2VecAudioForCTC", + "Data2VecAudioForSequenceClassification", + "Data2VecAudioForXVector", + "Data2VecAudioModel", + "Data2VecAudioPreTrainedModel", + "Data2VecTextForCausalLM", + "Data2VecTextForMaskedLM", + "Data2VecTextForMultipleChoice", + "Data2VecTextForQuestionAnswering", + "Data2VecTextForSequenceClassification", + "Data2VecTextForTokenClassification", + "Data2VecTextModel", + "Data2VecTextPreTrainedModel", + "Data2VecVisionForImageClassification", + "Data2VecVisionForSemanticSegmentation", + "Data2VecVisionModel", + "Data2VecVisionPreTrainedModel", + ] + ) + _import_structure["models.deberta"].extend( + [ + "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", + "DebertaForMaskedLM", + "DebertaForQuestionAnswering", + "DebertaForSequenceClassification", + "DebertaForTokenClassification", + "DebertaModel", + "DebertaPreTrainedModel", + ] + ) + _import_structure["models.deberta_v2"].extend( + [ + "DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST", + "DebertaV2ForMaskedLM", + "DebertaV2ForMultipleChoice", + "DebertaV2ForQuestionAnswering", + "DebertaV2ForSequenceClassification", + "DebertaV2ForTokenClassification", + "DebertaV2Model", + "DebertaV2PreTrainedModel", + ] + ) + _import_structure["models.decision_transformer"].extend( + [ + "DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "DecisionTransformerGPT2Model", + "DecisionTransformerGPT2PreTrainedModel", + "DecisionTransformerModel", + "DecisionTransformerPreTrainedModel", + ] + ) + _import_structure["models.deformable_detr"].extend( + [ + "DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", + "DeformableDetrForObjectDetection", + "DeformableDetrModel", + "DeformableDetrPreTrainedModel", + ] + ) + _import_structure["models.deit"].extend( + [ + "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "DeiTForImageClassification", + "DeiTForImageClassificationWithTeacher", + "DeiTForMaskedImageModeling", + "DeiTModel", + "DeiTPreTrainedModel", + ] + ) + _import_structure["models.deprecated.mctct"].extend( + [ + "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST", + "MCTCTForCTC", + "MCTCTModel", + "MCTCTPreTrainedModel", + ] + ) + _import_structure["models.deprecated.mmbt"].extend(["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]) + _import_structure["models.deprecated.open_llama"].extend( + [ + "OpenLlamaForCausalLM", + "OpenLlamaForSequenceClassification", + "OpenLlamaModel", + "OpenLlamaPreTrainedModel", + ] + ) + _import_structure["models.deprecated.retribert"].extend( + [ + "RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "RetriBertModel", + "RetriBertPreTrainedModel", + ] + ) + _import_structure["models.deprecated.trajectory_transformer"].extend( + [ + "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "TrajectoryTransformerModel", + "TrajectoryTransformerPreTrainedModel", + ] + ) + _import_structure["models.deprecated.transfo_xl"].extend( + [ + 
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", + "AdaptiveEmbedding", + "TransfoXLForSequenceClassification", + "TransfoXLLMHeadModel", + "TransfoXLModel", + "TransfoXLPreTrainedModel", + "load_tf_weights_in_transfo_xl", + ] + ) + _import_structure["models.deprecated.van"].extend( + [ + "VAN_PRETRAINED_MODEL_ARCHIVE_LIST", + "VanForImageClassification", + "VanModel", + "VanPreTrainedModel", + ] + ) + _import_structure["models.depth_anything"].extend( + [ + "DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST", + "DepthAnythingForDepthEstimation", + "DepthAnythingPreTrainedModel", + ] + ) + _import_structure["models.deta"].extend( + [ + "DETA_PRETRAINED_MODEL_ARCHIVE_LIST", + "DetaForObjectDetection", + "DetaModel", + "DetaPreTrainedModel", + ] + ) + _import_structure["models.detr"].extend( + [ + "DETR_PRETRAINED_MODEL_ARCHIVE_LIST", + "DetrForObjectDetection", + "DetrForSegmentation", + "DetrModel", + "DetrPreTrainedModel", + ] + ) + _import_structure["models.dinat"].extend( + [ + "DINAT_PRETRAINED_MODEL_ARCHIVE_LIST", + "DinatBackbone", + "DinatForImageClassification", + "DinatModel", + "DinatPreTrainedModel", + ] + ) + _import_structure["models.dinov2"].extend( + [ + "DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST", + "Dinov2Backbone", + "Dinov2ForImageClassification", + "Dinov2Model", + "Dinov2PreTrainedModel", + ] + ) + _import_structure["models.distilbert"].extend( + [ + "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "DistilBertForMaskedLM", + "DistilBertForMultipleChoice", + "DistilBertForQuestionAnswering", + "DistilBertForSequenceClassification", + "DistilBertForTokenClassification", + "DistilBertModel", + "DistilBertPreTrainedModel", + ] + ) + _import_structure["models.donut"].extend( + [ + "DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", + "DonutSwinModel", + "DonutSwinPreTrainedModel", + ] + ) + _import_structure["models.dpr"].extend( + [ + "DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", + "DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", + "DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST", + "DPRContextEncoder", + "DPRPretrainedContextEncoder", + "DPRPreTrainedModel", + "DPRPretrainedQuestionEncoder", + "DPRPretrainedReader", + "DPRQuestionEncoder", + "DPRReader", + ] + ) + _import_structure["models.dpt"].extend( + [ + "DPT_PRETRAINED_MODEL_ARCHIVE_LIST", + "DPTForDepthEstimation", + "DPTForSemanticSegmentation", + "DPTModel", + "DPTPreTrainedModel", + ] + ) + _import_structure["models.efficientformer"].extend( + [ + "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "EfficientFormerForImageClassification", + "EfficientFormerForImageClassificationWithTeacher", + "EfficientFormerModel", + "EfficientFormerPreTrainedModel", + ] + ) + _import_structure["models.efficientnet"].extend( + [ + "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "EfficientNetForImageClassification", + "EfficientNetModel", + "EfficientNetPreTrainedModel", + ] + ) + _import_structure["models.electra"].extend( + [ + "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST", + "ElectraForCausalLM", + "ElectraForMaskedLM", + "ElectraForMultipleChoice", + "ElectraForPreTraining", + "ElectraForQuestionAnswering", + "ElectraForSequenceClassification", + "ElectraForTokenClassification", + "ElectraModel", + "ElectraPreTrainedModel", + "load_tf_weights_in_electra", + ] + ) + _import_structure["models.encodec"].extend( + [ + "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", + "EncodecModel", + "EncodecPreTrainedModel", + ] + ) + _import_structure["models.encoder_decoder"].append("EncoderDecoderModel") + _import_structure["models.ernie"].extend( + [ + 
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST", + "ErnieForCausalLM", + "ErnieForMaskedLM", + "ErnieForMultipleChoice", + "ErnieForNextSentencePrediction", + "ErnieForPreTraining", + "ErnieForQuestionAnswering", + "ErnieForSequenceClassification", + "ErnieForTokenClassification", + "ErnieModel", + "ErniePreTrainedModel", + ] + ) + _import_structure["models.ernie_m"].extend( + [ + "ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST", + "ErnieMForInformationExtraction", + "ErnieMForMultipleChoice", + "ErnieMForQuestionAnswering", + "ErnieMForSequenceClassification", + "ErnieMForTokenClassification", + "ErnieMModel", + "ErnieMPreTrainedModel", + ] + ) + _import_structure["models.esm"].extend( + [ + "ESM_PRETRAINED_MODEL_ARCHIVE_LIST", + "EsmFoldPreTrainedModel", + "EsmForMaskedLM", + "EsmForProteinFolding", + "EsmForSequenceClassification", + "EsmForTokenClassification", + "EsmModel", + "EsmPreTrainedModel", + ] + ) + _import_structure["models.falcon"].extend( + [ + "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST", + "FalconForCausalLM", + "FalconForQuestionAnswering", + "FalconForSequenceClassification", + "FalconForTokenClassification", + "FalconModel", + "FalconPreTrainedModel", + ] + ) + _import_structure["models.fastspeech2_conformer"].extend( + [ + "FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "FastSpeech2ConformerHifiGan", + "FastSpeech2ConformerModel", + "FastSpeech2ConformerPreTrainedModel", + "FastSpeech2ConformerWithHifiGan", + ] + ) + _import_structure["models.flaubert"].extend( + [ + "FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "FlaubertForMultipleChoice", + "FlaubertForQuestionAnswering", + "FlaubertForQuestionAnsweringSimple", + "FlaubertForSequenceClassification", + "FlaubertForTokenClassification", + "FlaubertModel", + "FlaubertPreTrainedModel", + "FlaubertWithLMHeadModel", + ] + ) + _import_structure["models.flava"].extend( + [ + "FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST", + "FlavaForPreTraining", + "FlavaImageCodebook", + "FlavaImageModel", + "FlavaModel", + "FlavaMultimodalModel", + "FlavaPreTrainedModel", + "FlavaTextModel", + ] + ) + _import_structure["models.fnet"].extend( + [ + "FNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "FNetForMaskedLM", + "FNetForMultipleChoice", + "FNetForNextSentencePrediction", + "FNetForPreTraining", + "FNetForQuestionAnswering", + "FNetForSequenceClassification", + "FNetForTokenClassification", + "FNetLayer", + "FNetModel", + "FNetPreTrainedModel", + ] + ) + _import_structure["models.focalnet"].extend( + [ + "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "FocalNetBackbone", + "FocalNetForImageClassification", + "FocalNetForMaskedImageModeling", + "FocalNetModel", + "FocalNetPreTrainedModel", + ] + ) + _import_structure["models.fsmt"].extend(["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"]) + _import_structure["models.funnel"].extend( + [ + "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", + "FunnelBaseModel", + "FunnelForMaskedLM", + "FunnelForMultipleChoice", + "FunnelForPreTraining", + "FunnelForQuestionAnswering", + "FunnelForSequenceClassification", + "FunnelForTokenClassification", + "FunnelModel", + "FunnelPreTrainedModel", + "load_tf_weights_in_funnel", + ] + ) + _import_structure["models.fuyu"].extend(["FuyuForCausalLM", "FuyuPreTrainedModel"]) + _import_structure["models.gemma"].extend( + [ + "GemmaForCausalLM", + "GemmaForSequenceClassification", + "GemmaModel", + "GemmaPreTrainedModel", + ] + ) + _import_structure["models.git"].extend( + [ + "GIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "GitForCausalLM", + "GitModel", + "GitPreTrainedModel", + "GitVisionModel", + 
] + ) + _import_structure["models.glpn"].extend( + [ + "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST", + "GLPNForDepthEstimation", + "GLPNModel", + "GLPNPreTrainedModel", + ] + ) + _import_structure["models.gpt2"].extend( + [ + "GPT2_PRETRAINED_MODEL_ARCHIVE_LIST", + "GPT2DoubleHeadsModel", + "GPT2ForQuestionAnswering", + "GPT2ForSequenceClassification", + "GPT2ForTokenClassification", + "GPT2LMHeadModel", + "GPT2Model", + "GPT2PreTrainedModel", + "load_tf_weights_in_gpt2", + ] + ) + _import_structure["models.gpt_bigcode"].extend( + [ + "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST", + "GPTBigCodeForCausalLM", + "GPTBigCodeForSequenceClassification", + "GPTBigCodeForTokenClassification", + "GPTBigCodeModel", + "GPTBigCodePreTrainedModel", + ] + ) + _import_structure["models.gpt_neo"].extend( + [ + "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", + "GPTNeoForCausalLM", + "GPTNeoForQuestionAnswering", + "GPTNeoForSequenceClassification", + "GPTNeoForTokenClassification", + "GPTNeoModel", + "GPTNeoPreTrainedModel", + "load_tf_weights_in_gpt_neo", + ] + ) + _import_structure["models.gpt_neox"].extend( + [ + "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST", + "GPTNeoXForCausalLM", + "GPTNeoXForQuestionAnswering", + "GPTNeoXForSequenceClassification", + "GPTNeoXForTokenClassification", + "GPTNeoXLayer", + "GPTNeoXModel", + "GPTNeoXPreTrainedModel", + ] + ) + _import_structure["models.gpt_neox_japanese"].extend( + [ + "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST", + "GPTNeoXJapaneseForCausalLM", + "GPTNeoXJapaneseLayer", + "GPTNeoXJapaneseModel", + "GPTNeoXJapanesePreTrainedModel", + ] + ) + _import_structure["models.gptj"].extend( + [ + "GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST", + "GPTJForCausalLM", + "GPTJForQuestionAnswering", + "GPTJForSequenceClassification", + "GPTJModel", + "GPTJPreTrainedModel", + ] + ) + _import_structure["models.gptsan_japanese"].extend( + [ + "GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST", + "GPTSanJapaneseForConditionalGeneration", + "GPTSanJapaneseModel", + "GPTSanJapanesePreTrainedModel", + ] + ) + _import_structure["models.graphormer"].extend( + [ + "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "GraphormerForGraphClassification", + "GraphormerModel", + "GraphormerPreTrainedModel", + ] + ) + _import_structure["models.groupvit"].extend( + [ + "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "GroupViTModel", + "GroupViTPreTrainedModel", + "GroupViTTextModel", + "GroupViTVisionModel", + ] + ) + _import_structure["models.hubert"].extend( + [ + "HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "HubertForCTC", + "HubertForSequenceClassification", + "HubertModel", + "HubertPreTrainedModel", + ] + ) + _import_structure["models.ibert"].extend( + [ + "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "IBertForMaskedLM", + "IBertForMultipleChoice", + "IBertForQuestionAnswering", + "IBertForSequenceClassification", + "IBertForTokenClassification", + "IBertModel", + "IBertPreTrainedModel", + ] + ) + _import_structure["models.idefics"].extend( + [ + "IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST", + "IdeficsForVisionText2Text", + "IdeficsModel", + "IdeficsPreTrainedModel", + "IdeficsProcessor", + ] + ) + _import_structure["models.imagegpt"].extend( + [ + "IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST", + "ImageGPTForCausalImageModeling", + "ImageGPTForImageClassification", + "ImageGPTModel", + "ImageGPTPreTrainedModel", + "load_tf_weights_in_imagegpt", + ] + ) + _import_structure["models.informer"].extend( + [ + "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "InformerForPrediction", + "InformerModel", + "InformerPreTrainedModel", + ] + ) + 
_import_structure["models.instructblip"].extend( + [ + "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "InstructBlipForConditionalGeneration", + "InstructBlipPreTrainedModel", + "InstructBlipQFormerModel", + "InstructBlipVisionModel", + ] + ) + _import_structure["models.jukebox"].extend( + [ + "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST", + "JukeboxModel", + "JukeboxPreTrainedModel", + "JukeboxPrior", + "JukeboxVQVAE", + ] + ) + _import_structure["models.kosmos2"].extend( + [ + "KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST", + "Kosmos2ForConditionalGeneration", + "Kosmos2Model", + "Kosmos2PreTrainedModel", + ] + ) + _import_structure["models.layoutlm"].extend( + [ + "LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST", + "LayoutLMForMaskedLM", + "LayoutLMForQuestionAnswering", + "LayoutLMForSequenceClassification", + "LayoutLMForTokenClassification", + "LayoutLMModel", + "LayoutLMPreTrainedModel", + ] + ) + _import_structure["models.layoutlmv2"].extend( + [ + "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST", + "LayoutLMv2ForQuestionAnswering", + "LayoutLMv2ForSequenceClassification", + "LayoutLMv2ForTokenClassification", + "LayoutLMv2Model", + "LayoutLMv2PreTrainedModel", + ] + ) + _import_structure["models.layoutlmv3"].extend( + [ + "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST", + "LayoutLMv3ForQuestionAnswering", + "LayoutLMv3ForSequenceClassification", + "LayoutLMv3ForTokenClassification", + "LayoutLMv3Model", + "LayoutLMv3PreTrainedModel", + ] + ) + _import_structure["models.led"].extend( + [ + "LED_PRETRAINED_MODEL_ARCHIVE_LIST", + "LEDForConditionalGeneration", + "LEDForQuestionAnswering", + "LEDForSequenceClassification", + "LEDModel", + "LEDPreTrainedModel", + ] + ) + _import_structure["models.levit"].extend( + [ + "LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "LevitForImageClassification", + "LevitForImageClassificationWithTeacher", + "LevitModel", + "LevitPreTrainedModel", + ] + ) + _import_structure["models.lilt"].extend( + [ + "LILT_PRETRAINED_MODEL_ARCHIVE_LIST", + "LiltForQuestionAnswering", + "LiltForSequenceClassification", + "LiltForTokenClassification", + "LiltModel", + "LiltPreTrainedModel", + ] + ) + _import_structure["models.llama"].extend( + [ + "LlamaForCausalLM", + "LlamaForQuestionAnswering", + "LlamaForSequenceClassification", + "LlamaModel", + "LlamaPreTrainedModel", + ] + ) + _import_structure["models.llava"].extend( + [ + "LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST", + "LlavaForConditionalGeneration", + "LlavaPreTrainedModel", + ] + ) + _import_structure["models.llava_next"].extend( + [ + "LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST", + "LlavaNextForConditionalGeneration", + "LlavaNextPreTrainedModel", + ] + ) + _import_structure["models.longformer"].extend( + [ + "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "LongformerForMaskedLM", + "LongformerForMultipleChoice", + "LongformerForQuestionAnswering", + "LongformerForSequenceClassification", + "LongformerForTokenClassification", + "LongformerModel", + "LongformerPreTrainedModel", + "LongformerSelfAttention", + ] + ) + _import_structure["models.longt5"].extend( + [ + "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST", + "LongT5EncoderModel", + "LongT5ForConditionalGeneration", + "LongT5Model", + "LongT5PreTrainedModel", + ] + ) + _import_structure["models.luke"].extend( + [ + "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST", + "LukeForEntityClassification", + "LukeForEntityPairClassification", + "LukeForEntitySpanClassification", + "LukeForMaskedLM", + "LukeForMultipleChoice", + "LukeForQuestionAnswering", + "LukeForSequenceClassification", + "LukeForTokenClassification", + 
"LukeModel", + "LukePreTrainedModel", + ] + ) + _import_structure["models.lxmert"].extend( + [ + "LxmertEncoder", + "LxmertForPreTraining", + "LxmertForQuestionAnswering", + "LxmertModel", + "LxmertPreTrainedModel", + "LxmertVisualFeatureEncoder", + "LxmertXLayer", + ] + ) + _import_structure["models.m2m_100"].extend( + [ + "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST", + "M2M100ForConditionalGeneration", + "M2M100Model", + "M2M100PreTrainedModel", + ] + ) + _import_structure["models.mamba"].extend( + [ + "MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST", + "MambaForCausalLM", + "MambaModel", + "MambaPreTrainedModel", + ] + ) + _import_structure["models.marian"].extend(["MarianForCausalLM", "MarianModel", "MarianMTModel"]) + _import_structure["models.markuplm"].extend( + [ + "MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST", + "MarkupLMForQuestionAnswering", + "MarkupLMForSequenceClassification", + "MarkupLMForTokenClassification", + "MarkupLMModel", + "MarkupLMPreTrainedModel", + ] + ) + _import_structure["models.mask2former"].extend( + [ + "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "Mask2FormerForUniversalSegmentation", + "Mask2FormerModel", + "Mask2FormerPreTrainedModel", + ] + ) + _import_structure["models.maskformer"].extend( + [ + "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "MaskFormerForInstanceSegmentation", + "MaskFormerModel", + "MaskFormerPreTrainedModel", + "MaskFormerSwinBackbone", + ] + ) + _import_structure["models.mbart"].extend( + [ + "MBartForCausalLM", + "MBartForConditionalGeneration", + "MBartForQuestionAnswering", + "MBartForSequenceClassification", + "MBartModel", + "MBartPreTrainedModel", + ] + ) + _import_structure["models.mega"].extend( + [ + "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST", + "MegaForCausalLM", + "MegaForMaskedLM", + "MegaForMultipleChoice", + "MegaForQuestionAnswering", + "MegaForSequenceClassification", + "MegaForTokenClassification", + "MegaModel", + "MegaPreTrainedModel", + ] + ) + _import_structure["models.megatron_bert"].extend( + [ + "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "MegatronBertForCausalLM", + "MegatronBertForMaskedLM", + "MegatronBertForMultipleChoice", + "MegatronBertForNextSentencePrediction", + "MegatronBertForPreTraining", + "MegatronBertForQuestionAnswering", + "MegatronBertForSequenceClassification", + "MegatronBertForTokenClassification", + "MegatronBertModel", + "MegatronBertPreTrainedModel", + ] + ) + _import_structure["models.mgp_str"].extend( + [ + "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST", + "MgpstrForSceneTextRecognition", + "MgpstrModel", + "MgpstrPreTrainedModel", + ] + ) + _import_structure["models.mistral"].extend( + [ + "MistralForCausalLM", + "MistralForSequenceClassification", + "MistralModel", + "MistralPreTrainedModel", + ] + ) + _import_structure["models.mixtral"].extend( + ["MixtralForCausalLM", "MixtralForSequenceClassification", "MixtralModel", "MixtralPreTrainedModel"] + ) + _import_structure["models.mobilebert"].extend( + [ + "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "MobileBertForMaskedLM", + "MobileBertForMultipleChoice", + "MobileBertForNextSentencePrediction", + "MobileBertForPreTraining", + "MobileBertForQuestionAnswering", + "MobileBertForSequenceClassification", + "MobileBertForTokenClassification", + "MobileBertLayer", + "MobileBertModel", + "MobileBertPreTrainedModel", + "load_tf_weights_in_mobilebert", + ] + ) + _import_structure["models.mobilenet_v1"].extend( + [ + "MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST", + "MobileNetV1ForImageClassification", + "MobileNetV1Model", + "MobileNetV1PreTrainedModel", + 
"load_tf_weights_in_mobilenet_v1", + ] + ) + _import_structure["models.mobilenet_v2"].extend( + [ + "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST", + "MobileNetV2ForImageClassification", + "MobileNetV2ForSemanticSegmentation", + "MobileNetV2Model", + "MobileNetV2PreTrainedModel", + "load_tf_weights_in_mobilenet_v2", + ] + ) + _import_structure["models.mobilevit"].extend( + [ + "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "MobileViTForImageClassification", + "MobileViTForSemanticSegmentation", + "MobileViTModel", + "MobileViTPreTrainedModel", + ] + ) + _import_structure["models.mobilevitv2"].extend( + [ + "MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST", + "MobileViTV2ForImageClassification", + "MobileViTV2ForSemanticSegmentation", + "MobileViTV2Model", + "MobileViTV2PreTrainedModel", + ] + ) + _import_structure["models.mpnet"].extend( + [ + "MPNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "MPNetForMaskedLM", + "MPNetForMultipleChoice", + "MPNetForQuestionAnswering", + "MPNetForSequenceClassification", + "MPNetForTokenClassification", + "MPNetLayer", + "MPNetModel", + "MPNetPreTrainedModel", + ] + ) + _import_structure["models.mpt"].extend( + [ + "MPT_PRETRAINED_MODEL_ARCHIVE_LIST", + "MptForCausalLM", + "MptForQuestionAnswering", + "MptForSequenceClassification", + "MptForTokenClassification", + "MptModel", + "MptPreTrainedModel", + ] + ) + _import_structure["models.mra"].extend( + [ + "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", + "MraForMaskedLM", + "MraForMultipleChoice", + "MraForQuestionAnswering", + "MraForSequenceClassification", + "MraForTokenClassification", + "MraModel", + "MraPreTrainedModel", + ] + ) + _import_structure["models.mt5"].extend( + [ + "MT5EncoderModel", + "MT5ForConditionalGeneration", + "MT5ForQuestionAnswering", + "MT5ForSequenceClassification", + "MT5ForTokenClassification", + "MT5Model", + "MT5PreTrainedModel", + ] + ) + _import_structure["models.musicgen"].extend( + [ + "MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST", + "MusicgenForCausalLM", + "MusicgenForConditionalGeneration", + "MusicgenModel", + "MusicgenPreTrainedModel", + "MusicgenProcessor", + ] + ) + _import_structure["models.musicgen_melody"].extend( + [ + "MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST", + "MusicgenMelodyForCausalLM", + "MusicgenMelodyForConditionalGeneration", + "MusicgenMelodyModel", + "MusicgenMelodyPreTrainedModel", + ] + ) + _import_structure["models.mvp"].extend( + [ + "MVP_PRETRAINED_MODEL_ARCHIVE_LIST", + "MvpForCausalLM", + "MvpForConditionalGeneration", + "MvpForQuestionAnswering", + "MvpForSequenceClassification", + "MvpModel", + "MvpPreTrainedModel", + ] + ) + _import_structure["models.nat"].extend( + [ + "NAT_PRETRAINED_MODEL_ARCHIVE_LIST", + "NatBackbone", + "NatForImageClassification", + "NatModel", + "NatPreTrainedModel", + ] + ) + _import_structure["models.nezha"].extend( + [ + "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST", + "NezhaForMaskedLM", + "NezhaForMultipleChoice", + "NezhaForNextSentencePrediction", + "NezhaForPreTraining", + "NezhaForQuestionAnswering", + "NezhaForSequenceClassification", + "NezhaForTokenClassification", + "NezhaModel", + "NezhaPreTrainedModel", + ] + ) + _import_structure["models.nllb_moe"].extend( + [ + "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST", + "NllbMoeForConditionalGeneration", + "NllbMoeModel", + "NllbMoePreTrainedModel", + "NllbMoeSparseMLP", + "NllbMoeTop2Router", + ] + ) + _import_structure["models.nystromformer"].extend( + [ + "NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "NystromformerForMaskedLM", + "NystromformerForMultipleChoice", + 
"NystromformerForQuestionAnswering", + "NystromformerForSequenceClassification", + "NystromformerForTokenClassification", + "NystromformerLayer", + "NystromformerModel", + "NystromformerPreTrainedModel", + ] + ) + _import_structure["models.oneformer"].extend( + [ + "ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "OneFormerForUniversalSegmentation", + "OneFormerModel", + "OneFormerPreTrainedModel", + ] + ) + _import_structure["models.openai"].extend( + [ + "OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST", + "OpenAIGPTDoubleHeadsModel", + "OpenAIGPTForSequenceClassification", + "OpenAIGPTLMHeadModel", + "OpenAIGPTModel", + "OpenAIGPTPreTrainedModel", + "load_tf_weights_in_openai_gpt", + ] + ) + _import_structure["models.opt"].extend( + [ + "OPT_PRETRAINED_MODEL_ARCHIVE_LIST", + "OPTForCausalLM", + "OPTForQuestionAnswering", + "OPTForSequenceClassification", + "OPTModel", + "OPTPreTrainedModel", + ] + ) + _import_structure["models.owlv2"].extend( + [ + "OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST", + "Owlv2ForObjectDetection", + "Owlv2Model", + "Owlv2PreTrainedModel", + "Owlv2TextModel", + "Owlv2VisionModel", + ] + ) + _import_structure["models.owlvit"].extend( + [ + "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "OwlViTForObjectDetection", + "OwlViTModel", + "OwlViTPreTrainedModel", + "OwlViTTextModel", + "OwlViTVisionModel", + ] + ) + _import_structure["models.patchtsmixer"].extend( + [ + "PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST", + "PatchTSMixerForPrediction", + "PatchTSMixerForPretraining", + "PatchTSMixerForRegression", + "PatchTSMixerForTimeSeriesClassification", + "PatchTSMixerModel", + "PatchTSMixerPreTrainedModel", + ] + ) + _import_structure["models.patchtst"].extend( + [ + "PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST", + "PatchTSTForClassification", + "PatchTSTForPrediction", + "PatchTSTForPretraining", + "PatchTSTForRegression", + "PatchTSTModel", + "PatchTSTPreTrainedModel", + ] + ) + _import_structure["models.pegasus"].extend( + [ + "PegasusForCausalLM", + "PegasusForConditionalGeneration", + "PegasusModel", + "PegasusPreTrainedModel", + ] + ) + _import_structure["models.pegasus_x"].extend( + [ + "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST", + "PegasusXForConditionalGeneration", + "PegasusXModel", + "PegasusXPreTrainedModel", + ] + ) + _import_structure["models.perceiver"].extend( + [ + "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST", + "PerceiverForImageClassificationConvProcessing", + "PerceiverForImageClassificationFourier", + "PerceiverForImageClassificationLearned", + "PerceiverForMaskedLM", + "PerceiverForMultimodalAutoencoding", + "PerceiverForOpticalFlow", + "PerceiverForSequenceClassification", + "PerceiverLayer", + "PerceiverModel", + "PerceiverPreTrainedModel", + ] + ) + _import_structure["models.persimmon"].extend( + [ + "PersimmonForCausalLM", + "PersimmonForSequenceClassification", + "PersimmonModel", + "PersimmonPreTrainedModel", + ] + ) + _import_structure["models.phi"].extend( + [ + "PHI_PRETRAINED_MODEL_ARCHIVE_LIST", + "PhiForCausalLM", + "PhiForSequenceClassification", + "PhiForTokenClassification", + "PhiModel", + "PhiPreTrainedModel", + ] + ) + _import_structure["models.pix2struct"].extend( + [ + "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST", + "Pix2StructForConditionalGeneration", + "Pix2StructPreTrainedModel", + "Pix2StructTextModel", + "Pix2StructVisionModel", + ] + ) + _import_structure["models.plbart"].extend( + [ + "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST", + "PLBartForCausalLM", + "PLBartForConditionalGeneration", + "PLBartForSequenceClassification", + "PLBartModel", + 
"PLBartPreTrainedModel", + ] + ) + _import_structure["models.poolformer"].extend( + [ + "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "PoolFormerForImageClassification", + "PoolFormerModel", + "PoolFormerPreTrainedModel", + ] + ) + _import_structure["models.pop2piano"].extend( + [ + "POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST", + "Pop2PianoForConditionalGeneration", + "Pop2PianoPreTrainedModel", + ] + ) + _import_structure["models.prophetnet"].extend( + [ + "PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "ProphetNetDecoder", + "ProphetNetEncoder", + "ProphetNetForCausalLM", + "ProphetNetForConditionalGeneration", + "ProphetNetModel", + "ProphetNetPreTrainedModel", + ] + ) + _import_structure["models.pvt"].extend( + [ + "PVT_PRETRAINED_MODEL_ARCHIVE_LIST", + "PvtForImageClassification", + "PvtModel", + "PvtPreTrainedModel", + ] + ) + _import_structure["models.pvt_v2"].extend( + [ + "PVT_V2_PRETRAINED_MODEL_ARCHIVE_LIST", + "PvtV2Backbone", + "PvtV2ForImageClassification", + "PvtV2Model", + "PvtV2PreTrainedModel", + ] + ) + _import_structure["models.qdqbert"].extend( + [ + "QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "QDQBertForMaskedLM", + "QDQBertForMultipleChoice", + "QDQBertForNextSentencePrediction", + "QDQBertForQuestionAnswering", + "QDQBertForSequenceClassification", + "QDQBertForTokenClassification", + "QDQBertLayer", + "QDQBertLMHeadModel", + "QDQBertModel", + "QDQBertPreTrainedModel", + "load_tf_weights_in_qdqbert", + ] + ) + _import_structure["models.qwen2"].extend( + [ + "Qwen2ForCausalLM", + "Qwen2ForSequenceClassification", + "Qwen2Model", + "Qwen2PreTrainedModel", + ] + ) + _import_structure["models.rag"].extend( + [ + "RagModel", + "RagPreTrainedModel", + "RagSequenceForGeneration", + "RagTokenForGeneration", + ] + ) + _import_structure["models.realm"].extend( + [ + "REALM_PRETRAINED_MODEL_ARCHIVE_LIST", + "RealmEmbedder", + "RealmForOpenQA", + "RealmKnowledgeAugEncoder", + "RealmPreTrainedModel", + "RealmReader", + "RealmRetriever", + "RealmScorer", + "load_tf_weights_in_realm", + ] + ) + _import_structure["models.reformer"].extend( + [ + "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "ReformerAttention", + "ReformerForMaskedLM", + "ReformerForQuestionAnswering", + "ReformerForSequenceClassification", + "ReformerLayer", + "ReformerModel", + "ReformerModelWithLMHead", + "ReformerPreTrainedModel", + ] + ) + _import_structure["models.regnet"].extend( + [ + "REGNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "RegNetForImageClassification", + "RegNetModel", + "RegNetPreTrainedModel", + ] + ) + _import_structure["models.rembert"].extend( + [ + "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "RemBertForCausalLM", + "RemBertForMaskedLM", + "RemBertForMultipleChoice", + "RemBertForQuestionAnswering", + "RemBertForSequenceClassification", + "RemBertForTokenClassification", + "RemBertLayer", + "RemBertModel", + "RemBertPreTrainedModel", + "load_tf_weights_in_rembert", + ] + ) + _import_structure["models.resnet"].extend( + [ + "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "ResNetBackbone", + "ResNetForImageClassification", + "ResNetModel", + "ResNetPreTrainedModel", + ] + ) + _import_structure["models.roberta"].extend( + [ + "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", + "RobertaForCausalLM", + "RobertaForMaskedLM", + "RobertaForMultipleChoice", + "RobertaForQuestionAnswering", + "RobertaForSequenceClassification", + "RobertaForTokenClassification", + "RobertaModel", + "RobertaPreTrainedModel", + ] + ) + _import_structure["models.roberta_prelayernorm"].extend( + [ + 
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", + "RobertaPreLayerNormForCausalLM", + "RobertaPreLayerNormForMaskedLM", + "RobertaPreLayerNormForMultipleChoice", + "RobertaPreLayerNormForQuestionAnswering", + "RobertaPreLayerNormForSequenceClassification", + "RobertaPreLayerNormForTokenClassification", + "RobertaPreLayerNormModel", + "RobertaPreLayerNormPreTrainedModel", + ] + ) + _import_structure["models.roc_bert"].extend( + [ + "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "RoCBertForCausalLM", + "RoCBertForMaskedLM", + "RoCBertForMultipleChoice", + "RoCBertForPreTraining", + "RoCBertForQuestionAnswering", + "RoCBertForSequenceClassification", + "RoCBertForTokenClassification", + "RoCBertLayer", + "RoCBertModel", + "RoCBertPreTrainedModel", + "load_tf_weights_in_roc_bert", + ] + ) + _import_structure["models.roformer"].extend( + [ + "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "RoFormerForCausalLM", + "RoFormerForMaskedLM", + "RoFormerForMultipleChoice", + "RoFormerForQuestionAnswering", + "RoFormerForSequenceClassification", + "RoFormerForTokenClassification", + "RoFormerLayer", + "RoFormerModel", + "RoFormerPreTrainedModel", + "load_tf_weights_in_roformer", + ] + ) + _import_structure["models.rwkv"].extend( + [ + "RWKV_PRETRAINED_MODEL_ARCHIVE_LIST", + "RwkvForCausalLM", + "RwkvModel", + "RwkvPreTrainedModel", + ] + ) + _import_structure["models.sam"].extend( + [ + "SAM_PRETRAINED_MODEL_ARCHIVE_LIST", + "SamModel", + "SamPreTrainedModel", + ] + ) + _import_structure["models.seamless_m4t"].extend( + [ + "SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST", + "SeamlessM4TCodeHifiGan", + "SeamlessM4TForSpeechToSpeech", + "SeamlessM4TForSpeechToText", + "SeamlessM4TForTextToSpeech", + "SeamlessM4TForTextToText", + "SeamlessM4THifiGan", + "SeamlessM4TModel", + "SeamlessM4TPreTrainedModel", + "SeamlessM4TTextToUnitForConditionalGeneration", + "SeamlessM4TTextToUnitModel", + ] + ) + _import_structure["models.seamless_m4t_v2"].extend( + [ + "SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST", + "SeamlessM4Tv2ForSpeechToSpeech", + "SeamlessM4Tv2ForSpeechToText", + "SeamlessM4Tv2ForTextToSpeech", + "SeamlessM4Tv2ForTextToText", + "SeamlessM4Tv2Model", + "SeamlessM4Tv2PreTrainedModel", + ] + ) + _import_structure["models.segformer"].extend( + [ + "SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "SegformerDecodeHead", + "SegformerForImageClassification", + "SegformerForSemanticSegmentation", + "SegformerLayer", + "SegformerModel", + "SegformerPreTrainedModel", + ] + ) + _import_structure["models.seggpt"].extend( + [ + "SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST", + "SegGptForImageSegmentation", + "SegGptModel", + "SegGptPreTrainedModel", + ] + ) + _import_structure["models.sew"].extend( + [ + "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", + "SEWForCTC", + "SEWForSequenceClassification", + "SEWModel", + "SEWPreTrainedModel", + ] + ) + _import_structure["models.sew_d"].extend( + [ + "SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST", + "SEWDForCTC", + "SEWDForSequenceClassification", + "SEWDModel", + "SEWDPreTrainedModel", + ] + ) + _import_structure["models.siglip"].extend( + [ + "SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "SiglipForImageClassification", + "SiglipModel", + "SiglipPreTrainedModel", + "SiglipTextModel", + "SiglipVisionModel", + ] + ) + _import_structure["models.speech_encoder_decoder"].extend(["SpeechEncoderDecoderModel"]) + _import_structure["models.speech_to_text"].extend( + [ + "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", + "Speech2TextForConditionalGeneration", + "Speech2TextModel", + "Speech2TextPreTrainedModel", + ] 
+ ) + _import_structure["models.speech_to_text_2"].extend(["Speech2Text2ForCausalLM", "Speech2Text2PreTrainedModel"]) + _import_structure["models.speecht5"].extend( + [ + "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST", + "SpeechT5ForSpeechToSpeech", + "SpeechT5ForSpeechToText", + "SpeechT5ForTextToSpeech", + "SpeechT5HifiGan", + "SpeechT5Model", + "SpeechT5PreTrainedModel", + ] + ) + _import_structure["models.splinter"].extend( + [ + "SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST", + "SplinterForPreTraining", + "SplinterForQuestionAnswering", + "SplinterLayer", + "SplinterModel", + "SplinterPreTrainedModel", + ] + ) + _import_structure["models.squeezebert"].extend( + [ + "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "SqueezeBertForMaskedLM", + "SqueezeBertForMultipleChoice", + "SqueezeBertForQuestionAnswering", + "SqueezeBertForSequenceClassification", + "SqueezeBertForTokenClassification", + "SqueezeBertModel", + "SqueezeBertModule", + "SqueezeBertPreTrainedModel", + ] + ) + _import_structure["models.stablelm"].extend( + [ + "StableLmForCausalLM", + "StableLmForSequenceClassification", + "StableLmModel", + "StableLmPreTrainedModel", + ] + ) + _import_structure["models.starcoder2"].extend( + [ + "Starcoder2ForCausalLM", + "Starcoder2ForSequenceClassification", + "Starcoder2Model", + "Starcoder2PreTrainedModel", + ] + ) + _import_structure["models.superpoint"].extend( + [ + "SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST", + "SuperPointForKeypointDetection", + "SuperPointPreTrainedModel", + ] + ) + _import_structure["models.swiftformer"].extend( + [ + "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "SwiftFormerForImageClassification", + "SwiftFormerModel", + "SwiftFormerPreTrainedModel", + ] + ) + _import_structure["models.swin"].extend( + [ + "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", + "SwinBackbone", + "SwinForImageClassification", + "SwinForMaskedImageModeling", + "SwinModel", + "SwinPreTrainedModel", + ] + ) + _import_structure["models.swin2sr"].extend( + [ + "SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST", + "Swin2SRForImageSuperResolution", + "Swin2SRModel", + "Swin2SRPreTrainedModel", + ] + ) + _import_structure["models.swinv2"].extend( + [ + "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST", + "Swinv2Backbone", + "Swinv2ForImageClassification", + "Swinv2ForMaskedImageModeling", + "Swinv2Model", + "Swinv2PreTrainedModel", + ] + ) + _import_structure["models.switch_transformers"].extend( + [ + "SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST", + "SwitchTransformersEncoderModel", + "SwitchTransformersForConditionalGeneration", + "SwitchTransformersModel", + "SwitchTransformersPreTrainedModel", + "SwitchTransformersSparseMLP", + "SwitchTransformersTop1Router", + ] + ) + _import_structure["models.t5"].extend( + [ + "T5_PRETRAINED_MODEL_ARCHIVE_LIST", + "T5EncoderModel", + "T5ForConditionalGeneration", + "T5ForQuestionAnswering", + "T5ForSequenceClassification", + "T5ForTokenClassification", + "T5Model", + "T5PreTrainedModel", + "load_tf_weights_in_t5", + ] + ) + _import_structure["models.table_transformer"].extend( + [ + "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "TableTransformerForObjectDetection", + "TableTransformerModel", + "TableTransformerPreTrainedModel", + ] + ) + _import_structure["models.tapas"].extend( + [ + "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", + "TapasForMaskedLM", + "TapasForQuestionAnswering", + "TapasForSequenceClassification", + "TapasModel", + "TapasPreTrainedModel", + "load_tf_weights_in_tapas", + ] + ) + _import_structure["models.time_series_transformer"].extend( + [ + 
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "TimeSeriesTransformerForPrediction", + "TimeSeriesTransformerModel", + "TimeSeriesTransformerPreTrainedModel", + ] + ) + _import_structure["models.timesformer"].extend( + [ + "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "TimesformerForVideoClassification", + "TimesformerModel", + "TimesformerPreTrainedModel", + ] + ) + _import_structure["models.timm_backbone"].extend(["TimmBackbone"]) + _import_structure["models.trocr"].extend( + [ + "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", + "TrOCRForCausalLM", + "TrOCRPreTrainedModel", + ] + ) + _import_structure["models.tvlt"].extend( + [ + "TVLT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TvltForAudioVisualClassification", + "TvltForPreTraining", + "TvltModel", + "TvltPreTrainedModel", + ] + ) + _import_structure["models.tvp"].extend( + [ + "TVP_PRETRAINED_MODEL_ARCHIVE_LIST", + "TvpForVideoGrounding", + "TvpModel", + "TvpPreTrainedModel", + ] + ) + _import_structure["models.udop"].extend( + [ + "UDOP_PRETRAINED_MODEL_ARCHIVE_LIST", + "UdopEncoderModel", + "UdopForConditionalGeneration", + "UdopModel", + "UdopPreTrainedModel", + ], + ) + _import_structure["models.umt5"].extend( + [ + "UMT5EncoderModel", + "UMT5ForConditionalGeneration", + "UMT5ForQuestionAnswering", + "UMT5ForSequenceClassification", + "UMT5ForTokenClassification", + "UMT5Model", + "UMT5PreTrainedModel", + ] + ) + _import_structure["models.unispeech"].extend( + [ + "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST", + "UniSpeechForCTC", + "UniSpeechForPreTraining", + "UniSpeechForSequenceClassification", + "UniSpeechModel", + "UniSpeechPreTrainedModel", + ] + ) + _import_structure["models.unispeech_sat"].extend( + [ + "UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST", + "UniSpeechSatForAudioFrameClassification", + "UniSpeechSatForCTC", + "UniSpeechSatForPreTraining", + "UniSpeechSatForSequenceClassification", + "UniSpeechSatForXVector", + "UniSpeechSatModel", + "UniSpeechSatPreTrainedModel", + ] + ) + _import_structure["models.univnet"].extend( + [ + "UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "UnivNetModel", + ] + ) + _import_structure["models.upernet"].extend( + [ + "UperNetForSemanticSegmentation", + "UperNetPreTrainedModel", + ] + ) + _import_structure["models.videomae"].extend( + [ + "VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST", + "VideoMAEForPreTraining", + "VideoMAEForVideoClassification", + "VideoMAEModel", + "VideoMAEPreTrainedModel", + ] + ) + _import_structure["models.vilt"].extend( + [ + "VILT_PRETRAINED_MODEL_ARCHIVE_LIST", + "ViltForImageAndTextRetrieval", + "ViltForImagesAndTextClassification", + "ViltForMaskedLM", + "ViltForQuestionAnswering", + "ViltForTokenClassification", + "ViltLayer", + "ViltModel", + "ViltPreTrainedModel", + ] + ) + _import_structure["models.vipllava"].extend( + [ + "VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST", + "VipLlavaForConditionalGeneration", + "VipLlavaPreTrainedModel", + ] + ) + _import_structure["models.vision_encoder_decoder"].extend(["VisionEncoderDecoderModel"]) + _import_structure["models.vision_text_dual_encoder"].extend(["VisionTextDualEncoderModel"]) + _import_structure["models.visual_bert"].extend( + [ + "VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "VisualBertForMultipleChoice", + "VisualBertForPreTraining", + "VisualBertForQuestionAnswering", + "VisualBertForRegionToPhraseAlignment", + "VisualBertForVisualReasoning", + "VisualBertLayer", + "VisualBertModel", + "VisualBertPreTrainedModel", + ] + ) + _import_structure["models.vit"].extend( + [ + "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", + 
"ViTForImageClassification", + "ViTForMaskedImageModeling", + "ViTModel", + "ViTPreTrainedModel", + ] + ) + _import_structure["models.vit_hybrid"].extend( + [ + "VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST", + "ViTHybridForImageClassification", + "ViTHybridModel", + "ViTHybridPreTrainedModel", + ] + ) + _import_structure["models.vit_mae"].extend( + [ + "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST", + "ViTMAEForPreTraining", + "ViTMAELayer", + "ViTMAEModel", + "ViTMAEPreTrainedModel", + ] + ) + _import_structure["models.vit_msn"].extend( + [ + "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST", + "ViTMSNForImageClassification", + "ViTMSNModel", + "ViTMSNPreTrainedModel", + ] + ) + _import_structure["models.vitdet"].extend( + [ + "VITDET_PRETRAINED_MODEL_ARCHIVE_LIST", + "VitDetBackbone", + "VitDetModel", + "VitDetPreTrainedModel", + ] + ) + _import_structure["models.vitmatte"].extend( + [ + "VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST", + "VitMatteForImageMatting", + "VitMattePreTrainedModel", + ] + ) + _import_structure["models.vits"].extend( + [ + "VITS_PRETRAINED_MODEL_ARCHIVE_LIST", + "VitsModel", + "VitsPreTrainedModel", + ] + ) + _import_structure["models.vivit"].extend( + [ + "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "VivitForVideoClassification", + "VivitModel", + "VivitPreTrainedModel", + ] + ) + _import_structure["models.wav2vec2"].extend( + [ + "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", + "Wav2Vec2ForAudioFrameClassification", + "Wav2Vec2ForCTC", + "Wav2Vec2ForMaskedLM", + "Wav2Vec2ForPreTraining", + "Wav2Vec2ForSequenceClassification", + "Wav2Vec2ForXVector", + "Wav2Vec2Model", + "Wav2Vec2PreTrainedModel", + ] + ) + _import_structure["models.wav2vec2_bert"].extend( + [ + "WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "Wav2Vec2BertForAudioFrameClassification", + "Wav2Vec2BertForCTC", + "Wav2Vec2BertForSequenceClassification", + "Wav2Vec2BertForXVector", + "Wav2Vec2BertModel", + "Wav2Vec2BertPreTrainedModel", + ] + ) + _import_structure["models.wav2vec2_conformer"].extend( + [ + "WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "Wav2Vec2ConformerForAudioFrameClassification", + "Wav2Vec2ConformerForCTC", + "Wav2Vec2ConformerForPreTraining", + "Wav2Vec2ConformerForSequenceClassification", + "Wav2Vec2ConformerForXVector", + "Wav2Vec2ConformerModel", + "Wav2Vec2ConformerPreTrainedModel", + ] + ) + _import_structure["models.wavlm"].extend( + [ + "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST", + "WavLMForAudioFrameClassification", + "WavLMForCTC", + "WavLMForSequenceClassification", + "WavLMForXVector", + "WavLMModel", + "WavLMPreTrainedModel", + ] + ) + _import_structure["models.whisper"].extend( + [ + "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", + "WhisperForAudioClassification", + "WhisperForCausalLM", + "WhisperForConditionalGeneration", + "WhisperModel", + "WhisperPreTrainedModel", + ] + ) + _import_structure["models.x_clip"].extend( + [ + "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "XCLIPModel", + "XCLIPPreTrainedModel", + "XCLIPTextModel", + "XCLIPVisionModel", + ] + ) + _import_structure["models.xglm"].extend( + [ + "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", + "XGLMForCausalLM", + "XGLMModel", + "XGLMPreTrainedModel", + ] + ) + _import_structure["models.xlm"].extend( + [ + "XLM_PRETRAINED_MODEL_ARCHIVE_LIST", + "XLMForMultipleChoice", + "XLMForQuestionAnswering", + "XLMForQuestionAnsweringSimple", + "XLMForSequenceClassification", + "XLMForTokenClassification", + "XLMModel", + "XLMPreTrainedModel", + "XLMWithLMHeadModel", + ] + ) + _import_structure["models.xlm_prophetnet"].extend( + [ + 
"XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "XLMProphetNetDecoder", + "XLMProphetNetEncoder", + "XLMProphetNetForCausalLM", + "XLMProphetNetForConditionalGeneration", + "XLMProphetNetModel", + "XLMProphetNetPreTrainedModel", + ] + ) + _import_structure["models.xlm_roberta"].extend( + [ + "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", + "XLMRobertaForCausalLM", + "XLMRobertaForMaskedLM", + "XLMRobertaForMultipleChoice", + "XLMRobertaForQuestionAnswering", + "XLMRobertaForSequenceClassification", + "XLMRobertaForTokenClassification", + "XLMRobertaModel", + "XLMRobertaPreTrainedModel", + ] + ) + _import_structure["models.xlm_roberta_xl"].extend( + [ + "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST", + "XLMRobertaXLForCausalLM", + "XLMRobertaXLForMaskedLM", + "XLMRobertaXLForMultipleChoice", + "XLMRobertaXLForQuestionAnswering", + "XLMRobertaXLForSequenceClassification", + "XLMRobertaXLForTokenClassification", + "XLMRobertaXLModel", + "XLMRobertaXLPreTrainedModel", + ] + ) + _import_structure["models.xlnet"].extend( + [ + "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "XLNetForMultipleChoice", + "XLNetForQuestionAnswering", + "XLNetForQuestionAnsweringSimple", + "XLNetForSequenceClassification", + "XLNetForTokenClassification", + "XLNetLMHeadModel", + "XLNetModel", + "XLNetPreTrainedModel", + "load_tf_weights_in_xlnet", + ] + ) + _import_structure["models.xmod"].extend( + [ + "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST", + "XmodForCausalLM", + "XmodForMaskedLM", + "XmodForMultipleChoice", + "XmodForQuestionAnswering", + "XmodForSequenceClassification", + "XmodForTokenClassification", + "XmodModel", + "XmodPreTrainedModel", + ] + ) + _import_structure["models.yolos"].extend( + [ + "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST", + "YolosForObjectDetection", + "YolosModel", + "YolosPreTrainedModel", + ] + ) + _import_structure["models.yoso"].extend( + [ + "YOSO_PRETRAINED_MODEL_ARCHIVE_LIST", + "YosoForMaskedLM", + "YosoForMultipleChoice", + "YosoForQuestionAnswering", + "YosoForSequenceClassification", + "YosoForTokenClassification", + "YosoLayer", + "YosoModel", + "YosoPreTrainedModel", + ] + ) + _import_structure["optimization"] = [ + "Adafactor", + "AdamW", + "get_constant_schedule", + "get_constant_schedule_with_warmup", + "get_cosine_schedule_with_warmup", + "get_cosine_with_hard_restarts_schedule_with_warmup", + "get_inverse_sqrt_schedule", + "get_linear_schedule_with_warmup", + "get_polynomial_decay_schedule_with_warmup", + "get_scheduler", + ] + _import_structure["pytorch_utils"] = [ + "Conv1D", + "apply_chunking_to_forward", + "prune_layer", + ] + _import_structure["sagemaker"] = [] + _import_structure["time_series_utils"] = [] + _import_structure["trainer"] = ["Trainer"] + _import_structure["trainer_pt_utils"] = ["torch_distributed_zero_first"] + _import_structure["trainer_seq2seq"] = ["Seq2SeqTrainer"] + +# TensorFlow-backed objects +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_tf_objects + + _import_structure["utils.dummy_tf_objects"] = [name for name in dir(dummy_tf_objects) if not name.startswith("_")] +else: + _import_structure["activations_tf"] = [] + _import_structure["benchmark.benchmark_args_tf"] = ["TensorFlowBenchmarkArguments"] + _import_structure["benchmark.benchmark_tf"] = ["TensorFlowBenchmark"] + _import_structure["generation"].extend( + [ + "TFForcedBOSTokenLogitsProcessor", + "TFForcedEOSTokenLogitsProcessor", + "TFForceTokensLogitsProcessor", + "TFGenerationMixin", + "TFLogitsProcessor", + 
"TFLogitsProcessorList", + "TFLogitsWarper", + "TFMinLengthLogitsProcessor", + "TFNoBadWordsLogitsProcessor", + "TFNoRepeatNGramLogitsProcessor", + "TFRepetitionPenaltyLogitsProcessor", + "TFSuppressTokensAtBeginLogitsProcessor", + "TFSuppressTokensLogitsProcessor", + "TFTemperatureLogitsWarper", + "TFTopKLogitsWarper", + "TFTopPLogitsWarper", + ] + ) + _import_structure["generation_tf_utils"] = [] + _import_structure["keras_callbacks"] = ["KerasMetricCallback", "PushToHubCallback"] + _import_structure["modeling_tf_outputs"] = [] + _import_structure["modeling_tf_utils"] = [ + "TFPreTrainedModel", + "TFSequenceSummary", + "TFSharedEmbeddings", + "shape_list", + ] + # TensorFlow models structure + _import_structure["models.albert"].extend( + [ + "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFAlbertForMaskedLM", + "TFAlbertForMultipleChoice", + "TFAlbertForPreTraining", + "TFAlbertForQuestionAnswering", + "TFAlbertForSequenceClassification", + "TFAlbertForTokenClassification", + "TFAlbertMainLayer", + "TFAlbertModel", + "TFAlbertPreTrainedModel", + ] + ) + _import_structure["models.auto"].extend( + [ + "TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING", + "TF_MODEL_FOR_CAUSAL_LM_MAPPING", + "TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING", + "TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING", + "TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING", + "TF_MODEL_FOR_MASKED_LM_MAPPING", + "TF_MODEL_FOR_MASK_GENERATION_MAPPING", + "TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING", + "TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", + "TF_MODEL_FOR_PRETRAINING_MAPPING", + "TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING", + "TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING", + "TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", + "TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING", + "TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING", + "TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING", + "TF_MODEL_FOR_TEXT_ENCODING_MAPPING", + "TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING", + "TF_MODEL_FOR_VISION_2_SEQ_MAPPING", + "TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING", + "TF_MODEL_MAPPING", + "TF_MODEL_WITH_LM_HEAD_MAPPING", + "TFAutoModel", + "TFAutoModelForAudioClassification", + "TFAutoModelForCausalLM", + "TFAutoModelForDocumentQuestionAnswering", + "TFAutoModelForImageClassification", + "TFAutoModelForMaskedImageModeling", + "TFAutoModelForMaskedLM", + "TFAutoModelForMaskGeneration", + "TFAutoModelForMultipleChoice", + "TFAutoModelForNextSentencePrediction", + "TFAutoModelForPreTraining", + "TFAutoModelForQuestionAnswering", + "TFAutoModelForSemanticSegmentation", + "TFAutoModelForSeq2SeqLM", + "TFAutoModelForSequenceClassification", + "TFAutoModelForSpeechSeq2Seq", + "TFAutoModelForTableQuestionAnswering", + "TFAutoModelForTextEncoding", + "TFAutoModelForTokenClassification", + "TFAutoModelForVision2Seq", + "TFAutoModelForZeroShotImageClassification", + "TFAutoModelWithLMHead", + ] + ) + _import_structure["models.bart"].extend( + [ + "TFBartForConditionalGeneration", + "TFBartForSequenceClassification", + "TFBartModel", + "TFBartPretrainedModel", + ] + ) + _import_structure["models.bert"].extend( + [ + "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFBertEmbeddings", + "TFBertForMaskedLM", + "TFBertForMultipleChoice", + "TFBertForNextSentencePrediction", + "TFBertForPreTraining", + "TFBertForQuestionAnswering", + "TFBertForSequenceClassification", + "TFBertForTokenClassification", + "TFBertLMHeadModel", + "TFBertMainLayer", + "TFBertModel", + "TFBertPreTrainedModel", + ] + ) + _import_structure["models.blenderbot"].extend( + [ + 
"TFBlenderbotForConditionalGeneration", + "TFBlenderbotModel", + "TFBlenderbotPreTrainedModel", + ] + ) + _import_structure["models.blenderbot_small"].extend( + [ + "TFBlenderbotSmallForConditionalGeneration", + "TFBlenderbotSmallModel", + "TFBlenderbotSmallPreTrainedModel", + ] + ) + _import_structure["models.blip"].extend( + [ + "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFBlipForConditionalGeneration", + "TFBlipForImageTextRetrieval", + "TFBlipForQuestionAnswering", + "TFBlipModel", + "TFBlipPreTrainedModel", + "TFBlipTextModel", + "TFBlipVisionModel", + ] + ) + _import_structure["models.camembert"].extend( + [ + "TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFCamembertForCausalLM", + "TFCamembertForMaskedLM", + "TFCamembertForMultipleChoice", + "TFCamembertForQuestionAnswering", + "TFCamembertForSequenceClassification", + "TFCamembertForTokenClassification", + "TFCamembertModel", + "TFCamembertPreTrainedModel", + ] + ) + _import_structure["models.clip"].extend( + [ + "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFCLIPModel", + "TFCLIPPreTrainedModel", + "TFCLIPTextModel", + "TFCLIPVisionModel", + ] + ) + _import_structure["models.convbert"].extend( + [ + "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFConvBertForMaskedLM", + "TFConvBertForMultipleChoice", + "TFConvBertForQuestionAnswering", + "TFConvBertForSequenceClassification", + "TFConvBertForTokenClassification", + "TFConvBertLayer", + "TFConvBertModel", + "TFConvBertPreTrainedModel", + ] + ) + _import_structure["models.convnext"].extend( + [ + "TFConvNextForImageClassification", + "TFConvNextModel", + "TFConvNextPreTrainedModel", + ] + ) + _import_structure["models.convnextv2"].extend( + [ + "TFConvNextV2ForImageClassification", + "TFConvNextV2Model", + "TFConvNextV2PreTrainedModel", + ] + ) + _import_structure["models.ctrl"].extend( + [ + "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFCTRLForSequenceClassification", + "TFCTRLLMHeadModel", + "TFCTRLModel", + "TFCTRLPreTrainedModel", + ] + ) + _import_structure["models.cvt"].extend( + [ + "TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFCvtForImageClassification", + "TFCvtModel", + "TFCvtPreTrainedModel", + ] + ) + _import_structure["models.data2vec"].extend( + [ + "TFData2VecVisionForImageClassification", + "TFData2VecVisionForSemanticSegmentation", + "TFData2VecVisionModel", + "TFData2VecVisionPreTrainedModel", + ] + ) + _import_structure["models.deberta"].extend( + [ + "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFDebertaForMaskedLM", + "TFDebertaForQuestionAnswering", + "TFDebertaForSequenceClassification", + "TFDebertaForTokenClassification", + "TFDebertaModel", + "TFDebertaPreTrainedModel", + ] + ) + _import_structure["models.deberta_v2"].extend( + [ + "TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFDebertaV2ForMaskedLM", + "TFDebertaV2ForMultipleChoice", + "TFDebertaV2ForQuestionAnswering", + "TFDebertaV2ForSequenceClassification", + "TFDebertaV2ForTokenClassification", + "TFDebertaV2Model", + "TFDebertaV2PreTrainedModel", + ] + ) + _import_structure["models.deit"].extend( + [ + "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFDeiTForImageClassification", + "TFDeiTForImageClassificationWithTeacher", + "TFDeiTForMaskedImageModeling", + "TFDeiTModel", + "TFDeiTPreTrainedModel", + ] + ) + _import_structure["models.deprecated.transfo_xl"].extend( + [ + "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFAdaptiveEmbedding", + "TFTransfoXLForSequenceClassification", + "TFTransfoXLLMHeadModel", + "TFTransfoXLMainLayer", + "TFTransfoXLModel", + "TFTransfoXLPreTrainedModel", + ] 
+ ) + _import_structure["models.distilbert"].extend( + [ + "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFDistilBertForMaskedLM", + "TFDistilBertForMultipleChoice", + "TFDistilBertForQuestionAnswering", + "TFDistilBertForSequenceClassification", + "TFDistilBertForTokenClassification", + "TFDistilBertMainLayer", + "TFDistilBertModel", + "TFDistilBertPreTrainedModel", + ] + ) + _import_structure["models.dpr"].extend( + [ + "TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", + "TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", + "TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFDPRContextEncoder", + "TFDPRPretrainedContextEncoder", + "TFDPRPretrainedQuestionEncoder", + "TFDPRPretrainedReader", + "TFDPRQuestionEncoder", + "TFDPRReader", + ] + ) + _import_structure["models.efficientformer"].extend( + [ + "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFEfficientFormerForImageClassification", + "TFEfficientFormerForImageClassificationWithTeacher", + "TFEfficientFormerModel", + "TFEfficientFormerPreTrainedModel", + ] + ) + _import_structure["models.electra"].extend( + [ + "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFElectraForMaskedLM", + "TFElectraForMultipleChoice", + "TFElectraForPreTraining", + "TFElectraForQuestionAnswering", + "TFElectraForSequenceClassification", + "TFElectraForTokenClassification", + "TFElectraModel", + "TFElectraPreTrainedModel", + ] + ) + _import_structure["models.encoder_decoder"].append("TFEncoderDecoderModel") + _import_structure["models.esm"].extend( + [ + "ESM_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFEsmForMaskedLM", + "TFEsmForSequenceClassification", + "TFEsmForTokenClassification", + "TFEsmModel", + "TFEsmPreTrainedModel", + ] + ) + _import_structure["models.flaubert"].extend( + [ + "TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFFlaubertForMultipleChoice", + "TFFlaubertForQuestionAnsweringSimple", + "TFFlaubertForSequenceClassification", + "TFFlaubertForTokenClassification", + "TFFlaubertModel", + "TFFlaubertPreTrainedModel", + "TFFlaubertWithLMHeadModel", + ] + ) + _import_structure["models.funnel"].extend( + [ + "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFFunnelBaseModel", + "TFFunnelForMaskedLM", + "TFFunnelForMultipleChoice", + "TFFunnelForPreTraining", + "TFFunnelForQuestionAnswering", + "TFFunnelForSequenceClassification", + "TFFunnelForTokenClassification", + "TFFunnelModel", + "TFFunnelPreTrainedModel", + ] + ) + _import_structure["models.gpt2"].extend( + [ + "TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFGPT2DoubleHeadsModel", + "TFGPT2ForSequenceClassification", + "TFGPT2LMHeadModel", + "TFGPT2MainLayer", + "TFGPT2Model", + "TFGPT2PreTrainedModel", + ] + ) + _import_structure["models.gptj"].extend( + [ + "TFGPTJForCausalLM", + "TFGPTJForQuestionAnswering", + "TFGPTJForSequenceClassification", + "TFGPTJModel", + "TFGPTJPreTrainedModel", + ] + ) + _import_structure["models.groupvit"].extend( + [ + "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFGroupViTModel", + "TFGroupViTPreTrainedModel", + "TFGroupViTTextModel", + "TFGroupViTVisionModel", + ] + ) + _import_structure["models.hubert"].extend( + [ + "TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFHubertForCTC", + "TFHubertModel", + "TFHubertPreTrainedModel", + ] + ) + _import_structure["models.layoutlm"].extend( + [ + "TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFLayoutLMForMaskedLM", + "TFLayoutLMForQuestionAnswering", + "TFLayoutLMForSequenceClassification", + "TFLayoutLMForTokenClassification", + "TFLayoutLMMainLayer", + "TFLayoutLMModel", + 
"TFLayoutLMPreTrainedModel", + ] + ) + _import_structure["models.layoutlmv3"].extend( + [ + "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFLayoutLMv3ForQuestionAnswering", + "TFLayoutLMv3ForSequenceClassification", + "TFLayoutLMv3ForTokenClassification", + "TFLayoutLMv3Model", + "TFLayoutLMv3PreTrainedModel", + ] + ) + _import_structure["models.led"].extend(["TFLEDForConditionalGeneration", "TFLEDModel", "TFLEDPreTrainedModel"]) + _import_structure["models.longformer"].extend( + [ + "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFLongformerForMaskedLM", + "TFLongformerForMultipleChoice", + "TFLongformerForQuestionAnswering", + "TFLongformerForSequenceClassification", + "TFLongformerForTokenClassification", + "TFLongformerModel", + "TFLongformerPreTrainedModel", + "TFLongformerSelfAttention", + ] + ) + _import_structure["models.lxmert"].extend( + [ + "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFLxmertForPreTraining", + "TFLxmertMainLayer", + "TFLxmertModel", + "TFLxmertPreTrainedModel", + "TFLxmertVisualFeatureEncoder", + ] + ) + _import_structure["models.marian"].extend(["TFMarianModel", "TFMarianMTModel", "TFMarianPreTrainedModel"]) + _import_structure["models.mbart"].extend( + ["TFMBartForConditionalGeneration", "TFMBartModel", "TFMBartPreTrainedModel"] + ) + _import_structure["models.mobilebert"].extend( + [ + "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFMobileBertForMaskedLM", + "TFMobileBertForMultipleChoice", + "TFMobileBertForNextSentencePrediction", + "TFMobileBertForPreTraining", + "TFMobileBertForQuestionAnswering", + "TFMobileBertForSequenceClassification", + "TFMobileBertForTokenClassification", + "TFMobileBertMainLayer", + "TFMobileBertModel", + "TFMobileBertPreTrainedModel", + ] + ) + _import_structure["models.mobilevit"].extend( + [ + "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFMobileViTForImageClassification", + "TFMobileViTForSemanticSegmentation", + "TFMobileViTModel", + "TFMobileViTPreTrainedModel", + ] + ) + _import_structure["models.mpnet"].extend( + [ + "TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFMPNetForMaskedLM", + "TFMPNetForMultipleChoice", + "TFMPNetForQuestionAnswering", + "TFMPNetForSequenceClassification", + "TFMPNetForTokenClassification", + "TFMPNetMainLayer", + "TFMPNetModel", + "TFMPNetPreTrainedModel", + ] + ) + _import_structure["models.mt5"].extend(["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]) + _import_structure["models.openai"].extend( + [ + "TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFOpenAIGPTDoubleHeadsModel", + "TFOpenAIGPTForSequenceClassification", + "TFOpenAIGPTLMHeadModel", + "TFOpenAIGPTMainLayer", + "TFOpenAIGPTModel", + "TFOpenAIGPTPreTrainedModel", + ] + ) + _import_structure["models.opt"].extend( + [ + "TFOPTForCausalLM", + "TFOPTModel", + "TFOPTPreTrainedModel", + ] + ) + _import_structure["models.pegasus"].extend( + [ + "TFPegasusForConditionalGeneration", + "TFPegasusModel", + "TFPegasusPreTrainedModel", + ] + ) + _import_structure["models.rag"].extend( + [ + "TFRagModel", + "TFRagPreTrainedModel", + "TFRagSequenceForGeneration", + "TFRagTokenForGeneration", + ] + ) + _import_structure["models.regnet"].extend( + [ + "TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFRegNetForImageClassification", + "TFRegNetModel", + "TFRegNetPreTrainedModel", + ] + ) + _import_structure["models.rembert"].extend( + [ + "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFRemBertForCausalLM", + "TFRemBertForMaskedLM", + "TFRemBertForMultipleChoice", + "TFRemBertForQuestionAnswering", + 
"TFRemBertForSequenceClassification", + "TFRemBertForTokenClassification", + "TFRemBertLayer", + "TFRemBertModel", + "TFRemBertPreTrainedModel", + ] + ) + _import_structure["models.resnet"].extend( + [ + "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFResNetForImageClassification", + "TFResNetModel", + "TFResNetPreTrainedModel", + ] + ) + _import_structure["models.roberta"].extend( + [ + "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFRobertaForCausalLM", + "TFRobertaForMaskedLM", + "TFRobertaForMultipleChoice", + "TFRobertaForQuestionAnswering", + "TFRobertaForSequenceClassification", + "TFRobertaForTokenClassification", + "TFRobertaMainLayer", + "TFRobertaModel", + "TFRobertaPreTrainedModel", + ] + ) + _import_structure["models.roberta_prelayernorm"].extend( + [ + "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFRobertaPreLayerNormForCausalLM", + "TFRobertaPreLayerNormForMaskedLM", + "TFRobertaPreLayerNormForMultipleChoice", + "TFRobertaPreLayerNormForQuestionAnswering", + "TFRobertaPreLayerNormForSequenceClassification", + "TFRobertaPreLayerNormForTokenClassification", + "TFRobertaPreLayerNormMainLayer", + "TFRobertaPreLayerNormModel", + "TFRobertaPreLayerNormPreTrainedModel", + ] + ) + _import_structure["models.roformer"].extend( + [ + "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFRoFormerForCausalLM", + "TFRoFormerForMaskedLM", + "TFRoFormerForMultipleChoice", + "TFRoFormerForQuestionAnswering", + "TFRoFormerForSequenceClassification", + "TFRoFormerForTokenClassification", + "TFRoFormerLayer", + "TFRoFormerModel", + "TFRoFormerPreTrainedModel", + ] + ) + _import_structure["models.sam"].extend( + [ + "TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFSamModel", + "TFSamPreTrainedModel", + ] + ) + _import_structure["models.segformer"].extend( + [ + "TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFSegformerDecodeHead", + "TFSegformerForImageClassification", + "TFSegformerForSemanticSegmentation", + "TFSegformerModel", + "TFSegformerPreTrainedModel", + ] + ) + _import_structure["models.speech_to_text"].extend( + [ + "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFSpeech2TextForConditionalGeneration", + "TFSpeech2TextModel", + "TFSpeech2TextPreTrainedModel", + ] + ) + _import_structure["models.swin"].extend( + [ + "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFSwinForImageClassification", + "TFSwinForMaskedImageModeling", + "TFSwinModel", + "TFSwinPreTrainedModel", + ] + ) + _import_structure["models.t5"].extend( + [ + "TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFT5EncoderModel", + "TFT5ForConditionalGeneration", + "TFT5Model", + "TFT5PreTrainedModel", + ] + ) + _import_structure["models.tapas"].extend( + [ + "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFTapasForMaskedLM", + "TFTapasForQuestionAnswering", + "TFTapasForSequenceClassification", + "TFTapasModel", + "TFTapasPreTrainedModel", + ] + ) + _import_structure["models.vision_encoder_decoder"].extend(["TFVisionEncoderDecoderModel"]) + _import_structure["models.vision_text_dual_encoder"].extend(["TFVisionTextDualEncoderModel"]) + _import_structure["models.vit"].extend( + [ + "TFViTForImageClassification", + "TFViTModel", + "TFViTPreTrainedModel", + ] + ) + _import_structure["models.vit_mae"].extend( + [ + "TFViTMAEForPreTraining", + "TFViTMAEModel", + "TFViTMAEPreTrainedModel", + ] + ) + _import_structure["models.wav2vec2"].extend( + [ + "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFWav2Vec2ForCTC", + "TFWav2Vec2ForSequenceClassification", + "TFWav2Vec2Model", + "TFWav2Vec2PreTrainedModel", + ] + ) + 
_import_structure["models.whisper"].extend( + [ + "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFWhisperForConditionalGeneration", + "TFWhisperModel", + "TFWhisperPreTrainedModel", + ] + ) + _import_structure["models.xglm"].extend( + [ + "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFXGLMForCausalLM", + "TFXGLMModel", + "TFXGLMPreTrainedModel", + ] + ) + _import_structure["models.xlm"].extend( + [ + "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFXLMForMultipleChoice", + "TFXLMForQuestionAnsweringSimple", + "TFXLMForSequenceClassification", + "TFXLMForTokenClassification", + "TFXLMMainLayer", + "TFXLMModel", + "TFXLMPreTrainedModel", + "TFXLMWithLMHeadModel", + ] + ) + _import_structure["models.xlm_roberta"].extend( + [ + "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFXLMRobertaForCausalLM", + "TFXLMRobertaForMaskedLM", + "TFXLMRobertaForMultipleChoice", + "TFXLMRobertaForQuestionAnswering", + "TFXLMRobertaForSequenceClassification", + "TFXLMRobertaForTokenClassification", + "TFXLMRobertaModel", + "TFXLMRobertaPreTrainedModel", + ] + ) + _import_structure["models.xlnet"].extend( + [ + "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFXLNetForMultipleChoice", + "TFXLNetForQuestionAnsweringSimple", + "TFXLNetForSequenceClassification", + "TFXLNetForTokenClassification", + "TFXLNetLMHeadModel", + "TFXLNetMainLayer", + "TFXLNetModel", + "TFXLNetPreTrainedModel", + ] + ) + _import_structure["optimization_tf"] = [ + "AdamWeightDecay", + "GradientAccumulator", + "WarmUp", + "create_optimizer", + ] + _import_structure["tf_utils"] = [] + + +try: + if not ( + is_librosa_available() + and is_essentia_available() + and is_scipy_available() + and is_torch_available() + and is_pretty_midi_available() + ): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import ( + dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects, + ) + + _import_structure["utils.dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects"] = [ + name + for name in dir(dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects) + if not name.startswith("_") + ] +else: + _import_structure["models.pop2piano"].append("Pop2PianoFeatureExtractor") + _import_structure["models.pop2piano"].append("Pop2PianoTokenizer") + _import_structure["models.pop2piano"].append("Pop2PianoProcessor") + +try: + if not is_torchaudio_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import ( + dummy_torchaudio_objects, + ) + + _import_structure["utils.dummy_torchaudio_objects"] = [ + name for name in dir(dummy_torchaudio_objects) if not name.startswith("_") + ] +else: + _import_structure["models.musicgen_melody"].append("MusicgenMelodyFeatureExtractor") + _import_structure["models.musicgen_melody"].append("MusicgenMelodyProcessor") + + +# FLAX-backed objects +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_flax_objects + + _import_structure["utils.dummy_flax_objects"] = [ + name for name in dir(dummy_flax_objects) if not name.startswith("_") + ] +else: + _import_structure["generation"].extend( + [ + "FlaxForcedBOSTokenLogitsProcessor", + "FlaxForcedEOSTokenLogitsProcessor", + "FlaxForceTokensLogitsProcessor", + "FlaxGenerationMixin", + "FlaxLogitsProcessor", + "FlaxLogitsProcessorList", + "FlaxLogitsWarper", + "FlaxMinLengthLogitsProcessor", + "FlaxTemperatureLogitsWarper", + "FlaxSuppressTokensAtBeginLogitsProcessor", + 
"FlaxSuppressTokensLogitsProcessor", + "FlaxTopKLogitsWarper", + "FlaxTopPLogitsWarper", + "FlaxWhisperTimeStampLogitsProcessor", + ] + ) + _import_structure["generation_flax_utils"] = [] + _import_structure["modeling_flax_outputs"] = [] + _import_structure["modeling_flax_utils"] = ["FlaxPreTrainedModel"] + _import_structure["models.albert"].extend( + [ + "FlaxAlbertForMaskedLM", + "FlaxAlbertForMultipleChoice", + "FlaxAlbertForPreTraining", + "FlaxAlbertForQuestionAnswering", + "FlaxAlbertForSequenceClassification", + "FlaxAlbertForTokenClassification", + "FlaxAlbertModel", + "FlaxAlbertPreTrainedModel", + ] + ) + _import_structure["models.auto"].extend( + [ + "FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING", + "FLAX_MODEL_FOR_CAUSAL_LM_MAPPING", + "FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING", + "FLAX_MODEL_FOR_MASKED_LM_MAPPING", + "FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING", + "FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", + "FLAX_MODEL_FOR_PRETRAINING_MAPPING", + "FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING", + "FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", + "FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING", + "FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING", + "FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING", + "FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING", + "FLAX_MODEL_MAPPING", + "FlaxAutoModel", + "FlaxAutoModelForCausalLM", + "FlaxAutoModelForImageClassification", + "FlaxAutoModelForMaskedLM", + "FlaxAutoModelForMultipleChoice", + "FlaxAutoModelForNextSentencePrediction", + "FlaxAutoModelForPreTraining", + "FlaxAutoModelForQuestionAnswering", + "FlaxAutoModelForSeq2SeqLM", + "FlaxAutoModelForSequenceClassification", + "FlaxAutoModelForSpeechSeq2Seq", + "FlaxAutoModelForTokenClassification", + "FlaxAutoModelForVision2Seq", + ] + ) + + # Flax models structure + + _import_structure["models.bart"].extend( + [ + "FlaxBartDecoderPreTrainedModel", + "FlaxBartForCausalLM", + "FlaxBartForConditionalGeneration", + "FlaxBartForQuestionAnswering", + "FlaxBartForSequenceClassification", + "FlaxBartModel", + "FlaxBartPreTrainedModel", + ] + ) + _import_structure["models.beit"].extend( + [ + "FlaxBeitForImageClassification", + "FlaxBeitForMaskedImageModeling", + "FlaxBeitModel", + "FlaxBeitPreTrainedModel", + ] + ) + + _import_structure["models.bert"].extend( + [ + "FlaxBertForCausalLM", + "FlaxBertForMaskedLM", + "FlaxBertForMultipleChoice", + "FlaxBertForNextSentencePrediction", + "FlaxBertForPreTraining", + "FlaxBertForQuestionAnswering", + "FlaxBertForSequenceClassification", + "FlaxBertForTokenClassification", + "FlaxBertModel", + "FlaxBertPreTrainedModel", + ] + ) + _import_structure["models.big_bird"].extend( + [ + "FlaxBigBirdForCausalLM", + "FlaxBigBirdForMaskedLM", + "FlaxBigBirdForMultipleChoice", + "FlaxBigBirdForPreTraining", + "FlaxBigBirdForQuestionAnswering", + "FlaxBigBirdForSequenceClassification", + "FlaxBigBirdForTokenClassification", + "FlaxBigBirdModel", + "FlaxBigBirdPreTrainedModel", + ] + ) + _import_structure["models.blenderbot"].extend( + [ + "FlaxBlenderbotForConditionalGeneration", + "FlaxBlenderbotModel", + "FlaxBlenderbotPreTrainedModel", + ] + ) + _import_structure["models.blenderbot_small"].extend( + [ + "FlaxBlenderbotSmallForConditionalGeneration", + "FlaxBlenderbotSmallModel", + "FlaxBlenderbotSmallPreTrainedModel", + ] + ) + _import_structure["models.bloom"].extend( + [ + "FlaxBloomForCausalLM", + "FlaxBloomModel", + "FlaxBloomPreTrainedModel", + ] + ) + _import_structure["models.clip"].extend( + [ + "FlaxCLIPModel", + "FlaxCLIPPreTrainedModel", + "FlaxCLIPTextModel", + 
"FlaxCLIPTextPreTrainedModel", + "FlaxCLIPTextModelWithProjection", + "FlaxCLIPVisionModel", + "FlaxCLIPVisionPreTrainedModel", + ] + ) + _import_structure["models.distilbert"].extend( + [ + "FlaxDistilBertForMaskedLM", + "FlaxDistilBertForMultipleChoice", + "FlaxDistilBertForQuestionAnswering", + "FlaxDistilBertForSequenceClassification", + "FlaxDistilBertForTokenClassification", + "FlaxDistilBertModel", + "FlaxDistilBertPreTrainedModel", + ] + ) + _import_structure["models.electra"].extend( + [ + "FlaxElectraForCausalLM", + "FlaxElectraForMaskedLM", + "FlaxElectraForMultipleChoice", + "FlaxElectraForPreTraining", + "FlaxElectraForQuestionAnswering", + "FlaxElectraForSequenceClassification", + "FlaxElectraForTokenClassification", + "FlaxElectraModel", + "FlaxElectraPreTrainedModel", + ] + ) + _import_structure["models.encoder_decoder"].append("FlaxEncoderDecoderModel") + _import_structure["models.gpt2"].extend(["FlaxGPT2LMHeadModel", "FlaxGPT2Model", "FlaxGPT2PreTrainedModel"]) + _import_structure["models.gpt_neo"].extend( + ["FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel"] + ) + _import_structure["models.gptj"].extend(["FlaxGPTJForCausalLM", "FlaxGPTJModel", "FlaxGPTJPreTrainedModel"]) + _import_structure["models.llama"].extend(["FlaxLlamaForCausalLM", "FlaxLlamaModel", "FlaxLlamaPreTrainedModel"]) + _import_structure["models.gemma"].extend(["FlaxGemmaForCausalLM", "FlaxGemmaModel", "FlaxGemmaPreTrainedModel"]) + _import_structure["models.longt5"].extend( + [ + "FlaxLongT5ForConditionalGeneration", + "FlaxLongT5Model", + "FlaxLongT5PreTrainedModel", + ] + ) + _import_structure["models.marian"].extend( + [ + "FlaxMarianModel", + "FlaxMarianMTModel", + "FlaxMarianPreTrainedModel", + ] + ) + _import_structure["models.mbart"].extend( + [ + "FlaxMBartForConditionalGeneration", + "FlaxMBartForQuestionAnswering", + "FlaxMBartForSequenceClassification", + "FlaxMBartModel", + "FlaxMBartPreTrainedModel", + ] + ) + _import_structure["models.mistral"].extend( + [ + "FlaxMistralForCausalLM", + "FlaxMistralModel", + "FlaxMistralPreTrainedModel", + ] + ) + _import_structure["models.mt5"].extend(["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]) + _import_structure["models.opt"].extend( + [ + "FlaxOPTForCausalLM", + "FlaxOPTModel", + "FlaxOPTPreTrainedModel", + ] + ) + _import_structure["models.pegasus"].extend( + [ + "FlaxPegasusForConditionalGeneration", + "FlaxPegasusModel", + "FlaxPegasusPreTrainedModel", + ] + ) + _import_structure["models.regnet"].extend( + [ + "FlaxRegNetForImageClassification", + "FlaxRegNetModel", + "FlaxRegNetPreTrainedModel", + ] + ) + _import_structure["models.resnet"].extend( + [ + "FlaxResNetForImageClassification", + "FlaxResNetModel", + "FlaxResNetPreTrainedModel", + ] + ) + _import_structure["models.roberta"].extend( + [ + "FlaxRobertaForCausalLM", + "FlaxRobertaForMaskedLM", + "FlaxRobertaForMultipleChoice", + "FlaxRobertaForQuestionAnswering", + "FlaxRobertaForSequenceClassification", + "FlaxRobertaForTokenClassification", + "FlaxRobertaModel", + "FlaxRobertaPreTrainedModel", + ] + ) + _import_structure["models.roberta_prelayernorm"].extend( + [ + "FlaxRobertaPreLayerNormForCausalLM", + "FlaxRobertaPreLayerNormForMaskedLM", + "FlaxRobertaPreLayerNormForMultipleChoice", + "FlaxRobertaPreLayerNormForQuestionAnswering", + "FlaxRobertaPreLayerNormForSequenceClassification", + "FlaxRobertaPreLayerNormForTokenClassification", + "FlaxRobertaPreLayerNormModel", + "FlaxRobertaPreLayerNormPreTrainedModel", + ] + ) + 
_import_structure["models.roformer"].extend( + [ + "FlaxRoFormerForMaskedLM", + "FlaxRoFormerForMultipleChoice", + "FlaxRoFormerForQuestionAnswering", + "FlaxRoFormerForSequenceClassification", + "FlaxRoFormerForTokenClassification", + "FlaxRoFormerModel", + "FlaxRoFormerPreTrainedModel", + ] + ) + _import_structure["models.speech_encoder_decoder"].append("FlaxSpeechEncoderDecoderModel") + _import_structure["models.t5"].extend( + [ + "FlaxT5EncoderModel", + "FlaxT5ForConditionalGeneration", + "FlaxT5Model", + "FlaxT5PreTrainedModel", + ] + ) + _import_structure["models.vision_encoder_decoder"].append("FlaxVisionEncoderDecoderModel") + _import_structure["models.vision_text_dual_encoder"].extend(["FlaxVisionTextDualEncoderModel"]) + _import_structure["models.vit"].extend(["FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel"]) + _import_structure["models.wav2vec2"].extend( + [ + "FlaxWav2Vec2ForCTC", + "FlaxWav2Vec2ForPreTraining", + "FlaxWav2Vec2Model", + "FlaxWav2Vec2PreTrainedModel", + ] + ) + _import_structure["models.whisper"].extend( + [ + "FlaxWhisperForConditionalGeneration", + "FlaxWhisperModel", + "FlaxWhisperPreTrainedModel", + "FlaxWhisperForAudioClassification", + ] + ) + _import_structure["models.xglm"].extend( + [ + "FlaxXGLMForCausalLM", + "FlaxXGLMModel", + "FlaxXGLMPreTrainedModel", + ] + ) + _import_structure["models.xlm_roberta"].extend( + [ + "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", + "FlaxXLMRobertaForMaskedLM", + "FlaxXLMRobertaForMultipleChoice", + "FlaxXLMRobertaForQuestionAnswering", + "FlaxXLMRobertaForSequenceClassification", + "FlaxXLMRobertaForTokenClassification", + "FlaxXLMRobertaModel", + "FlaxXLMRobertaForCausalLM", + "FlaxXLMRobertaPreTrainedModel", + ] + ) + + +# Direct imports for type-checking +if TYPE_CHECKING: + # Configuration + from .configuration_utils import PretrainedConfig + + # Data + from .data import ( + DataProcessor, + InputExample, + InputFeatures, + SingleSentenceClassificationProcessor, + SquadExample, + SquadFeatures, + SquadV1Processor, + SquadV2Processor, + glue_compute_metrics, + glue_convert_examples_to_features, + glue_output_modes, + glue_processors, + glue_tasks_num_labels, + squad_convert_examples_to_features, + xnli_compute_metrics, + xnli_output_modes, + xnli_processors, + xnli_tasks_num_labels, + ) + from .data.data_collator import ( + DataCollator, + DataCollatorForLanguageModeling, + DataCollatorForPermutationLanguageModeling, + DataCollatorForSeq2Seq, + DataCollatorForSOP, + DataCollatorForTokenClassification, + DataCollatorForWholeWordMask, + DataCollatorWithPadding, + DefaultDataCollator, + default_data_collator, + ) + from .feature_extraction_sequence_utils import SequenceFeatureExtractor + + # Feature Extractor + from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin + + # Generation + from .generation import GenerationConfig, TextIteratorStreamer, TextStreamer + from .hf_argparser import HfArgumentParser + + # Integrations + from .integrations import ( + is_clearml_available, + is_comet_available, + is_dvclive_available, + is_neptune_available, + is_optuna_available, + is_ray_available, + is_ray_tune_available, + is_sigopt_available, + is_tensorboard_available, + is_wandb_available, + ) + + # Model Cards + from .modelcard import ModelCard + + # TF 2.0 <=> PyTorch conversion utilities + from .modeling_tf_pytorch_utils import ( + convert_tf_weight_name_to_pt_weight_name, + load_pytorch_checkpoint_in_tf2_model, + load_pytorch_model_in_tf2_model, + 
load_pytorch_weights_in_tf2_model, + load_tf2_checkpoint_in_pytorch_model, + load_tf2_model_in_pytorch_model, + load_tf2_weights_in_pytorch_model, + ) + from .models.albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig + from .models.align import ( + ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP, + AlignConfig, + AlignProcessor, + AlignTextConfig, + AlignVisionConfig, + ) + from .models.altclip import ( + ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, + AltCLIPConfig, + AltCLIPProcessor, + AltCLIPTextConfig, + AltCLIPVisionConfig, + ) + from .models.audio_spectrogram_transformer import ( + AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + ASTConfig, + ASTFeatureExtractor, + ) + from .models.auto import ( + ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, + CONFIG_MAPPING, + FEATURE_EXTRACTOR_MAPPING, + IMAGE_PROCESSOR_MAPPING, + MODEL_NAMES_MAPPING, + PROCESSOR_MAPPING, + TOKENIZER_MAPPING, + AutoConfig, + AutoFeatureExtractor, + AutoImageProcessor, + AutoProcessor, + AutoTokenizer, + ) + from .models.autoformer import ( + AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + AutoformerConfig, + ) + from .models.bark import ( + BarkCoarseConfig, + BarkConfig, + BarkFineConfig, + BarkProcessor, + BarkSemanticConfig, + ) + from .models.bart import BartConfig, BartTokenizer + from .models.beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig + from .models.bert import ( + BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, + BasicTokenizer, + BertConfig, + BertTokenizer, + WordpieceTokenizer, + ) + from .models.bert_generation import BertGenerationConfig + from .models.bert_japanese import ( + BertJapaneseTokenizer, + CharacterTokenizer, + MecabTokenizer, + ) + from .models.bertweet import BertweetTokenizer + from .models.big_bird import BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdConfig + from .models.bigbird_pegasus import ( + BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, + BigBirdPegasusConfig, + ) + from .models.biogpt import ( + BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, + BioGptConfig, + BioGptTokenizer, + ) + from .models.bit import BIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BitConfig + from .models.blenderbot import ( + BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, + BlenderbotConfig, + BlenderbotTokenizer, + ) + from .models.blenderbot_small import ( + BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, + BlenderbotSmallConfig, + BlenderbotSmallTokenizer, + ) + from .models.blip import ( + BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, + BlipConfig, + BlipProcessor, + BlipTextConfig, + BlipVisionConfig, + ) + from .models.blip_2 import ( + BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, + Blip2Config, + Blip2Processor, + Blip2QFormerConfig, + Blip2VisionConfig, + ) + from .models.bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig + from .models.bridgetower import ( + BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, + BridgeTowerConfig, + BridgeTowerProcessor, + BridgeTowerTextConfig, + BridgeTowerVisionConfig, + ) + from .models.bros import ( + BROS_PRETRAINED_CONFIG_ARCHIVE_MAP, + BrosConfig, + BrosProcessor, + ) + from .models.byt5 import ByT5Tokenizer + from .models.camembert import ( + CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, + CamembertConfig, + ) + from .models.canine import ( + CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, + CanineConfig, + CanineTokenizer, + ) + from .models.chinese_clip import ( + CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, + ChineseCLIPConfig, + ChineseCLIPProcessor, + ChineseCLIPTextConfig, + ChineseCLIPVisionConfig, + ) + from .models.clap import ( + CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, + ClapAudioConfig, + ClapConfig, + 
ClapProcessor, + ClapTextConfig, + ) + from .models.clip import ( + CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, + CLIPConfig, + CLIPProcessor, + CLIPTextConfig, + CLIPTokenizer, + CLIPVisionConfig, + ) + from .models.clipseg import ( + CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, + CLIPSegConfig, + CLIPSegProcessor, + CLIPSegTextConfig, + CLIPSegVisionConfig, + ) + from .models.clvp import ( + CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP, + ClvpConfig, + ClvpDecoderConfig, + ClvpEncoderConfig, + ClvpFeatureExtractor, + ClvpProcessor, + ClvpTokenizer, + ) + from .models.codegen import ( + CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP, + CodeGenConfig, + CodeGenTokenizer, + ) + from .models.cohere import COHERE_PRETRAINED_CONFIG_ARCHIVE_MAP, CohereConfig + from .models.conditional_detr import ( + CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, + ConditionalDetrConfig, + ) + from .models.convbert import ( + CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, + ConvBertConfig, + ConvBertTokenizer, + ) + from .models.convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig + from .models.convnextv2 import ( + CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP, + ConvNextV2Config, + ) + from .models.cpmant import ( + CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, + CpmAntConfig, + CpmAntTokenizer, + ) + from .models.ctrl import ( + CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, + CTRLConfig, + CTRLTokenizer, + ) + from .models.cvt import CVT_PRETRAINED_CONFIG_ARCHIVE_MAP, CvtConfig + from .models.data2vec import ( + DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, + DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, + Data2VecAudioConfig, + Data2VecTextConfig, + Data2VecVisionConfig, + ) + from .models.deberta import ( + DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, + DebertaConfig, + DebertaTokenizer, + ) + from .models.deberta_v2 import ( + DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, + DebertaV2Config, + ) + from .models.decision_transformer import ( + DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + DecisionTransformerConfig, + ) + from .models.deformable_detr import ( + DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, + DeformableDetrConfig, + ) + from .models.deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig + from .models.deprecated.mctct import ( + MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, + MCTCTConfig, + MCTCTFeatureExtractor, + MCTCTProcessor, + ) + from .models.deprecated.mmbt import MMBTConfig + from .models.deprecated.open_llama import ( + OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, + OpenLlamaConfig, + ) + from .models.deprecated.retribert import ( + RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, + RetriBertConfig, + RetriBertTokenizer, + ) + from .models.deprecated.tapex import TapexTokenizer + from .models.deprecated.trajectory_transformer import ( + TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + TrajectoryTransformerConfig, + ) + from .models.deprecated.transfo_xl import ( + TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, + TransfoXLConfig, + TransfoXLCorpus, + TransfoXLTokenizer, + ) + from .models.deprecated.van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig + from .models.depth_anything import DEPTH_ANYTHING_PRETRAINED_CONFIG_ARCHIVE_MAP, DepthAnythingConfig + from .models.deta import DETA_PRETRAINED_CONFIG_ARCHIVE_MAP, DetaConfig + from .models.detr import DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DetrConfig + from .models.dinat import DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP, DinatConfig + from .models.dinov2 import DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Dinov2Config + from .models.distilbert import ( + DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, + 
DistilBertConfig, + DistilBertTokenizer, + ) + from .models.donut import ( + DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, + DonutProcessor, + DonutSwinConfig, + ) + from .models.dpr import ( + DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, + DPRConfig, + DPRContextEncoderTokenizer, + DPRQuestionEncoderTokenizer, + DPRReaderOutput, + DPRReaderTokenizer, + ) + from .models.dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig + from .models.efficientformer import ( + EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + EfficientFormerConfig, + ) + from .models.efficientnet import ( + EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, + EfficientNetConfig, + ) + from .models.electra import ( + ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, + ElectraConfig, + ElectraTokenizer, + ) + from .models.encodec import ( + ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, + EncodecConfig, + EncodecFeatureExtractor, + ) + from .models.encoder_decoder import EncoderDecoderConfig + from .models.ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig + from .models.ernie_m import ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieMConfig + from .models.esm import ESM_PRETRAINED_CONFIG_ARCHIVE_MAP, EsmConfig, EsmTokenizer + from .models.falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig + from .models.fastspeech2_conformer import ( + FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP, + FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP, + FastSpeech2ConformerConfig, + FastSpeech2ConformerHifiGanConfig, + FastSpeech2ConformerTokenizer, + FastSpeech2ConformerWithHifiGanConfig, + ) + from .models.flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertTokenizer + from .models.flava import ( + FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP, + FlavaConfig, + FlavaImageCodebookConfig, + FlavaImageConfig, + FlavaMultimodalConfig, + FlavaTextConfig, + ) + from .models.fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig + from .models.focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig + from .models.fsmt import ( + FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, + FSMTConfig, + FSMTTokenizer, + ) + from .models.funnel import ( + FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, + FunnelConfig, + FunnelTokenizer, + ) + from .models.fuyu import FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP, FuyuConfig + from .models.gemma import GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP, GemmaConfig + from .models.git import ( + GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, + GitConfig, + GitProcessor, + GitVisionConfig, + ) + from .models.glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig + from .models.gpt2 import ( + GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, + GPT2Config, + GPT2Tokenizer, + ) + from .models.gpt_bigcode import ( + GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, + GPTBigCodeConfig, + ) + from .models.gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig + from .models.gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig + from .models.gpt_neox_japanese import ( + GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, + GPTNeoXJapaneseConfig, + ) + from .models.gptj import GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTJConfig + from .models.gptsan_japanese import ( + GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, + GPTSanJapaneseConfig, + GPTSanJapaneseTokenizer, + ) + from .models.graphormer import ( + GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + GraphormerConfig, + ) + from .models.groupvit import ( + GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, + GroupViTConfig, + 
GroupViTTextConfig, + GroupViTVisionConfig, + ) + from .models.herbert import HerbertTokenizer + from .models.hubert import HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, HubertConfig + from .models.ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig + from .models.idefics import ( + IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP, + IdeficsConfig, + ) + from .models.imagegpt import IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ImageGPTConfig + from .models.informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig + from .models.instructblip import ( + INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, + InstructBlipConfig, + InstructBlipProcessor, + InstructBlipQFormerConfig, + InstructBlipVisionConfig, + ) + from .models.jukebox import ( + JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, + JukeboxConfig, + JukeboxPriorConfig, + JukeboxTokenizer, + JukeboxVQVAEConfig, + ) + from .models.kosmos2 import ( + KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP, + Kosmos2Config, + Kosmos2Processor, + ) + from .models.layoutlm import ( + LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, + LayoutLMConfig, + LayoutLMTokenizer, + ) + from .models.layoutlmv2 import ( + LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, + LayoutLMv2Config, + LayoutLMv2FeatureExtractor, + LayoutLMv2ImageProcessor, + LayoutLMv2Processor, + LayoutLMv2Tokenizer, + ) + from .models.layoutlmv3 import ( + LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, + LayoutLMv3Config, + LayoutLMv3FeatureExtractor, + LayoutLMv3ImageProcessor, + LayoutLMv3Processor, + LayoutLMv3Tokenizer, + ) + from .models.layoutxlm import LayoutXLMProcessor + from .models.led import LED_PRETRAINED_CONFIG_ARCHIVE_MAP, LEDConfig, LEDTokenizer + from .models.levit import LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, LevitConfig + from .models.lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig + from .models.llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig + from .models.llava import ( + LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP, + LlavaConfig, + LlavaProcessor, + ) + from .models.llava_next import ( + LLAVA_NEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, + LlavaNextConfig, + LlavaNextProcessor, + ) + from .models.longformer import ( + LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + LongformerConfig, + LongformerTokenizer, + ) + from .models.longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config + from .models.luke import ( + LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, + LukeConfig, + LukeTokenizer, + ) + from .models.lxmert import ( + LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, + LxmertConfig, + LxmertTokenizer, + ) + from .models.m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config + from .models.mamba import MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP, MambaConfig + from .models.marian import MarianConfig + from .models.markuplm import ( + MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP, + MarkupLMConfig, + MarkupLMFeatureExtractor, + MarkupLMProcessor, + MarkupLMTokenizer, + ) + from .models.mask2former import ( + MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + Mask2FormerConfig, + ) + from .models.maskformer import ( + MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + MaskFormerConfig, + MaskFormerSwinConfig, + ) + from .models.mbart import MBartConfig + from .models.mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig + from .models.megatron_bert import ( + MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, + MegatronBertConfig, + ) + from .models.mgp_str import ( + MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, + MgpstrConfig, + MgpstrProcessor, + MgpstrTokenizer, + ) + from .models.mistral import MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP, 
MistralConfig + from .models.mixtral import MIXTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP, MixtralConfig + from .models.mobilebert import ( + MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, + MobileBertConfig, + MobileBertTokenizer, + ) + from .models.mobilenet_v1 import ( + MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP, + MobileNetV1Config, + ) + from .models.mobilenet_v2 import ( + MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, + MobileNetV2Config, + ) + from .models.mobilevit import ( + MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, + MobileViTConfig, + ) + from .models.mobilevitv2 import ( + MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP, + MobileViTV2Config, + ) + from .models.mpnet import ( + MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP, + MPNetConfig, + MPNetTokenizer, + ) + from .models.mpt import MPT_PRETRAINED_CONFIG_ARCHIVE_MAP, MptConfig + from .models.mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig + from .models.mt5 import MT5Config + from .models.musicgen import ( + MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP, + MusicgenConfig, + MusicgenDecoderConfig, + ) + from .models.musicgen_melody import ( + MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST, + MusicgenMelodyConfig, + MusicgenMelodyDecoderConfig, + ) + from .models.mvp import MvpConfig, MvpTokenizer + from .models.nat import NAT_PRETRAINED_CONFIG_ARCHIVE_MAP, NatConfig + from .models.nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig + from .models.nllb_moe import NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig + from .models.nougat import NougatProcessor + from .models.nystromformer import ( + NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + NystromformerConfig, + ) + from .models.oneformer import ( + ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + OneFormerConfig, + OneFormerProcessor, + ) + from .models.openai import ( + OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, + OpenAIGPTConfig, + OpenAIGPTTokenizer, + ) + from .models.opt import OPTConfig + from .models.owlv2 import ( + OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP, + Owlv2Config, + Owlv2Processor, + Owlv2TextConfig, + Owlv2VisionConfig, + ) + from .models.owlvit import ( + OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, + OwlViTConfig, + OwlViTProcessor, + OwlViTTextConfig, + OwlViTVisionConfig, + ) + from .models.patchtsmixer import ( + PATCHTSMIXER_PRETRAINED_CONFIG_ARCHIVE_MAP, + PatchTSMixerConfig, + ) + from .models.patchtst import PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP, PatchTSTConfig + from .models.pegasus import ( + PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, + PegasusConfig, + PegasusTokenizer, + ) + from .models.pegasus_x import ( + PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, + PegasusXConfig, + ) + from .models.perceiver import ( + PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, + PerceiverConfig, + PerceiverTokenizer, + ) + from .models.persimmon import ( + PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP, + PersimmonConfig, + ) + from .models.phi import PHI_PRETRAINED_CONFIG_ARCHIVE_MAP, PhiConfig + from .models.phobert import PhobertTokenizer + from .models.pix2struct import ( + PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, + Pix2StructConfig, + Pix2StructProcessor, + Pix2StructTextConfig, + Pix2StructVisionConfig, + ) + from .models.plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig + from .models.poolformer import ( + POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + PoolFormerConfig, + ) + from .models.pop2piano import ( + POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP, + Pop2PianoConfig, + ) + from .models.prophetnet import ( + PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, + ProphetNetConfig, + ProphetNetTokenizer, + ) + from 
.models.pvt import PVT_PRETRAINED_CONFIG_ARCHIVE_MAP, PvtConfig + from .models.pvt_v2 import PVT_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, PvtV2Config + from .models.qdqbert import QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, QDQBertConfig + from .models.qwen2 import QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP, Qwen2Config, Qwen2Tokenizer + from .models.rag import RagConfig, RagRetriever, RagTokenizer + from .models.realm import ( + REALM_PRETRAINED_CONFIG_ARCHIVE_MAP, + RealmConfig, + RealmTokenizer, + ) + from .models.reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig + from .models.regnet import REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP, RegNetConfig + from .models.rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig + from .models.resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig + from .models.roberta import ( + ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, + RobertaConfig, + RobertaTokenizer, + ) + from .models.roberta_prelayernorm import ( + ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, + RobertaPreLayerNormConfig, + ) + from .models.roc_bert import ( + ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, + RoCBertConfig, + RoCBertTokenizer, + ) + from .models.roformer import ( + ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + RoFormerConfig, + RoFormerTokenizer, + ) + from .models.rwkv import RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP, RwkvConfig + from .models.sam import ( + SAM_PRETRAINED_CONFIG_ARCHIVE_MAP, + SamConfig, + SamMaskDecoderConfig, + SamProcessor, + SamPromptEncoderConfig, + SamVisionConfig, + ) + from .models.seamless_m4t import ( + SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP, + SeamlessM4TConfig, + SeamlessM4TFeatureExtractor, + SeamlessM4TProcessor, + ) + from .models.seamless_m4t_v2 import ( + SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, + SeamlessM4Tv2Config, + ) + from .models.segformer import SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SegformerConfig + from .models.seggpt import SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, SegGptConfig + from .models.sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig + from .models.sew_d import SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWDConfig + from .models.siglip import ( + SIGLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, + SiglipConfig, + SiglipProcessor, + SiglipTextConfig, + SiglipVisionConfig, + ) + from .models.speech_encoder_decoder import SpeechEncoderDecoderConfig + from .models.speech_to_text import ( + SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, + Speech2TextConfig, + Speech2TextFeatureExtractor, + Speech2TextProcessor, + ) + from .models.speech_to_text_2 import ( + SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP, + Speech2Text2Config, + Speech2Text2Processor, + Speech2Text2Tokenizer, + ) + from .models.speecht5 import ( + SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, + SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, + SpeechT5Config, + SpeechT5FeatureExtractor, + SpeechT5HifiGanConfig, + SpeechT5Processor, + ) + from .models.splinter import ( + SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP, + SplinterConfig, + SplinterTokenizer, + ) + from .models.squeezebert import ( + SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, + SqueezeBertConfig, + SqueezeBertTokenizer, + ) + from .models.stablelm import STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP, StableLmConfig + from .models.starcoder2 import STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP, Starcoder2Config + from .models.superpoint import SUPERPOINT_PRETRAINED_CONFIG_ARCHIVE_MAP, SuperPointConfig + from .models.swiftformer import ( + SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + SwiftFormerConfig, + ) + from 
.models.swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig + from .models.swin2sr import SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP, Swin2SRConfig + from .models.swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config + from .models.switch_transformers import ( + SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP, + SwitchTransformersConfig, + ) + from .models.t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config + from .models.table_transformer import ( + TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + TableTransformerConfig, + ) + from .models.tapas import ( + TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, + TapasConfig, + TapasTokenizer, + ) + from .models.time_series_transformer import ( + TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + TimeSeriesTransformerConfig, + ) + from .models.timesformer import ( + TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + TimesformerConfig, + ) + from .models.timm_backbone import TimmBackboneConfig + from .models.trocr import ( + TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, + TrOCRConfig, + TrOCRProcessor, + ) + from .models.tvlt import ( + TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP, + TvltConfig, + TvltFeatureExtractor, + TvltProcessor, + ) + from .models.tvp import ( + TVP_PRETRAINED_CONFIG_ARCHIVE_MAP, + TvpConfig, + TvpProcessor, + ) + from .models.udop import UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP, UdopConfig, UdopProcessor + from .models.umt5 import UMT5Config + from .models.unispeech import ( + UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, + UniSpeechConfig, + ) + from .models.unispeech_sat import ( + UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP, + UniSpeechSatConfig, + ) + from .models.univnet import ( + UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP, + UnivNetConfig, + UnivNetFeatureExtractor, + ) + from .models.upernet import UperNetConfig + from .models.videomae import VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP, VideoMAEConfig + from .models.vilt import ( + VILT_PRETRAINED_CONFIG_ARCHIVE_MAP, + ViltConfig, + ViltFeatureExtractor, + ViltImageProcessor, + ViltProcessor, + ) + from .models.vipllava import ( + VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP, + VipLlavaConfig, + ) + from .models.vision_encoder_decoder import VisionEncoderDecoderConfig + from .models.vision_text_dual_encoder import ( + VisionTextDualEncoderConfig, + VisionTextDualEncoderProcessor, + ) + from .models.visual_bert import ( + VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, + VisualBertConfig, + ) + from .models.vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig + from .models.vit_hybrid import ( + VIT_HYBRID_PRETRAINED_CONFIG_ARCHIVE_MAP, + ViTHybridConfig, + ) + from .models.vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig + from .models.vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig + from .models.vitdet import VITDET_PRETRAINED_CONFIG_ARCHIVE_MAP, VitDetConfig + from .models.vitmatte import VITMATTE_PRETRAINED_CONFIG_ARCHIVE_MAP, VitMatteConfig + from .models.vits import ( + VITS_PRETRAINED_CONFIG_ARCHIVE_MAP, + VitsConfig, + VitsTokenizer, + ) + from .models.vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig + from .models.wav2vec2 import ( + WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, + Wav2Vec2Config, + Wav2Vec2CTCTokenizer, + Wav2Vec2FeatureExtractor, + Wav2Vec2Processor, + Wav2Vec2Tokenizer, + ) + from .models.wav2vec2_bert import ( + WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, + Wav2Vec2BertConfig, + Wav2Vec2BertProcessor, + ) + from .models.wav2vec2_conformer import ( + WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + Wav2Vec2ConformerConfig, + ) 
+ from .models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer + from .models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM + from .models.wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig + from .models.whisper import ( + WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, + WhisperConfig, + WhisperFeatureExtractor, + WhisperProcessor, + WhisperTokenizer, + ) + from .models.x_clip import ( + XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, + XCLIPConfig, + XCLIPProcessor, + XCLIPTextConfig, + XCLIPVisionConfig, + ) + from .models.xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig + from .models.xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMTokenizer + from .models.xlm_prophetnet import ( + XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, + XLMProphetNetConfig, + ) + from .models.xlm_roberta import ( + XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, + XLMRobertaConfig, + ) + from .models.xlm_roberta_xl import ( + XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, + XLMRobertaXLConfig, + ) + from .models.xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig + from .models.xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig + from .models.yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig + from .models.yoso import YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP, YosoConfig + + # Pipelines + from .pipelines import ( + AudioClassificationPipeline, + AutomaticSpeechRecognitionPipeline, + Conversation, + ConversationalPipeline, + CsvPipelineDataFormat, + DepthEstimationPipeline, + DocumentQuestionAnsweringPipeline, + FeatureExtractionPipeline, + FillMaskPipeline, + ImageClassificationPipeline, + ImageFeatureExtractionPipeline, + ImageSegmentationPipeline, + ImageToImagePipeline, + ImageToTextPipeline, + JsonPipelineDataFormat, + MaskGenerationPipeline, + NerPipeline, + ObjectDetectionPipeline, + PipedPipelineDataFormat, + Pipeline, + PipelineDataFormat, + QuestionAnsweringPipeline, + SummarizationPipeline, + TableQuestionAnsweringPipeline, + Text2TextGenerationPipeline, + TextClassificationPipeline, + TextGenerationPipeline, + TextToAudioPipeline, + TokenClassificationPipeline, + TranslationPipeline, + VideoClassificationPipeline, + VisualQuestionAnsweringPipeline, + ZeroShotAudioClassificationPipeline, + ZeroShotClassificationPipeline, + ZeroShotImageClassificationPipeline, + ZeroShotObjectDetectionPipeline, + pipeline, + ) + from .processing_utils import ProcessorMixin + + # Tokenization + from .tokenization_utils import PreTrainedTokenizer + from .tokenization_utils_base import ( + AddedToken, + BatchEncoding, + CharSpan, + PreTrainedTokenizerBase, + SpecialTokensMixin, + TokenSpan, + ) + + # Tools + from .tools import ( + Agent, + AzureOpenAiAgent, + HfAgent, + LocalAgent, + OpenAiAgent, + PipelineTool, + RemoteTool, + Tool, + launch_gradio_demo, + load_tool, + ) + + # Trainer + from .trainer_callback import ( + DefaultFlowCallback, + EarlyStoppingCallback, + PrinterCallback, + ProgressCallback, + TrainerCallback, + TrainerControl, + TrainerState, + ) + from .trainer_utils import ( + EvalPrediction, + IntervalStrategy, + SchedulerType, + enable_full_determinism, + set_seed, + ) + from .training_args import TrainingArguments + from .training_args_seq2seq import Seq2SeqTrainingArguments + from .training_args_tf import TFTrainingArguments + + # Files and general utilities + from .utils import ( + CONFIG_NAME, + MODEL_CARD_NAME, + PYTORCH_PRETRAINED_BERT_CACHE, + PYTORCH_TRANSFORMERS_CACHE, + SPIECE_UNDERLINE, + TF2_WEIGHTS_NAME, + TF_WEIGHTS_NAME, + TRANSFORMERS_CACHE, + 
WEIGHTS_NAME, + TensorType, + add_end_docstrings, + add_start_docstrings, + is_apex_available, + is_bitsandbytes_available, + is_datasets_available, + is_decord_available, + is_faiss_available, + is_flax_available, + is_keras_nlp_available, + is_phonemizer_available, + is_psutil_available, + is_py3nvml_available, + is_pyctcdecode_available, + is_sacremoses_available, + is_safetensors_available, + is_scipy_available, + is_sentencepiece_available, + is_sklearn_available, + is_speech_available, + is_tensorflow_text_available, + is_tf_available, + is_timm_available, + is_tokenizers_available, + is_torch_available, + is_torch_neuroncore_available, + is_torch_npu_available, + is_torch_tpu_available, + is_torch_xla_available, + is_torch_xpu_available, + is_torchvision_available, + is_vision_available, + logging, + ) + + # bitsandbytes config + from .utils.quantization_config import AqlmConfig, AwqConfig, BitsAndBytesConfig, GPTQConfig, QuantoConfig + + try: + if not is_sentencepiece_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_sentencepiece_objects import * + else: + from .models.albert import AlbertTokenizer + from .models.barthez import BarthezTokenizer + from .models.bartpho import BartphoTokenizer + from .models.bert_generation import BertGenerationTokenizer + from .models.big_bird import BigBirdTokenizer + from .models.camembert import CamembertTokenizer + from .models.code_llama import CodeLlamaTokenizer + from .models.cpm import CpmTokenizer + from .models.deberta_v2 import DebertaV2Tokenizer + from .models.ernie_m import ErnieMTokenizer + from .models.fnet import FNetTokenizer + from .models.gemma import GemmaTokenizer + from .models.gpt_sw3 import GPTSw3Tokenizer + from .models.layoutxlm import LayoutXLMTokenizer + from .models.llama import LlamaTokenizer + from .models.m2m_100 import M2M100Tokenizer + from .models.marian import MarianTokenizer + from .models.mbart import MBart50Tokenizer, MBartTokenizer + from .models.mluke import MLukeTokenizer + from .models.mt5 import MT5Tokenizer + from .models.nllb import NllbTokenizer + from .models.pegasus import PegasusTokenizer + from .models.plbart import PLBartTokenizer + from .models.reformer import ReformerTokenizer + from .models.rembert import RemBertTokenizer + from .models.seamless_m4t import SeamlessM4TTokenizer + from .models.siglip import SiglipTokenizer + from .models.speech_to_text import Speech2TextTokenizer + from .models.speecht5 import SpeechT5Tokenizer + from .models.t5 import T5Tokenizer + from .models.udop import UdopTokenizer + from .models.xglm import XGLMTokenizer + from .models.xlm_prophetnet import XLMProphetNetTokenizer + from .models.xlm_roberta import XLMRobertaTokenizer + from .models.xlnet import XLNetTokenizer + + try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_tokenizers_objects import * + else: + # Fast tokenizers imports + from .models.albert import AlbertTokenizerFast + from .models.bart import BartTokenizerFast + from .models.barthez import BarthezTokenizerFast + from .models.bert import BertTokenizerFast + from .models.big_bird import BigBirdTokenizerFast + from .models.blenderbot import BlenderbotTokenizerFast + from .models.blenderbot_small import BlenderbotSmallTokenizerFast + from .models.bloom import BloomTokenizerFast + from .models.camembert import CamembertTokenizerFast + from .models.clip import CLIPTokenizerFast + from 
.models.code_llama import CodeLlamaTokenizerFast + from .models.codegen import CodeGenTokenizerFast + from .models.cohere import CohereTokenizerFast + from .models.convbert import ConvBertTokenizerFast + from .models.cpm import CpmTokenizerFast + from .models.deberta import DebertaTokenizerFast + from .models.deberta_v2 import DebertaV2TokenizerFast + from .models.deprecated.retribert import RetriBertTokenizerFast + from .models.distilbert import DistilBertTokenizerFast + from .models.dpr import ( + DPRContextEncoderTokenizerFast, + DPRQuestionEncoderTokenizerFast, + DPRReaderTokenizerFast, + ) + from .models.electra import ElectraTokenizerFast + from .models.fnet import FNetTokenizerFast + from .models.funnel import FunnelTokenizerFast + from .models.gemma import GemmaTokenizerFast + from .models.gpt2 import GPT2TokenizerFast + from .models.gpt_neox import GPTNeoXTokenizerFast + from .models.gpt_neox_japanese import GPTNeoXJapaneseTokenizer + from .models.herbert import HerbertTokenizerFast + from .models.layoutlm import LayoutLMTokenizerFast + from .models.layoutlmv2 import LayoutLMv2TokenizerFast + from .models.layoutlmv3 import LayoutLMv3TokenizerFast + from .models.layoutxlm import LayoutXLMTokenizerFast + from .models.led import LEDTokenizerFast + from .models.llama import LlamaTokenizerFast + from .models.longformer import LongformerTokenizerFast + from .models.lxmert import LxmertTokenizerFast + from .models.markuplm import MarkupLMTokenizerFast + from .models.mbart import MBartTokenizerFast + from .models.mbart50 import MBart50TokenizerFast + from .models.mobilebert import MobileBertTokenizerFast + from .models.mpnet import MPNetTokenizerFast + from .models.mt5 import MT5TokenizerFast + from .models.mvp import MvpTokenizerFast + from .models.nllb import NllbTokenizerFast + from .models.nougat import NougatTokenizerFast + from .models.openai import OpenAIGPTTokenizerFast + from .models.pegasus import PegasusTokenizerFast + from .models.qwen2 import Qwen2TokenizerFast + from .models.realm import RealmTokenizerFast + from .models.reformer import ReformerTokenizerFast + from .models.rembert import RemBertTokenizerFast + from .models.roberta import RobertaTokenizerFast + from .models.roformer import RoFormerTokenizerFast + from .models.seamless_m4t import SeamlessM4TTokenizerFast + from .models.splinter import SplinterTokenizerFast + from .models.squeezebert import SqueezeBertTokenizerFast + from .models.t5 import T5TokenizerFast + from .models.udop import UdopTokenizerFast + from .models.whisper import WhisperTokenizerFast + from .models.xglm import XGLMTokenizerFast + from .models.xlm_roberta import XLMRobertaTokenizerFast + from .models.xlnet import XLNetTokenizerFast + from .tokenization_utils_fast import PreTrainedTokenizerFast + + try: + if not (is_sentencepiece_available() and is_tokenizers_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummies_sentencepiece_and_tokenizers_objects import * + else: + from .convert_slow_tokenizer import ( + SLOW_TO_FAST_CONVERTERS, + convert_slow_tokenizer, + ) + + try: + if not is_tensorflow_text_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_tensorflow_text_objects import * + else: + from .models.bert import TFBertTokenizer + + try: + if not is_keras_nlp_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_keras_nlp_objects import * + else: + from 
.models.gpt2 import TFGPT2Tokenizer + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_vision_objects import * + else: + from .image_processing_utils import ImageProcessingMixin + from .image_utils import ImageFeatureExtractionMixin + from .models.beit import BeitFeatureExtractor, BeitImageProcessor + from .models.bit import BitImageProcessor + from .models.blip import BlipImageProcessor + from .models.bridgetower import BridgeTowerImageProcessor + from .models.chinese_clip import ( + ChineseCLIPFeatureExtractor, + ChineseCLIPImageProcessor, + ) + from .models.clip import CLIPFeatureExtractor, CLIPImageProcessor + from .models.conditional_detr import ( + ConditionalDetrFeatureExtractor, + ConditionalDetrImageProcessor, + ) + from .models.convnext import ConvNextFeatureExtractor, ConvNextImageProcessor + from .models.deformable_detr import ( + DeformableDetrFeatureExtractor, + DeformableDetrImageProcessor, + ) + from .models.deit import DeiTFeatureExtractor, DeiTImageProcessor + from .models.deta import DetaImageProcessor + from .models.detr import DetrFeatureExtractor, DetrImageProcessor + from .models.donut import DonutFeatureExtractor, DonutImageProcessor + from .models.dpt import DPTFeatureExtractor, DPTImageProcessor + from .models.efficientformer import EfficientFormerImageProcessor + from .models.efficientnet import EfficientNetImageProcessor + from .models.flava import ( + FlavaFeatureExtractor, + FlavaImageProcessor, + FlavaProcessor, + ) + from .models.fuyu import FuyuImageProcessor, FuyuProcessor + from .models.glpn import GLPNFeatureExtractor, GLPNImageProcessor + from .models.idefics import IdeficsImageProcessor + from .models.imagegpt import ImageGPTFeatureExtractor, ImageGPTImageProcessor + from .models.layoutlmv2 import ( + LayoutLMv2FeatureExtractor, + LayoutLMv2ImageProcessor, + ) + from .models.layoutlmv3 import ( + LayoutLMv3FeatureExtractor, + LayoutLMv3ImageProcessor, + ) + from .models.levit import LevitFeatureExtractor, LevitImageProcessor + from .models.llava_next import LlavaNextImageProcessor + from .models.mask2former import Mask2FormerImageProcessor + from .models.maskformer import ( + MaskFormerFeatureExtractor, + MaskFormerImageProcessor, + ) + from .models.mobilenet_v1 import ( + MobileNetV1FeatureExtractor, + MobileNetV1ImageProcessor, + ) + from .models.mobilenet_v2 import ( + MobileNetV2FeatureExtractor, + MobileNetV2ImageProcessor, + ) + from .models.mobilevit import MobileViTFeatureExtractor, MobileViTImageProcessor + from .models.nougat import NougatImageProcessor + from .models.oneformer import OneFormerImageProcessor + from .models.owlv2 import Owlv2ImageProcessor + from .models.owlvit import OwlViTFeatureExtractor, OwlViTImageProcessor + from .models.perceiver import PerceiverFeatureExtractor, PerceiverImageProcessor + from .models.pix2struct import Pix2StructImageProcessor + from .models.poolformer import ( + PoolFormerFeatureExtractor, + PoolFormerImageProcessor, + ) + from .models.pvt import PvtImageProcessor + from .models.sam import SamImageProcessor + from .models.segformer import SegformerFeatureExtractor, SegformerImageProcessor + from .models.seggpt import SegGptImageProcessor + from .models.siglip import SiglipImageProcessor + from .models.superpoint import SuperPointImageProcessor + from .models.swin2sr import Swin2SRImageProcessor + from .models.tvlt import TvltImageProcessor + from .models.tvp import TvpImageProcessor + from .models.videomae 
import VideoMAEFeatureExtractor, VideoMAEImageProcessor + from .models.vilt import ViltFeatureExtractor, ViltImageProcessor, ViltProcessor + from .models.vit import ViTFeatureExtractor, ViTImageProcessor + from .models.vit_hybrid import ViTHybridImageProcessor + from .models.vitmatte import VitMatteImageProcessor + from .models.vivit import VivitImageProcessor + from .models.yolos import YolosFeatureExtractor, YolosImageProcessor + + # Modeling + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_pt_objects import * + else: + # Benchmarks + from .benchmark.benchmark import PyTorchBenchmark + from .benchmark.benchmark_args import PyTorchBenchmarkArguments + from .cache_utils import Cache, DynamicCache, SinkCache, StaticCache + from .data.datasets import ( + GlueDataset, + GlueDataTrainingArguments, + LineByLineTextDataset, + LineByLineWithRefDataset, + LineByLineWithSOPTextDataset, + SquadDataset, + SquadDataTrainingArguments, + TextDataset, + TextDatasetForNextSentencePrediction, + ) + from .generation import ( + AlternatingCodebooksLogitsProcessor, + BeamScorer, + BeamSearchScorer, + ClassifierFreeGuidanceLogitsProcessor, + ConstrainedBeamSearchScorer, + Constraint, + ConstraintListState, + DisjunctiveConstraint, + EncoderNoRepeatNGramLogitsProcessor, + EncoderRepetitionPenaltyLogitsProcessor, + EpsilonLogitsWarper, + EtaLogitsWarper, + ExponentialDecayLengthPenalty, + ForcedBOSTokenLogitsProcessor, + ForcedEOSTokenLogitsProcessor, + ForceTokensLogitsProcessor, + GenerationMixin, + HammingDiversityLogitsProcessor, + InfNanRemoveLogitsProcessor, + LogitNormalization, + LogitsProcessor, + LogitsProcessorList, + LogitsWarper, + MaxLengthCriteria, + MaxTimeCriteria, + MinLengthLogitsProcessor, + MinNewTokensLengthLogitsProcessor, + NoBadWordsLogitsProcessor, + NoRepeatNGramLogitsProcessor, + PhrasalConstraint, + PrefixConstrainedLogitsProcessor, + RepetitionPenaltyLogitsProcessor, + SequenceBiasLogitsProcessor, + StoppingCriteria, + StoppingCriteriaList, + SuppressTokensAtBeginLogitsProcessor, + SuppressTokensLogitsProcessor, + TemperatureLogitsWarper, + TopKLogitsWarper, + TopPLogitsWarper, + TypicalLogitsWarper, + UnbatchedClassifierFreeGuidanceLogitsProcessor, + WhisperTimeStampLogitsProcessor, + ) + from .modeling_utils import PreTrainedModel + from .models.albert import ( + ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + AlbertForMaskedLM, + AlbertForMultipleChoice, + AlbertForPreTraining, + AlbertForQuestionAnswering, + AlbertForSequenceClassification, + AlbertForTokenClassification, + AlbertModel, + AlbertPreTrainedModel, + load_tf_weights_in_albert, + ) + from .models.align import ( + ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST, + AlignModel, + AlignPreTrainedModel, + AlignTextModel, + AlignVisionModel, + ) + from .models.altclip import ( + ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + AltCLIPModel, + AltCLIPPreTrainedModel, + AltCLIPTextModel, + AltCLIPVisionModel, + ) + from .models.audio_spectrogram_transformer import ( + AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + ASTForAudioClassification, + ASTModel, + ASTPreTrainedModel, + ) + from .models.auto import ( + MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, + MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING, + MODEL_FOR_AUDIO_XVECTOR_MAPPING, + MODEL_FOR_BACKBONE_MAPPING, + MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, + MODEL_FOR_CAUSAL_LM_MAPPING, + MODEL_FOR_CTC_MAPPING, + MODEL_FOR_DEPTH_ESTIMATION_MAPPING, + MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, + 
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, + MODEL_FOR_IMAGE_MAPPING, + MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, + MODEL_FOR_IMAGE_TO_IMAGE_MAPPING, + MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, + MODEL_FOR_KEYPOINT_DETECTION_MAPPING, + MODEL_FOR_MASK_GENERATION_MAPPING, + MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, + MODEL_FOR_MASKED_LM_MAPPING, + MODEL_FOR_MULTIPLE_CHOICE_MAPPING, + MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, + MODEL_FOR_OBJECT_DETECTION_MAPPING, + MODEL_FOR_PRETRAINING_MAPPING, + MODEL_FOR_QUESTION_ANSWERING_MAPPING, + MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, + MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, + MODEL_FOR_TEXT_ENCODING_MAPPING, + MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING, + MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING, + MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING, + MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING, + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, + MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING, + MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, + MODEL_FOR_VISION_2_SEQ_MAPPING, + MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, + MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING, + MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, + MODEL_MAPPING, + MODEL_WITH_LM_HEAD_MAPPING, + AutoBackbone, + AutoModel, + AutoModelForAudioClassification, + AutoModelForAudioFrameClassification, + AutoModelForAudioXVector, + AutoModelForCausalLM, + AutoModelForCTC, + AutoModelForDepthEstimation, + AutoModelForDocumentQuestionAnswering, + AutoModelForImageClassification, + AutoModelForImageSegmentation, + AutoModelForImageToImage, + AutoModelForInstanceSegmentation, + AutoModelForKeypointDetection, + AutoModelForMaskedImageModeling, + AutoModelForMaskedLM, + AutoModelForMaskGeneration, + AutoModelForMultipleChoice, + AutoModelForNextSentencePrediction, + AutoModelForObjectDetection, + AutoModelForPreTraining, + AutoModelForQuestionAnswering, + AutoModelForSemanticSegmentation, + AutoModelForSeq2SeqLM, + AutoModelForSequenceClassification, + AutoModelForSpeechSeq2Seq, + AutoModelForTableQuestionAnswering, + AutoModelForTextEncoding, + AutoModelForTextToSpectrogram, + AutoModelForTextToWaveform, + AutoModelForTokenClassification, + AutoModelForUniversalSegmentation, + AutoModelForVideoClassification, + AutoModelForVision2Seq, + AutoModelForVisualQuestionAnswering, + AutoModelForZeroShotImageClassification, + AutoModelForZeroShotObjectDetection, + AutoModelWithLMHead, + ) + from .models.autoformer import ( + AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + AutoformerForPrediction, + AutoformerModel, + AutoformerPreTrainedModel, + ) + from .models.bark import ( + BARK_PRETRAINED_MODEL_ARCHIVE_LIST, + BarkCausalModel, + BarkCoarseModel, + BarkFineModel, + BarkModel, + BarkPreTrainedModel, + BarkSemanticModel, + ) + from .models.bart import ( + BART_PRETRAINED_MODEL_ARCHIVE_LIST, + BartForCausalLM, + BartForConditionalGeneration, + BartForQuestionAnswering, + BartForSequenceClassification, + BartModel, + BartPreTrainedModel, + BartPretrainedModel, + PretrainedBartModel, + ) + from .models.beit import ( + BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, + BeitBackbone, + BeitForImageClassification, + BeitForMaskedImageModeling, + BeitForSemanticSegmentation, + BeitModel, + BeitPreTrainedModel, + ) + from .models.bert import ( + BERT_PRETRAINED_MODEL_ARCHIVE_LIST, + BertForMaskedLM, + BertForMultipleChoice, + BertForNextSentencePrediction, + BertForPreTraining, + BertForQuestionAnswering, + BertForSequenceClassification, + 
BertForTokenClassification, + BertLayer, + BertLMHeadModel, + BertModel, + BertPreTrainedModel, + load_tf_weights_in_bert, + ) + from .models.bert_generation import ( + BertGenerationDecoder, + BertGenerationEncoder, + BertGenerationPreTrainedModel, + load_tf_weights_in_bert_generation, + ) + from .models.big_bird import ( + BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST, + BigBirdForCausalLM, + BigBirdForMaskedLM, + BigBirdForMultipleChoice, + BigBirdForPreTraining, + BigBirdForQuestionAnswering, + BigBirdForSequenceClassification, + BigBirdForTokenClassification, + BigBirdLayer, + BigBirdModel, + BigBirdPreTrainedModel, + load_tf_weights_in_big_bird, + ) + from .models.bigbird_pegasus import ( + BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, + BigBirdPegasusForCausalLM, + BigBirdPegasusForConditionalGeneration, + BigBirdPegasusForQuestionAnswering, + BigBirdPegasusForSequenceClassification, + BigBirdPegasusModel, + BigBirdPegasusPreTrainedModel, + ) + from .models.biogpt import ( + BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, + BioGptForCausalLM, + BioGptForSequenceClassification, + BioGptForTokenClassification, + BioGptModel, + BioGptPreTrainedModel, + ) + from .models.bit import ( + BIT_PRETRAINED_MODEL_ARCHIVE_LIST, + BitBackbone, + BitForImageClassification, + BitModel, + BitPreTrainedModel, + ) + from .models.blenderbot import ( + BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, + BlenderbotForCausalLM, + BlenderbotForConditionalGeneration, + BlenderbotModel, + BlenderbotPreTrainedModel, + ) + from .models.blenderbot_small import ( + BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, + BlenderbotSmallForCausalLM, + BlenderbotSmallForConditionalGeneration, + BlenderbotSmallModel, + BlenderbotSmallPreTrainedModel, + ) + from .models.blip import ( + BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + BlipForConditionalGeneration, + BlipForImageTextRetrieval, + BlipForQuestionAnswering, + BlipModel, + BlipPreTrainedModel, + BlipTextModel, + BlipVisionModel, + ) + from .models.blip_2 import ( + BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, + Blip2ForConditionalGeneration, + Blip2Model, + Blip2PreTrainedModel, + Blip2QFormerModel, + Blip2VisionModel, + ) + from .models.bloom import ( + BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, + BloomForCausalLM, + BloomForQuestionAnswering, + BloomForSequenceClassification, + BloomForTokenClassification, + BloomModel, + BloomPreTrainedModel, + ) + from .models.bridgetower import ( + BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, + BridgeTowerForContrastiveLearning, + BridgeTowerForImageAndTextRetrieval, + BridgeTowerForMaskedLM, + BridgeTowerModel, + BridgeTowerPreTrainedModel, + ) + from .models.bros import ( + BROS_PRETRAINED_MODEL_ARCHIVE_LIST, + BrosForTokenClassification, + BrosModel, + BrosPreTrainedModel, + BrosProcessor, + BrosSpadeEEForTokenClassification, + BrosSpadeELForTokenClassification, + ) + from .models.camembert import ( + CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + CamembertForCausalLM, + CamembertForMaskedLM, + CamembertForMultipleChoice, + CamembertForQuestionAnswering, + CamembertForSequenceClassification, + CamembertForTokenClassification, + CamembertModel, + CamembertPreTrainedModel, + ) + from .models.canine import ( + CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, + CanineForMultipleChoice, + CanineForQuestionAnswering, + CanineForSequenceClassification, + CanineForTokenClassification, + CanineLayer, + CanineModel, + CaninePreTrainedModel, + load_tf_weights_in_canine, + ) + from .models.chinese_clip import ( + CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + ChineseCLIPModel, + 
ChineseCLIPPreTrainedModel, + ChineseCLIPTextModel, + ChineseCLIPVisionModel, + ) + from .models.clap import ( + CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, + ClapAudioModel, + ClapAudioModelWithProjection, + ClapFeatureExtractor, + ClapModel, + ClapPreTrainedModel, + ClapTextModel, + ClapTextModelWithProjection, + ) + from .models.clip import ( + CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + CLIPForImageClassification, + CLIPModel, + CLIPPreTrainedModel, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPVisionModel, + CLIPVisionModelWithProjection, + ) + from .models.clipseg import ( + CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, + CLIPSegForImageSegmentation, + CLIPSegModel, + CLIPSegPreTrainedModel, + CLIPSegTextModel, + CLIPSegVisionModel, + ) + from .models.clvp import ( + CLVP_PRETRAINED_MODEL_ARCHIVE_LIST, + ClvpDecoder, + ClvpEncoder, + ClvpForCausalLM, + ClvpModel, + ClvpModelForConditionalGeneration, + ClvpPreTrainedModel, + ) + from .models.codegen import ( + CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST, + CodeGenForCausalLM, + CodeGenModel, + CodeGenPreTrainedModel, + ) + from .models.cohere import ( + CohereForCausalLM, + CohereModel, + CoherePreTrainedModel, + ) + from .models.conditional_detr import ( + CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, + ConditionalDetrForObjectDetection, + ConditionalDetrForSegmentation, + ConditionalDetrModel, + ConditionalDetrPreTrainedModel, + ) + from .models.convbert import ( + CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + ConvBertForMaskedLM, + ConvBertForMultipleChoice, + ConvBertForQuestionAnswering, + ConvBertForSequenceClassification, + ConvBertForTokenClassification, + ConvBertLayer, + ConvBertModel, + ConvBertPreTrainedModel, + load_tf_weights_in_convbert, + ) + from .models.convnext import ( + CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, + ConvNextBackbone, + ConvNextForImageClassification, + ConvNextModel, + ConvNextPreTrainedModel, + ) + from .models.convnextv2 import ( + CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST, + ConvNextV2Backbone, + ConvNextV2ForImageClassification, + ConvNextV2Model, + ConvNextV2PreTrainedModel, + ) + from .models.cpmant import ( + CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, + CpmAntForCausalLM, + CpmAntModel, + CpmAntPreTrainedModel, + ) + from .models.ctrl import ( + CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, + CTRLForSequenceClassification, + CTRLLMHeadModel, + CTRLModel, + CTRLPreTrainedModel, + ) + from .models.cvt import ( + CVT_PRETRAINED_MODEL_ARCHIVE_LIST, + CvtForImageClassification, + CvtModel, + CvtPreTrainedModel, + ) + from .models.data2vec import ( + DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, + DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, + DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, + Data2VecAudioForAudioFrameClassification, + Data2VecAudioForCTC, + Data2VecAudioForSequenceClassification, + Data2VecAudioForXVector, + Data2VecAudioModel, + Data2VecAudioPreTrainedModel, + Data2VecTextForCausalLM, + Data2VecTextForMaskedLM, + Data2VecTextForMultipleChoice, + Data2VecTextForQuestionAnswering, + Data2VecTextForSequenceClassification, + Data2VecTextForTokenClassification, + Data2VecTextModel, + Data2VecTextPreTrainedModel, + Data2VecVisionForImageClassification, + Data2VecVisionForSemanticSegmentation, + Data2VecVisionModel, + Data2VecVisionPreTrainedModel, + ) + from .models.deberta import ( + DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, + DebertaForMaskedLM, + DebertaForQuestionAnswering, + DebertaForSequenceClassification, + DebertaForTokenClassification, + DebertaModel, + DebertaPreTrainedModel, + ) + from .models.deberta_v2 import ( + 
DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST, + DebertaV2ForMaskedLM, + DebertaV2ForMultipleChoice, + DebertaV2ForQuestionAnswering, + DebertaV2ForSequenceClassification, + DebertaV2ForTokenClassification, + DebertaV2Model, + DebertaV2PreTrainedModel, + ) + from .models.decision_transformer import ( + DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + DecisionTransformerGPT2Model, + DecisionTransformerGPT2PreTrainedModel, + DecisionTransformerModel, + DecisionTransformerPreTrainedModel, + ) + from .models.deformable_detr import ( + DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, + DeformableDetrForObjectDetection, + DeformableDetrModel, + DeformableDetrPreTrainedModel, + ) + from .models.deit import ( + DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, + DeiTForImageClassification, + DeiTForImageClassificationWithTeacher, + DeiTForMaskedImageModeling, + DeiTModel, + DeiTPreTrainedModel, + ) + from .models.deprecated.mctct import ( + MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, + MCTCTForCTC, + MCTCTModel, + MCTCTPreTrainedModel, + ) + from .models.deprecated.mmbt import ( + MMBTForClassification, + MMBTModel, + ModalEmbeddings, + ) + from .models.deprecated.open_llama import ( + OpenLlamaForCausalLM, + OpenLlamaForSequenceClassification, + OpenLlamaModel, + OpenLlamaPreTrainedModel, + ) + from .models.deprecated.retribert import ( + RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + RetriBertModel, + RetriBertPreTrainedModel, + ) + from .models.deprecated.trajectory_transformer import ( + TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + TrajectoryTransformerModel, + TrajectoryTransformerPreTrainedModel, + ) + from .models.deprecated.transfo_xl import ( + TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, + AdaptiveEmbedding, + TransfoXLForSequenceClassification, + TransfoXLLMHeadModel, + TransfoXLModel, + TransfoXLPreTrainedModel, + load_tf_weights_in_transfo_xl, + ) + from .models.deprecated.van import ( + VAN_PRETRAINED_MODEL_ARCHIVE_LIST, + VanForImageClassification, + VanModel, + VanPreTrainedModel, + ) + from .models.depth_anything import ( + DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST, + DepthAnythingForDepthEstimation, + DepthAnythingPreTrainedModel, + ) + from .models.deta import ( + DETA_PRETRAINED_MODEL_ARCHIVE_LIST, + DetaForObjectDetection, + DetaModel, + DetaPreTrainedModel, + ) + from .models.detr import ( + DETR_PRETRAINED_MODEL_ARCHIVE_LIST, + DetrForObjectDetection, + DetrForSegmentation, + DetrModel, + DetrPreTrainedModel, + ) + from .models.dinat import ( + DINAT_PRETRAINED_MODEL_ARCHIVE_LIST, + DinatBackbone, + DinatForImageClassification, + DinatModel, + DinatPreTrainedModel, + ) + from .models.dinov2 import ( + DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST, + Dinov2Backbone, + Dinov2ForImageClassification, + Dinov2Model, + Dinov2PreTrainedModel, + ) + from .models.distilbert import ( + DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + DistilBertForMaskedLM, + DistilBertForMultipleChoice, + DistilBertForQuestionAnswering, + DistilBertForSequenceClassification, + DistilBertForTokenClassification, + DistilBertModel, + DistilBertPreTrainedModel, + ) + from .models.donut import ( + DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, + DonutSwinModel, + DonutSwinPreTrainedModel, + ) + from .models.dpr import ( + DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, + DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, + DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, + DPRContextEncoder, + DPRPretrainedContextEncoder, + DPRPreTrainedModel, + DPRPretrainedQuestionEncoder, + DPRPretrainedReader, + DPRQuestionEncoder, + DPRReader, + ) + from 
.models.dpt import ( + DPT_PRETRAINED_MODEL_ARCHIVE_LIST, + DPTForDepthEstimation, + DPTForSemanticSegmentation, + DPTModel, + DPTPreTrainedModel, + ) + from .models.efficientformer import ( + EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + EfficientFormerForImageClassification, + EfficientFormerForImageClassificationWithTeacher, + EfficientFormerModel, + EfficientFormerPreTrainedModel, + ) + from .models.efficientnet import ( + EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, + EfficientNetForImageClassification, + EfficientNetModel, + EfficientNetPreTrainedModel, + ) + from .models.electra import ( + ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, + ElectraForCausalLM, + ElectraForMaskedLM, + ElectraForMultipleChoice, + ElectraForPreTraining, + ElectraForQuestionAnswering, + ElectraForSequenceClassification, + ElectraForTokenClassification, + ElectraModel, + ElectraPreTrainedModel, + load_tf_weights_in_electra, + ) + from .models.encodec import ( + ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, + EncodecModel, + EncodecPreTrainedModel, + ) + from .models.encoder_decoder import EncoderDecoderModel + from .models.ernie import ( + ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, + ErnieForCausalLM, + ErnieForMaskedLM, + ErnieForMultipleChoice, + ErnieForNextSentencePrediction, + ErnieForPreTraining, + ErnieForQuestionAnswering, + ErnieForSequenceClassification, + ErnieForTokenClassification, + ErnieModel, + ErniePreTrainedModel, + ) + from .models.ernie_m import ( + ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST, + ErnieMForInformationExtraction, + ErnieMForMultipleChoice, + ErnieMForQuestionAnswering, + ErnieMForSequenceClassification, + ErnieMForTokenClassification, + ErnieMModel, + ErnieMPreTrainedModel, + ) + from .models.esm import ( + ESM_PRETRAINED_MODEL_ARCHIVE_LIST, + EsmFoldPreTrainedModel, + EsmForMaskedLM, + EsmForProteinFolding, + EsmForSequenceClassification, + EsmForTokenClassification, + EsmModel, + EsmPreTrainedModel, + ) + from .models.falcon import ( + FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, + FalconForCausalLM, + FalconForQuestionAnswering, + FalconForSequenceClassification, + FalconForTokenClassification, + FalconModel, + FalconPreTrainedModel, + ) + from .models.fastspeech2_conformer import ( + FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + FastSpeech2ConformerHifiGan, + FastSpeech2ConformerModel, + FastSpeech2ConformerPreTrainedModel, + FastSpeech2ConformerWithHifiGan, + ) + from .models.flaubert import ( + FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + FlaubertForMultipleChoice, + FlaubertForQuestionAnswering, + FlaubertForQuestionAnsweringSimple, + FlaubertForSequenceClassification, + FlaubertForTokenClassification, + FlaubertModel, + FlaubertPreTrainedModel, + FlaubertWithLMHeadModel, + ) + from .models.flava import ( + FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST, + FlavaForPreTraining, + FlavaImageCodebook, + FlavaImageModel, + FlavaModel, + FlavaMultimodalModel, + FlavaPreTrainedModel, + FlavaTextModel, + ) + from .models.fnet import ( + FNET_PRETRAINED_MODEL_ARCHIVE_LIST, + FNetForMaskedLM, + FNetForMultipleChoice, + FNetForNextSentencePrediction, + FNetForPreTraining, + FNetForQuestionAnswering, + FNetForSequenceClassification, + FNetForTokenClassification, + FNetLayer, + FNetModel, + FNetPreTrainedModel, + ) + from .models.focalnet import ( + FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, + FocalNetBackbone, + FocalNetForImageClassification, + FocalNetForMaskedImageModeling, + FocalNetModel, + FocalNetPreTrainedModel, + ) + from .models.fsmt import ( + FSMTForConditionalGeneration, + FSMTModel, + 
PretrainedFSMTModel, + ) + from .models.funnel import ( + FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, + FunnelBaseModel, + FunnelForMaskedLM, + FunnelForMultipleChoice, + FunnelForPreTraining, + FunnelForQuestionAnswering, + FunnelForSequenceClassification, + FunnelForTokenClassification, + FunnelModel, + FunnelPreTrainedModel, + load_tf_weights_in_funnel, + ) + from .models.fuyu import ( + FuyuForCausalLM, + FuyuPreTrainedModel, + ) + from .models.gemma import ( + GemmaForCausalLM, + GemmaForSequenceClassification, + GemmaModel, + GemmaPreTrainedModel, + ) + from .models.git import ( + GIT_PRETRAINED_MODEL_ARCHIVE_LIST, + GitForCausalLM, + GitModel, + GitPreTrainedModel, + GitVisionModel, + ) + from .models.glpn import ( + GLPN_PRETRAINED_MODEL_ARCHIVE_LIST, + GLPNForDepthEstimation, + GLPNModel, + GLPNPreTrainedModel, + ) + from .models.gpt2 import ( + GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, + GPT2DoubleHeadsModel, + GPT2ForQuestionAnswering, + GPT2ForSequenceClassification, + GPT2ForTokenClassification, + GPT2LMHeadModel, + GPT2Model, + GPT2PreTrainedModel, + load_tf_weights_in_gpt2, + ) + from .models.gpt_bigcode import ( + GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, + GPTBigCodeForCausalLM, + GPTBigCodeForSequenceClassification, + GPTBigCodeForTokenClassification, + GPTBigCodeModel, + GPTBigCodePreTrainedModel, + ) + from .models.gpt_neo import ( + GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, + GPTNeoForCausalLM, + GPTNeoForQuestionAnswering, + GPTNeoForSequenceClassification, + GPTNeoForTokenClassification, + GPTNeoModel, + GPTNeoPreTrainedModel, + load_tf_weights_in_gpt_neo, + ) + from .models.gpt_neox import ( + GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, + GPTNeoXForCausalLM, + GPTNeoXForQuestionAnswering, + GPTNeoXForSequenceClassification, + GPTNeoXForTokenClassification, + GPTNeoXLayer, + GPTNeoXModel, + GPTNeoXPreTrainedModel, + ) + from .models.gpt_neox_japanese import ( + GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, + GPTNeoXJapaneseForCausalLM, + GPTNeoXJapaneseLayer, + GPTNeoXJapaneseModel, + GPTNeoXJapanesePreTrainedModel, + ) + from .models.gptj import ( + GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST, + GPTJForCausalLM, + GPTJForQuestionAnswering, + GPTJForSequenceClassification, + GPTJModel, + GPTJPreTrainedModel, + ) + from .models.gptsan_japanese import ( + GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, + GPTSanJapaneseForConditionalGeneration, + GPTSanJapaneseModel, + GPTSanJapanesePreTrainedModel, + ) + from .models.graphormer import ( + GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + GraphormerForGraphClassification, + GraphormerModel, + GraphormerPreTrainedModel, + ) + from .models.groupvit import ( + GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, + GroupViTModel, + GroupViTPreTrainedModel, + GroupViTTextModel, + GroupViTVisionModel, + ) + from .models.hubert import ( + HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + HubertForCTC, + HubertForSequenceClassification, + HubertModel, + HubertPreTrainedModel, + ) + from .models.ibert import ( + IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + IBertForMaskedLM, + IBertForMultipleChoice, + IBertForQuestionAnswering, + IBertForSequenceClassification, + IBertForTokenClassification, + IBertModel, + IBertPreTrainedModel, + ) + from .models.idefics import ( + IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST, + IdeficsForVisionText2Text, + IdeficsModel, + IdeficsPreTrainedModel, + IdeficsProcessor, + ) + from .models.imagegpt import ( + IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST, + ImageGPTForCausalImageModeling, + ImageGPTForImageClassification, + ImageGPTModel, + ImageGPTPreTrainedModel, + 
load_tf_weights_in_imagegpt, + ) + from .models.informer import ( + INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + InformerForPrediction, + InformerModel, + InformerPreTrainedModel, + ) + from .models.instructblip import ( + INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + InstructBlipForConditionalGeneration, + InstructBlipPreTrainedModel, + InstructBlipQFormerModel, + InstructBlipVisionModel, + ) + from .models.jukebox import ( + JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, + JukeboxModel, + JukeboxPreTrainedModel, + JukeboxPrior, + JukeboxVQVAE, + ) + from .models.kosmos2 import ( + KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST, + Kosmos2ForConditionalGeneration, + Kosmos2Model, + Kosmos2PreTrainedModel, + ) + from .models.layoutlm import ( + LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, + LayoutLMForMaskedLM, + LayoutLMForQuestionAnswering, + LayoutLMForSequenceClassification, + LayoutLMForTokenClassification, + LayoutLMModel, + LayoutLMPreTrainedModel, + ) + from .models.layoutlmv2 import ( + LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, + LayoutLMv2ForQuestionAnswering, + LayoutLMv2ForSequenceClassification, + LayoutLMv2ForTokenClassification, + LayoutLMv2Model, + LayoutLMv2PreTrainedModel, + ) + from .models.layoutlmv3 import ( + LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, + LayoutLMv3ForQuestionAnswering, + LayoutLMv3ForSequenceClassification, + LayoutLMv3ForTokenClassification, + LayoutLMv3Model, + LayoutLMv3PreTrainedModel, + ) + from .models.led import ( + LED_PRETRAINED_MODEL_ARCHIVE_LIST, + LEDForConditionalGeneration, + LEDForQuestionAnswering, + LEDForSequenceClassification, + LEDModel, + LEDPreTrainedModel, + ) + from .models.levit import ( + LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, + LevitForImageClassification, + LevitForImageClassificationWithTeacher, + LevitModel, + LevitPreTrainedModel, + ) + from .models.lilt import ( + LILT_PRETRAINED_MODEL_ARCHIVE_LIST, + LiltForQuestionAnswering, + LiltForSequenceClassification, + LiltForTokenClassification, + LiltModel, + LiltPreTrainedModel, + ) + from .models.llama import ( + LlamaForCausalLM, + LlamaForQuestionAnswering, + LlamaForSequenceClassification, + LlamaModel, + LlamaPreTrainedModel, + ) + from .models.llava import ( + LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST, + LlavaForConditionalGeneration, + LlavaPreTrainedModel, + ) + from .models.llava_next import ( + LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST, + LlavaNextForConditionalGeneration, + LlavaNextPreTrainedModel, + ) + from .models.longformer import ( + LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + LongformerForMaskedLM, + LongformerForMultipleChoice, + LongformerForQuestionAnswering, + LongformerForSequenceClassification, + LongformerForTokenClassification, + LongformerModel, + LongformerPreTrainedModel, + LongformerSelfAttention, + ) + from .models.longt5 import ( + LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, + LongT5EncoderModel, + LongT5ForConditionalGeneration, + LongT5Model, + LongT5PreTrainedModel, + ) + from .models.luke import ( + LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, + LukeForEntityClassification, + LukeForEntityPairClassification, + LukeForEntitySpanClassification, + LukeForMaskedLM, + LukeForMultipleChoice, + LukeForQuestionAnswering, + LukeForSequenceClassification, + LukeForTokenClassification, + LukeModel, + LukePreTrainedModel, + ) + from .models.lxmert import ( + LxmertEncoder, + LxmertForPreTraining, + LxmertForQuestionAnswering, + LxmertModel, + LxmertPreTrainedModel, + LxmertVisualFeatureEncoder, + LxmertXLayer, + ) + from .models.m2m_100 import ( + M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, + 
M2M100ForConditionalGeneration, + M2M100Model, + M2M100PreTrainedModel, + ) + from .models.mamba import ( + MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST, + MambaForCausalLM, + MambaModel, + MambaPreTrainedModel, + ) + from .models.marian import MarianForCausalLM, MarianModel, MarianMTModel + from .models.markuplm import ( + MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST, + MarkupLMForQuestionAnswering, + MarkupLMForSequenceClassification, + MarkupLMForTokenClassification, + MarkupLMModel, + MarkupLMPreTrainedModel, + ) + from .models.mask2former import ( + MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + Mask2FormerForUniversalSegmentation, + Mask2FormerModel, + Mask2FormerPreTrainedModel, + ) + from .models.maskformer import ( + MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + MaskFormerForInstanceSegmentation, + MaskFormerModel, + MaskFormerPreTrainedModel, + MaskFormerSwinBackbone, + ) + from .models.mbart import ( + MBartForCausalLM, + MBartForConditionalGeneration, + MBartForQuestionAnswering, + MBartForSequenceClassification, + MBartModel, + MBartPreTrainedModel, + ) + from .models.mega import ( + MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, + MegaForCausalLM, + MegaForMaskedLM, + MegaForMultipleChoice, + MegaForQuestionAnswering, + MegaForSequenceClassification, + MegaForTokenClassification, + MegaModel, + MegaPreTrainedModel, + ) + from .models.megatron_bert import ( + MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, + MegatronBertForCausalLM, + MegatronBertForMaskedLM, + MegatronBertForMultipleChoice, + MegatronBertForNextSentencePrediction, + MegatronBertForPreTraining, + MegatronBertForQuestionAnswering, + MegatronBertForSequenceClassification, + MegatronBertForTokenClassification, + MegatronBertModel, + MegatronBertPreTrainedModel, + ) + from .models.mgp_str import ( + MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, + MgpstrForSceneTextRecognition, + MgpstrModel, + MgpstrPreTrainedModel, + ) + from .models.mistral import ( + MistralForCausalLM, + MistralForSequenceClassification, + MistralModel, + MistralPreTrainedModel, + ) + from .models.mixtral import ( + MixtralForCausalLM, + MixtralForSequenceClassification, + MixtralModel, + MixtralPreTrainedModel, + ) + from .models.mobilebert import ( + MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + MobileBertForMaskedLM, + MobileBertForMultipleChoice, + MobileBertForNextSentencePrediction, + MobileBertForPreTraining, + MobileBertForQuestionAnswering, + MobileBertForSequenceClassification, + MobileBertForTokenClassification, + MobileBertLayer, + MobileBertModel, + MobileBertPreTrainedModel, + load_tf_weights_in_mobilebert, + ) + from .models.mobilenet_v1 import ( + MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST, + MobileNetV1ForImageClassification, + MobileNetV1Model, + MobileNetV1PreTrainedModel, + load_tf_weights_in_mobilenet_v1, + ) + from .models.mobilenet_v2 import ( + MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, + MobileNetV2ForImageClassification, + MobileNetV2ForSemanticSegmentation, + MobileNetV2Model, + MobileNetV2PreTrainedModel, + load_tf_weights_in_mobilenet_v2, + ) + from .models.mobilevit import ( + MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, + MobileViTForImageClassification, + MobileViTForSemanticSegmentation, + MobileViTModel, + MobileViTPreTrainedModel, + ) + from .models.mobilevitv2 import ( + MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, + MobileViTV2ForImageClassification, + MobileViTV2ForSemanticSegmentation, + MobileViTV2Model, + MobileViTV2PreTrainedModel, + ) + from .models.mpnet import ( + MPNET_PRETRAINED_MODEL_ARCHIVE_LIST, + MPNetForMaskedLM, + MPNetForMultipleChoice, 
+ MPNetForQuestionAnswering, + MPNetForSequenceClassification, + MPNetForTokenClassification, + MPNetLayer, + MPNetModel, + MPNetPreTrainedModel, + ) + from .models.mpt import ( + MPT_PRETRAINED_MODEL_ARCHIVE_LIST, + MptForCausalLM, + MptForQuestionAnswering, + MptForSequenceClassification, + MptForTokenClassification, + MptModel, + MptPreTrainedModel, + ) + from .models.mra import ( + MRA_PRETRAINED_MODEL_ARCHIVE_LIST, + MraForMaskedLM, + MraForMultipleChoice, + MraForQuestionAnswering, + MraForSequenceClassification, + MraForTokenClassification, + MraModel, + MraPreTrainedModel, + ) + from .models.mt5 import ( + MT5EncoderModel, + MT5ForConditionalGeneration, + MT5ForQuestionAnswering, + MT5ForSequenceClassification, + MT5ForTokenClassification, + MT5Model, + MT5PreTrainedModel, + ) + from .models.musicgen import ( + MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST, + MusicgenForCausalLM, + MusicgenForConditionalGeneration, + MusicgenModel, + MusicgenPreTrainedModel, + MusicgenProcessor, + ) + from .models.musicgen_melody import ( + MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST, + MusicgenMelodyForCausalLM, + MusicgenMelodyForConditionalGeneration, + MusicgenMelodyModel, + MusicgenMelodyPreTrainedModel, + ) + from .models.mvp import ( + MVP_PRETRAINED_MODEL_ARCHIVE_LIST, + MvpForCausalLM, + MvpForConditionalGeneration, + MvpForQuestionAnswering, + MvpForSequenceClassification, + MvpModel, + MvpPreTrainedModel, + ) + from .models.nat import ( + NAT_PRETRAINED_MODEL_ARCHIVE_LIST, + NatBackbone, + NatForImageClassification, + NatModel, + NatPreTrainedModel, + ) + from .models.nezha import ( + NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, + NezhaForMaskedLM, + NezhaForMultipleChoice, + NezhaForNextSentencePrediction, + NezhaForPreTraining, + NezhaForQuestionAnswering, + NezhaForSequenceClassification, + NezhaForTokenClassification, + NezhaModel, + NezhaPreTrainedModel, + ) + from .models.nllb_moe import ( + NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, + NllbMoeForConditionalGeneration, + NllbMoeModel, + NllbMoePreTrainedModel, + NllbMoeSparseMLP, + NllbMoeTop2Router, + ) + from .models.nystromformer import ( + NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + NystromformerForMaskedLM, + NystromformerForMultipleChoice, + NystromformerForQuestionAnswering, + NystromformerForSequenceClassification, + NystromformerForTokenClassification, + NystromformerLayer, + NystromformerModel, + NystromformerPreTrainedModel, + ) + from .models.oneformer import ( + ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + OneFormerForUniversalSegmentation, + OneFormerModel, + OneFormerPreTrainedModel, + ) + from .models.openai import ( + OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, + OpenAIGPTDoubleHeadsModel, + OpenAIGPTForSequenceClassification, + OpenAIGPTLMHeadModel, + OpenAIGPTModel, + OpenAIGPTPreTrainedModel, + load_tf_weights_in_openai_gpt, + ) + from .models.opt import ( + OPT_PRETRAINED_MODEL_ARCHIVE_LIST, + OPTForCausalLM, + OPTForQuestionAnswering, + OPTForSequenceClassification, + OPTModel, + OPTPreTrainedModel, + ) + from .models.owlv2 import ( + OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST, + Owlv2ForObjectDetection, + Owlv2Model, + Owlv2PreTrainedModel, + Owlv2TextModel, + Owlv2VisionModel, + ) + from .models.owlvit import ( + OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, + OwlViTForObjectDetection, + OwlViTModel, + OwlViTPreTrainedModel, + OwlViTTextModel, + OwlViTVisionModel, + ) + from .models.patchtsmixer import ( + PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST, + PatchTSMixerForPrediction, + PatchTSMixerForPretraining, + PatchTSMixerForRegression, + 
PatchTSMixerForTimeSeriesClassification, + PatchTSMixerModel, + PatchTSMixerPreTrainedModel, + ) + from .models.patchtst import ( + PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST, + PatchTSTForClassification, + PatchTSTForPrediction, + PatchTSTForPretraining, + PatchTSTForRegression, + PatchTSTModel, + PatchTSTPreTrainedModel, + ) + from .models.pegasus import ( + PegasusForCausalLM, + PegasusForConditionalGeneration, + PegasusModel, + PegasusPreTrainedModel, + ) + from .models.pegasus_x import ( + PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, + PegasusXForConditionalGeneration, + PegasusXModel, + PegasusXPreTrainedModel, + ) + from .models.perceiver import ( + PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, + PerceiverForImageClassificationConvProcessing, + PerceiverForImageClassificationFourier, + PerceiverForImageClassificationLearned, + PerceiverForMaskedLM, + PerceiverForMultimodalAutoencoding, + PerceiverForOpticalFlow, + PerceiverForSequenceClassification, + PerceiverLayer, + PerceiverModel, + PerceiverPreTrainedModel, + ) + from .models.persimmon import ( + PersimmonForCausalLM, + PersimmonForSequenceClassification, + PersimmonModel, + PersimmonPreTrainedModel, + ) + from .models.phi import ( + PHI_PRETRAINED_MODEL_ARCHIVE_LIST, + PhiForCausalLM, + PhiForSequenceClassification, + PhiForTokenClassification, + PhiModel, + PhiPreTrainedModel, + ) + from .models.pix2struct import ( + PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, + Pix2StructForConditionalGeneration, + Pix2StructPreTrainedModel, + Pix2StructTextModel, + Pix2StructVisionModel, + ) + from .models.plbart import ( + PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, + PLBartForCausalLM, + PLBartForConditionalGeneration, + PLBartForSequenceClassification, + PLBartModel, + PLBartPreTrainedModel, + ) + from .models.poolformer import ( + POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + PoolFormerForImageClassification, + PoolFormerModel, + PoolFormerPreTrainedModel, + ) + from .models.pop2piano import ( + POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST, + Pop2PianoForConditionalGeneration, + Pop2PianoPreTrainedModel, + ) + from .models.prophetnet import ( + PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, + ProphetNetDecoder, + ProphetNetEncoder, + ProphetNetForCausalLM, + ProphetNetForConditionalGeneration, + ProphetNetModel, + ProphetNetPreTrainedModel, + ) + from .models.pvt import ( + PVT_PRETRAINED_MODEL_ARCHIVE_LIST, + PvtForImageClassification, + PvtModel, + PvtPreTrainedModel, + ) + from .models.pvt_v2 import ( + PVT_V2_PRETRAINED_MODEL_ARCHIVE_LIST, + PvtV2Backbone, + PvtV2ForImageClassification, + PvtV2Model, + PvtV2PreTrainedModel, + ) + from .models.qdqbert import ( + QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + QDQBertForMaskedLM, + QDQBertForMultipleChoice, + QDQBertForNextSentencePrediction, + QDQBertForQuestionAnswering, + QDQBertForSequenceClassification, + QDQBertForTokenClassification, + QDQBertLayer, + QDQBertLMHeadModel, + QDQBertModel, + QDQBertPreTrainedModel, + load_tf_weights_in_qdqbert, + ) + from .models.qwen2 import ( + Qwen2ForCausalLM, + Qwen2ForSequenceClassification, + Qwen2Model, + Qwen2PreTrainedModel, + ) + from .models.rag import ( + RagModel, + RagPreTrainedModel, + RagSequenceForGeneration, + RagTokenForGeneration, + ) + from .models.realm import ( + REALM_PRETRAINED_MODEL_ARCHIVE_LIST, + RealmEmbedder, + RealmForOpenQA, + RealmKnowledgeAugEncoder, + RealmPreTrainedModel, + RealmReader, + RealmRetriever, + RealmScorer, + load_tf_weights_in_realm, + ) + from .models.reformer import ( + REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + ReformerAttention, + 
ReformerForMaskedLM, + ReformerForQuestionAnswering, + ReformerForSequenceClassification, + ReformerLayer, + ReformerModel, + ReformerModelWithLMHead, + ReformerPreTrainedModel, + ) + from .models.regnet import ( + REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, + RegNetForImageClassification, + RegNetModel, + RegNetPreTrainedModel, + ) + from .models.rembert import ( + REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + RemBertForCausalLM, + RemBertForMaskedLM, + RemBertForMultipleChoice, + RemBertForQuestionAnswering, + RemBertForSequenceClassification, + RemBertForTokenClassification, + RemBertLayer, + RemBertModel, + RemBertPreTrainedModel, + load_tf_weights_in_rembert, + ) + from .models.resnet import ( + RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, + ResNetBackbone, + ResNetForImageClassification, + ResNetModel, + ResNetPreTrainedModel, + ) + from .models.roberta import ( + ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, + RobertaForCausalLM, + RobertaForMaskedLM, + RobertaForMultipleChoice, + RobertaForQuestionAnswering, + RobertaForSequenceClassification, + RobertaForTokenClassification, + RobertaModel, + RobertaPreTrainedModel, + ) + from .models.roberta_prelayernorm import ( + ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, + RobertaPreLayerNormForCausalLM, + RobertaPreLayerNormForMaskedLM, + RobertaPreLayerNormForMultipleChoice, + RobertaPreLayerNormForQuestionAnswering, + RobertaPreLayerNormForSequenceClassification, + RobertaPreLayerNormForTokenClassification, + RobertaPreLayerNormModel, + RobertaPreLayerNormPreTrainedModel, + ) + from .models.roc_bert import ( + ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, + RoCBertForCausalLM, + RoCBertForMaskedLM, + RoCBertForMultipleChoice, + RoCBertForPreTraining, + RoCBertForQuestionAnswering, + RoCBertForSequenceClassification, + RoCBertForTokenClassification, + RoCBertLayer, + RoCBertModel, + RoCBertPreTrainedModel, + load_tf_weights_in_roc_bert, + ) + from .models.roformer import ( + ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + RoFormerForCausalLM, + RoFormerForMaskedLM, + RoFormerForMultipleChoice, + RoFormerForQuestionAnswering, + RoFormerForSequenceClassification, + RoFormerForTokenClassification, + RoFormerLayer, + RoFormerModel, + RoFormerPreTrainedModel, + load_tf_weights_in_roformer, + ) + from .models.rwkv import ( + RWKV_PRETRAINED_MODEL_ARCHIVE_LIST, + RwkvForCausalLM, + RwkvModel, + RwkvPreTrainedModel, + ) + from .models.sam import ( + SAM_PRETRAINED_MODEL_ARCHIVE_LIST, + SamModel, + SamPreTrainedModel, + ) + + # PyTorch model imports + from .models.seamless_m4t import ( + SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST, + SeamlessM4TCodeHifiGan, + SeamlessM4TForSpeechToSpeech, + SeamlessM4TForSpeechToText, + SeamlessM4TForTextToSpeech, + SeamlessM4TForTextToText, + SeamlessM4THifiGan, + SeamlessM4TModel, + SeamlessM4TPreTrainedModel, + SeamlessM4TTextToUnitForConditionalGeneration, + SeamlessM4TTextToUnitModel, + ) + from .models.seamless_m4t_v2 import ( + SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST, + SeamlessM4Tv2ForSpeechToSpeech, + SeamlessM4Tv2ForSpeechToText, + SeamlessM4Tv2ForTextToSpeech, + SeamlessM4Tv2ForTextToText, + SeamlessM4Tv2Model, + SeamlessM4Tv2PreTrainedModel, + ) + from .models.segformer import ( + SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + SegformerDecodeHead, + SegformerForImageClassification, + SegformerForSemanticSegmentation, + SegformerLayer, + SegformerModel, + SegformerPreTrainedModel, + ) + from .models.seggpt import ( + SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST, + SegGptForImageSegmentation, + SegGptModel, + SegGptPreTrainedModel, + ) + 
from .models.sew import ( + SEW_PRETRAINED_MODEL_ARCHIVE_LIST, + SEWForCTC, + SEWForSequenceClassification, + SEWModel, + SEWPreTrainedModel, + ) + from .models.sew_d import ( + SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST, + SEWDForCTC, + SEWDForSequenceClassification, + SEWDModel, + SEWDPreTrainedModel, + ) + from .models.siglip import ( + SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + SiglipForImageClassification, + SiglipModel, + SiglipPreTrainedModel, + SiglipTextModel, + SiglipVisionModel, + ) + from .models.speech_encoder_decoder import SpeechEncoderDecoderModel + from .models.speech_to_text import ( + SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, + Speech2TextForConditionalGeneration, + Speech2TextModel, + Speech2TextPreTrainedModel, + ) + from .models.speech_to_text_2 import ( + Speech2Text2ForCausalLM, + Speech2Text2PreTrainedModel, + ) + from .models.speecht5 import ( + SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, + SpeechT5ForSpeechToSpeech, + SpeechT5ForSpeechToText, + SpeechT5ForTextToSpeech, + SpeechT5HifiGan, + SpeechT5Model, + SpeechT5PreTrainedModel, + ) + from .models.splinter import ( + SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST, + SplinterForPreTraining, + SplinterForQuestionAnswering, + SplinterLayer, + SplinterModel, + SplinterPreTrainedModel, + ) + from .models.squeezebert import ( + SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + SqueezeBertForMaskedLM, + SqueezeBertForMultipleChoice, + SqueezeBertForQuestionAnswering, + SqueezeBertForSequenceClassification, + SqueezeBertForTokenClassification, + SqueezeBertModel, + SqueezeBertModule, + SqueezeBertPreTrainedModel, + ) + from .models.stablelm import ( + StableLmForCausalLM, + StableLmForSequenceClassification, + StableLmModel, + StableLmPreTrainedModel, + ) + from .models.starcoder2 import ( + Starcoder2ForCausalLM, + Starcoder2ForSequenceClassification, + Starcoder2Model, + Starcoder2PreTrainedModel, + ) + from .models.superpoint import ( + SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST, + SuperPointForKeypointDetection, + SuperPointPreTrainedModel, + ) + from .models.swiftformer import ( + SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + SwiftFormerForImageClassification, + SwiftFormerModel, + SwiftFormerPreTrainedModel, + ) + from .models.swin import ( + SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, + SwinBackbone, + SwinForImageClassification, + SwinForMaskedImageModeling, + SwinModel, + SwinPreTrainedModel, + ) + from .models.swin2sr import ( + SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST, + Swin2SRForImageSuperResolution, + Swin2SRModel, + Swin2SRPreTrainedModel, + ) + from .models.swinv2 import ( + SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, + Swinv2Backbone, + Swinv2ForImageClassification, + Swinv2ForMaskedImageModeling, + Swinv2Model, + Swinv2PreTrainedModel, + ) + from .models.switch_transformers import ( + SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST, + SwitchTransformersEncoderModel, + SwitchTransformersForConditionalGeneration, + SwitchTransformersModel, + SwitchTransformersPreTrainedModel, + SwitchTransformersSparseMLP, + SwitchTransformersTop1Router, + ) + from .models.t5 import ( + T5_PRETRAINED_MODEL_ARCHIVE_LIST, + T5EncoderModel, + T5ForConditionalGeneration, + T5ForQuestionAnswering, + T5ForSequenceClassification, + T5ForTokenClassification, + T5Model, + T5PreTrainedModel, + load_tf_weights_in_t5, + ) + from .models.table_transformer import ( + TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + TableTransformerForObjectDetection, + TableTransformerModel, + TableTransformerPreTrainedModel, + ) + from .models.tapas import ( + 
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, + TapasForMaskedLM, + TapasForQuestionAnswering, + TapasForSequenceClassification, + TapasModel, + TapasPreTrainedModel, + load_tf_weights_in_tapas, + ) + from .models.time_series_transformer import ( + TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + TimeSeriesTransformerForPrediction, + TimeSeriesTransformerModel, + TimeSeriesTransformerPreTrainedModel, + ) + from .models.timesformer import ( + TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + TimesformerForVideoClassification, + TimesformerModel, + TimesformerPreTrainedModel, + ) + from .models.timm_backbone import TimmBackbone + from .models.trocr import ( + TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, + TrOCRForCausalLM, + TrOCRPreTrainedModel, + ) + from .models.tvlt import ( + TVLT_PRETRAINED_MODEL_ARCHIVE_LIST, + TvltForAudioVisualClassification, + TvltForPreTraining, + TvltModel, + TvltPreTrainedModel, + ) + from .models.tvp import ( + TVP_PRETRAINED_MODEL_ARCHIVE_LIST, + TvpForVideoGrounding, + TvpModel, + TvpPreTrainedModel, + ) + from .models.udop import ( + UDOP_PRETRAINED_MODEL_ARCHIVE_LIST, + UdopEncoderModel, + UdopForConditionalGeneration, + UdopModel, + UdopPreTrainedModel, + ) + from .models.umt5 import ( + UMT5EncoderModel, + UMT5ForConditionalGeneration, + UMT5ForQuestionAnswering, + UMT5ForSequenceClassification, + UMT5ForTokenClassification, + UMT5Model, + UMT5PreTrainedModel, + ) + from .models.unispeech import ( + UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, + UniSpeechForCTC, + UniSpeechForPreTraining, + UniSpeechForSequenceClassification, + UniSpeechModel, + UniSpeechPreTrainedModel, + ) + from .models.unispeech_sat import ( + UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST, + UniSpeechSatForAudioFrameClassification, + UniSpeechSatForCTC, + UniSpeechSatForPreTraining, + UniSpeechSatForSequenceClassification, + UniSpeechSatForXVector, + UniSpeechSatModel, + UniSpeechSatPreTrainedModel, + ) + from .models.univnet import UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST, UnivNetModel + from .models.upernet import ( + UperNetForSemanticSegmentation, + UperNetPreTrainedModel, + ) + from .models.videomae import ( + VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST, + VideoMAEForPreTraining, + VideoMAEForVideoClassification, + VideoMAEModel, + VideoMAEPreTrainedModel, + ) + from .models.vilt import ( + VILT_PRETRAINED_MODEL_ARCHIVE_LIST, + ViltForImageAndTextRetrieval, + ViltForImagesAndTextClassification, + ViltForMaskedLM, + ViltForQuestionAnswering, + ViltForTokenClassification, + ViltLayer, + ViltModel, + ViltPreTrainedModel, + ) + from .models.vipllava import ( + VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST, + VipLlavaForConditionalGeneration, + VipLlavaPreTrainedModel, + ) + from .models.vision_encoder_decoder import VisionEncoderDecoderModel + from .models.vision_text_dual_encoder import VisionTextDualEncoderModel + from .models.visual_bert import ( + VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, + VisualBertForMultipleChoice, + VisualBertForPreTraining, + VisualBertForQuestionAnswering, + VisualBertForRegionToPhraseAlignment, + VisualBertForVisualReasoning, + VisualBertLayer, + VisualBertModel, + VisualBertPreTrainedModel, + ) + from .models.vit import ( + VIT_PRETRAINED_MODEL_ARCHIVE_LIST, + ViTForImageClassification, + ViTForMaskedImageModeling, + ViTModel, + ViTPreTrainedModel, + ) + from .models.vit_hybrid import ( + VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST, + ViTHybridForImageClassification, + ViTHybridModel, + ViTHybridPreTrainedModel, + ) + from .models.vit_mae import ( + VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, + 
ViTMAEForPreTraining, + ViTMAELayer, + ViTMAEModel, + ViTMAEPreTrainedModel, + ) + from .models.vit_msn import ( + VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, + ViTMSNForImageClassification, + ViTMSNModel, + ViTMSNPreTrainedModel, + ) + from .models.vitdet import ( + VITDET_PRETRAINED_MODEL_ARCHIVE_LIST, + VitDetBackbone, + VitDetModel, + VitDetPreTrainedModel, + ) + from .models.vitmatte import ( + VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST, + VitMatteForImageMatting, + VitMattePreTrainedModel, + ) + from .models.vits import ( + VITS_PRETRAINED_MODEL_ARCHIVE_LIST, + VitsModel, + VitsPreTrainedModel, + ) + from .models.vivit import ( + VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, + VivitForVideoClassification, + VivitModel, + VivitPreTrainedModel, + ) + from .models.wav2vec2 import ( + WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, + Wav2Vec2ForAudioFrameClassification, + Wav2Vec2ForCTC, + Wav2Vec2ForMaskedLM, + Wav2Vec2ForPreTraining, + Wav2Vec2ForSequenceClassification, + Wav2Vec2ForXVector, + Wav2Vec2Model, + Wav2Vec2PreTrainedModel, + ) + from .models.wav2vec2_bert import ( + WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, + Wav2Vec2BertForAudioFrameClassification, + Wav2Vec2BertForCTC, + Wav2Vec2BertForSequenceClassification, + Wav2Vec2BertForXVector, + Wav2Vec2BertModel, + Wav2Vec2BertPreTrainedModel, + ) + from .models.wav2vec2_conformer import ( + WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + Wav2Vec2ConformerForAudioFrameClassification, + Wav2Vec2ConformerForCTC, + Wav2Vec2ConformerForPreTraining, + Wav2Vec2ConformerForSequenceClassification, + Wav2Vec2ConformerForXVector, + Wav2Vec2ConformerModel, + Wav2Vec2ConformerPreTrainedModel, + ) + from .models.wavlm import ( + WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, + WavLMForAudioFrameClassification, + WavLMForCTC, + WavLMForSequenceClassification, + WavLMForXVector, + WavLMModel, + WavLMPreTrainedModel, + ) + from .models.whisper import ( + WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, + WhisperForAudioClassification, + WhisperForCausalLM, + WhisperForConditionalGeneration, + WhisperModel, + WhisperPreTrainedModel, + ) + from .models.x_clip import ( + XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + XCLIPModel, + XCLIPPreTrainedModel, + XCLIPTextModel, + XCLIPVisionModel, + ) + from .models.xglm import ( + XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, + XGLMForCausalLM, + XGLMModel, + XGLMPreTrainedModel, + ) + from .models.xlm import ( + XLM_PRETRAINED_MODEL_ARCHIVE_LIST, + XLMForMultipleChoice, + XLMForQuestionAnswering, + XLMForQuestionAnsweringSimple, + XLMForSequenceClassification, + XLMForTokenClassification, + XLMModel, + XLMPreTrainedModel, + XLMWithLMHeadModel, + ) + from .models.xlm_prophetnet import ( + XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, + XLMProphetNetDecoder, + XLMProphetNetEncoder, + XLMProphetNetForCausalLM, + XLMProphetNetForConditionalGeneration, + XLMProphetNetModel, + XLMProphetNetPreTrainedModel, + ) + from .models.xlm_roberta import ( + XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, + XLMRobertaForCausalLM, + XLMRobertaForMaskedLM, + XLMRobertaForMultipleChoice, + XLMRobertaForQuestionAnswering, + XLMRobertaForSequenceClassification, + XLMRobertaForTokenClassification, + XLMRobertaModel, + XLMRobertaPreTrainedModel, + ) + from .models.xlm_roberta_xl import ( + XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, + XLMRobertaXLForCausalLM, + XLMRobertaXLForMaskedLM, + XLMRobertaXLForMultipleChoice, + XLMRobertaXLForQuestionAnswering, + XLMRobertaXLForSequenceClassification, + XLMRobertaXLForTokenClassification, + XLMRobertaXLModel, + XLMRobertaXLPreTrainedModel, + 
) + from .models.xlnet import ( + XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, + XLNetForMultipleChoice, + XLNetForQuestionAnswering, + XLNetForQuestionAnsweringSimple, + XLNetForSequenceClassification, + XLNetForTokenClassification, + XLNetLMHeadModel, + XLNetModel, + XLNetPreTrainedModel, + load_tf_weights_in_xlnet, + ) + from .models.xmod import ( + XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, + XmodForCausalLM, + XmodForMaskedLM, + XmodForMultipleChoice, + XmodForQuestionAnswering, + XmodForSequenceClassification, + XmodForTokenClassification, + XmodModel, + XmodPreTrainedModel, + ) + from .models.yolos import ( + YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, + YolosForObjectDetection, + YolosModel, + YolosPreTrainedModel, + ) + from .models.yoso import ( + YOSO_PRETRAINED_MODEL_ARCHIVE_LIST, + YosoForMaskedLM, + YosoForMultipleChoice, + YosoForQuestionAnswering, + YosoForSequenceClassification, + YosoForTokenClassification, + YosoLayer, + YosoModel, + YosoPreTrainedModel, + ) + + # Optimization + from .optimization import ( + Adafactor, + AdamW, + get_constant_schedule, + get_constant_schedule_with_warmup, + get_cosine_schedule_with_warmup, + get_cosine_with_hard_restarts_schedule_with_warmup, + get_inverse_sqrt_schedule, + get_linear_schedule_with_warmup, + get_polynomial_decay_schedule_with_warmup, + get_scheduler, + ) + from .pytorch_utils import Conv1D, apply_chunking_to_forward, prune_layer + + # Trainer + from .trainer import Trainer + from .trainer_pt_utils import torch_distributed_zero_first + from .trainer_seq2seq import Seq2SeqTrainer + + # TensorFlow + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + # Import the same objects as dummies to get them in the namespace. + # They will raise an import error if the user tries to instantiate / use them. 
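+        # A minimal sketch of what such a dummy looks like (assuming the generated
+        # dummy modules follow the usual DummyObject pattern; illustrative only):
+        #
+        #     class TFBertModel(metaclass=DummyObject):
+        #         _backends = ["tf"]
+        #
+        #         def __init__(self, *args, **kwargs):
+        #             requires_backends(self, ["tf"])  # raises ImportError when TF is absent
+        #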
+ from .utils.dummy_tf_objects import * + else: + from .benchmark.benchmark_args_tf import TensorFlowBenchmarkArguments + + # Benchmarks + from .benchmark.benchmark_tf import TensorFlowBenchmark + from .generation import ( + TFForcedBOSTokenLogitsProcessor, + TFForcedEOSTokenLogitsProcessor, + TFForceTokensLogitsProcessor, + TFGenerationMixin, + TFLogitsProcessor, + TFLogitsProcessorList, + TFLogitsWarper, + TFMinLengthLogitsProcessor, + TFNoBadWordsLogitsProcessor, + TFNoRepeatNGramLogitsProcessor, + TFRepetitionPenaltyLogitsProcessor, + TFSuppressTokensAtBeginLogitsProcessor, + TFSuppressTokensLogitsProcessor, + TFTemperatureLogitsWarper, + TFTopKLogitsWarper, + TFTopPLogitsWarper, + ) + from .keras_callbacks import KerasMetricCallback, PushToHubCallback + from .modeling_tf_utils import ( + TFPreTrainedModel, + TFSequenceSummary, + TFSharedEmbeddings, + shape_list, + ) + + # TensorFlow model imports + from .models.albert import ( + TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFAlbertForMaskedLM, + TFAlbertForMultipleChoice, + TFAlbertForPreTraining, + TFAlbertForQuestionAnswering, + TFAlbertForSequenceClassification, + TFAlbertForTokenClassification, + TFAlbertMainLayer, + TFAlbertModel, + TFAlbertPreTrainedModel, + ) + from .models.auto import ( + TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, + TF_MODEL_FOR_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, + TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, + TF_MODEL_FOR_MASK_GENERATION_MAPPING, + TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, + TF_MODEL_FOR_MASKED_LM_MAPPING, + TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, + TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, + TF_MODEL_FOR_PRETRAINING_MAPPING, + TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, + TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, + TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, + TF_MODEL_FOR_TEXT_ENCODING_MAPPING, + TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, + TF_MODEL_FOR_VISION_2_SEQ_MAPPING, + TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING, + TF_MODEL_MAPPING, + TF_MODEL_WITH_LM_HEAD_MAPPING, + TFAutoModel, + TFAutoModelForAudioClassification, + TFAutoModelForCausalLM, + TFAutoModelForDocumentQuestionAnswering, + TFAutoModelForImageClassification, + TFAutoModelForMaskedImageModeling, + TFAutoModelForMaskedLM, + TFAutoModelForMaskGeneration, + TFAutoModelForMultipleChoice, + TFAutoModelForNextSentencePrediction, + TFAutoModelForPreTraining, + TFAutoModelForQuestionAnswering, + TFAutoModelForSemanticSegmentation, + TFAutoModelForSeq2SeqLM, + TFAutoModelForSequenceClassification, + TFAutoModelForSpeechSeq2Seq, + TFAutoModelForTableQuestionAnswering, + TFAutoModelForTextEncoding, + TFAutoModelForTokenClassification, + TFAutoModelForVision2Seq, + TFAutoModelForZeroShotImageClassification, + TFAutoModelWithLMHead, + ) + from .models.bart import ( + TFBartForConditionalGeneration, + TFBartForSequenceClassification, + TFBartModel, + TFBartPretrainedModel, + ) + from .models.bert import ( + TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFBertEmbeddings, + TFBertForMaskedLM, + TFBertForMultipleChoice, + TFBertForNextSentencePrediction, + TFBertForPreTraining, + TFBertForQuestionAnswering, + TFBertForSequenceClassification, + TFBertForTokenClassification, + TFBertLMHeadModel, + TFBertMainLayer, + TFBertModel, + TFBertPreTrainedModel, + ) + from .models.blenderbot import ( + TFBlenderbotForConditionalGeneration, + TFBlenderbotModel, + 
TFBlenderbotPreTrainedModel, + ) + from .models.blenderbot_small import ( + TFBlenderbotSmallForConditionalGeneration, + TFBlenderbotSmallModel, + TFBlenderbotSmallPreTrainedModel, + ) + from .models.blip import ( + TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + TFBlipForConditionalGeneration, + TFBlipForImageTextRetrieval, + TFBlipForQuestionAnswering, + TFBlipModel, + TFBlipPreTrainedModel, + TFBlipTextModel, + TFBlipVisionModel, + ) + from .models.camembert import ( + TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFCamembertForCausalLM, + TFCamembertForMaskedLM, + TFCamembertForMultipleChoice, + TFCamembertForQuestionAnswering, + TFCamembertForSequenceClassification, + TFCamembertForTokenClassification, + TFCamembertModel, + TFCamembertPreTrainedModel, + ) + from .models.clip import ( + TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + TFCLIPModel, + TFCLIPPreTrainedModel, + TFCLIPTextModel, + TFCLIPVisionModel, + ) + from .models.convbert import ( + TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFConvBertForMaskedLM, + TFConvBertForMultipleChoice, + TFConvBertForQuestionAnswering, + TFConvBertForSequenceClassification, + TFConvBertForTokenClassification, + TFConvBertLayer, + TFConvBertModel, + TFConvBertPreTrainedModel, + ) + from .models.convnext import ( + TFConvNextForImageClassification, + TFConvNextModel, + TFConvNextPreTrainedModel, + ) + from .models.convnextv2 import ( + TFConvNextV2ForImageClassification, + TFConvNextV2Model, + TFConvNextV2PreTrainedModel, + ) + from .models.ctrl import ( + TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, + TFCTRLForSequenceClassification, + TFCTRLLMHeadModel, + TFCTRLModel, + TFCTRLPreTrainedModel, + ) + from .models.cvt import ( + TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFCvtForImageClassification, + TFCvtModel, + TFCvtPreTrainedModel, + ) + from .models.data2vec import ( + TFData2VecVisionForImageClassification, + TFData2VecVisionForSemanticSegmentation, + TFData2VecVisionModel, + TFData2VecVisionPreTrainedModel, + ) + from .models.deberta import ( + TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, + TFDebertaForMaskedLM, + TFDebertaForQuestionAnswering, + TFDebertaForSequenceClassification, + TFDebertaForTokenClassification, + TFDebertaModel, + TFDebertaPreTrainedModel, + ) + from .models.deberta_v2 import ( + TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST, + TFDebertaV2ForMaskedLM, + TFDebertaV2ForMultipleChoice, + TFDebertaV2ForQuestionAnswering, + TFDebertaV2ForSequenceClassification, + TFDebertaV2ForTokenClassification, + TFDebertaV2Model, + TFDebertaV2PreTrainedModel, + ) + from .models.deit import ( + TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFDeiTForImageClassification, + TFDeiTForImageClassificationWithTeacher, + TFDeiTForMaskedImageModeling, + TFDeiTModel, + TFDeiTPreTrainedModel, + ) + from .models.deprecated.transfo_xl import ( + TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, + TFAdaptiveEmbedding, + TFTransfoXLForSequenceClassification, + TFTransfoXLLMHeadModel, + TFTransfoXLMainLayer, + TFTransfoXLModel, + TFTransfoXLPreTrainedModel, + ) + from .models.distilbert import ( + TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFDistilBertForMaskedLM, + TFDistilBertForMultipleChoice, + TFDistilBertForQuestionAnswering, + TFDistilBertForSequenceClassification, + TFDistilBertForTokenClassification, + TFDistilBertMainLayer, + TFDistilBertModel, + TFDistilBertPreTrainedModel, + ) + from .models.dpr import ( + TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, + TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, + TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, + 
TFDPRContextEncoder, + TFDPRPretrainedContextEncoder, + TFDPRPretrainedQuestionEncoder, + TFDPRPretrainedReader, + TFDPRQuestionEncoder, + TFDPRReader, + ) + from .models.efficientformer import ( + TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + TFEfficientFormerForImageClassification, + TFEfficientFormerForImageClassificationWithTeacher, + TFEfficientFormerModel, + TFEfficientFormerPreTrainedModel, + ) + from .models.electra import ( + TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, + TFElectraForMaskedLM, + TFElectraForMultipleChoice, + TFElectraForPreTraining, + TFElectraForQuestionAnswering, + TFElectraForSequenceClassification, + TFElectraForTokenClassification, + TFElectraModel, + TFElectraPreTrainedModel, + ) + from .models.encoder_decoder import TFEncoderDecoderModel + from .models.esm import ( + ESM_PRETRAINED_MODEL_ARCHIVE_LIST, + TFEsmForMaskedLM, + TFEsmForSequenceClassification, + TFEsmForTokenClassification, + TFEsmModel, + TFEsmPreTrainedModel, + ) + from .models.flaubert import ( + TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFFlaubertForMultipleChoice, + TFFlaubertForQuestionAnsweringSimple, + TFFlaubertForSequenceClassification, + TFFlaubertForTokenClassification, + TFFlaubertModel, + TFFlaubertPreTrainedModel, + TFFlaubertWithLMHeadModel, + ) + from .models.funnel import ( + TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, + TFFunnelBaseModel, + TFFunnelForMaskedLM, + TFFunnelForMultipleChoice, + TFFunnelForPreTraining, + TFFunnelForQuestionAnswering, + TFFunnelForSequenceClassification, + TFFunnelForTokenClassification, + TFFunnelModel, + TFFunnelPreTrainedModel, + ) + from .models.gpt2 import ( + TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, + TFGPT2DoubleHeadsModel, + TFGPT2ForSequenceClassification, + TFGPT2LMHeadModel, + TFGPT2MainLayer, + TFGPT2Model, + TFGPT2PreTrainedModel, + ) + from .models.gptj import ( + TFGPTJForCausalLM, + TFGPTJForQuestionAnswering, + TFGPTJForSequenceClassification, + TFGPTJModel, + TFGPTJPreTrainedModel, + ) + from .models.groupvit import ( + TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFGroupViTModel, + TFGroupViTPreTrainedModel, + TFGroupViTTextModel, + TFGroupViTVisionModel, + ) + from .models.hubert import ( + TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFHubertForCTC, + TFHubertModel, + TFHubertPreTrainedModel, + ) + from .models.layoutlm import ( + TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, + TFLayoutLMForMaskedLM, + TFLayoutLMForQuestionAnswering, + TFLayoutLMForSequenceClassification, + TFLayoutLMForTokenClassification, + TFLayoutLMMainLayer, + TFLayoutLMModel, + TFLayoutLMPreTrainedModel, + ) + from .models.layoutlmv3 import ( + TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, + TFLayoutLMv3ForQuestionAnswering, + TFLayoutLMv3ForSequenceClassification, + TFLayoutLMv3ForTokenClassification, + TFLayoutLMv3Model, + TFLayoutLMv3PreTrainedModel, + ) + from .models.led import ( + TFLEDForConditionalGeneration, + TFLEDModel, + TFLEDPreTrainedModel, + ) + from .models.longformer import ( + TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + TFLongformerForMaskedLM, + TFLongformerForMultipleChoice, + TFLongformerForQuestionAnswering, + TFLongformerForSequenceClassification, + TFLongformerForTokenClassification, + TFLongformerModel, + TFLongformerPreTrainedModel, + TFLongformerSelfAttention, + ) + from .models.lxmert import ( + TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFLxmertForPreTraining, + TFLxmertMainLayer, + TFLxmertModel, + TFLxmertPreTrainedModel, + TFLxmertVisualFeatureEncoder, + ) + from .models.marian import ( + TFMarianModel, + TFMarianMTModel, + 
TFMarianPreTrainedModel, + ) + from .models.mbart import ( + TFMBartForConditionalGeneration, + TFMBartModel, + TFMBartPreTrainedModel, + ) + from .models.mobilebert import ( + TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFMobileBertForMaskedLM, + TFMobileBertForMultipleChoice, + TFMobileBertForNextSentencePrediction, + TFMobileBertForPreTraining, + TFMobileBertForQuestionAnswering, + TFMobileBertForSequenceClassification, + TFMobileBertForTokenClassification, + TFMobileBertMainLayer, + TFMobileBertModel, + TFMobileBertPreTrainedModel, + ) + from .models.mobilevit import ( + TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFMobileViTForImageClassification, + TFMobileViTForSemanticSegmentation, + TFMobileViTModel, + TFMobileViTPreTrainedModel, + ) + from .models.mpnet import ( + TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST, + TFMPNetForMaskedLM, + TFMPNetForMultipleChoice, + TFMPNetForQuestionAnswering, + TFMPNetForSequenceClassification, + TFMPNetForTokenClassification, + TFMPNetMainLayer, + TFMPNetModel, + TFMPNetPreTrainedModel, + ) + from .models.mt5 import ( + TFMT5EncoderModel, + TFMT5ForConditionalGeneration, + TFMT5Model, + ) + from .models.openai import ( + TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFOpenAIGPTDoubleHeadsModel, + TFOpenAIGPTForSequenceClassification, + TFOpenAIGPTLMHeadModel, + TFOpenAIGPTMainLayer, + TFOpenAIGPTModel, + TFOpenAIGPTPreTrainedModel, + ) + from .models.opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel + from .models.pegasus import ( + TFPegasusForConditionalGeneration, + TFPegasusModel, + TFPegasusPreTrainedModel, + ) + from .models.rag import ( + TFRagModel, + TFRagPreTrainedModel, + TFRagSequenceForGeneration, + TFRagTokenForGeneration, + ) + from .models.regnet import ( + TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, + TFRegNetForImageClassification, + TFRegNetModel, + TFRegNetPreTrainedModel, + ) + from .models.rembert import ( + TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFRemBertForCausalLM, + TFRemBertForMaskedLM, + TFRemBertForMultipleChoice, + TFRemBertForQuestionAnswering, + TFRemBertForSequenceClassification, + TFRemBertForTokenClassification, + TFRemBertLayer, + TFRemBertModel, + TFRemBertPreTrainedModel, + ) + from .models.resnet import ( + TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, + TFResNetForImageClassification, + TFResNetModel, + TFResNetPreTrainedModel, + ) + from .models.roberta import ( + TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, + TFRobertaForCausalLM, + TFRobertaForMaskedLM, + TFRobertaForMultipleChoice, + TFRobertaForQuestionAnswering, + TFRobertaForSequenceClassification, + TFRobertaForTokenClassification, + TFRobertaMainLayer, + TFRobertaModel, + TFRobertaPreTrainedModel, + ) + from .models.roberta_prelayernorm import ( + TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, + TFRobertaPreLayerNormForCausalLM, + TFRobertaPreLayerNormForMaskedLM, + TFRobertaPreLayerNormForMultipleChoice, + TFRobertaPreLayerNormForQuestionAnswering, + TFRobertaPreLayerNormForSequenceClassification, + TFRobertaPreLayerNormForTokenClassification, + TFRobertaPreLayerNormMainLayer, + TFRobertaPreLayerNormModel, + TFRobertaPreLayerNormPreTrainedModel, + ) + from .models.roformer import ( + TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + TFRoFormerForCausalLM, + TFRoFormerForMaskedLM, + TFRoFormerForMultipleChoice, + TFRoFormerForQuestionAnswering, + TFRoFormerForSequenceClassification, + TFRoFormerForTokenClassification, + TFRoFormerLayer, + TFRoFormerModel, + TFRoFormerPreTrainedModel, + ) + from .models.sam import ( + 
TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST, + TFSamModel, + TFSamPreTrainedModel, + ) + from .models.segformer import ( + TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + TFSegformerDecodeHead, + TFSegformerForImageClassification, + TFSegformerForSemanticSegmentation, + TFSegformerModel, + TFSegformerPreTrainedModel, + ) + from .models.speech_to_text import ( + TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFSpeech2TextForConditionalGeneration, + TFSpeech2TextModel, + TFSpeech2TextPreTrainedModel, + ) + from .models.swin import ( + TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, + TFSwinForImageClassification, + TFSwinForMaskedImageModeling, + TFSwinModel, + TFSwinPreTrainedModel, + ) + from .models.t5 import ( + TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST, + TFT5EncoderModel, + TFT5ForConditionalGeneration, + TFT5Model, + TFT5PreTrainedModel, + ) + from .models.tapas import ( + TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, + TFTapasForMaskedLM, + TFTapasForQuestionAnswering, + TFTapasForSequenceClassification, + TFTapasModel, + TFTapasPreTrainedModel, + ) + from .models.vision_encoder_decoder import TFVisionEncoderDecoderModel + from .models.vision_text_dual_encoder import TFVisionTextDualEncoderModel + from .models.vit import ( + TFViTForImageClassification, + TFViTModel, + TFViTPreTrainedModel, + ) + from .models.vit_mae import ( + TFViTMAEForPreTraining, + TFViTMAEModel, + TFViTMAEPreTrainedModel, + ) + from .models.wav2vec2 import ( + TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, + TFWav2Vec2ForCTC, + TFWav2Vec2ForSequenceClassification, + TFWav2Vec2Model, + TFWav2Vec2PreTrainedModel, + ) + from .models.whisper import ( + TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, + TFWhisperForConditionalGeneration, + TFWhisperModel, + TFWhisperPreTrainedModel, + ) + from .models.xglm import ( + TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, + TFXGLMForCausalLM, + TFXGLMModel, + TFXGLMPreTrainedModel, + ) + from .models.xlm import ( + TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, + TFXLMForMultipleChoice, + TFXLMForQuestionAnsweringSimple, + TFXLMForSequenceClassification, + TFXLMForTokenClassification, + TFXLMMainLayer, + TFXLMModel, + TFXLMPreTrainedModel, + TFXLMWithLMHeadModel, + ) + from .models.xlm_roberta import ( + TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, + TFXLMRobertaForCausalLM, + TFXLMRobertaForMaskedLM, + TFXLMRobertaForMultipleChoice, + TFXLMRobertaForQuestionAnswering, + TFXLMRobertaForSequenceClassification, + TFXLMRobertaForTokenClassification, + TFXLMRobertaModel, + TFXLMRobertaPreTrainedModel, + ) + from .models.xlnet import ( + TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, + TFXLNetForMultipleChoice, + TFXLNetForQuestionAnsweringSimple, + TFXLNetForSequenceClassification, + TFXLNetForTokenClassification, + TFXLNetLMHeadModel, + TFXLNetMainLayer, + TFXLNetModel, + TFXLNetPreTrainedModel, + ) + + # Optimization + from .optimization_tf import ( + AdamWeightDecay, + GradientAccumulator, + WarmUp, + create_optimizer, + ) + + try: + if not ( + is_librosa_available() + and is_essentia_available() + and is_scipy_available() + and is_torch_available() + and is_pretty_midi_available() + ): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects import * + else: + from .models.pop2piano import ( + Pop2PianoFeatureExtractor, + Pop2PianoProcessor, + Pop2PianoTokenizer, + ) + + try: + if not is_torchaudio_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from 
.utils.dummy_torchaudio_objects import * + else: + from .models.musicgen_melody import MusicgenMelodyFeatureExtractor, MusicgenMelodyProcessor + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + # Import the same objects as dummies to get them in the namespace. + # They will raise an import error if the user tries to instantiate / use them. + from .utils.dummy_flax_objects import * + else: + from .generation import ( + FlaxForcedBOSTokenLogitsProcessor, + FlaxForcedEOSTokenLogitsProcessor, + FlaxForceTokensLogitsProcessor, + FlaxGenerationMixin, + FlaxLogitsProcessor, + FlaxLogitsProcessorList, + FlaxLogitsWarper, + FlaxMinLengthLogitsProcessor, + FlaxSuppressTokensAtBeginLogitsProcessor, + FlaxSuppressTokensLogitsProcessor, + FlaxTemperatureLogitsWarper, + FlaxTopKLogitsWarper, + FlaxTopPLogitsWarper, + FlaxWhisperTimeStampLogitsProcessor, + ) + from .modeling_flax_utils import FlaxPreTrainedModel + + # Flax model imports + from .models.albert import ( + FlaxAlbertForMaskedLM, + FlaxAlbertForMultipleChoice, + FlaxAlbertForPreTraining, + FlaxAlbertForQuestionAnswering, + FlaxAlbertForSequenceClassification, + FlaxAlbertForTokenClassification, + FlaxAlbertModel, + FlaxAlbertPreTrainedModel, + ) + from .models.auto import ( + FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, + FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, + FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, + FLAX_MODEL_FOR_MASKED_LM_MAPPING, + FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, + FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, + FLAX_MODEL_FOR_PRETRAINING_MAPPING, + FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING, + FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, + FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, + FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, + FLAX_MODEL_MAPPING, + FlaxAutoModel, + FlaxAutoModelForCausalLM, + FlaxAutoModelForImageClassification, + FlaxAutoModelForMaskedLM, + FlaxAutoModelForMultipleChoice, + FlaxAutoModelForNextSentencePrediction, + FlaxAutoModelForPreTraining, + FlaxAutoModelForQuestionAnswering, + FlaxAutoModelForSeq2SeqLM, + FlaxAutoModelForSequenceClassification, + FlaxAutoModelForSpeechSeq2Seq, + FlaxAutoModelForTokenClassification, + FlaxAutoModelForVision2Seq, + ) + from .models.bart import ( + FlaxBartDecoderPreTrainedModel, + FlaxBartForCausalLM, + FlaxBartForConditionalGeneration, + FlaxBartForQuestionAnswering, + FlaxBartForSequenceClassification, + FlaxBartModel, + FlaxBartPreTrainedModel, + ) + from .models.beit import ( + FlaxBeitForImageClassification, + FlaxBeitForMaskedImageModeling, + FlaxBeitModel, + FlaxBeitPreTrainedModel, + ) + from .models.bert import ( + FlaxBertForCausalLM, + FlaxBertForMaskedLM, + FlaxBertForMultipleChoice, + FlaxBertForNextSentencePrediction, + FlaxBertForPreTraining, + FlaxBertForQuestionAnswering, + FlaxBertForSequenceClassification, + FlaxBertForTokenClassification, + FlaxBertModel, + FlaxBertPreTrainedModel, + ) + from .models.big_bird import ( + FlaxBigBirdForCausalLM, + FlaxBigBirdForMaskedLM, + FlaxBigBirdForMultipleChoice, + FlaxBigBirdForPreTraining, + FlaxBigBirdForQuestionAnswering, + FlaxBigBirdForSequenceClassification, + FlaxBigBirdForTokenClassification, + FlaxBigBirdModel, + FlaxBigBirdPreTrainedModel, + ) + from .models.blenderbot import ( + FlaxBlenderbotForConditionalGeneration, + FlaxBlenderbotModel, + FlaxBlenderbotPreTrainedModel, + ) + from .models.blenderbot_small import ( + 
FlaxBlenderbotSmallForConditionalGeneration, + FlaxBlenderbotSmallModel, + FlaxBlenderbotSmallPreTrainedModel, + ) + from .models.bloom import ( + FlaxBloomForCausalLM, + FlaxBloomModel, + FlaxBloomPreTrainedModel, + ) + from .models.clip import ( + FlaxCLIPModel, + FlaxCLIPPreTrainedModel, + FlaxCLIPTextModel, + FlaxCLIPTextModelWithProjection, + FlaxCLIPTextPreTrainedModel, + FlaxCLIPVisionModel, + FlaxCLIPVisionPreTrainedModel, + ) + from .models.distilbert import ( + FlaxDistilBertForMaskedLM, + FlaxDistilBertForMultipleChoice, + FlaxDistilBertForQuestionAnswering, + FlaxDistilBertForSequenceClassification, + FlaxDistilBertForTokenClassification, + FlaxDistilBertModel, + FlaxDistilBertPreTrainedModel, + ) + from .models.electra import ( + FlaxElectraForCausalLM, + FlaxElectraForMaskedLM, + FlaxElectraForMultipleChoice, + FlaxElectraForPreTraining, + FlaxElectraForQuestionAnswering, + FlaxElectraForSequenceClassification, + FlaxElectraForTokenClassification, + FlaxElectraModel, + FlaxElectraPreTrainedModel, + ) + from .models.encoder_decoder import FlaxEncoderDecoderModel + from .models.gemma import ( + FlaxGemmaForCausalLM, + FlaxGemmaModel, + FlaxGemmaPreTrainedModel, + ) + from .models.gpt2 import ( + FlaxGPT2LMHeadModel, + FlaxGPT2Model, + FlaxGPT2PreTrainedModel, + ) + from .models.gpt_neo import ( + FlaxGPTNeoForCausalLM, + FlaxGPTNeoModel, + FlaxGPTNeoPreTrainedModel, + ) + from .models.gptj import ( + FlaxGPTJForCausalLM, + FlaxGPTJModel, + FlaxGPTJPreTrainedModel, + ) + from .models.llama import ( + FlaxLlamaForCausalLM, + FlaxLlamaModel, + FlaxLlamaPreTrainedModel, + ) + from .models.longt5 import ( + FlaxLongT5ForConditionalGeneration, + FlaxLongT5Model, + FlaxLongT5PreTrainedModel, + ) + from .models.marian import ( + FlaxMarianModel, + FlaxMarianMTModel, + FlaxMarianPreTrainedModel, + ) + from .models.mbart import ( + FlaxMBartForConditionalGeneration, + FlaxMBartForQuestionAnswering, + FlaxMBartForSequenceClassification, + FlaxMBartModel, + FlaxMBartPreTrainedModel, + ) + from .models.mistral import ( + FlaxMistralForCausalLM, + FlaxMistralModel, + FlaxMistralPreTrainedModel, + ) + from .models.mt5 import ( + FlaxMT5EncoderModel, + FlaxMT5ForConditionalGeneration, + FlaxMT5Model, + ) + from .models.opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel + from .models.pegasus import ( + FlaxPegasusForConditionalGeneration, + FlaxPegasusModel, + FlaxPegasusPreTrainedModel, + ) + from .models.regnet import ( + FlaxRegNetForImageClassification, + FlaxRegNetModel, + FlaxRegNetPreTrainedModel, + ) + from .models.resnet import ( + FlaxResNetForImageClassification, + FlaxResNetModel, + FlaxResNetPreTrainedModel, + ) + from .models.roberta import ( + FlaxRobertaForCausalLM, + FlaxRobertaForMaskedLM, + FlaxRobertaForMultipleChoice, + FlaxRobertaForQuestionAnswering, + FlaxRobertaForSequenceClassification, + FlaxRobertaForTokenClassification, + FlaxRobertaModel, + FlaxRobertaPreTrainedModel, + ) + from .models.roberta_prelayernorm import ( + FlaxRobertaPreLayerNormForCausalLM, + FlaxRobertaPreLayerNormForMaskedLM, + FlaxRobertaPreLayerNormForMultipleChoice, + FlaxRobertaPreLayerNormForQuestionAnswering, + FlaxRobertaPreLayerNormForSequenceClassification, + FlaxRobertaPreLayerNormForTokenClassification, + FlaxRobertaPreLayerNormModel, + FlaxRobertaPreLayerNormPreTrainedModel, + ) + from .models.roformer import ( + FlaxRoFormerForMaskedLM, + FlaxRoFormerForMultipleChoice, + FlaxRoFormerForQuestionAnswering, + FlaxRoFormerForSequenceClassification, + 
FlaxRoFormerForTokenClassification, + FlaxRoFormerModel, + FlaxRoFormerPreTrainedModel, + ) + from .models.speech_encoder_decoder import FlaxSpeechEncoderDecoderModel + from .models.t5 import ( + FlaxT5EncoderModel, + FlaxT5ForConditionalGeneration, + FlaxT5Model, + FlaxT5PreTrainedModel, + ) + from .models.vision_encoder_decoder import FlaxVisionEncoderDecoderModel + from .models.vision_text_dual_encoder import FlaxVisionTextDualEncoderModel + from .models.vit import ( + FlaxViTForImageClassification, + FlaxViTModel, + FlaxViTPreTrainedModel, + ) + from .models.wav2vec2 import ( + FlaxWav2Vec2ForCTC, + FlaxWav2Vec2ForPreTraining, + FlaxWav2Vec2Model, + FlaxWav2Vec2PreTrainedModel, + ) + from .models.whisper import ( + FlaxWhisperForAudioClassification, + FlaxWhisperForConditionalGeneration, + FlaxWhisperModel, + FlaxWhisperPreTrainedModel, + ) + from .models.xglm import ( + FlaxXGLMForCausalLM, + FlaxXGLMModel, + FlaxXGLMPreTrainedModel, + ) + from .models.xlm_roberta import ( + FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, + FlaxXLMRobertaForCausalLM, + FlaxXLMRobertaForMaskedLM, + FlaxXLMRobertaForMultipleChoice, + FlaxXLMRobertaForQuestionAnswering, + FlaxXLMRobertaForSequenceClassification, + FlaxXLMRobertaForTokenClassification, + FlaxXLMRobertaModel, + FlaxXLMRobertaPreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + extra_objects={"__version__": __version__}, + ) + + +if not is_tf_available() and not is_torch_available() and not is_flax_available(): + logger.warning_advice( + "None of PyTorch, TensorFlow >= 2.0, or Flax have been found. " + "Models won't be available and only tokenizers, configuration " + "and file/data utilities can be used." + ) diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/convert_graph_to_onnx.py b/env-llmeval/lib/python3.10/site-packages/transformers/convert_graph_to_onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..e3270bb9debe50a10847d0291d0dbf360badd532 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/convert_graph_to_onnx.py @@ -0,0 +1,551 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import warnings +from argparse import ArgumentParser +from os import listdir, makedirs +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +from packaging.version import Version, parse + +from transformers.pipelines import Pipeline, pipeline +from transformers.tokenization_utils import BatchEncoding +from transformers.utils import ModelOutput, is_tf_available, is_torch_available + + +# This is the minimal required version to +# support some ONNX Runtime features +ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0") + + +SUPPORTED_PIPELINES = [ + "feature-extraction", + "ner", + "sentiment-analysis", + "fill-mask", + "question-answering", + "text-generation", + "translation_en_to_fr", + "translation_en_to_de", + "translation_en_to_ro", +] + + +class OnnxConverterArgumentParser(ArgumentParser): + """ + Wraps all the script arguments supported to export transformers models to ONNX IR + """ + + def __init__(self): + super().__init__("ONNX Converter") + + self.add_argument( + "--pipeline", + type=str, + choices=SUPPORTED_PIPELINES, + default="feature-extraction", + ) + self.add_argument( + "--model", + type=str, + required=True, + help="Model's id or path (ex: google-bert/bert-base-cased)", + ) + self.add_argument("--tokenizer", type=str, help="Tokenizer's id or path (ex: google-bert/bert-base-cased)") + self.add_argument( + "--framework", + type=str, + choices=["pt", "tf"], + help="Framework for loading the model", + ) + self.add_argument("--opset", type=int, default=11, help="ONNX opset to use") + self.add_argument( + "--check-loading", + action="store_true", + help="Check ONNX is able to load the model", + ) + self.add_argument( + "--use-external-format", + action="store_true", + help="Allow exporting model >= than 2Gb", + ) + self.add_argument( + "--quantize", + action="store_true", + help="Quantize the neural network to be run with int8", + ) + self.add_argument("output") + + +def generate_identified_filename(filename: Path, identifier: str) -> Path: + """ + Append a string-identifier at the end (before the extension, if any) to the provided filepath + + Args: + filename: pathlib.Path The actual path object we would like to add an identifier suffix + identifier: The suffix to add + + Returns: String with concatenated identifier at the end of the filename + """ + return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix) + + +def check_onnxruntime_requirements(minimum_version: Version): + """ + Check onnxruntime is installed and if the installed version match is recent enough + + Raises: + ImportError: If onnxruntime is not installed or too old version is found + """ + try: + import onnxruntime + + # Parse the version of the installed onnxruntime + ort_version = parse(onnxruntime.__version__) + + # We require 1.4.0 minimum + if ort_version < ORT_QUANTIZE_MINIMUM_VERSION: + raise ImportError( + f"We found an older version of onnxruntime ({onnxruntime.__version__}) " + f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n" + "Please update onnxruntime by running `pip install --upgrade onnxruntime`" + ) + + except ImportError: + raise ImportError( + "onnxruntime doesn't seem to be currently installed. " + "Please install the onnxruntime by running `pip install onnxruntime`" + " and relaunch the conversion." 
+        )
+
+
+def ensure_valid_input(model, tokens, input_names):
+    """
+    Ensure inputs are presented in the correct order, without any None
+
+    Args:
+        model: The model used to forward the input data
+        tokens: BatchEncoding holding the input data
+        input_names: The name of the inputs
+
+    Returns: Tuple
+
+    """
+    print("Ensuring inputs are in correct order")
+
+    model_args_name = model.forward.__code__.co_varnames
+    model_args, ordered_input_names = [], []
+    for arg_name in model_args_name[1:]:  # start at index 1 to skip "self" argument
+        if arg_name in input_names:
+            ordered_input_names.append(arg_name)
+            model_args.append(tokens[arg_name])
+        else:
+            print(f"{arg_name} is not present in the generated input list.")
+            break
+
+    print(f"Generated inputs order: {ordered_input_names}")
+    return ordered_input_names, tuple(model_args)
+
+
+def infer_shapes(nlp: Pipeline, framework: str) -> Tuple[List[str], List[str], Dict, BatchEncoding]:
+    """
+    Attempt to infer the static vs dynamic axes for each input and output tensor for a specific model
+
+    Args:
+        nlp: The pipeline object holding the model to be exported
+        framework: The framework identifier to dispatch to the correct inference scheme (pt/tf)
+
+    Returns:
+
+        - List of the inferred input variable names
+        - List of the inferred output variable names
+        - Dictionary with input/output variable names as keys and shape tensors as values
+        - a BatchEncoding reference which was used to infer all the above information
+    """
+
+    def build_shape_dict(name: str, tensor, is_input: bool, seq_len: int):
+        if isinstance(tensor, (tuple, list)):
+            return [build_shape_dict(name, t, is_input, seq_len) for t in tensor]
+
+        else:
+            # Let's assume batch is the first axis with only 1 element (might not always be true ...)
+            axes = {[axis for axis, numel in enumerate(tensor.shape) if numel == 1][0]: "batch"}
+            if is_input:
+                if len(tensor.shape) == 2:
+                    axes[1] = "sequence"
+                else:
+                    raise ValueError(f"Unable to infer tensor axes ({len(tensor.shape)})")
+            else:
+                seq_axes = [dim for dim, shape in enumerate(tensor.shape) if shape == seq_len]
+                axes.update({dim: "sequence" for dim in seq_axes})
+
+        print(f"Found {'input' if is_input else 'output'} {name} with shape: {axes}")
+        return axes
+
+    tokens = nlp.tokenizer("This is a sample output", return_tensors=framework)
+    seq_len = tokens.input_ids.shape[-1]
+    outputs = nlp.model(**tokens) if framework == "pt" else nlp.model(tokens)
+    if isinstance(outputs, ModelOutput):
+        outputs = outputs.to_tuple()
+    if not isinstance(outputs, (list, tuple)):
+        outputs = (outputs,)
+
+    # Generate input names & axes
+    input_vars = list(tokens.keys())
+    input_dynamic_axes = {k: build_shape_dict(k, v, True, seq_len) for k, v in tokens.items()}
+
+    # flatten potentially grouped outputs (past for gpt2, attentions)
+    outputs_flat = []
+    for output in outputs:
+        if isinstance(output, (tuple, list)):
+            outputs_flat.extend(output)
+        else:
+            outputs_flat.append(output)
+
+    # Generate output names & axes
+    output_names = [f"output_{i}" for i in range(len(outputs_flat))]
+    output_dynamic_axes = {k: build_shape_dict(k, v, False, seq_len) for k, v in zip(output_names, outputs_flat)}
+
+    # Create the aggregated axes representation
+    dynamic_axes = dict(input_dynamic_axes, **output_dynamic_axes)
+    return input_vars, output_names, dynamic_axes, tokens
+
+
+def load_graph_from_args(
+    pipeline_name: str, framework: str, model: str, tokenizer: Optional[str] = None, **models_kwargs
+) -> Pipeline:
+    """
+    Convert the set of arguments provided through the
CLI to an actual pipeline reference (tokenizer + model)
+
+    Args:
+        pipeline_name: The kind of pipeline to use (ner, question-answering, etc.)
+        framework: The framework the pipeline is backed by ("pt" or "tf")
+        model: The model name which will be loaded by the pipeline
+        tokenizer: The tokenizer name which will be loaded by the pipeline, defaults to the model's value
+
+    Returns: Pipeline object
+
+    """
+    # If no tokenizer provided
+    if tokenizer is None:
+        tokenizer = model
+
+    # Check the wanted framework is available
+    if framework == "pt" and not is_torch_available():
+        raise Exception("Cannot convert because PyTorch is not installed. Please install torch first.")
+    if framework == "tf" and not is_tf_available():
+        raise Exception("Cannot convert because TF is not installed. Please install tensorflow first.")
+
+    print(f"Loading pipeline (model: {model}, tokenizer: {tokenizer})")
+
+    # Allocate tokenizer and model
+    return pipeline(pipeline_name, model=model, tokenizer=tokenizer, framework=framework, model_kwargs=models_kwargs)
+
+
+def convert_pytorch(nlp: Pipeline, opset: int, output: Path, use_external_format: bool):
+    """
+    Export a PyTorch backed pipeline to ONNX Intermediate Representation (IR)
+
+    Args:
+        nlp: The pipeline to be exported
+        opset: The actual version of the ONNX operator set to use
+        output: Path where the generated ONNX model will be stored
+        use_external_format: Split the model definition from its parameters to allow models bigger than 2GB
+
+    Returns:
+
+    """
+    if not is_torch_available():
+        raise Exception("Cannot convert because PyTorch is not installed. Please install torch first.")
+
+    import torch
+    from torch.onnx import export
+
+    print(f"Using framework PyTorch: {torch.__version__}")
+
+    with torch.no_grad():
+        input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "pt")
+        ordered_input_names, model_args = ensure_valid_input(nlp.model, tokens, input_names)
+
+        export(
+            nlp.model,
+            model_args,
+            f=output.as_posix(),
+            input_names=ordered_input_names,
+            output_names=output_names,
+            dynamic_axes=dynamic_axes,
+            do_constant_folding=True,
+            opset_version=opset,
+        )
+
+
+def convert_tensorflow(nlp: Pipeline, opset: int, output: Path):
+    """
+    Export a TensorFlow backed pipeline to ONNX Intermediate Representation (IR)
+
+    Args:
+        nlp: The pipeline to be exported
+        opset: The actual version of the ONNX operator set to use
+        output: Path where the generated ONNX model will be stored
+
+    Notes: TensorFlow cannot export models bigger than 2GB due to an internal constraint in TensorFlow
+
+    """
+    if not is_tf_available():
+        raise Exception("Cannot convert because TF is not installed. Please install tensorflow first.")
+
+    print("/!\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\")
+
+    try:
+        import tensorflow as tf
+        import tf2onnx
+        from tf2onnx import __version__ as t2ov
+
+        print(f"Using framework TensorFlow: {tf.version.VERSION}, tf2onnx: {t2ov}")
+
+        # Build
+        input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "tf")
+
+        # Forward
+        nlp.model.predict(tokens.data)
+        input_signature = [tf.TensorSpec.from_tensor(tensor, name=key) for key, tensor in tokens.items()]
+        model_proto, _ = tf2onnx.convert.from_keras(
+            nlp.model, input_signature, opset=opset, output_path=output.as_posix()
+        )
+
+    except ImportError as e:
+        raise Exception(
+            f"Cannot import {e.name} required to convert TF model to ONNX. Please install {e.name} first.
{e}" + ) + + +def convert( + framework: str, + model: str, + output: Path, + opset: int, + tokenizer: Optional[str] = None, + use_external_format: bool = False, + pipeline_name: str = "feature-extraction", + **model_kwargs, +): + """ + Convert the pipeline object to the ONNX Intermediate Representation (IR) format + + Args: + framework: The framework the pipeline is backed by ("pt" or "tf") + model: The name of the model to load for the pipeline + output: The path where the ONNX graph will be stored + opset: The actual version of the ONNX operator set to use + tokenizer: The name of the model to load for the pipeline, default to the model's name if not provided + use_external_format: + Split the model definition from its parameters to allow model bigger than 2GB (PyTorch only) + pipeline_name: The kind of pipeline to instantiate (ner, question-answering, etc.) + model_kwargs: Keyword arguments to be forwarded to the model constructor + + Returns: + + """ + warnings.warn( + "The `transformers.convert_graph_to_onnx` package is deprecated and will be removed in version 5 of" + " Transformers", + FutureWarning, + ) + print(f"ONNX opset version set to: {opset}") + + # Load the pipeline + nlp = load_graph_from_args(pipeline_name, framework, model, tokenizer, **model_kwargs) + + if not output.parent.exists(): + print(f"Creating folder {output.parent}") + makedirs(output.parent.as_posix()) + elif len(listdir(output.parent.as_posix())) > 0: + raise Exception(f"Folder {output.parent.as_posix()} is not empty, aborting conversion") + + # Export the graph + if framework == "pt": + convert_pytorch(nlp, opset, output, use_external_format) + else: + convert_tensorflow(nlp, opset, output) + + +def optimize(onnx_model_path: Path) -> Path: + """ + Load the model at the specified path and let onnxruntime look at transformations on the graph to enable all the + optimizations possible + + Args: + onnx_model_path: filepath where the model binary description is stored + + Returns: Path where the optimized model binary description has been saved + + """ + from onnxruntime import InferenceSession, SessionOptions + + # Generate model name with suffix "optimized" + opt_model_path = generate_identified_filename(onnx_model_path, "-optimized") + sess_option = SessionOptions() + sess_option.optimized_model_filepath = opt_model_path.as_posix() + _ = InferenceSession(onnx_model_path.as_posix(), sess_option) + + print(f"Optimized model has been written at {opt_model_path}: \N{heavy check mark}") + print("/!\\ Optimized model contains hardware specific operators which might not be portable. /!\\") + + return opt_model_path + + +def quantize(onnx_model_path: Path) -> Path: + """ + Quantize the weights of the model from float32 to in8 to allow very efficient inference on modern CPU + + Args: + onnx_model_path: Path to location the exported ONNX model is stored + + Returns: The Path generated for the quantized + """ + import onnx + import onnxruntime + from onnx.onnx_pb import ModelProto + from onnxruntime.quantization import QuantizationMode + from onnxruntime.quantization.onnx_quantizer import ONNXQuantizer + from onnxruntime.quantization.registry import IntegerOpsRegistry + + # Load the ONNX model + onnx_model = onnx.load(onnx_model_path.as_posix()) + + if parse(onnx.__version__) < parse("1.5.0"): + print( + "Models larger than 2GB will fail to quantize due to protobuf constraint.\n" + "Please upgrade to onnxruntime >= 1.5.0." 
+ ) + + # Copy it + copy_model = ModelProto() + copy_model.CopyFrom(onnx_model) + + # Construct quantizer + # onnxruntime renamed input_qType to activation_qType in v1.13.1, so we + # check the onnxruntime version to ensure backward compatibility. + # See also: https://github.com/microsoft/onnxruntime/pull/12873 + if parse(onnxruntime.__version__) < parse("1.13.1"): + quantizer = ONNXQuantizer( + model=copy_model, + per_channel=False, + reduce_range=False, + mode=QuantizationMode.IntegerOps, + static=False, + weight_qType=True, + input_qType=False, + tensors_range=None, + nodes_to_quantize=None, + nodes_to_exclude=None, + op_types_to_quantize=list(IntegerOpsRegistry), + ) + else: + quantizer = ONNXQuantizer( + model=copy_model, + per_channel=False, + reduce_range=False, + mode=QuantizationMode.IntegerOps, + static=False, + weight_qType=True, + activation_qType=False, + tensors_range=None, + nodes_to_quantize=None, + nodes_to_exclude=None, + op_types_to_quantize=list(IntegerOpsRegistry), + ) + + # Quantize and export + quantizer.quantize_model() + + # Append "-quantized" at the end of the model's name + quantized_model_path = generate_identified_filename(onnx_model_path, "-quantized") + + # Save model + print(f"Quantized model has been written at {quantized_model_path}: \N{heavy check mark}") + onnx.save_model(quantizer.model.model, quantized_model_path.as_posix()) + + return quantized_model_path + + +def verify(path: Path): + from onnxruntime import InferenceSession, SessionOptions + from onnxruntime.capi.onnxruntime_pybind11_state import RuntimeException + + print(f"Checking ONNX model loading from: {path} ...") + try: + onnx_options = SessionOptions() + _ = InferenceSession(path.as_posix(), onnx_options, providers=["CPUExecutionProvider"]) + print(f"Model {path} correctly loaded: \N{heavy check mark}") + except RuntimeException as re: + print(f"Error while loading the model {re}: \N{heavy ballot x}") + + +if __name__ == "__main__": + parser = OnnxConverterArgumentParser() + args = parser.parse_args() + + # Make sure output is absolute path + args.output = Path(args.output).absolute() + + try: + print("\n====== Converting model to ONNX ======") + # Convert + convert( + args.framework, + args.model, + args.output, + args.opset, + args.tokenizer, + args.use_external_format, + args.pipeline, + ) + + if args.quantize: + # Ensure requirements for quantization on onnxruntime is met + check_onnxruntime_requirements(ORT_QUANTIZE_MINIMUM_VERSION) + + # onnxruntime optimizations doesn't provide the same level of performances on TensorFlow than PyTorch + if args.framework == "tf": + print( + "\t Using TensorFlow might not provide the same optimization level compared to PyTorch.\n" + "\t For TensorFlow users you can try optimizing the model directly through onnxruntime_tools.\n" + "\t For more information, please refer to the onnxruntime documentation:\n" + "\t\thttps://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/transformers\n" + ) + + print("\n====== Optimizing ONNX model ======") + + # Quantization works best when using the optimized version of the model + args.optimized_output = optimize(args.output) + + # Do the quantization on the right graph + args.quantized_output = quantize(args.optimized_output) + + # And verify + if args.check_loading: + print("\n====== Check exported ONNX model(s) ======") + verify(args.output) + + if hasattr(args, "optimized_output"): + verify(args.optimized_output) + + if hasattr(args, "quantized_output"): + verify(args.quantized_output) + + 
except Exception as e: + print(f"Error while converting the model: {e}") + exit(1) diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/debug_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/debug_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dbceb1d849076999c6821556accaea05e53a9ff9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/debug_utils.py @@ -0,0 +1,346 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections + +from .utils import ExplicitEnum, is_torch_available, logging + + +if is_torch_available(): + import torch + + +logger = logging.get_logger(__name__) + + +class DebugUnderflowOverflow: + """ + This debug class helps detect and understand where the model starts getting very large or very small, and more + importantly `nan` or `inf` weight and activation elements. + + There are 2 working modes: + + 1. Underflow/overflow detection (default) + 2. Specific batch absolute min/max tracing without detection + + Mode 1: Underflow/overflow detection + + To activate the underflow/overflow detection, initialize the object with the model : + + ```python + debug_overflow = DebugUnderflowOverflow(model) + ``` + + then run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or output + elements this module will throw an exception and will print `max_frames_to_save` frames that lead to this event, + each frame reporting + + 1. the fully qualified module name plus the class name whose `forward` was run + 2. the absolute min and max value of all elements for each module weights, and the inputs and output + + For example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16 + mixed precision : + + ``` + Detected inf/nan during batch_number=0 + Last 21 forward frames: + abs min abs max metadata + [...] + encoder.block.2.layer.1.DenseReluDense.wi_0 Linear + 2.17e-07 4.50e+00 weight + 1.79e-06 4.65e+00 input[0] + 2.68e-06 3.70e+01 output + encoder.block.2.layer.1.DenseReluDense.wi_1 Linear + 8.08e-07 2.66e+01 weight + 1.79e-06 4.65e+00 input[0] + 1.27e-04 2.37e+02 output + encoder.block.2.layer.1.DenseReluDense.wo Linear + 1.01e-06 6.44e+00 weight + 0.00e+00 9.74e+03 input[0] + 3.18e-04 6.27e+04 output + encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense + 1.79e-06 4.65e+00 input[0] + 3.18e-04 6.27e+04 output + encoder.block.2.layer.1.dropout Dropout + 3.18e-04 6.27e+04 input[0] + 0.00e+00 inf output + ``` + + You can see here, that `T5DenseGatedGeluDense.forward` resulted in output activations, whose absolute max value was + around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which + renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than + 64K, and we get an overlow. 
+
+    As you can see it's the previous frames that we need to look into when the numbers start getting very large for
+    fp16.
+
+    The tracking is done in a forward hook, which gets invoked immediately after `forward` has completed.
+
+    By default the last 21 frames are printed. You can change the default to adjust for your needs. For example:
+
+    ```python
+    debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)
+    ```
+
+    To validate that you have set up this debugging feature correctly before using it in a training that may take
+    hours to complete, first run it with normal tracing enabled for one or a few batches, as explained in the next
+    section.
+
+
+    Mode 2. Specific batch absolute min/max tracing without detection
+
+    The second working mode is per-batch tracing with the underflow/overflow detection feature turned off.
+
+    Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a
+    given batch, and only do that for batches 1 and 3. Then you instantiate this class as:
+
+    ```python
+    debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3])
+    ```
+
+    And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed.
+
+    This is helpful if you know that the program starts misbehaving after a certain batch number, so you can
+    fast-forward right to that area.
+
+
+    Early stopping:
+
+    You can also specify the batch number after which to stop the training, with:
+
+    ```python
+    debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)
+    ```
+
+    This feature is mainly useful in the tracing mode, but you can use it for any mode.
+
+
+    **Performance**:
+
+    As this module measures the absolute `min`/`max` of each weight of the model on every forward pass, it'll slow
+    the training down. Therefore remember to turn it off once the debugging needs have been met.
+
+    Args:
+        model (`nn.Module`):
+            The model to debug.
+ max_frames_to_save (`int`, *optional*, defaults to 21): + How many frames back to record + trace_batch_nums(`List[int]`, *optional*, defaults to `[]`): + Which batch numbers to trace (turns detection off) + abort_after_batch_num (`int``, *optional*): + Whether to abort after a certain batch number has finished + """ + + def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None): + self.model = model + self.trace_batch_nums = trace_batch_nums + self.abort_after_batch_num = abort_after_batch_num + + # keep a LIFO buffer of frames to dump as soon as inf/nan is encountered to give context to the problem emergence + self.frames = collections.deque([], max_frames_to_save) + self.frame = [] + self.batch_number = 0 + self.total_calls = 0 + self.detected_overflow = False + self.prefix = " " + + self.analyse_model() + + self.register_forward_hook() + + def save_frame(self, frame=None): + if frame is not None: + self.expand_frame(frame) + self.frames.append("\n".join(self.frame)) + self.frame = [] # start a new frame + + def expand_frame(self, line): + self.frame.append(line) + + def trace_frames(self): + print("\n".join(self.frames)) + self.frames = [] + + def reset_saved_frames(self): + self.frames = [] + + def dump_saved_frames(self): + print(f"\nDetected inf/nan during batch_number={self.batch_number}") + print(f"Last {len(self.frames)} forward frames:") + print(f"{'abs min':8} {'abs max':8} metadata") + print("\n".join(self.frames)) + print("\n\n") + self.frames = [] + + def analyse_model(self): + # extract the fully qualified module names, to be able to report at run time. e.g.: + # encoder.block.2.layer.0.SelfAttention.o + # + # for shared weights only the first shared module name will be registered + self.module_names = {m: name for name, m in self.model.named_modules()} + # self.longest_module_name = max(len(v) for v in self.module_names.values()) + + def analyse_variable(self, var, ctx): + if torch.is_tensor(var): + self.expand_frame(get_abs_min_max(var, ctx)) + if detect_overflow(var, ctx): + self.detected_overflow = True + elif var is None: + self.expand_frame(f"{'None':>17} {ctx}") + else: + self.expand_frame(f"{'not a tensor':>17} {ctx}") + + def batch_start_frame(self): + self.expand_frame(f"\n\n{self.prefix} *** Starting batch number={self.batch_number} ***") + self.expand_frame(f"{'abs min':8} {'abs max':8} metadata") + + def batch_end_frame(self): + self.expand_frame(f"{self.prefix} *** Finished batch number={self.batch_number-1} ***\n\n") + + def create_frame(self, module, input, output): + self.expand_frame(f"{self.prefix} {self.module_names[module]} {module.__class__.__name__}") + + # params + for name, p in module.named_parameters(recurse=False): + self.analyse_variable(p, name) + + # inputs + if isinstance(input, tuple): + for i, x in enumerate(input): + self.analyse_variable(x, f"input[{i}]") + else: + self.analyse_variable(input, "input") + + # outputs + if isinstance(output, tuple): + for i, x in enumerate(output): + # possibly a tuple of tuples + if isinstance(x, tuple): + for j, y in enumerate(x): + self.analyse_variable(y, f"output[{i}][{j}]") + else: + self.analyse_variable(x, f"output[{i}]") + else: + self.analyse_variable(output, "output") + + self.save_frame() + + def register_forward_hook(self): + self.model.apply(self._register_forward_hook) + + def _register_forward_hook(self, module): + module.register_forward_hook(self.forward_hook) + + def forward_hook(self, module, input, output): + # - input is a tuple of packed inputs 
(could be non-Tensors) + # - output could be a Tensor or a tuple of Tensors and non-Tensors + + last_frame_of_batch = False + + trace_mode = True if self.batch_number in self.trace_batch_nums else False + if trace_mode: + self.reset_saved_frames() + + if self.total_calls == 0: + self.batch_start_frame() + self.total_calls += 1 + + # count batch numbers - the very first forward hook of the batch will be called when the + # batch completes - i.e. it gets called very last - we know this batch has finished + if module == self.model: + self.batch_number += 1 + last_frame_of_batch = True + + self.create_frame(module, input, output) + + # if last_frame_of_batch: + # self.batch_end_frame() + + if trace_mode: + self.trace_frames() + + if last_frame_of_batch: + self.batch_start_frame() + + if self.detected_overflow and not trace_mode: + self.dump_saved_frames() + + # now we can abort, as it's pointless to continue running + raise ValueError( + "DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. " + "Please scroll up above this traceback to see the activation values prior to this event." + ) + + # abort after certain batch if requested to do so + if self.abort_after_batch_num is not None and self.batch_number > self.abort_after_batch_num: + raise ValueError( + f"DebugUnderflowOverflow: aborting after {self.batch_number} batches due to" + f" `abort_after_batch_num={self.abort_after_batch_num}` arg" + ) + + +def get_abs_min_max(var, ctx): + abs_var = var.abs() + return f"{abs_var.min():8.2e} {abs_var.max():8.2e} {ctx}" + + +def detect_overflow(var, ctx): + """ + Report whether the tensor contains any `nan` or `inf` entries. + + This is useful for detecting overflows/underflows and best to call right after the function that did some math that + modified the tensor in question. + + This function contains a few other helper features that you can enable and tweak directly if you want to track + various other things. + + Args: + var: the tensor variable to check + ctx: the message to print as a context + + Return: + `True` if `inf` or `nan` was detected, `False` otherwise + """ + detected = False + if torch.isnan(var).any().item(): + detected = True + print(f"{ctx} has nans") + if torch.isinf(var).any().item(): + detected = True + print(f"{ctx} has infs") + + # if needed to monitor large elements can enable the following + if 0: # and detected: + n100 = var[torch.ge(var.abs(), 100)] + if n100.numel() > 0: + print(f"{ctx}: n100={n100.numel()}") + n1000 = var[torch.ge(var.abs(), 1000)] + if n1000.numel() > 0: + print(f"{ctx}: n1000={n1000.numel()}") + n10000 = var[torch.ge(var.abs(), 10000)] + if n10000.numel() > 0: + print(f"{ctx}: n10000={n10000.numel()}") + + if 0: + print(f"min={var.min():9.2e} max={var.max():9.2e}") + + if 0: + print(f"min={var.min():9.2e} max={var.max():9.2e} var={var.var():9.2e} mean={var.mean():9.2e} ({ctx})") + + return detected + + +class DebugOption(ExplicitEnum): + UNDERFLOW_OVERFLOW = "underflow_overflow" + TPU_METRICS_DEBUG = "tpu_metrics_debug" diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/deepspeed.py b/env-llmeval/lib/python3.10/site-packages/transformers/deepspeed.py new file mode 100644 index 0000000000000000000000000000000000000000..840d9cc2f55a16337c94e2106f48c421f35c7266 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/deepspeed.py @@ -0,0 +1,40 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Integration with Deepspeed - kept for backward compatiblity, if you plan to make any edit, make sure to modify the file +in `integrations/deepspeed` instead. + +Check: https://github.com/huggingface/transformers/pull/25599 +""" +import warnings + + +warnings.warn( + "transformers.deepspeed module is deprecated and will be removed in a future version. Please import deepspeed modules directly from transformers.integrations", + FutureWarning, +) + +# Backward compatibility imports, to make sure all those objects can be found in integrations/deepspeed +from .integrations.deepspeed import ( # noqa + HfDeepSpeedConfig, + HfTrainerDeepSpeedConfig, + deepspeed_config, + deepspeed_init, + deepspeed_load_checkpoint, + deepspeed_optim_sched, + is_deepspeed_available, + is_deepspeed_zero3_enabled, + set_hf_deepspeed_config, + unset_hf_deepspeed_config, +) diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/dependency_versions_check.py b/env-llmeval/lib/python3.10/site-packages/transformers/dependency_versions_check.py new file mode 100644 index 0000000000000000000000000000000000000000..82d07850847ec357f36ff51088ddec36aceff093 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/dependency_versions_check.py @@ -0,0 +1,63 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
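+
+# Example (an illustrative sketch): other modules can validate a single optional dependency at
+# import time through `dep_version_check`, defined at the bottom of this file:
+#
+#   from transformers.dependency_versions_check import dep_version_check
+#
+#   dep_version_check("tokenizers")  # raises if the installed version violates the pin
+#                                    # recorded in dependency_versions_table.py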
+ +from .dependency_versions_table import deps +from .utils.versions import require_version, require_version_core + + +# define which module versions we always want to check at run time +# (usually the ones defined in `install_requires` in setup.py) +# +# order specific notes: +# - tqdm must be checked before tokenizers + +pkgs_to_check_at_runtime = [ + "python", + "tqdm", + "regex", + "requests", + "packaging", + "filelock", + "numpy", + "tokenizers", + "huggingface-hub", + "safetensors", + "accelerate", + "pyyaml", +] + +for pkg in pkgs_to_check_at_runtime: + if pkg in deps: + if pkg == "tokenizers": + # must be loaded here, or else tqdm check may fail + from .utils import is_tokenizers_available + + if not is_tokenizers_available(): + continue # not required, check version only if installed + elif pkg == "accelerate": + # must be loaded here, or else tqdm check may fail + from .utils import is_accelerate_available + + # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of + # Transformers with PyTorch + if not is_accelerate_available(): + continue # not required, check version only if installed + + require_version_core(deps[pkg]) + else: + raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") + + +def dep_version_check(pkg, hint=None): + require_version(deps[pkg], hint) diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/dynamic_module_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/dynamic_module_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..72fd0367a0f401051c8c19623ec19d163322d3cf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/dynamic_module_utils.py @@ -0,0 +1,628 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities to dynamically load objects from the Hub.""" +import filecmp +import importlib +import os +import re +import shutil +import signal +import sys +import typing +import warnings +from pathlib import Path +from typing import Any, Dict, List, Optional, Union + +from huggingface_hub import try_to_load_from_cache + +from .utils import ( + HF_MODULES_CACHE, + TRANSFORMERS_DYNAMIC_MODULE_NAME, + cached_file, + extract_commit_hash, + is_offline_mode, + logging, +) + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def init_hf_modules(): + """ + Creates the cache directory for modules with an init, and adds it to the Python path. + """ + # This function has already been executed if HF_MODULES_CACHE already is in the Python path. + if HF_MODULES_CACHE in sys.path: + return + + sys.path.append(HF_MODULES_CACHE) + os.makedirs(HF_MODULES_CACHE, exist_ok=True) + init_path = Path(HF_MODULES_CACHE) / "__init__.py" + if not init_path.exists(): + init_path.touch() + importlib.invalidate_caches() + + +def create_dynamic_module(name: Union[str, os.PathLike]): + """ + Creates a dynamic module in the cache directory for modules. 
+ + Args: + name (`str` or `os.PathLike`): + The name of the dynamic module to create. + """ + init_hf_modules() + dynamic_module_path = (Path(HF_MODULES_CACHE) / name).resolve() + # If the parent module does not exist yet, recursively create it. + if not dynamic_module_path.parent.exists(): + create_dynamic_module(dynamic_module_path.parent) + os.makedirs(dynamic_module_path, exist_ok=True) + init_path = dynamic_module_path / "__init__.py" + if not init_path.exists(): + init_path.touch() + # It is extremely important to invalidate the cache when we change stuff in those modules, or users end up + # with errors about module that do not exist. Same for all other `invalidate_caches` in this file. + importlib.invalidate_caches() + + +def get_relative_imports(module_file: Union[str, os.PathLike]) -> List[str]: + """ + Get the list of modules that are relatively imported in a module file. + + Args: + module_file (`str` or `os.PathLike`): The module file to inspect. + + Returns: + `List[str]`: The list of relative imports in the module. + """ + with open(module_file, "r", encoding="utf-8") as f: + content = f.read() + + # Imports of the form `import .xxx` + relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE) + # Imports of the form `from .xxx import yyy` + relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE) + # Unique-ify + return list(set(relative_imports)) + + +def get_relative_import_files(module_file: Union[str, os.PathLike]) -> List[str]: + """ + Get the list of all files that are needed for a given module. Note that this function recurses through the relative + imports (if a imports b and b imports c, it will return module files for b and c). + + Args: + module_file (`str` or `os.PathLike`): The module file to inspect. + + Returns: + `List[str]`: The list of all relative imports a given module needs (recursively), which will give us the list + of module files a given module needs. + """ + no_change = False + files_to_check = [module_file] + all_relative_imports = [] + + # Let's recurse through all relative imports + while not no_change: + new_imports = [] + for f in files_to_check: + new_imports.extend(get_relative_imports(f)) + + module_path = Path(module_file).parent + new_import_files = [str(module_path / m) for m in new_imports] + new_import_files = [f for f in new_import_files if f not in all_relative_imports] + files_to_check = [f"{f}.py" for f in new_import_files] + + no_change = len(new_import_files) == 0 + all_relative_imports.extend(files_to_check) + + return all_relative_imports + + +def get_imports(filename: Union[str, os.PathLike]) -> List[str]: + """ + Extracts all the libraries (not relative imports this time) that are imported in a file. + + Args: + filename (`str` or `os.PathLike`): The module file to inspect. + + Returns: + `List[str]`: The list of all packages required to use the input module. 
+ """ + with open(filename, "r", encoding="utf-8") as f: + content = f.read() + + # filter out try/except block so in custom code we can have try/except imports + content = re.sub(r"\s*try\s*:\s*.*?\s*except\s*.*?:", "", content, flags=re.MULTILINE | re.DOTALL) + + # Imports of the form `import xxx` + imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE) + # Imports of the form `from xxx import yyy` + imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE) + # Only keep the top-level module + imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")] + return list(set(imports)) + + +def check_imports(filename: Union[str, os.PathLike]) -> List[str]: + """ + Check if the current Python environment contains all the libraries that are imported in a file. Will raise if a + library is missing. + + Args: + filename (`str` or `os.PathLike`): The module file to check. + + Returns: + `List[str]`: The list of relative imports in the file. + """ + imports = get_imports(filename) + missing_packages = [] + for imp in imports: + try: + importlib.import_module(imp) + except ImportError: + missing_packages.append(imp) + + if len(missing_packages) > 0: + raise ImportError( + "This modeling file requires the following packages that were not found in your environment: " + f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`" + ) + + return get_relative_imports(filename) + + +def get_class_in_module(class_name: str, module_path: Union[str, os.PathLike]) -> typing.Type: + """ + Import a module on the cache directory for modules and extract a class from it. + + Args: + class_name (`str`): The name of the class to import. + module_path (`str` or `os.PathLike`): The path to the module to import. + + Returns: + `typing.Type`: The class looked for. + """ + name = os.path.normpath(module_path).replace(".py", "").replace(os.path.sep, ".") + module_path = str(Path(HF_MODULES_CACHE) / module_path) + module = importlib.machinery.SourceFileLoader(name, module_path).load_module() + return getattr(module, class_name) + + +def get_cached_module_file( + pretrained_model_name_or_path: Union[str, os.PathLike], + module_file: str, + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + repo_type: Optional[str] = None, + _commit_hash: Optional[str] = None, + **deprecated_kwargs, +) -> str: + """ + Prepares Downloads a module from a local folder or a distant repo and returns its path inside the cached + Transformers module. + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. + - a path to a *directory* containing a configuration file saved using the + [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + + module_file (`str`): + The name of the module file containing the class to look for. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. 
+ resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + repo_type (`str`, *optional*): + Specify the repo type (useful when downloading from a space for instance). + + + + Passing `token=True` is required when you want to use a private model. + + + + Returns: + `str`: The path to the module inside the cache. + """ + use_auth_token = deprecated_kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") + token = use_auth_token + + if is_offline_mode() and not local_files_only: + logger.info("Offline mode: forcing local_files_only=True") + local_files_only = True + + # Download and cache module_file from the repo `pretrained_model_name_or_path` of grab it if it's a local file. + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + is_local = os.path.isdir(pretrained_model_name_or_path) + if is_local: + submodule = os.path.basename(pretrained_model_name_or_path) + else: + submodule = pretrained_model_name_or_path.replace("/", os.path.sep) + cached_module = try_to_load_from_cache( + pretrained_model_name_or_path, module_file, cache_dir=cache_dir, revision=_commit_hash, repo_type=repo_type + ) + + new_files = [] + try: + # Load from URL or cache if already cached + resolved_module_file = cached_file( + pretrained_model_name_or_path, + module_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + revision=revision, + repo_type=repo_type, + _commit_hash=_commit_hash, + ) + if not is_local and cached_module != resolved_module_file: + new_files.append(module_file) + + except EnvironmentError: + logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.") + raise + + # Check we have all the requirements in our environment + modules_needed = check_imports(resolved_module_file) + + # Now we move the module inside our cached dynamic modules. + full_submodule = TRANSFORMERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule + create_dynamic_module(full_submodule) + submodule_path = Path(HF_MODULES_CACHE) / full_submodule + if submodule == os.path.basename(pretrained_model_name_or_path): + # We copy local files to avoid putting too many folders in sys.path. 
This copy is done when the file is new or + # has changed since last copy. + if not (submodule_path / module_file).exists() or not filecmp.cmp( + resolved_module_file, str(submodule_path / module_file) + ): + shutil.copy(resolved_module_file, submodule_path / module_file) + importlib.invalidate_caches() + for module_needed in modules_needed: + module_needed = f"{module_needed}.py" + module_needed_file = os.path.join(pretrained_model_name_or_path, module_needed) + if not (submodule_path / module_needed).exists() or not filecmp.cmp( + module_needed_file, str(submodule_path / module_needed) + ): + shutil.copy(module_needed_file, submodule_path / module_needed) + importlib.invalidate_caches() + else: + # Get the commit hash + commit_hash = extract_commit_hash(resolved_module_file, _commit_hash) + + # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the + # benefit of versioning. + submodule_path = submodule_path / commit_hash + full_submodule = full_submodule + os.path.sep + commit_hash + create_dynamic_module(full_submodule) + + if not (submodule_path / module_file).exists(): + shutil.copy(resolved_module_file, submodule_path / module_file) + importlib.invalidate_caches() + # Make sure we also have every file with relative + for module_needed in modules_needed: + if not (submodule_path / f"{module_needed}.py").exists(): + get_cached_module_file( + pretrained_model_name_or_path, + f"{module_needed}.py", + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + _commit_hash=commit_hash, + ) + new_files.append(f"{module_needed}.py") + + if len(new_files) > 0 and revision is None: + new_files = "\n".join([f"- {f}" for f in new_files]) + repo_type_str = "" if repo_type is None else f"{repo_type}s/" + url = f"https://huggingface.co/{repo_type_str}{pretrained_model_name_or_path}" + logger.warning( + f"A new version of the following files was downloaded from {url}:\n{new_files}" + "\n. Make sure to double-check they do not contain any added malicious code. To avoid downloading new " + "versions of the code file, you can pin a revision." + ) + + return os.path.join(full_submodule, module_file) + + +def get_class_from_dynamic_module( + class_reference: str, + pretrained_model_name_or_path: Union[str, os.PathLike], + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + repo_type: Optional[str] = None, + code_revision: Optional[str] = None, + **kwargs, +) -> typing.Type: + """ + Extracts a class from a module file, present in the local folder or repository of a model. + + + + Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should + therefore only be called on trusted repos. + + + + + + Args: + class_reference (`str`): + The full name of the class to load, including its module and optionally its repo. + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. + - a path to a *directory* containing a configuration file saved using the + [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. 
+ + This is used when `class_reference` does not specify another repo. + module_file (`str`): + The name of the module file containing the class to look for. + class_name (`str`): + The name of the class to import in the module. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + repo_type (`str`, *optional*): + Specify the repo type (useful when downloading from a space for instance). + code_revision (`str`, *optional*, defaults to `"main"`): + The specific revision to use for the code on the Hub, if the code leaves in a different repository than the + rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based system for + storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. + + + + Passing `token=True` is required when you want to use a private model. + + + + Returns: + `typing.Type`: The class, dynamically imported from the module. + + Examples: + + ```python + # Download module `modeling.py` from huggingface.co and cache then extract the class `MyBertModel` from this + # module. + cls = get_class_from_dynamic_module("modeling.MyBertModel", "sgugger/my-bert-model") + + # Download module `modeling.py` from a given repo and cache then extract the class `MyBertModel` from this + # module. + cls = get_class_from_dynamic_module("sgugger/my-bert-model--modeling.MyBertModel", "sgugger/another-bert-model") + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. 
Please set only the argument `token`.") + token = use_auth_token + + # Catch the name of the repo if it's specified in `class_reference` + if "--" in class_reference: + repo_id, class_reference = class_reference.split("--") + else: + repo_id = pretrained_model_name_or_path + module_file, class_name = class_reference.split(".") + + if code_revision is None and pretrained_model_name_or_path == repo_id: + code_revision = revision + # And lastly we get the class inside our newly created module + final_module = get_cached_module_file( + repo_id, + module_file + ".py", + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=code_revision, + local_files_only=local_files_only, + repo_type=repo_type, + ) + return get_class_in_module(class_name, final_module) + + +def custom_object_save(obj: Any, folder: Union[str, os.PathLike], config: Optional[Dict] = None) -> List[str]: + """ + Save the modeling files corresponding to a custom model/configuration/tokenizer etc. in a given folder. Optionally + adds the proper fields in a config. + + Args: + obj (`Any`): The object for which to save the module files. + folder (`str` or `os.PathLike`): The folder where to save. + config (`PretrainedConfig` or dictionary, `optional`): + A config in which to register the auto_map corresponding to this custom object. + + Returns: + `List[str]`: The list of files saved. + """ + if obj.__module__ == "__main__": + logger.warning( + f"We can't save the code defining {obj} in {folder} as it's been defined in __main__. You should put " + "this code in a separate module so we can include it in the saved folder and make it easier to share via " + "the Hub." + ) + return + + def _set_auto_map_in_config(_config): + module_name = obj.__class__.__module__ + last_module = module_name.split(".")[-1] + full_name = f"{last_module}.{obj.__class__.__name__}" + # Special handling for tokenizers + if "Tokenizer" in full_name: + slow_tokenizer_class = None + fast_tokenizer_class = None + if obj.__class__.__name__.endswith("Fast"): + # Fast tokenizer: we have the fast tokenizer class and we may have the slow one has an attribute. + fast_tokenizer_class = f"{last_module}.{obj.__class__.__name__}" + if getattr(obj, "slow_tokenizer_class", None) is not None: + slow_tokenizer = getattr(obj, "slow_tokenizer_class") + slow_tok_module_name = slow_tokenizer.__module__ + last_slow_tok_module = slow_tok_module_name.split(".")[-1] + slow_tokenizer_class = f"{last_slow_tok_module}.{slow_tokenizer.__name__}" + else: + # Slow tokenizer: no way to have the fast class + slow_tokenizer_class = f"{last_module}.{obj.__class__.__name__}" + + full_name = (slow_tokenizer_class, fast_tokenizer_class) + + if isinstance(_config, dict): + auto_map = _config.get("auto_map", {}) + auto_map[obj._auto_class] = full_name + _config["auto_map"] = auto_map + elif getattr(_config, "auto_map", None) is not None: + _config.auto_map[obj._auto_class] = full_name + else: + _config.auto_map = {obj._auto_class: full_name} + + # Add object class to the config auto_map + if isinstance(config, (list, tuple)): + for cfg in config: + _set_auto_map_in_config(cfg) + elif config is not None: + _set_auto_map_in_config(config) + + result = [] + # Copy module file to the output folder. 
+ object_file = sys.modules[obj.__module__].__file__ + dest_file = Path(folder) / (Path(object_file).name) + shutil.copy(object_file, dest_file) + result.append(dest_file) + + # Gather all relative imports recursively and make sure they are copied as well. + for needed_file in get_relative_import_files(object_file): + dest_file = Path(folder) / (Path(needed_file).name) + shutil.copy(needed_file, dest_file) + result.append(dest_file) + + return result + + +def _raise_timeout_error(signum, frame): + raise ValueError( + "Loading this model requires you to execute custom code contained in the model repository on your local " + "machine. Please set the option `trust_remote_code=True` to permit loading of this model." + ) + + +TIME_OUT_REMOTE_CODE = 15 + + +def resolve_trust_remote_code(trust_remote_code, model_name, has_local_code, has_remote_code): + if trust_remote_code is None: + if has_local_code: + trust_remote_code = False + elif has_remote_code and TIME_OUT_REMOTE_CODE > 0: + try: + signal.signal(signal.SIGALRM, _raise_timeout_error) + signal.alarm(TIME_OUT_REMOTE_CODE) + while trust_remote_code is None: + answer = input( + f"The repository for {model_name} contains custom code which must be executed to correctly " + f"load the model. You can inspect the repository content at https://hf.co/{model_name}.\n" + f"You can avoid this prompt in future by passing the argument `trust_remote_code=True`.\n\n" + f"Do you wish to run the custom code? [y/N] " + ) + if answer.lower() in ["yes", "y", "1"]: + trust_remote_code = True + elif answer.lower() in ["no", "n", "0", ""]: + trust_remote_code = False + signal.alarm(0) + except Exception: + # OS which does not support signal.SIGALRM + raise ValueError( + f"The repository for {model_name} contains custom code which must be executed to correctly " + f"load the model. You can inspect the repository content at https://hf.co/{model_name}.\n" + f"Please pass the argument `trust_remote_code=True` to allow custom code to be run." + ) + elif has_remote_code: + # For the CI which puts the timeout at 0 + _raise_timeout_error(None, None) + + if has_remote_code and not has_local_code and not trust_remote_code: + raise ValueError( + f"Loading {model_name} requires you to execute the configuration file in that" + " repo on your local machine. Make sure you have read the code there to avoid malicious use, then" + " set the option `trust_remote_code=True` to remove this error." + ) + + return trust_remote_code diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/feature_extraction_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/feature_extraction_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b0df39e1642b9a5afbc07866061a9d992759c566 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/feature_extraction_utils.py @@ -0,0 +1,684 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
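+
+# Example (an illustrative sketch): `BatchFeature`, defined below, behaves like a dict and can
+# convert its values to framework tensors:
+#
+#   import numpy as np
+#   from transformers.feature_extraction_utils import BatchFeature
+#
+#   feats = BatchFeature({"input_values": np.zeros((1, 16000), dtype=np.float32)}, tensor_type="np")
+#   feats = BatchFeature(feats.data, tensor_type="pt")  # assumes PyTorch is installed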
+""" + Feature extraction saving/loading class for common feature extractors. +""" + +import copy +import json +import os +import warnings +from collections import UserDict +from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union + +import numpy as np + +from .dynamic_module_utils import custom_object_save +from .utils import ( + FEATURE_EXTRACTOR_NAME, + PushToHubMixin, + TensorType, + add_model_info_to_auto_map, + cached_file, + copy_func, + download_url, + is_flax_available, + is_jax_tensor, + is_numpy_array, + is_offline_mode, + is_remote_url, + is_tf_available, + is_torch_available, + is_torch_device, + is_torch_dtype, + logging, + requires_backends, +) + + +if TYPE_CHECKING: + if is_torch_available(): + import torch # noqa + + +logger = logging.get_logger(__name__) + +PreTrainedFeatureExtractor = Union["SequenceFeatureExtractor"] # noqa: F821 + + +class BatchFeature(UserDict): + r""" + Holds the output of the [`~SequenceFeatureExtractor.pad`] and feature extractor specific `__call__` methods. + + This class is derived from a python dictionary and can be used as a dictionary. + + Args: + data (`dict`, *optional*): + Dictionary of lists/arrays/tensors returned by the __call__/pad methods ('input_values', 'attention_mask', + etc.). + tensor_type (`Union[None, str, TensorType]`, *optional*): + You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at + initialization. + """ + + def __init__(self, data: Optional[Dict[str, Any]] = None, tensor_type: Union[None, str, TensorType] = None): + super().__init__(data) + self.convert_to_tensors(tensor_type=tensor_type) + + def __getitem__(self, item: str) -> Union[Any]: + """ + If the key is a string, returns the value of the dict associated to `key` ('input_values', 'attention_mask', + etc.). + """ + if isinstance(item, str): + return self.data[item] + else: + raise KeyError("Indexing with integers is not available when using Python based feature extractors") + + def __getattr__(self, item: str): + try: + return self.data[item] + except KeyError: + raise AttributeError + + def __getstate__(self): + return {"data": self.data} + + def __setstate__(self, state): + if "data" in state: + self.data = state["data"] + + # Copied from transformers.tokenization_utils_base.BatchEncoding.keys + def keys(self): + return self.data.keys() + + # Copied from transformers.tokenization_utils_base.BatchEncoding.values + def values(self): + return self.data.values() + + # Copied from transformers.tokenization_utils_base.BatchEncoding.items + def items(self): + return self.data.items() + + def _get_is_as_tensor_fns(self, tensor_type: Optional[Union[str, TensorType]] = None): + if tensor_type is None: + return None, None + + # Convert to TensorType + if not isinstance(tensor_type, TensorType): + tensor_type = TensorType(tensor_type) + + # Get a function reference for the correct framework + if tensor_type == TensorType.TENSORFLOW: + if not is_tf_available(): + raise ImportError( + "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." 
+ ) + import tensorflow as tf + + as_tensor = tf.constant + is_tensor = tf.is_tensor + elif tensor_type == TensorType.PYTORCH: + if not is_torch_available(): + raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") + import torch # noqa + + def as_tensor(value): + if isinstance(value, (list, tuple)) and len(value) > 0 and isinstance(value[0], np.ndarray): + value = np.array(value) + return torch.tensor(value) + + is_tensor = torch.is_tensor + elif tensor_type == TensorType.JAX: + if not is_flax_available(): + raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.") + import jax.numpy as jnp # noqa: F811 + + as_tensor = jnp.array + is_tensor = is_jax_tensor + else: + + def as_tensor(value, dtype=None): + if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)): + value_lens = [len(val) for val in value] + if len(set(value_lens)) > 1 and dtype is None: + # we have a ragged list so handle explicitly + value = as_tensor([np.asarray(val) for val in value], dtype=object) + return np.asarray(value, dtype=dtype) + + is_tensor = is_numpy_array + return is_tensor, as_tensor + + def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None): + """ + Convert the inner content to tensors. + + Args: + tensor_type (`str` or [`~utils.TensorType`], *optional*): + The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If + `None`, no modification is done. + """ + if tensor_type is None: + return self + + is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type) + + # Do the tensor conversion in batch + for key, value in self.items(): + try: + if not is_tensor(value): + tensor = as_tensor(value) + + self[key] = tensor + except: # noqa E722 + if key == "overflowing_values": + raise ValueError("Unable to create tensor returning overflowing values of different lengths. ") + raise ValueError( + "Unable to create tensor, you should probably activate padding " + "with 'padding=True' to have batched tensors with the same length." + ) + + return self + + def to(self, *args, **kwargs) -> "BatchFeature": + """ + Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in + different `dtypes` and sending the `BatchFeature` to a different `device`. + + Args: + args (`Tuple`): + Will be passed to the `to(...)` function of the tensors. + kwargs (`Dict`, *optional*): + Will be passed to the `to(...)` function of the tensors. + + Returns: + [`BatchFeature`]: The same instance after modification. + """ + requires_backends(self, ["torch"]) + import torch # noqa + + new_data = {} + device = kwargs.get("device") + # Check if the args are a device or a dtype + if device is None and len(args) > 0: + # device should be always the first argument + arg = args[0] + if is_torch_dtype(arg): + # The first argument is a dtype + pass + elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int): + device = arg + else: + # it's something else + raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. 
This is not supported.") + # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor` + for k, v in self.items(): + # check if v is a floating point + if torch.is_floating_point(v): + # cast and send to device + new_data[k] = v.to(*args, **kwargs) + elif device is not None: + new_data[k] = v.to(device=device) + else: + new_data[k] = v + self.data = new_data + return self + + +class FeatureExtractionMixin(PushToHubMixin): + """ + This is a feature extraction mixin used to provide saving/loading functionality for sequential and image feature + extractors. + """ + + _auto_class = None + + def __init__(self, **kwargs): + """Set elements of `kwargs` as attributes.""" + # Pop "processor_class" as it should be saved as private attribute + self._processor_class = kwargs.pop("processor_class", None) + # Additional attributes without default values + for key, value in kwargs.items(): + try: + setattr(self, key, value) + except AttributeError as err: + logger.error(f"Can't set {key} with value {value} for {self}") + raise err + + def _set_processor_class(self, processor_class: str): + """Sets processor class as an attribute.""" + self._processor_class = processor_class + + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path: Union[str, os.PathLike], + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + local_files_only: bool = False, + token: Optional[Union[str, bool]] = None, + revision: str = "main", + **kwargs, + ): + r""" + Instantiate a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a feature extractor, *e.g.* a + derived class of [`SequenceFeatureExtractor`]. + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on + huggingface.co. + - a path to a *directory* containing a feature extractor file saved using the + [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g., + `./my_model_directory/`. + - a path or url to a saved feature extractor JSON *file*, e.g., + `./my_model_directory/preprocessor_config.json`. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model feature extractor should be cached if the + standard cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the feature extractor files and override the cached versions + if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file + exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use + the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. 
+ + + + + To test a pull request you made on the Hub, you can pass `revision="refs/pr/". + + + + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + If `False`, then this function returns just the final feature extractor object. If `True`, then this + functions returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary + consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of + `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored. + kwargs (`Dict[str, Any]`, *optional*): + The values in kwargs of any keys which are feature extractor attributes will be used to override the + loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is + controlled by the `return_unused_kwargs` keyword parameter. + + Returns: + A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`]. + + Examples: + + ```python + # We can't instantiate directly the base class *FeatureExtractionMixin* nor *SequenceFeatureExtractor* so let's show the examples on a + # derived class: *Wav2Vec2FeatureExtractor* + feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( + "facebook/wav2vec2-base-960h" + ) # Download feature_extraction_config from huggingface.co and cache. + feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( + "./test/saved_model/" + ) # E.g. feature_extractor (or model) was saved using *save_pretrained('./test/saved_model/')* + feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./test/saved_model/preprocessor_config.json") + feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( + "facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False + ) + assert feature_extractor.return_attention_mask is False + feature_extractor, unused_kwargs = Wav2Vec2FeatureExtractor.from_pretrained( + "facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False, return_unused_kwargs=True + ) + assert feature_extractor.return_attention_mask is False + assert unused_kwargs == {"foo": False} + ```""" + kwargs["cache_dir"] = cache_dir + kwargs["force_download"] = force_download + kwargs["local_files_only"] = local_files_only + kwargs["revision"] = revision + + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + token = use_auth_token + + if token is not None: + kwargs["token"] = token + + feature_extractor_dict, kwargs = cls.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs) + + return cls.from_dict(feature_extractor_dict, **kwargs) + + def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): + """ + Save a feature_extractor object to the directory `save_directory`, so that it can be re-loaded using the + [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`] class method. + + Args: + save_directory (`str` or `os.PathLike`): + Directory where the feature extractor JSON file will be saved (will be created if it does not exist). + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face model hub after saving it. 
You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. + """ + use_auth_token = kwargs.pop("use_auth_token", None) + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + kwargs["token"] = use_auth_token + + if os.path.isfile(save_directory): + raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") + + os.makedirs(save_directory, exist_ok=True) + + if push_to_hub: + commit_message = kwargs.pop("commit_message", None) + repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) + repo_id = self._create_repo(repo_id, **kwargs) + files_timestamps = self._get_files_timestamps(save_directory) + + # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be + # loaded from the Hub. + if self._auto_class is not None: + custom_object_save(self, save_directory, config=self) + + # If we save using the predefined names, we can load using `from_pretrained` + output_feature_extractor_file = os.path.join(save_directory, FEATURE_EXTRACTOR_NAME) + + self.to_json_file(output_feature_extractor_file) + logger.info(f"Feature extractor saved in {output_feature_extractor_file}") + + if push_to_hub: + self._upload_modified_files( + save_directory, + repo_id, + files_timestamps, + commit_message=commit_message, + token=kwargs.get("token"), + ) + + return [output_feature_extractor_file] + + @classmethod + def get_feature_extractor_dict( + cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs + ) -> Tuple[Dict[str, Any], Dict[str, Any]]: + """ + From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a + feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] using `from_dict`. + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`): + The identifier of the pre-trained checkpoint from which we want the dictionary of parameters. + + Returns: + `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the feature extractor object. + """ + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + subfolder = kwargs.pop("subfolder", None) + token = kwargs.pop("token", None) + use_auth_token = kwargs.pop("use_auth_token", None) + local_files_only = kwargs.pop("local_files_only", False) + revision = kwargs.pop("revision", None) + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
+ ) + token = use_auth_token + + from_pipeline = kwargs.pop("_from_pipeline", None) + from_auto_class = kwargs.pop("_from_auto", False) + + user_agent = {"file_type": "feature extractor", "from_auto_class": from_auto_class} + if from_pipeline is not None: + user_agent["using_pipeline"] = from_pipeline + + if is_offline_mode() and not local_files_only: + logger.info("Offline mode: forcing local_files_only=True") + local_files_only = True + + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + is_local = os.path.isdir(pretrained_model_name_or_path) + if os.path.isdir(pretrained_model_name_or_path): + feature_extractor_file = os.path.join(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME) + if os.path.isfile(pretrained_model_name_or_path): + resolved_feature_extractor_file = pretrained_model_name_or_path + is_local = True + elif is_remote_url(pretrained_model_name_or_path): + feature_extractor_file = pretrained_model_name_or_path + resolved_feature_extractor_file = download_url(pretrained_model_name_or_path) + else: + feature_extractor_file = FEATURE_EXTRACTOR_NAME + try: + # Load from local folder or from cache or download from model Hub and cache + resolved_feature_extractor_file = cached_file( + pretrained_model_name_or_path, + feature_extractor_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + subfolder=subfolder, + token=token, + user_agent=user_agent, + revision=revision, + ) + except EnvironmentError: + # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to + # the original exception. + raise + except Exception: + # For any other exception, we throw a generic error. + raise EnvironmentError( + f"Can't load feature extractor for '{pretrained_model_name_or_path}'. If you were trying to load" + " it from 'https://huggingface.co/models', make sure you don't have a local directory with the" + f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" + f" directory containing a {FEATURE_EXTRACTOR_NAME} file" + ) + + try: + # Load feature_extractor dict + with open(resolved_feature_extractor_file, "r", encoding="utf-8") as reader: + text = reader.read() + feature_extractor_dict = json.loads(text) + + except json.JSONDecodeError: + raise EnvironmentError( + f"It looks like the config file at '{resolved_feature_extractor_file}' is not a valid JSON file." + ) + + if is_local: + logger.info(f"loading configuration file {resolved_feature_extractor_file}") + else: + logger.info( + f"loading configuration file {feature_extractor_file} from cache at {resolved_feature_extractor_file}" + ) + + if "auto_map" in feature_extractor_dict and not is_local: + feature_extractor_dict["auto_map"] = add_model_info_to_auto_map( + feature_extractor_dict["auto_map"], pretrained_model_name_or_path + ) + + return feature_extractor_dict, kwargs + + @classmethod + def from_dict(cls, feature_extractor_dict: Dict[str, Any], **kwargs) -> PreTrainedFeatureExtractor: + """ + Instantiates a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a Python dictionary of + parameters. + + Args: + feature_extractor_dict (`Dict[str, Any]`): + Dictionary that will be used to instantiate the feature extractor object. Such a dictionary can be + retrieved from a pretrained checkpoint by leveraging the + [`~feature_extraction_utils.FeatureExtractionMixin.to_dict`] method. 
+ kwargs (`Dict[str, Any]`): + Additional parameters from which to initialize the feature extractor object. + + Returns: + [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature extractor object instantiated from those + parameters. + """ + return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) + + feature_extractor = cls(**feature_extractor_dict) + + # Update feature_extractor with kwargs if needed + to_remove = [] + for key, value in kwargs.items(): + if hasattr(feature_extractor, key): + setattr(feature_extractor, key, value) + to_remove.append(key) + for key in to_remove: + kwargs.pop(key, None) + + logger.info(f"Feature extractor {feature_extractor}") + if return_unused_kwargs: + return feature_extractor, kwargs + else: + return feature_extractor + + def to_dict(self) -> Dict[str, Any]: + """ + Serializes this instance to a Python dictionary. Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. + """ + output = copy.deepcopy(self.__dict__) + output["feature_extractor_type"] = self.__class__.__name__ + if "mel_filters" in output: + del output["mel_filters"] + if "window" in output: + del output["window"] + return output + + @classmethod + def from_json_file(cls, json_file: Union[str, os.PathLike]) -> PreTrainedFeatureExtractor: + """ + Instantiates a feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] from the path to + a JSON file of parameters. + + Args: + json_file (`str` or `os.PathLike`): + Path to the JSON file containing the parameters. + + Returns: + A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature_extractor + object instantiated from that JSON file. + """ + with open(json_file, "r", encoding="utf-8") as reader: + text = reader.read() + feature_extractor_dict = json.loads(text) + return cls(**feature_extractor_dict) + + def to_json_string(self) -> str: + """ + Serializes this instance to a JSON string. + + Returns: + `str`: String containing all the attributes that make up this feature_extractor instance in JSON format. + """ + dictionary = self.to_dict() + + for key, value in dictionary.items(): + if isinstance(value, np.ndarray): + dictionary[key] = value.tolist() + + # make sure private name "_processor_class" is correctly + # saved as "processor_class" + _processor_class = dictionary.pop("_processor_class", None) + if _processor_class is not None: + dictionary["processor_class"] = _processor_class + + return json.dumps(dictionary, indent=2, sort_keys=True) + "\n" + + def to_json_file(self, json_file_path: Union[str, os.PathLike]): + """ + Save this instance to a JSON file. + + Args: + json_file_path (`str` or `os.PathLike`): + Path to the JSON file in which this feature_extractor instance's parameters will be saved. + """ + with open(json_file_path, "w", encoding="utf-8") as writer: + writer.write(self.to_json_string()) + + def __repr__(self): + return f"{self.__class__.__name__} {self.to_json_string()}" + + @classmethod + def register_for_auto_class(cls, auto_class="AutoFeatureExtractor"): + """ + Register this class with a given auto class. This should only be used for custom feature extractors as the ones + in the library are already mapped with `AutoFeatureExtractor`. + + + + This API is experimental and may have some slight breaking changes in the next releases. + + + + Args: + auto_class (`str` or `type`, *optional*, defaults to `"AutoFeatureExtractor"`): + The auto class to register this new feature extractor with. 
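+
+        A minimal sketch (the `CustomFeatureExtractor` subclass below is hypothetical and shown only to
+        illustrate the call):
+
+        ```python
+        from transformers import SequenceFeatureExtractor
+
+
+        class CustomFeatureExtractor(SequenceFeatureExtractor):
+            # hypothetical user-defined feature extractor
+            pass
+
+
+        CustomFeatureExtractor.register_for_auto_class("AutoFeatureExtractor")
+        ```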
+ """ + if not isinstance(auto_class, str): + auto_class = auto_class.__name__ + + import transformers.models.auto as auto_module + + if not hasattr(auto_module, auto_class): + raise ValueError(f"{auto_class} is not a valid auto class.") + + cls._auto_class = auto_class + + +FeatureExtractionMixin.push_to_hub = copy_func(FeatureExtractionMixin.push_to_hub) +if FeatureExtractionMixin.push_to_hub.__doc__ is not None: + FeatureExtractionMixin.push_to_hub.__doc__ = FeatureExtractionMixin.push_to_hub.__doc__.format( + object="feature extractor", object_class="AutoFeatureExtractor", object_files="feature extractor file" + ) diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation_tf_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/generation_tf_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cf7cb2e32047ac9251c7f003e9fb818a3f27dc26 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/generation_tf_utils.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings + +from .generation import TFGenerationMixin + + +class TFGenerationMixin(TFGenerationMixin): + # warning at import time + warnings.warn( + "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will " + "be removed in Transformers v4.40. Import as `from transformers import TFGenerationMixin` instead.", + FutureWarning, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/generation_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/generation_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cc269ee77b05388746d978ab55877a5e8a417267 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/generation_utils.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# Copyright 2020 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings + +from .generation import GenerationMixin + + +class GenerationMixin(GenerationMixin): + # warning at import time + warnings.warn( + "Importing `GenerationMixin` from `src/transformers/generation_utils.py` is deprecated and will " + "be removed in Transformers v4.40. 
Import as `from transformers import GenerationMixin` instead.", + FutureWarning, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/image_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/image_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a7e53b3fe7d4f2d898eccc35c39ff130c145bc9e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/image_utils.py @@ -0,0 +1,769 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import os +from io import BytesIO +from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union + +import numpy as np +import requests +from packaging import version + +from .utils import ( + ExplicitEnum, + is_jax_tensor, + is_tf_tensor, + is_torch_available, + is_torch_tensor, + is_vision_available, + logging, + requires_backends, + to_numpy, +) +from .utils.constants import ( # noqa: F401 + IMAGENET_DEFAULT_MEAN, + IMAGENET_DEFAULT_STD, + IMAGENET_STANDARD_MEAN, + IMAGENET_STANDARD_STD, + OPENAI_CLIP_MEAN, + OPENAI_CLIP_STD, +) + + +if is_vision_available(): + import PIL.Image + import PIL.ImageOps + + if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): + PILImageResampling = PIL.Image.Resampling + else: + PILImageResampling = PIL.Image + +if TYPE_CHECKING: + if is_torch_available(): + import torch + + +logger = logging.get_logger(__name__) + + +ImageInput = Union[ + "PIL.Image.Image", np.ndarray, "torch.Tensor", List["PIL.Image.Image"], List[np.ndarray], List["torch.Tensor"] +] # noqa + + +class ChannelDimension(ExplicitEnum): + FIRST = "channels_first" + LAST = "channels_last" + + +class AnnotationFormat(ExplicitEnum): + COCO_DETECTION = "coco_detection" + COCO_PANOPTIC = "coco_panoptic" + + +class AnnotionFormat(ExplicitEnum): + COCO_DETECTION = AnnotationFormat.COCO_DETECTION.value + COCO_PANOPTIC = AnnotationFormat.COCO_PANOPTIC.value + + +AnnotationType = Dict[str, Union[int, str, List[Dict]]] + + +def is_pil_image(img): + return is_vision_available() and isinstance(img, PIL.Image.Image) + + +def is_valid_image(img): + return ( + (is_vision_available() and isinstance(img, PIL.Image.Image)) + or isinstance(img, np.ndarray) + or is_torch_tensor(img) + or is_tf_tensor(img) + or is_jax_tensor(img) + ) + + +def valid_images(imgs): + # If we have an list of images, make sure every image is valid + if isinstance(imgs, (list, tuple)): + for img in imgs: + if not valid_images(img): + return False + # If not a list of tuple, we have been given a single image or batched tensor of images + elif not is_valid_image(imgs): + return False + return True + + +def is_batched(img): + if isinstance(img, (list, tuple)): + return is_valid_image(img[0]) + return False + + +def is_scaled_image(image: np.ndarray) -> bool: + """ + Checks to see whether the pixel values have already been rescaled to [0, 1]. 
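+
+    A short illustrative sketch (the arrays are arbitrary stand-ins):
+
+    ```python
+    import numpy as np
+
+    from transformers.image_utils import is_scaled_image
+
+    is_scaled_image(np.zeros((3, 4, 4), dtype=np.uint8))  # False: uint8 values are treated as unscaled
+    is_scaled_image(np.random.rand(3, 4, 4))  # True: float values already lie in [0, 1]
+    ```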
+ """ + if image.dtype == np.uint8: + return False + + # It's possible the image has pixel values in [0, 255] but is of floating type + return np.min(image) >= 0 and np.max(image) <= 1 + + +def make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]: + """ + Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1. + If the input is a batch of images, it is converted to a list of images. + + Args: + images (`ImageInput`): + Image of images to turn into a list of images. + expected_ndims (`int`, *optional*, defaults to 3): + Expected number of dimensions for a single input image. If the input image has a different number of + dimensions, an error is raised. + """ + if is_batched(images): + return images + + # Either the input is a single image, in which case we create a list of length 1 + if isinstance(images, PIL.Image.Image): + # PIL images are never batched + return [images] + + if is_valid_image(images): + if images.ndim == expected_ndims + 1: + # Batch of images + images = list(images) + elif images.ndim == expected_ndims: + # Single image + images = [images] + else: + raise ValueError( + f"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got" + f" {images.ndim} dimensions." + ) + return images + raise ValueError( + "Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or " + f"jax.ndarray, but got {type(images)}." + ) + + +def to_numpy_array(img) -> np.ndarray: + if not is_valid_image(img): + raise ValueError(f"Invalid image type: {type(img)}") + + if is_vision_available() and isinstance(img, PIL.Image.Image): + return np.array(img) + return to_numpy(img) + + +def infer_channel_dimension_format( + image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None +) -> ChannelDimension: + """ + Infers the channel dimension format of `image`. + + Args: + image (`np.ndarray`): + The image to infer the channel dimension of. + num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`): + The number of channels of the image. + + Returns: + The channel dimension of the image. + """ + num_channels = num_channels if num_channels is not None else (1, 3) + num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels + + if image.ndim == 3: + first_dim, last_dim = 0, 2 + elif image.ndim == 4: + first_dim, last_dim = 1, 3 + else: + raise ValueError(f"Unsupported number of image dimensions: {image.ndim}") + + if image.shape[first_dim] in num_channels: + return ChannelDimension.FIRST + elif image.shape[last_dim] in num_channels: + return ChannelDimension.LAST + raise ValueError("Unable to infer channel dimension format") + + +def get_channel_dimension_axis( + image: np.ndarray, input_data_format: Optional[Union[ChannelDimension, str]] = None +) -> int: + """ + Returns the channel dimension axis of the image. + + Args: + image (`np.ndarray`): + The image to get the channel dimension axis of. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the image. If `None`, will infer the channel dimension from the image. + + Returns: + The channel dimension axis of the image. 
+ """ + if input_data_format is None: + input_data_format = infer_channel_dimension_format(image) + if input_data_format == ChannelDimension.FIRST: + return image.ndim - 3 + elif input_data_format == ChannelDimension.LAST: + return image.ndim - 1 + raise ValueError(f"Unsupported data format: {input_data_format}") + + +def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]: + """ + Returns the (height, width) dimensions of the image. + + Args: + image (`np.ndarray`): + The image to get the dimensions of. + channel_dim (`ChannelDimension`, *optional*): + Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image. + + Returns: + A tuple of the image's height and width. + """ + if channel_dim is None: + channel_dim = infer_channel_dimension_format(image) + + if channel_dim == ChannelDimension.FIRST: + return image.shape[-2], image.shape[-1] + elif channel_dim == ChannelDimension.LAST: + return image.shape[-3], image.shape[-2] + else: + raise ValueError(f"Unsupported data format: {channel_dim}") + + +def is_valid_annotation_coco_detection(annotation: Dict[str, Union[List, Tuple]]) -> bool: + if ( + isinstance(annotation, dict) + and "image_id" in annotation + and "annotations" in annotation + and isinstance(annotation["annotations"], (list, tuple)) + and ( + # an image can have no annotations + len(annotation["annotations"]) == 0 or isinstance(annotation["annotations"][0], dict) + ) + ): + return True + return False + + +def is_valid_annotation_coco_panoptic(annotation: Dict[str, Union[List, Tuple]]) -> bool: + if ( + isinstance(annotation, dict) + and "image_id" in annotation + and "segments_info" in annotation + and "file_name" in annotation + and isinstance(annotation["segments_info"], (list, tuple)) + and ( + # an image can have no segments + len(annotation["segments_info"]) == 0 or isinstance(annotation["segments_info"][0], dict) + ) + ): + return True + return False + + +def valid_coco_detection_annotations(annotations: Iterable[Dict[str, Union[List, Tuple]]]) -> bool: + return all(is_valid_annotation_coco_detection(ann) for ann in annotations) + + +def valid_coco_panoptic_annotations(annotations: Iterable[Dict[str, Union[List, Tuple]]]) -> bool: + return all(is_valid_annotation_coco_panoptic(ann) for ann in annotations) + + +def load_image(image: Union[str, "PIL.Image.Image"], timeout: Optional[float] = None) -> "PIL.Image.Image": + """ + Loads `image` to a PIL Image. + + Args: + image (`str` or `PIL.Image.Image`): + The image to convert to the PIL Image format. + timeout (`float`, *optional*): + The timeout value in seconds for the URL request. + + Returns: + `PIL.Image.Image`: A PIL Image. + """ + requires_backends(load_image, ["vision"]) + if isinstance(image, str): + if image.startswith("http://") or image.startswith("https://"): + # We need to actually check for a real protocol, otherwise it's impossible to use a local file + # like http_huggingface_co.png + image = PIL.Image.open(requests.get(image, stream=True, timeout=timeout).raw) + elif os.path.isfile(image): + image = PIL.Image.open(image) + else: + if image.startswith("data:image/"): + image = image.split(",")[1] + + # Try to load as base64 + try: + b64 = base64.b64decode(image, validate=True) + image = PIL.Image.open(BytesIO(b64)) + except Exception as e: + raise ValueError( + f"Incorrect image source. Must be a valid URL starting with `http://` or `https://`, a valid path to an image file, or a base64 encoded string. Got {image}. 
Failed with {e}" + ) + elif isinstance(image, PIL.Image.Image): + image = image + else: + raise ValueError( + "Incorrect format used for image. Should be an url linking to an image, a base64 string, a local path, or a PIL image." + ) + image = PIL.ImageOps.exif_transpose(image) + image = image.convert("RGB") + return image + + +def validate_preprocess_arguments( + do_rescale: Optional[bool] = None, + rescale_factor: Optional[float] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_pad: Optional[bool] = None, + size_divisibility: Optional[int] = None, + do_center_crop: Optional[bool] = None, + crop_size: Optional[Dict[str, int]] = None, + do_resize: Optional[bool] = None, + size: Optional[Dict[str, int]] = None, + resample: Optional["PILImageResampling"] = None, +): + """ + Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method. + Raises `ValueError` if arguments incompatibility is caught. + Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`, + sometimes `size_divisibility`, and sometimes `size`. New models and processors added should follow + existing arguments when possible. + + """ + if do_rescale and rescale_factor is None: + raise ValueError("rescale_factor must be specified if do_rescale is True.") + + if do_pad and size_divisibility is None: + # Here, size_divisor might be passed as the value of size + raise ValueError( + "Depending on moel, size_divisibility, size_divisor, pad_size or size must be specified if do_pad is True." + ) + + if do_normalize and (image_mean is None or image_std is None): + raise ValueError("image_mean and image_std must both be specified if do_normalize is True.") + + if do_center_crop and crop_size is None: + raise ValueError("crop_size must be specified if do_center_crop is True.") + + if do_resize and (size is None or resample is None): + raise ValueError("size and resample must be specified if do_resize is True.") + + +# In the future we can add a TF implementation here when we have TF models. +class ImageFeatureExtractionMixin: + """ + Mixin that contain utilities for preparing image features. + """ + + def _ensure_format_supported(self, image): + if not isinstance(image, (PIL.Image.Image, np.ndarray)) and not is_torch_tensor(image): + raise ValueError( + f"Got type {type(image)} which is not supported, only `PIL.Image.Image`, `np.array` and " + "`torch.Tensor` are." + ) + + def to_pil_image(self, image, rescale=None): + """ + Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if + needed. + + Args: + image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor`): + The image to convert to the PIL Image format. + rescale (`bool`, *optional*): + Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will + default to `True` if the image type is a floating type, `False` otherwise. + """ + self._ensure_format_supported(image) + + if is_torch_tensor(image): + image = image.numpy() + + if isinstance(image, np.ndarray): + if rescale is None: + # rescale default to the array being of floating type. + rescale = isinstance(image.flat[0], np.floating) + # If the channel as been moved to first dim, we put it back at the end. 
+ if image.ndim == 3 and image.shape[0] in [1, 3]: + image = image.transpose(1, 2, 0) + if rescale: + image = image * 255 + image = image.astype(np.uint8) + return PIL.Image.fromarray(image) + return image + + def convert_rgb(self, image): + """ + Converts `PIL.Image.Image` to RGB format. + + Args: + image (`PIL.Image.Image`): + The image to convert. + """ + self._ensure_format_supported(image) + if not isinstance(image, PIL.Image.Image): + return image + + return image.convert("RGB") + + def rescale(self, image: np.ndarray, scale: Union[float, int]) -> np.ndarray: + """ + Rescale a numpy image by scale amount + """ + self._ensure_format_supported(image) + return image * scale + + def to_numpy_array(self, image, rescale=None, channel_first=True): + """ + Converts `image` to a numpy array. Optionally rescales it and puts the channel dimension as the first + dimension. + + Args: + image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): + The image to convert to a NumPy array. + rescale (`bool`, *optional*): + Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Will + default to `True` if the image is a PIL Image or an array/tensor of integers, `False` otherwise. + channel_first (`bool`, *optional*, defaults to `True`): + Whether or not to permute the dimensions of the image to put the channel dimension first. + """ + self._ensure_format_supported(image) + + if isinstance(image, PIL.Image.Image): + image = np.array(image) + + if is_torch_tensor(image): + image = image.numpy() + + rescale = isinstance(image.flat[0], np.integer) if rescale is None else rescale + + if rescale: + image = self.rescale(image.astype(np.float32), 1 / 255.0) + + if channel_first and image.ndim == 3: + image = image.transpose(2, 0, 1) + + return image + + def expand_dims(self, image): + """ + Expands 2-dimensional `image` to 3 dimensions. + + Args: + image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): + The image to expand. + """ + self._ensure_format_supported(image) + + # Do nothing if PIL image + if isinstance(image, PIL.Image.Image): + return image + + if is_torch_tensor(image): + image = image.unsqueeze(0) + else: + image = np.expand_dims(image, axis=0) + return image + + def normalize(self, image, mean, std, rescale=False): + """ + Normalizes `image` with `mean` and `std`. Note that this will trigger a conversion of `image` to a NumPy array + if it's a PIL Image. + + Args: + image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): + The image to normalize. + mean (`List[float]` or `np.ndarray` or `torch.Tensor`): + The mean (per channel) to use for normalization. + std (`List[float]` or `np.ndarray` or `torch.Tensor`): + The standard deviation (per channel) to use for normalization. + rescale (`bool`, *optional*, defaults to `False`): + Whether or not to rescale the image to be between 0 and 1. If a PIL image is provided, scaling will + happen automatically. + """ + self._ensure_format_supported(image) + + if isinstance(image, PIL.Image.Image): + image = self.to_numpy_array(image, rescale=True) + # If the input image is a PIL image, it automatically gets rescaled. If it's another + # type it may need rescaling. 
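+        # For array/tensor inputs, an explicit `rescale=True` divides the pixel values by 255 so
+        # they end up in the [0, 1] range before `mean` and `std` are applied.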
+ elif rescale: + if isinstance(image, np.ndarray): + image = self.rescale(image.astype(np.float32), 1 / 255.0) + elif is_torch_tensor(image): + image = self.rescale(image.float(), 1 / 255.0) + + if isinstance(image, np.ndarray): + if not isinstance(mean, np.ndarray): + mean = np.array(mean).astype(image.dtype) + if not isinstance(std, np.ndarray): + std = np.array(std).astype(image.dtype) + elif is_torch_tensor(image): + import torch + + if not isinstance(mean, torch.Tensor): + mean = torch.tensor(mean) + if not isinstance(std, torch.Tensor): + std = torch.tensor(std) + + if image.ndim == 3 and image.shape[0] in [1, 3]: + return (image - mean[:, None, None]) / std[:, None, None] + else: + return (image - mean) / std + + def resize(self, image, size, resample=None, default_to_square=True, max_size=None): + """ + Resizes `image`. Enforces conversion of input to PIL.Image. + + Args: + image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): + The image to resize. + size (`int` or `Tuple[int, int]`): + The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be + matched to this. + + If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If + `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to + this number. i.e, if height > width, then image will be rescaled to (size * height / width, size). + resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`): + The filter to user for resampling. + default_to_square (`bool`, *optional*, defaults to `True`): + How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a + square (`size`,`size`). If set to `False`, will replicate + [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize) + with support for resizing only the smallest edge and providing an optional `max_size`. + max_size (`int`, *optional*, defaults to `None`): + The maximum allowed for the longer edge of the resized image: if the longer edge of the image is + greater than `max_size` after being resized according to `size`, then the image is resized again so + that the longer edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller + edge may be shorter than `size`. Only used if `default_to_square` is `False`. + + Returns: + image: A resized `PIL.Image.Image`. 
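+
+        A minimal usage sketch (assumes Pillow is installed; the zero-filled array is a stand-in for a
+        real image):
+
+        ```python
+        import numpy as np
+
+        from transformers.image_utils import ImageFeatureExtractionMixin
+
+        mixin = ImageFeatureExtractionMixin()
+        image = np.zeros((3, 480, 640), dtype=np.uint8)  # channels-first dummy image
+        resized = mixin.resize(image, size=224, default_to_square=False)
+        resized.size  # (298, 224): the smaller edge is matched to 224; PIL reports (width, height)
+        ```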
+ """ + resample = resample if resample is not None else PILImageResampling.BILINEAR + + self._ensure_format_supported(image) + + if not isinstance(image, PIL.Image.Image): + image = self.to_pil_image(image) + + if isinstance(size, list): + size = tuple(size) + + if isinstance(size, int) or len(size) == 1: + if default_to_square: + size = (size, size) if isinstance(size, int) else (size[0], size[0]) + else: + width, height = image.size + # specified size only for the smallest edge + short, long = (width, height) if width <= height else (height, width) + requested_new_short = size if isinstance(size, int) else size[0] + + if short == requested_new_short: + return image + + new_short, new_long = requested_new_short, int(requested_new_short * long / short) + + if max_size is not None: + if max_size <= requested_new_short: + raise ValueError( + f"max_size = {max_size} must be strictly greater than the requested " + f"size for the smaller edge size = {size}" + ) + if new_long > max_size: + new_short, new_long = int(max_size * new_short / new_long), max_size + + size = (new_short, new_long) if width <= height else (new_long, new_short) + + return image.resize(size, resample=resample) + + def center_crop(self, image, size): + """ + Crops `image` to the given size using a center crop. Note that if the image is too small to be cropped to the + size given, it will be padded (so the returned result has the size asked). + + Args: + image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape (n_channels, height, width) or (height, width, n_channels)): + The image to resize. + size (`int` or `Tuple[int, int]`): + The size to which crop the image. + + Returns: + new_image: A center cropped `PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape: (n_channels, + height, width). + """ + self._ensure_format_supported(image) + + if not isinstance(size, tuple): + size = (size, size) + + # PIL Image.size is (width, height) but NumPy array and torch Tensors have (height, width) + if is_torch_tensor(image) or isinstance(image, np.ndarray): + if image.ndim == 2: + image = self.expand_dims(image) + image_shape = image.shape[1:] if image.shape[0] in [1, 3] else image.shape[:2] + else: + image_shape = (image.size[1], image.size[0]) + + top = (image_shape[0] - size[0]) // 2 + bottom = top + size[0] # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result. + left = (image_shape[1] - size[1]) // 2 + right = left + size[1] # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result. + + # For PIL Images we have a method to crop directly. + if isinstance(image, PIL.Image.Image): + return image.crop((left, top, right, bottom)) + + # Check if image is in (n_channels, height, width) or (height, width, n_channels) format + channel_first = True if image.shape[0] in [1, 3] else False + + # Transpose (height, width, n_channels) format images + if not channel_first: + if isinstance(image, np.ndarray): + image = image.transpose(2, 0, 1) + if is_torch_tensor(image): + image = image.permute(2, 0, 1) + + # Check if cropped area is within image boundaries + if top >= 0 and bottom <= image_shape[0] and left >= 0 and right <= image_shape[1]: + return image[..., top:bottom, left:right] + + # Otherwise, we may need to pad if the image is too small. Oh joy... 
+ new_shape = image.shape[:-2] + (max(size[0], image_shape[0]), max(size[1], image_shape[1])) + if isinstance(image, np.ndarray): + new_image = np.zeros_like(image, shape=new_shape) + elif is_torch_tensor(image): + new_image = image.new_zeros(new_shape) + + top_pad = (new_shape[-2] - image_shape[0]) // 2 + bottom_pad = top_pad + image_shape[0] + left_pad = (new_shape[-1] - image_shape[1]) // 2 + right_pad = left_pad + image_shape[1] + new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image + + top += top_pad + bottom += top_pad + left += left_pad + right += left_pad + + new_image = new_image[ + ..., max(0, top) : min(new_image.shape[-2], bottom), max(0, left) : min(new_image.shape[-1], right) + ] + + return new_image + + def flip_channel_order(self, image): + """ + Flips the channel order of `image` from RGB to BGR, or vice versa. Note that this will trigger a conversion of + `image` to a NumPy array if it's a PIL Image. + + Args: + image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): + The image whose color channels to flip. If `np.ndarray` or `torch.Tensor`, the channel dimension should + be first. + """ + self._ensure_format_supported(image) + + if isinstance(image, PIL.Image.Image): + image = self.to_numpy_array(image) + + return image[::-1, :, :] + + def rotate(self, image, angle, resample=None, expand=0, center=None, translate=None, fillcolor=None): + """ + Returns a rotated copy of `image`. This method returns a copy of `image`, rotated the given number of degrees + counter clockwise around its centre. + + Args: + image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): + The image to rotate. If `np.ndarray` or `torch.Tensor`, will be converted to `PIL.Image.Image` before + rotating. + + Returns: + image: A rotated `PIL.Image.Image`. + """ + resample = resample if resample is not None else PIL.Image.NEAREST + + self._ensure_format_supported(image) + + if not isinstance(image, PIL.Image.Image): + image = self.to_pil_image(image) + + return image.rotate( + angle, resample=resample, expand=expand, center=center, translate=translate, fillcolor=fillcolor + ) + + +def promote_annotation_format(annotation_format: Union[AnnotionFormat, AnnotationFormat]) -> AnnotationFormat: + # can be removed when `AnnotionFormat` is fully deprecated + return AnnotationFormat(annotation_format.value) + + +def validate_annotations( + annotation_format: AnnotationFormat, + supported_annotation_formats: Tuple[AnnotationFormat, ...], + annotations: List[Dict], +) -> None: + if isinstance(annotation_format, AnnotionFormat): + logger.warning_once( + f"`{annotation_format.__class__.__name__}` is deprecated and will be removed in v4.38. " + f"Please use `{AnnotationFormat.__name__}` instead." + ) + annotation_format = promote_annotation_format(annotation_format) + + if annotation_format not in supported_annotation_formats: + raise ValueError(f"Unsupported annotation format: {format} must be one of {supported_annotation_formats}") + + if annotation_format is AnnotationFormat.COCO_DETECTION: + if not valid_coco_detection_annotations(annotations): + raise ValueError( + "Invalid COCO detection annotations. Annotations must a dict (single image) or list of dicts " + "(batch of images) with the following keys: `image_id` and `annotations`, with the latter " + "being a list of annotations in the COCO format." + ) + + if annotation_format is AnnotationFormat.COCO_PANOPTIC: + if not valid_coco_panoptic_annotations(annotations): + raise ValueError( + "Invalid COCO panoptic annotations. 
Annotations must a dict (single image) or list of dicts " + "(batch of images) with the following keys: `image_id`, `file_name` and `segments_info`, with " + "the latter being a list of annotations in the COCO format." + ) + + +def validate_kwargs(valid_processor_keys: List[str], captured_kwargs: List[str]): + unused_keys = set(captured_kwargs).difference(set(valid_processor_keys)) + if unused_keys: + unused_key_str = ", ".join(unused_keys) + # TODO raise a warning here instead of simply logging? + logger.warning(f"Unused or unrecognized kwargs: {unused_key_str}.") diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/keras_callbacks.py b/env-llmeval/lib/python3.10/site-packages/transformers/keras_callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..b6e832729a1eeb482d1193753cc2c07ad1f16c2e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/keras_callbacks.py @@ -0,0 +1,413 @@ +import logging +import os +from pathlib import Path +from time import sleep +from typing import Callable, List, Optional, Union + +import numpy as np +import tensorflow as tf +from huggingface_hub import Repository, create_repo +from packaging.version import parse + +from . import IntervalStrategy, PreTrainedTokenizerBase +from .modelcard import TrainingSummary +from .modeling_tf_utils import keras + + +logger = logging.getLogger(__name__) + + +class KerasMetricCallback(keras.callbacks.Callback): + """ + Callback to compute metrics at the end of every epoch. Unlike normal Keras metrics, these do not need to be + compilable by TF. It is particularly useful for common NLP metrics like BLEU and ROUGE that require string + operations or generation loops that cannot be compiled. Predictions (or generations) will be computed on the + `eval_dataset` before being passed to the `metric_fn` in `np.ndarray` format. The `metric_fn` should compute + metrics and return a dict mapping metric names to metric values. + + We provide an example of a suitable metric_fn that computes ROUGE scores for a summarization model below. Note that + this example skips some post-processing for readability and simplicity, and should probably not be used as-is! + + ```py + from datasets import load_metric + + rouge_metric = load_metric("rouge") + + + def rouge_fn(predictions, labels): + decoded_predictions = tokenizer.batch_decode(predictions, skip_special_tokens=True) + decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) + result = rouge_metric.compute(predictions=decoded_predictions, references=decoded_labels) + return {key: value.mid.fmeasure * 100 for key, value in result.items()} + ``` + + The above function will return a dict containing values which will be logged like any other Keras metric: + + ``` + {'rouge1': 37.4199, 'rouge2': 13.9768, 'rougeL': 34.361, 'rougeLsum': 35.0781 + ``` + + Args: + metric_fn (`Callable`): + Metric function provided by the user. It will be called with two arguments - `predictions` and `labels`. + These contain the model's outputs and matching labels from the dataset. It should return a dict mapping + metric names to numerical values. + eval_dataset (`tf.data.Dataset` or `dict` or `tuple` or `np.ndarray` or `tf.Tensor`): + Validation data to be used to generate predictions for the `metric_fn`. + output_cols (`List[str], *optional*): + A list of columns to be retained from the model output as the predictions. Defaults to all. 
+ label_cols ('`List[str]`, *optional*'): + A list of columns to be retained from the input dataset as the labels. Will be autodetected if this is not + supplied. + batch_size (`int`, *optional*): + Batch size. Only used when the data is not a pre-batched `tf.data.Dataset`. + predict_with_generate (`bool`, *optional*, defaults to `False`): + Whether we should use `model.generate()` to get outputs for the model. + use_xla_generation (`bool`, *optional*, defaults to `False`): + If we're generating, whether to compile model generation with XLA. This can massively increase the speed of + generation (up to 100X speedup) but will require a new XLA compilation for each input shape. When using XLA + generation, it's a good idea to pad your inputs to the same size, or to use the `pad_to_multiple_of` + argument in your `tokenizer` or `DataCollator`, which will reduce the number of unique input shapes and + save a lot of compilation time. This option has no effect is `predict_with_generate` is `False`. + generate_kwargs (`dict`, *optional*): + Keyword arguments to pass to `model.generate()` when generating. Has no effect if `predict_with_generate` + is `False`. + + """ + + def __init__( + self, + metric_fn: Callable, + eval_dataset: Union[tf.data.Dataset, np.ndarray, tf.Tensor, tuple, dict], + output_cols: Optional[List[str]] = None, + label_cols: Optional[List[str]] = None, + batch_size: Optional[int] = None, + predict_with_generate: bool = False, + use_xla_generation: bool = False, + generate_kwargs: Optional[dict] = None, + ): + super().__init__() + self.metric_fn = metric_fn + self.batch_size = batch_size + if not isinstance(eval_dataset, tf.data.Dataset): + if batch_size is None: + raise ValueError( + "When passing data to KerasMetricCallback that is not a pre-batched tf.data.Dataset " + "the batch_size argument must be set." + ) + # Wrap a tf.data.Dataset around it + eval_dataset = tf.data.Dataset.from_tensor_slices(eval_dataset).batch(batch_size, drop_remainder=False) + self.eval_dataset = eval_dataset + self.predict_with_generate = predict_with_generate + self.output_cols = output_cols + + # This next block attempts to parse out which elements of the dataset should be appended to the labels list + # that is passed to the metric_fn + if isinstance(eval_dataset.element_spec, tuple) and len(eval_dataset.element_spec) == 2: + input_spec, label_spec = eval_dataset.element_spec + else: + input_spec = eval_dataset.element_spec + label_spec = None + if label_cols is not None: + for label in label_cols: + if label not in input_spec: + raise ValueError(f"Label {label} is in label_cols but could not be found in the dataset inputs!") + self.label_cols = label_cols + self.use_keras_label = False + elif label_spec is not None: + # If the dataset inputs are split into a 2-tuple of inputs and labels, + # assume the second element is the labels + self.label_cols = None + self.use_keras_label = True + elif "labels" in input_spec: + self.label_cols = ["labels"] + self.use_keras_label = False + logging.warning("No label_cols specified for KerasMetricCallback, assuming you want the 'labels' key.") + elif "start_positions" in input_spec and "end_positions" in input_spec: + self.label_cols = ["start_positions", "end_positions"] + self.use_keras_label = False + logging.warning( + "No label_cols specified for KerasMetricCallback, assuming you want the " + "start_positions and end_positions keys." 
+ ) + else: + raise ValueError("Could not autodetect label_cols for KerasMetricCallback, please specify them!") + if parse(tf.__version__) < parse("2.7"): + logging.warning("TF versions less than 2.7 may encounter issues with KerasMetricCallback!") + + self.use_xla_generation = use_xla_generation + self.generate_kwargs = {} if generate_kwargs is None else generate_kwargs + + self.generation_function = None + + @staticmethod + def _concatenate_batches(batches, padding_index=-100): + # If all batches are unidimensional or same length, do a simple concatenation + if batches[0].ndim == 1 or all(batch.shape[1] == batches[0].shape[1] for batch in batches): + return np.concatenate(batches, axis=0) + + # Welp, they're not the same length. Let's do some padding + max_len = max([batch.shape[1] for batch in batches]) + num_samples = sum([batch.shape[0] for batch in batches]) + output = np.full_like( + batches[0], fill_value=padding_index, shape=[num_samples, max_len] + list(batches[0].shape[2:]) + ) + # i keeps track of which part of the concatenated array we're writing the next batch to + i = 0 + for batch in batches: + output[i : i + len(batch), : batch.shape[1]] = batch + i += len(batch) + return output + + def _postprocess_predictions_or_labels(self, inputs): + if isinstance(inputs[0], dict): + outputs = {} + for key in inputs[0].keys(): + outputs[key] = self._concatenate_batches([batch[key] for batch in inputs]) + # If it's a dict with only one key, just return the array + if len(outputs) == 1: + outputs = list(outputs.values())[0] + elif isinstance(inputs[0], list) or isinstance(inputs[0], tuple): + outputs = [] + for input_list in zip(*inputs): + outputs.append(self._concatenate_batches(input_list)) + if len(outputs) == 1: + outputs = outputs[0] # If it's a list with only one element, just return the array + elif isinstance(inputs[0], np.ndarray): + outputs = self._concatenate_batches(inputs) + elif isinstance(inputs[0], tf.Tensor): + outputs = self._concatenate_batches([tensor.numpy() for tensor in inputs]) + else: + raise TypeError(f"Couldn't handle batch of type {type(inputs[0])}!") + return outputs + + def on_epoch_end(self, epoch, logs=None): + if hasattr(self.model, "config"): + ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", []) + else: + ignore_keys = [] + + main_input_name = None + if self.predict_with_generate: + # This dense conditional recognizes the case where we have an encoder-decoder model, but + # avoids getting tangled up when we just have a model with a layer called 'encoder' + if hasattr(self.model, "encoder") and hasattr(self.model.encoder, "main_input_name"): + main_input_name = self.model.encoder.main_input_name + else: + main_input_name = getattr(self.model, "main_input_name", "input_ids") + + if self.use_xla_generation and self.generation_function is None: + + def generation_function(inputs, attention_mask): + return self.model.generate(inputs, attention_mask=attention_mask, **self.generate_kwargs) + + self.generation_function = tf.function(generation_function, jit_compile=True) + + prediction_list = [] + label_list = [] + + # The whole predict/generate loop is handled inside this method + for batch in self.eval_dataset: + if isinstance(batch, tuple): + batch, labels = batch + else: + labels = None + if self.predict_with_generate: + if isinstance(batch, dict): + generation_inputs = batch[main_input_name] + attention_mask = batch.get("attention_mask", None) + else: + generation_inputs = batch + attention_mask = None + if self.use_xla_generation: 
+ predictions = self.generation_function(generation_inputs, attention_mask=attention_mask) + else: + predictions = self.model.generate( + generation_inputs, attention_mask=attention_mask, **self.generate_kwargs + ) + else: + predictions = self.model.predict_on_batch(batch) + if isinstance(predictions, dict): + # This converts any dict-subclass to a regular dict + # Keras REALLY doesn't like it when we pass around a BatchEncoding or other derived class + predictions = dict(predictions) + if self.output_cols is not None: + predictions = {key: predictions[key] for key in self.output_cols} + else: + predictions = { + key: val for key, val in predictions.items() if key not in ignore_keys + ["loss"] + } + prediction_list.append(predictions) + if not self.use_keras_label: + labels = {key: batch[key].numpy() for key in self.label_cols} + elif isinstance(labels, dict): + labels = {key: array.numpy() for key, array in labels.items()} + elif isinstance(labels, list) or isinstance(labels, tuple): + labels = [array.numpy() for array in labels] + elif isinstance(labels, tf.Tensor): + labels = labels.numpy() + else: + raise TypeError(f"Confused by labels of type {type(labels)}") + label_list.append(labels) + + all_preds = self._postprocess_predictions_or_labels(prediction_list) + all_labels = self._postprocess_predictions_or_labels(label_list) + + metric_output = self.metric_fn((all_preds, all_labels)) + if not isinstance(metric_output, dict): + raise TypeError( + f"metric_fn should return a dict mapping metric names to values but instead returned {metric_output}" + ) + # This is the critical bit - Keras passes a dict containing the loss and standard metric values for this epoch + # in the logs argument. Ordinarily, this is so the callback can read them, but in this case we write a bunch of + # new keys in there, which will then get read by the History callback and treated like any other metric value. + # I promise that I have it in writing from Chollet that this is okay. + logs.update(metric_output) + + +class PushToHubCallback(keras.callbacks.Callback): + """ + Callback that will save and push the model to the Hub regularly. By default, it pushes once per epoch, but this can + be changed with the `save_strategy` argument. Pushed models can be accessed like any other model on the hub, such + as with the `from_pretrained` method. + + ```py + from transformers.keras_callbacks import PushToHubCallback + + push_to_hub_callback = PushToHubCallback( + output_dir="./model_save", + tokenizer=tokenizer, + hub_model_id="gpt5-7xlarge", + ) + + model.fit(train_dataset, callbacks=[push_to_hub_callback]) + ``` + + Args: + output_dir (`str`): + The output directory where the model predictions and checkpoints will be written and synced with the + repository on the Hub. + save_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"epoch"`): + The checkpoint save strategy to adopt during training. Possible values are: + + - `"no"`: Save is done at the end of training. + - `"epoch"`: Save is done at the end of each epoch. + - `"steps"`: Save is done every `save_steps` + save_steps (`int`, *optional*): + The number of steps between saves when using the "steps" `save_strategy`. + tokenizer (`PreTrainedTokenizerBase`, *optional*): + The tokenizer used by the model. If supplied, will be uploaded to the repo alongside the weights. + hub_model_id (`str`, *optional*): + The name of the repository to keep in sync with the local `output_dir`. 
It can be a simple model ID in + which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, + for instance `"user_name/model"`, which allows you to push to an organization you are a member of with + `"organization_name/model"`. + + Will default to the name of `output_dir`. + hub_token (`str`, *optional*): + The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with + `huggingface-cli login`. + checkpoint (`bool`, *optional*, defaults to `False`): + Whether to save full training checkpoints (including epoch and optimizer state) to allow training to be + resumed. Only usable when `save_strategy` is `"epoch"`. + """ + + def __init__( + self, + output_dir: Union[str, Path], + save_strategy: Union[str, IntervalStrategy] = "epoch", + save_steps: Optional[int] = None, + tokenizer: Optional[PreTrainedTokenizerBase] = None, + hub_model_id: Optional[str] = None, + hub_token: Optional[str] = None, + checkpoint: bool = False, + **model_card_args, + ): + super().__init__() + if checkpoint and save_strategy != "epoch": + raise ValueError("Cannot save checkpoints when save_strategy is not 'epoch'!") + if isinstance(save_strategy, str): + save_strategy = IntervalStrategy(save_strategy.lower()) + self.save_strategy = save_strategy + if self.save_strategy == IntervalStrategy.STEPS and (not isinstance(save_steps, int) or save_steps <= 0): + raise ValueError("Please supply a positive integer argument for save_steps when save_strategy == 'steps'!") + self.save_steps = save_steps + output_dir = Path(output_dir) + + # Create repo and retrieve repo_id + if hub_model_id is None: + hub_model_id = output_dir.absolute().name + self.hub_model_id = create_repo(repo_id=hub_model_id, exist_ok=True, token=hub_token).repo_id + + self.output_dir = output_dir + self.repo = Repository(str(self.output_dir), clone_from=self.hub_model_id, token=hub_token) + + self.tokenizer = tokenizer + self.last_job = None + self.checkpoint = checkpoint + self.training_history = None + self.model_card_args = model_card_args + + def on_train_begin(self, logs=None): + # Although we can access model.history, we have no guarantees that the History callback will fire before this + # one, so we keep track of it here too + self.training_history = [] + + def on_train_batch_end(self, batch, logs=None): + if self.save_strategy == IntervalStrategy.STEPS and (batch + 1) % self.save_steps == 0: + if self.last_job is not None and not self.last_job.is_done: + return # The last upload is still running, don't start another + self.model.save_pretrained(self.output_dir) + if self.tokenizer is not None: + self.tokenizer.save_pretrained(self.output_dir) + _, self.last_job = self.repo.push_to_hub( + commit_message=f"Training in progress steps {batch}", blocking=False + ) + + def on_epoch_end(self, epoch, logs=None): + logs = logs.copy() # Don't accidentally write things that Keras will read later + if "epoch" not in logs: + logs["epoch"] = epoch + self.training_history.append(logs) + if self.save_strategy == IntervalStrategy.EPOCH: + if self.last_job is not None and not self.last_job.is_done: + return # The last upload is still running, don't start another + self.model.save_pretrained(self.output_dir) + if self.tokenizer is not None: + self.tokenizer.save_pretrained(self.output_dir) + if self.checkpoint: + checkpoint_dir = os.path.join(self.output_dir, "checkpoint") + self.model._save_checkpoint(checkpoint_dir, epoch) + train_summary = TrainingSummary.from_keras( + 
model=self.model, + model_name=self.hub_model_id, + keras_history=self.training_history, + **self.model_card_args, + ) + model_card = train_summary.to_model_card() + with (self.output_dir / "README.md").open("w") as f: + f.write(model_card) + _, self.last_job = self.repo.push_to_hub( + commit_message=f"Training in progress epoch {epoch}", blocking=False + ) + + def on_train_end(self, logs=None): + # Makes sure the latest version of the model is uploaded + if self.last_job is not None and not self.last_job.is_done: + logging.info("Pushing the last epoch to the Hub, this may take a while...") + while not self.last_job.is_done: + sleep(1) + else: + self.model.save_pretrained(self.output_dir) + if self.tokenizer is not None: + self.tokenizer.save_pretrained(self.output_dir) + train_summary = TrainingSummary.from_keras( + model=self.model, + model_name=self.hub_model_id, + keras_history=self.training_history, + **self.model_card_args, + ) + model_card = train_summary.to_model_card() + with (self.output_dir / "README.md").open("w") as f: + f.write(model_card) + self.repo.push_to_hub(commit_message="End of training", blocking=True) diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/modelcard.py b/env-llmeval/lib/python3.10/site-packages/transformers/modelcard.py new file mode 100644 index 0000000000000000000000000000000000000000..4776737a3746e368901a83f53caf7889a95b0a3a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/modelcard.py @@ -0,0 +1,904 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Configuration base class and utilities.""" + + +import copy +import json +import os +import warnings +from dataclasses import dataclass +from pathlib import Path +from typing import Any, Dict, List, Optional, Union + +import requests +import yaml +from huggingface_hub import model_info +from huggingface_hub.utils import HFValidationError + +from . 
import __version__ +from .models.auto.modeling_auto import ( + MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, + MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, + MODEL_FOR_CTC_MAPPING_NAMES, + MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, + MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES, + MODEL_FOR_MASKED_LM_MAPPING_NAMES, + MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, + MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, + MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, + MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, + MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, + MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES, +) +from .training_args import ParallelMode +from .utils import ( + MODEL_CARD_NAME, + cached_file, + is_datasets_available, + is_offline_mode, + is_tf_available, + is_tokenizers_available, + is_torch_available, + logging, +) + + +TASK_MAPPING = { + "text-generation": MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, + "image-classification": MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, + "image-segmentation": MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES, + "fill-mask": MODEL_FOR_MASKED_LM_MAPPING_NAMES, + "object-detection": MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, + "question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, + "text2text-generation": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, + "text-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, + "table-question-answering": MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, + "token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, + "audio-classification": MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, + "automatic-speech-recognition": {**MODEL_FOR_CTC_MAPPING_NAMES, **MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES}, + "zero-shot-image-classification": MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES, +} + +logger = logging.get_logger(__name__) + + +class ModelCard: + r""" + Structured Model Card class. Store model card as well as methods for loading/downloading/saving model cards. + + Please read the following paper for details and explanation on the sections: "Model Cards for Model Reporting" by + Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, + Inioluwa Deborah Raji and Timnit Gebru for the proposal behind model cards. Link: https://arxiv.org/abs/1810.03993 + + Note: A model card can be loaded and saved to disk. 
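+
+    Note: this class is deprecated and will be removed in version 5 of Transformers; instantiating it emits a
+    `FutureWarning` (see `__init__` below).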
+ """ + + def __init__(self, **kwargs): + warnings.warn( + "The class `ModelCard` is deprecated and will be removed in version 5 of Transformers", FutureWarning + ) + # Recommended attributes from https://arxiv.org/abs/1810.03993 (see papers) + self.model_details = kwargs.pop("model_details", {}) + self.intended_use = kwargs.pop("intended_use", {}) + self.factors = kwargs.pop("factors", {}) + self.metrics = kwargs.pop("metrics", {}) + self.evaluation_data = kwargs.pop("evaluation_data", {}) + self.training_data = kwargs.pop("training_data", {}) + self.quantitative_analyses = kwargs.pop("quantitative_analyses", {}) + self.ethical_considerations = kwargs.pop("ethical_considerations", {}) + self.caveats_and_recommendations = kwargs.pop("caveats_and_recommendations", {}) + + # Open additional attributes + for key, value in kwargs.items(): + try: + setattr(self, key, value) + except AttributeError as err: + logger.error(f"Can't set {key} with value {value} for {self}") + raise err + + def save_pretrained(self, save_directory_or_file): + """Save a model card object to the directory or file `save_directory_or_file`.""" + if os.path.isdir(save_directory_or_file): + # If we save using the predefined names, we can load using `from_pretrained` + output_model_card_file = os.path.join(save_directory_or_file, MODEL_CARD_NAME) + else: + output_model_card_file = save_directory_or_file + + self.to_json_file(output_model_card_file) + logger.info(f"Model card saved in {output_model_card_file}") + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): + r""" + Instantiate a [`ModelCard`] from a pre-trained model model card. + + Parameters: + pretrained_model_name_or_path: either: + + - a string, the *model id* of a pretrained model card hosted inside a model repo on huggingface.co. + - a path to a *directory* containing a model card file saved using the [`~ModelCard.save_pretrained`] + method, e.g.: `./my_model_directory/`. + - a path or url to a saved model card JSON *file*, e.g.: `./my_model_directory/modelcard.json`. + + cache_dir: (*optional*) string: + Path to a directory in which a downloaded pre-trained model card should be cached if the standard cache + should not be used. + + kwargs: (*optional*) dict: key/value pairs with which to update the ModelCard object after loading. + + - The values in kwargs of any keys which are model card attributes will be used to override the loaded + values. + - Behavior concerning key/value pairs whose keys are *not* model card attributes is controlled by the + *return_unused_kwargs* keyword parameter. + + proxies: (*optional*) dict, default None: + A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. + + return_unused_kwargs: (*optional*) bool: + + - If False, then this function returns just the final model card object. + - If True, then this functions returns a tuple *(model card, unused_kwargs)* where *unused_kwargs* is a + dictionary consisting of the key/value pairs whose keys are not model card attributes: ie the part of + kwargs which has not been used to update *ModelCard* and is otherwise ignored. + + Examples: + + ```python + # Download model card from huggingface.co and cache. 
+ modelcard = ModelCard.from_pretrained("google-bert/bert-base-uncased") + # Model card was saved using *save_pretrained('./test/saved_model/')* + modelcard = ModelCard.from_pretrained("./test/saved_model/") + modelcard = ModelCard.from_pretrained("./test/saved_model/modelcard.json") + modelcard = ModelCard.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False) + ```""" + cache_dir = kwargs.pop("cache_dir", None) + proxies = kwargs.pop("proxies", None) + return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) + from_pipeline = kwargs.pop("_from_pipeline", None) + + user_agent = {"file_type": "model_card"} + if from_pipeline is not None: + user_agent["using_pipeline"] = from_pipeline + + is_local = os.path.isdir(pretrained_model_name_or_path) + if os.path.isfile(pretrained_model_name_or_path): + resolved_model_card_file = pretrained_model_name_or_path + is_local = True + else: + try: + # Load from URL or cache if already cached + resolved_model_card_file = cached_file( + pretrained_model_name_or_path, + filename=MODEL_CARD_NAME, + cache_dir=cache_dir, + proxies=proxies, + user_agent=user_agent, + ) + if is_local: + logger.info(f"loading model card file {resolved_model_card_file}") + else: + logger.info(f"loading model card file {MODEL_CARD_NAME} from cache at {resolved_model_card_file}") + # Load model card + modelcard = cls.from_json_file(resolved_model_card_file) + + except (EnvironmentError, json.JSONDecodeError): + # We fall back on creating an empty model card + modelcard = cls() + + # Update model card with kwargs if needed + to_remove = [] + for key, value in kwargs.items(): + if hasattr(modelcard, key): + setattr(modelcard, key, value) + to_remove.append(key) + for key in to_remove: + kwargs.pop(key, None) + + logger.info(f"Model card: {modelcard}") + if return_unused_kwargs: + return modelcard, kwargs + else: + return modelcard + + @classmethod + def from_dict(cls, json_object): + """Constructs a `ModelCard` from a Python dictionary of parameters.""" + return cls(**json_object) + + @classmethod + def from_json_file(cls, json_file): + """Constructs a `ModelCard` from a json file of parameters.""" + with open(json_file, "r", encoding="utf-8") as reader: + text = reader.read() + dict_obj = json.loads(text) + return cls(**dict_obj) + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + def __repr__(self): + return str(self.to_json_string()) + + def to_dict(self): + """Serializes this instance to a Python dictionary.""" + output = copy.deepcopy(self.__dict__) + return output + + def to_json_string(self): + """Serializes this instance to a JSON string.""" + return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" + + def to_json_file(self, json_file_path): + """Save this instance to a json file.""" + with open(json_file_path, "w", encoding="utf-8") as writer: + writer.write(self.to_json_string()) + + +AUTOGENERATED_TRAINER_COMMENT = """ + +""" + +AUTOGENERATED_KERAS_COMMENT = """ + +""" + + +TASK_TAG_TO_NAME_MAPPING = { + "fill-mask": "Masked Language Modeling", + "image-classification": "Image Classification", + "image-segmentation": "Image Segmentation", + "multiple-choice": "Multiple Choice", + "object-detection": "Object Detection", + "question-answering": "Question Answering", + "summarization": "Summarization", + "table-question-answering": "Table Question Answering", + "text-classification": "Text Classification", + "text-generation": "Causal Language Modeling", + "text2text-generation": "Sequence-to-sequence 
Language Modeling", + "token-classification": "Token Classification", + "translation": "Translation", + "zero-shot-classification": "Zero Shot Classification", + "automatic-speech-recognition": "Automatic Speech Recognition", + "audio-classification": "Audio Classification", +} + + +METRIC_TAGS = [ + "accuracy", + "bleu", + "f1", + "matthews_correlation", + "pearsonr", + "precision", + "recall", + "rouge", + "sacrebleu", + "spearmanr", + "wer", +] + + +def _listify(obj): + if obj is None: + return [] + elif isinstance(obj, str): + return [obj] + else: + return obj + + +def _insert_values_as_list(metadata, name, values): + if values is None: + return metadata + if isinstance(values, str): + values = [values] + values = [v for v in values if v is not None] + if len(values) == 0: + return metadata + metadata[name] = values + return metadata + + +def infer_metric_tags_from_eval_results(eval_results): + if eval_results is None: + return {} + result = {} + for key in eval_results.keys(): + if key.lower().replace(" ", "_") in METRIC_TAGS: + result[key.lower().replace(" ", "_")] = key + elif key.lower() == "rouge1": + result["rouge"] = key + return result + + +def _insert_value(metadata, name, value): + if value is None: + return metadata + metadata[name] = value + return metadata + + +def is_hf_dataset(dataset): + if not is_datasets_available(): + return False + + from datasets import Dataset, IterableDataset + + return isinstance(dataset, (Dataset, IterableDataset)) + + +def _get_mapping_values(mapping): + result = [] + for v in mapping.values(): + if isinstance(v, (tuple, list)): + result += list(v) + else: + result.append(v) + return result + + +@dataclass +class TrainingSummary: + model_name: str + language: Optional[Union[str, List[str]]] = None + license: Optional[str] = None + tags: Optional[Union[str, List[str]]] = None + finetuned_from: Optional[str] = None + tasks: Optional[Union[str, List[str]]] = None + dataset: Optional[Union[str, List[str]]] = None + dataset_tags: Optional[Union[str, List[str]]] = None + dataset_args: Optional[Union[str, List[str]]] = None + dataset_metadata: Optional[Dict[str, Any]] = None + eval_results: Optional[Dict[str, float]] = None + eval_lines: Optional[List[str]] = None + hyperparameters: Optional[Dict[str, Any]] = None + source: Optional[str] = "trainer" + + def __post_init__(self): + # Infer default license from the checkpoint used, if possible. 
+ if ( + self.license is None + and not is_offline_mode() + and self.finetuned_from is not None + and len(self.finetuned_from) > 0 + ): + try: + info = model_info(self.finetuned_from) + for tag in info.tags: + if tag.startswith("license:"): + self.license = tag[8:] + except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError, HFValidationError): + pass + + def create_model_index(self, metric_mapping): + model_index = {"name": self.model_name} + + # Dataset mapping tag -> name + dataset_names = _listify(self.dataset) + dataset_tags = _listify(self.dataset_tags) + dataset_args = _listify(self.dataset_args) + dataset_metadata = _listify(self.dataset_metadata) + if len(dataset_args) < len(dataset_tags): + dataset_args = dataset_args + [None] * (len(dataset_tags) - len(dataset_args)) + dataset_mapping = dict(zip(dataset_tags, dataset_names)) + dataset_arg_mapping = dict(zip(dataset_tags, dataset_args)) + dataset_metadata_mapping = dict(zip(dataset_tags, dataset_metadata)) + + task_mapping = { + task: TASK_TAG_TO_NAME_MAPPING[task] for task in _listify(self.tasks) if task in TASK_TAG_TO_NAME_MAPPING + } + + model_index["results"] = [] + + if len(task_mapping) == 0 and len(dataset_mapping) == 0: + return [model_index] + if len(task_mapping) == 0: + task_mapping = {None: None} + if len(dataset_mapping) == 0: + dataset_mapping = {None: None} + + # One entry per dataset and per task + all_possibilities = [(task_tag, ds_tag) for task_tag in task_mapping for ds_tag in dataset_mapping] + for task_tag, ds_tag in all_possibilities: + result = {} + if task_tag is not None: + result["task"] = {"name": task_mapping[task_tag], "type": task_tag} + + if ds_tag is not None: + metadata = dataset_metadata_mapping.get(ds_tag, {}) + result["dataset"] = { + "name": dataset_mapping[ds_tag], + "type": ds_tag, + **metadata, + } + if dataset_arg_mapping[ds_tag] is not None: + result["dataset"]["args"] = dataset_arg_mapping[ds_tag] + + if len(metric_mapping) > 0: + result["metrics"] = [] + for metric_tag, metric_name in metric_mapping.items(): + result["metrics"].append( + { + "name": metric_name, + "type": metric_tag, + "value": self.eval_results[metric_name], + } + ) + + # Remove partial results to avoid the model card being rejected. + if "task" in result and "dataset" in result and "metrics" in result: + model_index["results"].append(result) + else: + logger.info(f"Dropping the following result as it does not have all the necessary fields:\n{result}") + + return [model_index] + + def create_metadata(self): + metric_mapping = infer_metric_tags_from_eval_results(self.eval_results) + + metadata = {} + metadata = _insert_values_as_list(metadata, "language", self.language) + metadata = _insert_value(metadata, "license", self.license) + if self.finetuned_from is not None and isinstance(self.finetuned_from, str) and len(self.finetuned_from) > 0: + metadata = _insert_value(metadata, "base_model", self.finetuned_from) + metadata = _insert_values_as_list(metadata, "tags", self.tags) + metadata = _insert_values_as_list(metadata, "datasets", self.dataset_tags) + metadata = _insert_values_as_list(metadata, "metrics", list(metric_mapping.keys())) + metadata["model-index"] = self.create_model_index(metric_mapping) + + return metadata + + def to_model_card(self): + model_card = "" + + metadata = yaml.dump(self.create_metadata(), sort_keys=False) + if len(metadata) > 0: + model_card = f"---\n{metadata}---\n" + + # Now the model card for realsies. 
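+        # Everything below is appended as plain Markdown after the YAML metadata block assembled above.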
+ if self.source == "trainer": + model_card += AUTOGENERATED_TRAINER_COMMENT + else: + model_card += AUTOGENERATED_KERAS_COMMENT + + model_card += f"\n# {self.model_name}\n\n" + + if self.finetuned_from is None: + model_card += "This model was trained from scratch on " + else: + model_card += ( + "This model is a fine-tuned version of" + f" [{self.finetuned_from}](https://huggingface.co/{self.finetuned_from}) on " + ) + + if self.dataset is None: + model_card += "an unknown dataset." + else: + if isinstance(self.dataset, str): + model_card += f"the {self.dataset} dataset." + elif isinstance(self.dataset, (tuple, list)) and len(self.dataset) == 1: + model_card += f"the {self.dataset[0]} dataset." + else: + model_card += ( + ", ".join([f"the {ds}" for ds in self.dataset[:-1]]) + f" and the {self.dataset[-1]} datasets." + ) + + if self.eval_results is not None: + model_card += "\nIt achieves the following results on the evaluation set:\n" + model_card += "\n".join([f"- {name}: {_maybe_round(value)}" for name, value in self.eval_results.items()]) + model_card += "\n" + + model_card += "\n## Model description\n\nMore information needed\n" + model_card += "\n## Intended uses & limitations\n\nMore information needed\n" + model_card += "\n## Training and evaluation data\n\nMore information needed\n" + + model_card += "\n## Training procedure\n" + model_card += "\n### Training hyperparameters\n" + if self.hyperparameters is not None: + model_card += "\nThe following hyperparameters were used during training:\n" + model_card += "\n".join([f"- {name}: {value}" for name, value in self.hyperparameters.items()]) + model_card += "\n" + else: + model_card += "\nMore information needed\n" + + if self.eval_lines is not None: + model_card += "\n### Training results\n\n" + model_card += make_markdown_table(self.eval_lines) + model_card += "\n" + + model_card += "\n### Framework versions\n\n" + model_card += f"- Transformers {__version__}\n" + + if self.source == "trainer" and is_torch_available(): + import torch + + model_card += f"- Pytorch {torch.__version__}\n" + elif self.source == "keras" and is_tf_available(): + import tensorflow as tf + + model_card += f"- TensorFlow {tf.__version__}\n" + if is_datasets_available(): + import datasets + + model_card += f"- Datasets {datasets.__version__}\n" + if is_tokenizers_available(): + import tokenizers + + model_card += f"- Tokenizers {tokenizers.__version__}\n" + + return model_card + + @classmethod + def from_trainer( + cls, + trainer, + language=None, + license=None, + tags=None, + model_name=None, + finetuned_from=None, + tasks=None, + dataset_tags=None, + dataset_metadata=None, + dataset=None, + dataset_args=None, + ): + # Infer default from dataset + one_dataset = trainer.eval_dataset if trainer.eval_dataset is not None else trainer.train_dataset + if is_hf_dataset(one_dataset) and (dataset_tags is None or dataset_args is None or dataset_metadata is None): + default_tag = one_dataset.builder_name + # Those are not real datasets from the Hub so we exclude them. 
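+            # (`builder_name` is just the generic loader name for local csv/json/pandas/parquet/text files, so it
+            # would not resolve to a real dataset tag on the Hub.)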
+ if default_tag not in ["csv", "json", "pandas", "parquet", "text"]: + if dataset_metadata is None: + dataset_metadata = [{"config": one_dataset.config_name, "split": str(one_dataset.split)}] + if dataset_tags is None: + dataset_tags = [default_tag] + if dataset_args is None: + dataset_args = [one_dataset.config_name] + + if dataset is None and dataset_tags is not None: + dataset = dataset_tags + + # Infer default finetuned_from + if ( + finetuned_from is None + and hasattr(trainer.model.config, "_name_or_path") + and not os.path.isdir(trainer.model.config._name_or_path) + ): + finetuned_from = trainer.model.config._name_or_path + + # Infer default task tag: + if tasks is None: + model_class_name = trainer.model.__class__.__name__ + for task, mapping in TASK_MAPPING.items(): + if model_class_name in _get_mapping_values(mapping): + tasks = task + + if model_name is None: + model_name = Path(trainer.args.output_dir).name + if len(model_name) == 0: + model_name = finetuned_from + + # Add `generated_from_trainer` to the tags + if tags is None: + tags = ["generated_from_trainer"] + elif isinstance(tags, str) and tags != "generated_from_trainer": + tags = [tags, "generated_from_trainer"] + elif "generated_from_trainer" not in tags: + tags.append("generated_from_trainer") + + _, eval_lines, eval_results = parse_log_history(trainer.state.log_history) + hyperparameters = extract_hyperparameters_from_trainer(trainer) + + return cls( + language=language, + license=license, + tags=tags, + model_name=model_name, + finetuned_from=finetuned_from, + tasks=tasks, + dataset=dataset, + dataset_tags=dataset_tags, + dataset_args=dataset_args, + dataset_metadata=dataset_metadata, + eval_results=eval_results, + eval_lines=eval_lines, + hyperparameters=hyperparameters, + ) + + @classmethod + def from_keras( + cls, + model, + model_name, + keras_history=None, + language=None, + license=None, + tags=None, + finetuned_from=None, + tasks=None, + dataset_tags=None, + dataset=None, + dataset_args=None, + ): + # Infer default from dataset + if dataset is not None: + if is_hf_dataset(dataset) and (dataset_tags is None or dataset_args is None): + default_tag = dataset.builder_name + # Those are not real datasets from the Hub so we exclude them. 
+ if default_tag not in ["csv", "json", "pandas", "parquet", "text"]: + if dataset_tags is None: + dataset_tags = [default_tag] + if dataset_args is None: + dataset_args = [dataset.config_name] + + if dataset is None and dataset_tags is not None: + dataset = dataset_tags + + # Infer default finetuned_from + if ( + finetuned_from is None + and hasattr(model.config, "_name_or_path") + and not os.path.isdir(model.config._name_or_path) + ): + finetuned_from = model.config._name_or_path + + # Infer default task tag: + if tasks is None: + model_class_name = model.__class__.__name__ + for task, mapping in TASK_MAPPING.items(): + if model_class_name in _get_mapping_values(mapping): + tasks = task + + # Add `generated_from_keras_callback` to the tags + if tags is None: + tags = ["generated_from_keras_callback"] + elif isinstance(tags, str) and tags != "generated_from_keras_callback": + tags = [tags, "generated_from_keras_callback"] + elif "generated_from_keras_callback" not in tags: + tags.append("generated_from_keras_callback") + + if keras_history is not None: + _, eval_lines, eval_results = parse_keras_history(keras_history) + else: + eval_lines = [] + eval_results = {} + hyperparameters = extract_hyperparameters_from_keras(model) + + return cls( + language=language, + license=license, + tags=tags, + model_name=model_name, + finetuned_from=finetuned_from, + tasks=tasks, + dataset_tags=dataset_tags, + dataset=dataset, + dataset_args=dataset_args, + eval_results=eval_results, + eval_lines=eval_lines, + hyperparameters=hyperparameters, + source="keras", + ) + + +def parse_keras_history(logs): + """ + Parse the `logs` of either a `keras.History` object returned by `model.fit()` or an accumulated logs `dict` + passed to the `PushToHubCallback`. Returns lines and logs compatible with those returned by `parse_log_history`. + """ + if hasattr(logs, "history"): + # This looks like a `History` object + if not hasattr(logs, "epoch"): + # This history looks empty, return empty results + return None, [], {} + logs.history["epoch"] = logs.epoch + logs = logs.history + else: + # Training logs is a list of dicts, let's invert it to a dict of lists to match a History object + logs = {log_key: [single_dict[log_key] for single_dict in logs] for log_key in logs[0]} + + lines = [] + for i in range(len(logs["epoch"])): + epoch_dict = {log_key: log_value_list[i] for log_key, log_value_list in logs.items()} + values = {} + for k, v in epoch_dict.items(): + if k.startswith("val_"): + k = "validation_" + k[4:] + elif k != "epoch": + k = "train_" + k + splits = k.split("_") + name = " ".join([part.capitalize() for part in splits]) + values[name] = v + lines.append(values) + + eval_results = lines[-1] + + return logs, lines, eval_results + + +def parse_log_history(log_history): + """ + Parse the `log_history` of a Trainer to get the intermediate and final evaluation results. 
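+
+    Returns a `(train_log, eval_lines, eval_results)` tuple; entries are `None` when the corresponding information
+    is missing from `log_history`.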
+ """ + idx = 0 + while idx < len(log_history) and "train_runtime" not in log_history[idx]: + idx += 1 + + # If there are no training logs + if idx == len(log_history): + idx -= 1 + while idx >= 0 and "eval_loss" not in log_history[idx]: + idx -= 1 + + if idx >= 0: + return None, None, log_history[idx] + else: + return None, None, None + + # From now one we can assume we have training logs: + train_log = log_history[idx] + lines = [] + training_loss = "No log" + for i in range(idx): + if "loss" in log_history[i]: + training_loss = log_history[i]["loss"] + if "eval_loss" in log_history[i]: + metrics = log_history[i].copy() + _ = metrics.pop("total_flos", None) + epoch = metrics.pop("epoch", None) + step = metrics.pop("step", None) + _ = metrics.pop("eval_runtime", None) + _ = metrics.pop("eval_samples_per_second", None) + _ = metrics.pop("eval_steps_per_second", None) + _ = metrics.pop("eval_jit_compilation_time", None) + values = {"Training Loss": training_loss, "Epoch": epoch, "Step": step} + for k, v in metrics.items(): + if k == "eval_loss": + values["Validation Loss"] = v + else: + splits = k.split("_") + name = " ".join([part.capitalize() for part in splits[1:]]) + values[name] = v + lines.append(values) + + idx = len(log_history) - 1 + while idx >= 0 and "eval_loss" not in log_history[idx]: + idx -= 1 + + if idx > 0: + eval_results = {} + for key, value in log_history[idx].items(): + if key.startswith("eval_"): + key = key[5:] + if key not in ["runtime", "samples_per_second", "steps_per_second", "epoch", "step"]: + camel_cased_key = " ".join([part.capitalize() for part in key.split("_")]) + eval_results[camel_cased_key] = value + return train_log, lines, eval_results + else: + return train_log, lines, None + + +def extract_hyperparameters_from_keras(model): + from .modeling_tf_utils import keras + + hyperparameters = {} + if hasattr(model, "optimizer") and model.optimizer is not None: + hyperparameters["optimizer"] = model.optimizer.get_config() + else: + hyperparameters["optimizer"] = None + hyperparameters["training_precision"] = keras.mixed_precision.global_policy().name + + return hyperparameters + + +def _maybe_round(v, decimals=4): + if isinstance(v, float) and len(str(v).split(".")) > 1 and len(str(v).split(".")[1]) > decimals: + return f"{v:.{decimals}f}" + return str(v) + + +def _regular_table_line(values, col_widths): + values_with_space = [f"| {v}" + " " * (w - len(v) + 1) for v, w in zip(values, col_widths)] + return "".join(values_with_space) + "|\n" + + +def _second_table_line(col_widths): + values = ["|:" + "-" * w + ":" for w in col_widths] + return "".join(values) + "|\n" + + +def make_markdown_table(lines): + """ + Create a nice Markdown table from the results in `lines`. 
+ """ + if lines is None or len(lines) == 0: + return "" + col_widths = {key: len(str(key)) for key in lines[0].keys()} + for line in lines: + for key, value in line.items(): + if col_widths[key] < len(_maybe_round(value)): + col_widths[key] = len(_maybe_round(value)) + + table = _regular_table_line(list(lines[0].keys()), list(col_widths.values())) + table += _second_table_line(list(col_widths.values())) + for line in lines: + table += _regular_table_line([_maybe_round(v) for v in line.values()], list(col_widths.values())) + return table + + +_TRAINING_ARGS_KEYS = [ + "learning_rate", + "train_batch_size", + "eval_batch_size", + "seed", +] + + +def extract_hyperparameters_from_trainer(trainer): + hyperparameters = {k: getattr(trainer.args, k) for k in _TRAINING_ARGS_KEYS} + + if trainer.args.parallel_mode not in [ParallelMode.NOT_PARALLEL, ParallelMode.NOT_DISTRIBUTED]: + hyperparameters["distributed_type"] = ( + "multi-GPU" if trainer.args.parallel_mode == ParallelMode.DISTRIBUTED else trainer.args.parallel_mode.value + ) + if trainer.args.world_size > 1: + hyperparameters["num_devices"] = trainer.args.world_size + if trainer.args.gradient_accumulation_steps > 1: + hyperparameters["gradient_accumulation_steps"] = trainer.args.gradient_accumulation_steps + + total_train_batch_size = ( + trainer.args.train_batch_size * trainer.args.world_size * trainer.args.gradient_accumulation_steps + ) + if total_train_batch_size != hyperparameters["train_batch_size"]: + hyperparameters["total_train_batch_size"] = total_train_batch_size + total_eval_batch_size = trainer.args.eval_batch_size * trainer.args.world_size + if total_eval_batch_size != hyperparameters["eval_batch_size"]: + hyperparameters["total_eval_batch_size"] = total_eval_batch_size + + if trainer.args.adafactor: + hyperparameters["optimizer"] = "Adafactor" + else: + hyperparameters["optimizer"] = ( + f"Adam with betas=({trainer.args.adam_beta1},{trainer.args.adam_beta2}) and" + f" epsilon={trainer.args.adam_epsilon}" + ) + + hyperparameters["lr_scheduler_type"] = trainer.args.lr_scheduler_type.value + if trainer.args.warmup_ratio != 0.0: + hyperparameters["lr_scheduler_warmup_ratio"] = trainer.args.warmup_ratio + if trainer.args.warmup_steps != 0.0: + hyperparameters["lr_scheduler_warmup_steps"] = trainer.args.warmup_steps + if trainer.args.max_steps != -1: + hyperparameters["training_steps"] = trainer.args.max_steps + else: + hyperparameters["num_epochs"] = trainer.args.num_train_epochs + + if trainer.args.fp16: + if trainer.use_apex: + hyperparameters["mixed_precision_training"] = f"Apex, opt level {trainer.args.fp16_opt_level}" + else: + hyperparameters["mixed_precision_training"] = "Native AMP" + + if trainer.args.label_smoothing_factor != 0.0: + hyperparameters["label_smoothing_factor"] = trainer.args.label_smoothing_factor + + return hyperparameters diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/modeling_flax_outputs.py b/env-llmeval/lib/python3.10/site-packages/transformers/modeling_flax_outputs.py new file mode 100644 index 0000000000000000000000000000000000000000..179a0b787936960c118bbb5ad34f73d00469d481 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/modeling_flax_outputs.py @@ -0,0 +1,700 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Dict, Optional, Tuple + +import flax +import jax.numpy as jnp + +from .utils import ModelOutput + + +@flax.struct.dataclass +class FlaxBaseModelOutput(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxBaseModelOutputWithNoAttention(ModelOutput): + """ + Base class for model's outputs, with potential hidden states. + + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one + for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the + model at the output of each layer plus the optional initial embedding outputs. + """ + + last_hidden_state: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxBaseModelOutputWithPoolingAndNoAttention(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`): + Last layer hidden-state after a pooling operation on the spatial dimensions. + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one + for the output of each layer) of shape `(batch_size, num_channels, height, width)`. 
Hidden-states of the + model at the output of each layer plus the optional initial embedding outputs. + """ + + last_hidden_state: jnp.ndarray = None + pooler_output: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxImageClassifierOutputWithNoAttention(ModelOutput): + """ + Base class for outputs of image classification models. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when + `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one + for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also + called feature maps) of the model at the output of each stage. + """ + + logits: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxBaseModelOutputWithPast(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + past_key_values (`Dict[str, jnp.ndarray]`): + Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast + auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: jnp.ndarray = None + past_key_values: Optional[Dict[str, jnp.ndarray]] = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxBaseModelOutputWithPooling(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`): + Last layer hidden-state of the first token of the sequence (classification token) further processed by a + Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence + prediction (classification) objective during pretraining. 
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: jnp.ndarray = None + pooler_output: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxBaseModelOutputWithPoolingAndCrossAttentions(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`): + Last layer hidden-state of the first token of the sequence (classification token) after further processing + through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns + the classification token after processing through a linear layer and a tanh activation function. The linear + layer weights are trained from the next sentence prediction (classification) objective during pretraining. + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one + for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. 
+ past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. + """ + + last_hidden_state: jnp.ndarray = None + pooler_output: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + cross_attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxBaseModelOutputWithPastAndCrossAttentions(ModelOutput): + """ + Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). + + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. 
+ """ + + last_hidden_state: jnp.ndarray = None + past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + cross_attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxSeq2SeqModelOutput(ModelOutput): + """ + Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential + decoding. + + Args: + last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. 
+ + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + last_hidden_state: jnp.ndarray = None + past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None + decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + decoder_attentions: Optional[Tuple[jnp.ndarray]] = None + cross_attentions: Optional[Tuple[jnp.ndarray]] = None + encoder_last_hidden_state: Optional[jnp.ndarray] = None + encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + encoder_attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxCausalLMOutputWithCrossAttentions(ModelOutput): + """ + Base class for causal language model (or autoregressive) outputs. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Cross attentions weights after the attention softmax, used to compute the weighted average in the + cross-attention heads. + past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `jnp.ndarray` tuples of length `config.n_layers`, with each tuple containing the cached key, value + states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. + Only relevant if `config.is_decoder = True`. + + Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + """ + + logits: jnp.ndarray = None + past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + cross_attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxMaskedLMOutput(ModelOutput): + """ + Base class for masked language models outputs. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). 
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + logits: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +FlaxCausalLMOutput = FlaxMaskedLMOutput + + +@flax.struct.dataclass +class FlaxSeq2SeqLMOutput(ModelOutput): + """ + Base class for sequence-to-sequence language models outputs. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. 
+ encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + logits: jnp.ndarray = None + past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None + decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + decoder_attentions: Optional[Tuple[jnp.ndarray]] = None + cross_attentions: Optional[Tuple[jnp.ndarray]] = None + encoder_last_hidden_state: Optional[jnp.ndarray] = None + encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + encoder_attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxNextSentencePredictorOutput(ModelOutput): + """ + Base class for outputs of models predicting if two sentences are consecutive or not. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, 2)`): + Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation + before SoftMax). + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + logits: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxSequenceClassifierOutput(ModelOutput): + """ + Base class for outputs of sentence classification models. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. 
+ + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + logits: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxSeq2SeqSequenceClassifierOutput(ModelOutput): + """ + Base class for outputs of sequence-to-sequence sentence classification models. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. 
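+
+    Example:
+
+        A minimal illustrative sketch; the label count and hidden size below are arbitrary and only show how the
+        decoder-side and encoder-side fields fit together::
+
+            >>> import jax.numpy as jnp
+            >>> outputs = FlaxSeq2SeqSequenceClassifierOutput(
+            ...     logits=jnp.zeros((2, 3)),                         # hypothetical (batch_size, config.num_labels)
+            ...     encoder_last_hidden_state=jnp.zeros((2, 7, 16)),  # hypothetical (batch_size, sequence_length, hidden_size)
+            ... )
+            >>> outputs.logits.shape
+            (2, 3)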
+ """ + + logits: jnp.ndarray = None + past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None + decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + decoder_attentions: Optional[Tuple[jnp.ndarray]] = None + cross_attentions: Optional[Tuple[jnp.ndarray]] = None + encoder_last_hidden_state: Optional[jnp.ndarray] = None + encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + encoder_attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxMultipleChoiceModelOutput(ModelOutput): + """ + Base class for outputs of multiple choice models. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, num_choices)`): + *num_choices* is the second dimension of the input tensors. (see *input_ids* above). + + Classification scores (before SoftMax). + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + logits: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxTokenClassifierOutput(ModelOutput): + """ + Base class for outputs of token classification models. + + Args: + logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.num_labels)`): + Classification scores (before SoftMax). + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + logits: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxQuestionAnsweringModelOutput(ModelOutput): + """ + Base class for outputs of question answering models. + + Args: + start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): + Span-start scores (before SoftMax). + end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): + Span-end scores (before SoftMax). 
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + start_logits: jnp.ndarray = None + end_logits: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray]] = None + attentions: Optional[Tuple[jnp.ndarray]] = None + + +@flax.struct.dataclass +class FlaxSeq2SeqQuestionAnsweringModelOutput(ModelOutput): + """ + Base class for outputs of sequence-to-sequence question answering models. + + Args: + start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): + Span-start scores (before SoftMax). + end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): + Span-end scores (before SoftMax). + past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. 
+ encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + start_logits: jnp.ndarray = None + end_logits: jnp.ndarray = None + past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None + decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + decoder_attentions: Optional[Tuple[jnp.ndarray]] = None + cross_attentions: Optional[Tuple[jnp.ndarray]] = None + encoder_last_hidden_state: Optional[jnp.ndarray] = None + encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None + encoder_attentions: Optional[Tuple[jnp.ndarray]] = None diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/modeling_outputs.py b/env-llmeval/lib/python3.10/site-packages/transformers/modeling_outputs.py new file mode 100644 index 0000000000000000000000000000000000000000..7328e05186f2deddebb54f76d64427475de849a6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/modeling_outputs.py @@ -0,0 +1,1753 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch + +from .utils import ModelOutput + + +@dataclass +class BaseModelOutput(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. 
+ + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithNoAttention(ModelOutput): + """ + Base class for model's outputs, with potential hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithPooling(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Last layer hidden-state of the first token of the sequence (classification token) after further processing + through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns + the classification token after processing through a linear layer and a tanh activation function. The linear + layer weights are trained from the next sentence prediction (classification) objective during pretraining. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: torch.FloatTensor = None + pooler_output: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithPoolingAndNoAttention(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Sequence of hidden-states at the output of the last layer of the model. 
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Last layer hidden-state after a pooling operation on the spatial dimensions. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + """ + + last_hidden_state: torch.FloatTensor = None + pooler_output: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithPast(ModelOutput): + """ + Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithCrossAttentions(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. 
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Last layer hidden-state of the first token of the sequence (classification token) after further processing + through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns + the classification token after processing through a linear layer and a tanh activation function. The linear + layer weights are trained from the next sentence prediction (classification) objective during pretraining. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. 
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. + """ + + last_hidden_state: torch.FloatTensor = None + pooler_output: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithPastAndCrossAttentions(ModelOutput): + """ + Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. 
+ + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class MoECausalLMOutputWithPast(ModelOutput): + """ + Base class for causal language model (or autoregressive) outputs as well as Mixture of Expert's router hidden + states terms, to train a MoE model. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) + + Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + z_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): + z_loss for the sparse modules. + aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): + aux_loss for the sparse modules. + router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Router logits of the encoder model, useful to compute the auxiliary loss and the z_loss for the sparse + modules. 
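+
+    Example:
+
+        A minimal illustrative sketch; the vocabulary size and the zero-valued losses below are arbitrary and only
+        show which fields a MoE language-modeling head would typically populate::
+
+            >>> import torch
+            >>> outputs = MoECausalLMOutputWithPast(
+            ...     logits=torch.zeros(1, 5, 100),  # hypothetical (batch_size, sequence_length, config.vocab_size)
+            ...     z_loss=torch.tensor(0.0),
+            ...     aux_loss=torch.tensor(0.0),
+            ... )
+            >>> outputs.logits.shape
+            torch.Size([1, 5, 100])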
+ """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + z_loss: torch.FloatTensor = None + aux_loss: torch.FloatTensor = None + router_logits: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class MoEModelOutput(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + router_probs (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Raw router probabilities that are computed by MoE routers, these terms are used to compute the auxiliary + loss and the z_loss for Mixture of Experts models. + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + router_probs: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class MoeModelOutputWithPast(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. 
+        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+
+            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+            heads.
+        router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
+
+            Raw router logits (post-softmax) that are computed by MoE routers; these terms are used to compute the
+            auxiliary loss for Mixture of Experts models.
+    """
+
+    last_hidden_state: torch.FloatTensor = None
+    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+    router_logits: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class MoeCausalLMOutputWithPast(ModelOutput):
+    """
+    Base class for causal language model (or autoregressive) outputs with mixture of experts.
+
+    Args:
+        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+            Language modeling loss (for next-token prediction).
+
+        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+
+        aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):
+            aux_loss for the sparse modules.
+
+        router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
+
+            Raw router logits (post-softmax) that are computed by MoE routers; these terms are used to compute the
+            auxiliary loss for Mixture of Experts models.
+
+        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+            `(batch_size, num_heads, sequence_length, embed_size_per_head)`.
+
+            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+            `past_key_values` input) to speed up sequential decoding.
+        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+ + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + aux_loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + router_logits: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class MoEModelOutputWithPastAndCrossAttentions(ModelOutput): + """ + Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding) as well as + Mixture of Expert's router hidden states terms, to train a MoE model. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. 
+ router_probs (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Raw router probabilities that are computed by MoE routers, these terms are used to compute the auxiliary + loss and the z_loss for Mixture of Experts models. + """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + router_probs: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class Seq2SeqModelOutput(ModelOutput): + """ + Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential + decoding. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. 
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class Seq2SeqMoEModelOutput(ModelOutput): + """ + Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential + decoding. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. 
+ decoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + encoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Router logits of the encoder model, useful to compute the auxiliary loss and the z_loss for the sparse + modules. + """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class CausalLMOutput(ModelOutput): + """ + Base class for causal language model (or autoregressive) outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). 
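+
+            A minimal sketch of how these prediction scores are typically consumed (a toy output with random
+            scores is constructed here; greedy selection of the next token is shown purely for illustration, and
+            the `transformers.modeling_outputs` import path is assumed):
+
+            ```python
+            import torch
+            from transformers.modeling_outputs import CausalLMOutput
+
+            # Toy scores: (batch_size=1, sequence_length=4, vocab_size=10)
+            outputs = CausalLMOutput(logits=torch.randn(1, 4, 10))
+            next_token_logits = outputs.logits[:, -1, :]      # scores at the last position
+            next_token_id = next_token_logits.argmax(dim=-1)  # greedy choice per batch element
+            ```
+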
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class CausalLMOutputWithPast(ModelOutput): + """ + Base class for causal language model (or autoregressive) outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) + + Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class CausalLMOutputWithCrossAttentions(ModelOutput): + """ + Base class for causal language model (or autoregressive) outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). 
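+
+            A minimal sketch of how such a next-token loss is conventionally computed from logits and labels
+            (toy tensors; the shift-by-one alignment is the standard causal language modeling setup, not an API
+            of this class):
+
+            ```python
+            import torch
+            import torch.nn.functional as F
+
+            # Toy scores and labels: (batch_size=1, sequence_length=5, vocab_size=10)
+            logits = torch.randn(1, 5, 10)
+            labels = torch.randint(0, 10, (1, 5))
+
+            # The prediction at position i is scored against the token at position i + 1
+            shift_logits = logits[:, :-1, :].contiguous()
+            shift_labels = labels[:, 1:].contiguous()
+            loss = F.cross_entropy(shift_logits.view(-1, 10), shift_labels.view(-1))
+            ```
+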
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Cross attentions weights after the attention softmax, used to compute the weighted average in the + cross-attention heads. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `torch.FloatTensor` tuples of length `config.n_layers`, with each tuple containing the cached key, + value states of the self-attention and the cross-attention layers if model is used in encoder-decoder + setting. Only relevant if `config.is_decoder = True`. + + Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class SequenceClassifierOutputWithPast(ModelOutput): + """ + Base class for outputs of sentence classification models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) + + Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. 
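+
+            A minimal sketch of the cache layout described above (a toy cache is built by hand here; the layer
+            count and tensor sizes are illustrative, and the `transformers.modeling_outputs` import path is
+            assumed):
+
+            ```python
+            import torch
+            from transformers.modeling_outputs import SequenceClassifierOutputWithPast
+
+            # Toy cache for a 2-layer model: each layer stores a (key, value) pair of shape
+            # (batch_size=1, num_heads=4, sequence_length=6, embed_size_per_head=8)
+            layer_kv = (torch.zeros(1, 4, 6, 8), torch.zeros(1, 4, 6, 8))
+            out = SequenceClassifierOutputWithPast(
+                logits=torch.randn(1, 2), past_key_values=(layer_kv, layer_kv)
+            )
+
+            num_layers = len(out.past_key_values)             # config.n_layers
+            cached_len = out.past_key_values[0][0].shape[-2]  # sequence length already cached
+            ```
+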
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class MaskedLMOutput(ModelOutput): + """ + Base class for masked language models outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Masked language modeling (MLM) loss. + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class Seq2SeqLMOutput(ModelOutput): + """ + Base class for sequence-to-sequence language models outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss. + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. 
+ + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class Seq2SeqMoEOutput(ModelOutput): + """ + Base class for sequence-to-sequence language models outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss. 
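+
+            How the router losses documented below are folded into this value is model-specific; as a rough
+            sketch of one common combination (the coefficient names follow the Switch-Transformers convention
+            and all numbers here are toy assumptions):
+
+            ```python
+            import torch
+
+            lm_loss = torch.tensor(2.31)
+            encoder_aux_loss, decoder_aux_loss = torch.tensor(0.02), torch.tensor(0.03)
+            encoder_z_loss, decoder_z_loss = torch.tensor(0.10), torch.tensor(0.12)
+            router_aux_loss_coef, router_z_loss_coef = 0.001, 0.001
+
+            total_loss = (
+                lm_loss
+                + router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss)
+                + router_z_loss_coef * (encoder_z_loss + decoder_z_loss)
+            )
+            ```
+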
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + decoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. 
+ + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + encoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Router logits of the encoder model, useful to compute the auxiliary loss and z_loss for Mixture of Experts + models. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + encoder_z_loss: torch.FloatTensor = None + decoder_z_loss: torch.FloatTensor = None + encoder_aux_loss: torch.FloatTensor = None + decoder_aux_loss: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class NextSentencePredictorOutput(ModelOutput): + """ + Base class for outputs of models predicting if two sentences are consecutive or not. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `next_sentence_label` is provided): + Next sequence prediction (classification) loss. + logits (`torch.FloatTensor` of shape `(batch_size, 2)`): + Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation + before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class SequenceClassifierOutput(ModelOutput): + """ + Base class for outputs of sentence classification models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). 
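+
+            A minimal sketch of how these scores are typically turned into class probabilities and a predicted
+            label (a toy output with random scores; the `transformers.modeling_outputs` import path is assumed):
+
+            ```python
+            import torch
+            from transformers.modeling_outputs import SequenceClassifierOutput
+
+            # Toy scores for batch_size=2 and config.num_labels=3
+            out = SequenceClassifierOutput(logits=torch.randn(2, 3))
+            probs = out.logits.softmax(dim=-1)  # per-class probabilities
+            pred = out.logits.argmax(dim=-1)    # predicted class id per example
+            ```
+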
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class Seq2SeqSequenceClassifierOutput(ModelOutput): + """ + Base class for outputs of sequence-to-sequence sentence classification models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `label` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. 
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class MultipleChoiceModelOutput(ModelOutput): + """ + Base class for outputs of multiple choice models. + + Args: + loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): + Classification loss. + logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): + *num_choices* is the second dimension of the input tensors. (see *input_ids* above). + + Classification scores (before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class TokenClassifierOutput(ModelOutput): + """ + Base class for outputs of token classification models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : + Classification loss. + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): + Classification scores (before SoftMax). 
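+
+            A minimal sketch of how these per-token scores are typically mapped to labels (the label map below
+            is an illustrative assumption; real models expose theirs via `config.id2label`):
+
+            ```python
+            import torch
+            from transformers.modeling_outputs import TokenClassifierOutput
+
+            # Toy scores: batch_size=1, sequence_length=4, config.num_labels=3
+            out = TokenClassifierOutput(logits=torch.randn(1, 4, 3))
+            pred_ids = out.logits.argmax(dim=-1)         # one label id per token, shape (1, 4)
+            id2label = {0: "O", 1: "B-ENT", 2: "I-ENT"}  # illustrative label map
+            pred_tags = [id2label[int(i)] for i in pred_ids[0]]
+            ```
+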
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class QuestionAnsweringModelOutput(ModelOutput): + """ + Base class for outputs of question answering models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. + start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Span-start scores (before SoftMax). + end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Span-end scores (before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + start_logits: torch.FloatTensor = None + end_logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class Seq2SeqQuestionAnsweringModelOutput(ModelOutput): + """ + Base class for outputs of sequence-to-sequence question answering models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. + start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Span-start scores (before SoftMax). + end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Span-end scores (before SoftMax). 
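+
+            A minimal sketch of how an answer span is typically extracted from these scores (toy tensors; the
+            simple independent argmax over start and end positions is shown only for illustration):
+
+            ```python
+            import torch
+            from transformers.modeling_outputs import Seq2SeqQuestionAnsweringModelOutput
+
+            # Toy scores over a sequence of length 8 (batch_size=1)
+            out = Seq2SeqQuestionAnsweringModelOutput(
+                start_logits=torch.randn(1, 8), end_logits=torch.randn(1, 8)
+            )
+            start = int(out.start_logits.argmax(dim=-1))  # most likely span start index
+            end = int(out.end_logits.argmax(dim=-1))      # most likely span end index
+            # the answer would then be the tokens input_ids[0, start : end + 1], assuming start <= end
+            ```
+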
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. 
+ """ + + loss: Optional[torch.FloatTensor] = None + start_logits: torch.FloatTensor = None + end_logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class SemanticSegmenterOutput(ModelOutput): + """ + Base class for outputs of semantic segmentation models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`): + Classification scores for each pixel. + + + + The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is + to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the + original image size as post-processing. You should always check your logits shape and resize as needed. + + + + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, patch_size, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class ImageClassifierOutput(ModelOutput): + """ + Base class for outputs of image classification models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states + (also called feature maps) of the model at the output of each stage. 
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class ImageClassifierOutputWithNoAttention(ModelOutput): + """ + Base class for outputs of image classification models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also + called feature maps) of the model at the output of each stage. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class DepthEstimatorOutput(ModelOutput): + """ + Base class for outputs of depth estimation models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + predicted_depth (`torch.FloatTensor` of shape `(batch_size, height, width)`): + Predicted depth for each pixel. + + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + predicted_depth: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class ImageSuperResolutionOutput(ModelOutput): + """ + Base class for outputs of image super resolution models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Reconstruction loss. + reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Reconstructed images, possibly upscaled. 
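+
+            A minimal sketch of turning such a reconstruction tensor into image-like pixel data (the assumption
+            that values lie roughly in [0, 1] is illustrative and may not hold for every model; the
+            `transformers.modeling_outputs` import path is assumed):
+
+            ```python
+            import torch
+            from transformers.modeling_outputs import ImageSuperResolutionOutput
+
+            # Toy reconstruction: batch_size=1, num_channels=3, height=64, width=64
+            out = ImageSuperResolutionOutput(reconstruction=torch.rand(1, 3, 64, 64))
+            img = out.reconstruction[0].clamp(0.0, 1.0)        # drop the batch dimension
+            img_uint8 = (img * 255.0).round().to(torch.uint8)  # (3, H, W), ready to convert to an image
+            ```
+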
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states + (also called feature maps) of the model at the output of each stage. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + reconstruction: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class Wav2Vec2BaseModelOutput(ModelOutput): + """ + Base class for models that have been trained with the Wav2Vec2 loss objective. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + extract_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, conv_dim[-1])`): + Sequence of extracted feature vectors of the last convolutional layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: torch.FloatTensor = None + extract_features: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class XVectorOutput(ModelOutput): + """ + Output type of [`Wav2Vec2ForXVector`]. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`): + Classification hidden states before AMSoftmax. + embeddings (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`): + Utterance embeddings used for vector similarity-based retrieval. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + embeddings: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BackboneOutput(ModelOutput): + """ + Base class for outputs of backbones. + + Args: + feature_maps (`tuple(torch.FloatTensor)` of shape `(batch_size, num_channels, height, width)`): + Feature maps of the stages. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, num_channels, height, width)`, + depending on the backbone. + + Hidden-states of the model at the output of each stage plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Only applicable if the backbone uses attention. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + feature_maps: Tuple[torch.FloatTensor] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithPoolingAndProjection(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Last layer hidden-state of the first token of the sequence (classification token) after further processing + through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns + the classification token after processing through a linear layer and a tanh activation function. The linear + layer weights are trained from the next sentence prediction (classification) objective during pretraining. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. 
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + projection_state (`tuple(torch.FloatTensor)`, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` of shape `(batch_size,config.project_dim)`. + + Text embeddings before the projection layer, used to mimic the last hidden state of the teacher encoder. + """ + + last_hidden_state: torch.FloatTensor = None + pooler_output: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + projection_state: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class Seq2SeqSpectrogramOutput(ModelOutput): + """ + Base class for sequence-to-sequence spectrogram outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Spectrogram generation loss. + spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`): + The predicted spectrogram. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. 
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + spectrogram: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class Seq2SeqTSModelOutput(ModelOutput): + """ + Base class for time series model's encoder outputs that also contains pre-computed hidden states that can speed up + sequential decoding. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. 
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): + Shift values of each time series' context window which is used to give the model inputs of the same + magnitude and then used to shift back to the original magnitude. + scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): + Scaling values of each time series' context window which is used to give the model inputs of the same + magnitude and then used to rescale back to the original magnitude. + static_features (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*): + Static features of each time series' in a batch which are copied to the covariates at inference time. + """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + loc: Optional[torch.FloatTensor] = None + scale: Optional[torch.FloatTensor] = None + static_features: Optional[torch.FloatTensor] = None + + +@dataclass +class Seq2SeqTSPredictionOutput(ModelOutput): + """ + Base class for time series model's decoder outputs that also contain the loss as well as the parameters of the + chosen distribution. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when a `future_values` is provided): + Distributional loss. + params (`torch.FloatTensor` of shape `(batch_size, num_samples, num_params)`): + Parameters of the chosen distribution. 
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): + Shift values of each time series' context window which is used to give the model inputs of the same + magnitude and then used to shift back to the original magnitude. + scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): + Scaling values of each time series' context window which is used to give the model inputs of the same + magnitude and then used to rescale back to the original magnitude. 
+ static_features (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*): + Static features of each time series' in a batch which are copied to the covariates at inference time. + """ + + loss: Optional[torch.FloatTensor] = None + params: Optional[Tuple[torch.FloatTensor]] = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + loc: Optional[torch.FloatTensor] = None + scale: Optional[torch.FloatTensor] = None + static_features: Optional[torch.FloatTensor] = None + + +@dataclass +class SampleTSPredictionOutput(ModelOutput): + """ + Base class for time series model's predictions outputs that contains the sampled values from the chosen + distribution. + + Args: + sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, prediction_length)` or `(batch_size, num_samples, prediction_length, input_size)`): + Sampled values from the chosen distribution. + """ + + sequences: torch.FloatTensor = None + + +@dataclass +class MaskedImageModelingOutput(ModelOutput): + """ + Base class for outputs of masked image completion / in-painting models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided): + Reconstruction loss. + reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Reconstructed / completed images. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or + when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states + (also called feature maps) of the model at the output of each stage. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when + `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + reconstruction: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + @property + def logits(self): + warnings.warn( + "logits attribute is deprecated and will be removed in version 5 of Transformers." 
+ " Please use the reconstruction attribute to retrieve the final output instead.", + FutureWarning, + ) + return self.reconstruction diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/modeling_tf_pytorch_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/modeling_tf_pytorch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..99fa106674862f011bc94a25d2691bcab58b1722 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/modeling_tf_pytorch_utils.py @@ -0,0 +1,609 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch - TF 2.0 general utilities.""" + + +import os +import re + +import numpy + +from .utils import ExplicitEnum, expand_dims, is_numpy_array, is_torch_tensor, logging, reshape, squeeze, tensor_size +from .utils import transpose as transpose_func + + +logger = logging.get_logger(__name__) + + +class TransposeType(ExplicitEnum): + """ + Possible ... + """ + + NO = "no" + SIMPLE = "simple" + CONV1D = "conv1d" + CONV2D = "conv2d" + + +def convert_tf_weight_name_to_pt_weight_name( + tf_name, start_prefix_to_remove="", tf_weight_shape=None, name_scope=None +): + """ + Convert a TF 2.0 model variable name in a pytorch model weight name. + + Conventions for TF2.0 scopes -> PyTorch attribute names conversions: + + - '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch) + - '_._' is replaced by a new level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList) + + return tuple with: + + - pytorch model weight name + - transpose: `TransposeType` member indicating whether and how TF2.0 and PyTorch weights matrices should be + transposed with regards to each other + """ + if name_scope is not None: + if not tf_name.startswith(name_scope) and "final_logits_bias" not in tf_name: + raise ValueError( + f"Weight name {tf_name} does not start with name_scope {name_scope}. This is an internal error " + "in Transformers, so (unless you were doing something really evil) please open an issue to report it!" + ) + tf_name = tf_name[len(name_scope) :] + tf_name = tf_name.lstrip("/") + tf_name = tf_name.replace(":0", "") # device ids + tf_name = re.sub( + r"/[^/]*___([^/]*)/", r"/\1/", tf_name + ) # '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch) + tf_name = tf_name.replace( + "_._", "/" + ) # '_._' is replaced by a level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList) + tf_name = re.sub(r"//+", "/", tf_name) # Remove empty levels at the end + tf_name = tf_name.split("/") # Convert from TF2.0 '/' separators to PyTorch '.' 
separators + # Some weights have a single name without "/" such as final_logits_bias in BART + if len(tf_name) > 1: + tf_name = tf_name[1:] # Remove level zero + + tf_weight_shape = list(tf_weight_shape) + + # When should we transpose the weights + if tf_name[-1] == "kernel" and tf_weight_shape is not None and len(tf_weight_shape) == 4: + transpose = TransposeType.CONV2D + elif tf_name[-1] == "kernel" and tf_weight_shape is not None and len(tf_weight_shape) == 3: + transpose = TransposeType.CONV1D + elif bool( + tf_name[-1] in ["kernel", "pointwise_kernel", "depthwise_kernel"] + or "emb_projs" in tf_name + or "out_projs" in tf_name + ): + transpose = TransposeType.SIMPLE + else: + transpose = TransposeType.NO + + # Convert standard TF2.0 names in PyTorch names + if tf_name[-1] == "kernel" or tf_name[-1] == "embeddings" or tf_name[-1] == "gamma": + tf_name[-1] = "weight" + if tf_name[-1] == "beta": + tf_name[-1] = "bias" + + # The SeparableConv1D TF layer contains two weights that are translated to PyTorch Conv1D here + if tf_name[-1] == "pointwise_kernel" or tf_name[-1] == "depthwise_kernel": + tf_name[-1] = tf_name[-1].replace("_kernel", ".weight") + + # Remove prefix if needed + tf_name = ".".join(tf_name) + if start_prefix_to_remove: + tf_name = tf_name.replace(start_prefix_to_remove, "", 1) + + return tf_name, transpose + + +def apply_transpose(transpose: TransposeType, weight, match_shape=None, pt_to_tf=True): + """ + Apply a transpose to some weight then tries to reshape the weight to the same shape as a given shape, all in a + framework agnostic way. + """ + if transpose is TransposeType.CONV2D: + # Conv2D weight: + # PT: (num_out_channel, num_in_channel, kernel[0], kernel[1]) + # -> TF: (kernel[0], kernel[1], num_in_channel, num_out_channel) + axes = (2, 3, 1, 0) if pt_to_tf else (3, 2, 0, 1) + weight = transpose_func(weight, axes=axes) + elif transpose is TransposeType.CONV1D: + # Conv1D weight: + # PT: (num_out_channel, num_in_channel, kernel) + # -> TF: (kernel, num_in_channel, num_out_channel) + weight = transpose_func(weight, axes=(2, 1, 0)) + elif transpose is TransposeType.SIMPLE: + weight = transpose_func(weight) + + if match_shape is None: + return weight + + if len(match_shape) < len(weight.shape): + weight = squeeze(weight) + elif len(match_shape) > len(weight.shape): + weight = expand_dims(weight, axis=0) + + if list(match_shape) != list(weight.shape): + try: + weight = reshape(weight, match_shape) + except AssertionError as e: + e.args += (match_shape, match_shape) + raise e + + return weight + + +##################### +# PyTorch => TF 2.0 # +##################### + + +def load_pytorch_checkpoint_in_tf2_model( + tf_model, + pytorch_checkpoint_path, + tf_inputs=None, + allow_missing_keys=False, + output_loading_info=False, + _prefix=None, + tf_to_pt_weight_rename=None, +): + """Load pytorch checkpoints in a TF 2.0 model""" + try: + import tensorflow as tf # noqa: F401 + import torch # noqa: F401 + from safetensors.torch import load_file as safe_load_file # noqa: F401 + + from .pytorch_utils import is_torch_greater_or_equal_than_1_13 # noqa: F401 + except ImportError: + logger.error( + "Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see " + "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." + ) + raise + + # Treats a single file as a collection of shards with 1 shard. 
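A quick, hedged illustration of `convert_tf_weight_name_to_pt_weight_name` may help here; the TF variable name, weight shape, and prefix below are invented (BERT-style) and are not taken from the library's tests.

```python
# Sketch: converting a hypothetical TF 2.0 variable name into its PyTorch
# counterpart with the helper defined above. Name, shape and prefix are made up.
tf_name = "tf_bert_model/bert/encoder/layer_._0/attention/self/query/kernel:0"

pt_name, transpose = convert_tf_weight_name_to_pt_weight_name(
    tf_name,
    start_prefix_to_remove="bert.",  # strip the base-model prefix for a base PyTorch model
    tf_weight_shape=(768, 768),      # a 2D "kernel" triggers a simple transpose
)

print(pt_name)                            # encoder.layer.0.attention.self.query.weight
print(transpose is TransposeType.SIMPLE)  # True
```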
+ if isinstance(pytorch_checkpoint_path, str): + pytorch_checkpoint_path = [pytorch_checkpoint_path] + + # Loads all shards into a single state dictionary + pt_state_dict = {} + for path in pytorch_checkpoint_path: + pt_path = os.path.abspath(path) + logger.info(f"Loading PyTorch weights from {pt_path}") + if pt_path.endswith(".safetensors"): + state_dict = safe_load_file(pt_path) + else: + weights_only_kwarg = {"weights_only": True} if is_torch_greater_or_equal_than_1_13 else {} + state_dict = torch.load(pt_path, map_location="cpu", **weights_only_kwarg) + + pt_state_dict.update(state_dict) + + logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters") + + return load_pytorch_weights_in_tf2_model( + tf_model, + pt_state_dict, + tf_inputs=tf_inputs, + allow_missing_keys=allow_missing_keys, + output_loading_info=output_loading_info, + _prefix=_prefix, + tf_to_pt_weight_rename=tf_to_pt_weight_rename, + ) + + +def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False): + """Load pytorch checkpoints in a TF 2.0 model""" + pt_state_dict = pt_model.state_dict() + + return load_pytorch_weights_in_tf2_model( + tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys + ) + + +def load_pytorch_weights_in_tf2_model( + tf_model, + pt_state_dict, + tf_inputs=None, + allow_missing_keys=False, + output_loading_info=False, + _prefix=None, + tf_to_pt_weight_rename=None, +): + """Load pytorch state_dict in a TF 2.0 model.""" + try: + import tensorflow as tf # noqa: F401 + import torch # noqa: F401 + except ImportError: + logger.error( + "Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see " + "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." + ) + raise + + pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()} + return load_pytorch_state_dict_in_tf2_model( + tf_model, + pt_state_dict, + tf_inputs=tf_inputs, + allow_missing_keys=allow_missing_keys, + output_loading_info=output_loading_info, + _prefix=_prefix, + tf_to_pt_weight_rename=tf_to_pt_weight_rename, + ) + + +def load_pytorch_state_dict_in_tf2_model( + tf_model, + pt_state_dict, + tf_inputs=None, + allow_missing_keys=False, + output_loading_info=False, + _prefix=None, + tf_to_pt_weight_rename=None, + ignore_mismatched_sizes=False, +): + """Load a pytorch state_dict in a TF 2.0 model. 
pt_state_dict can be either an actual dict or a lazy-loading + safetensors archive created with the safe_open() function.""" + import tensorflow as tf + + if tf_inputs is None: + tf_inputs = tf_model.dummy_inputs + + if _prefix is None: + _prefix = "" + if tf_inputs: + with tf.name_scope(_prefix): + tf_model(tf_inputs, training=False) # Make sure model is built + # Convert old format to new format if needed from a PyTorch state_dict + tf_keys_to_pt_keys = {} + for key in pt_state_dict.keys(): + new_key = None + if "gamma" in key: + new_key = key.replace("gamma", "weight") + if "beta" in key: + new_key = key.replace("beta", "bias") + if "running_var" in key: + new_key = key.replace("running_var", "moving_variance") + if "running_mean" in key: + new_key = key.replace("running_mean", "moving_mean") + + # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 + key_components = key.split(".") + name = None + if key_components[-3::2] == ["parametrizations", "original0"]: + name = key_components[-2] + "_g" + elif key_components[-3::2] == ["parametrizations", "original1"]: + name = key_components[-2] + "_v" + if name is not None: + key_components = key_components[:-3] + [name] + new_key = ".".join(key_components) + + if new_key is None: + new_key = key + tf_keys_to_pt_keys[new_key] = key + + # Matt: All TF models store the actual model stem in a MainLayer class, including the base model. + # In PT, the derived models (with heads) use the base model class as the stem instead, + # and there is no MainLayer class. This means that TF base classes have one + # extra layer in their weight names, corresponding to the MainLayer class. This code block compensates for that. + start_prefix_to_remove = "" + if not any(s.startswith(tf_model.base_model_prefix) for s in tf_keys_to_pt_keys.keys()): + start_prefix_to_remove = tf_model.base_model_prefix + "." 
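The key-renaming loop above is easier to follow with concrete inputs. Below is a simplified, standalone sketch of the same rules (legacy `gamma`/`beta` renames, batch-norm statistics, and the `weight_norm` parametrization remap); the example keys are invented and the original's exact control flow is collapsed into one helper.

```python
# Simplified sketch of the PyTorch -> TF key renaming performed above.
def rename_pt_key(key: str) -> str:
    key = key.replace("gamma", "weight").replace("beta", "bias")
    key = key.replace("running_var", "moving_variance").replace("running_mean", "moving_mean")
    parts = key.split(".")
    # New-style weight_norm: "<module>.parametrizations.<name>.original0/1" -> "<module>.<name>_g/_v"
    if parts[-3::2] == ["parametrizations", "original0"]:
        parts = parts[:-3] + [parts[-2] + "_g"]
    elif parts[-3::2] == ["parametrizations", "original1"]:
        parts = parts[:-3] + [parts[-2] + "_v"]
    return ".".join(parts)

print(rename_pt_key("encoder.layer_norm.gamma"))                # encoder.layer_norm.weight
print(rename_pt_key("conv.parametrizations.weight.original0"))  # conv.weight_g
```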
+ + symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights + tf_loaded_numel = 0 + all_pytorch_weights = set(tf_keys_to_pt_keys.keys()) + missing_keys = [] + mismatched_keys = [] + is_safetensor_archive = hasattr(pt_state_dict, "get_tensor") + for symbolic_weight in symbolic_weights: + sw_name = symbolic_weight.name + name, transpose = convert_tf_weight_name_to_pt_weight_name( + sw_name, + start_prefix_to_remove=start_prefix_to_remove, + tf_weight_shape=symbolic_weight.shape, + name_scope=_prefix, + ) + if tf_to_pt_weight_rename is not None: + aliases = tf_to_pt_weight_rename(name) # Is a tuple to account for possible name aliasing + for alias in aliases: # The aliases are in priority order, take the first one that matches + if alias in tf_keys_to_pt_keys: + name = alias + break + else: + # If none of the aliases match, just use the first one (it'll be reported as missing) + name = aliases[0] + + # Find associated numpy array in pytorch model state dict + if name not in tf_keys_to_pt_keys: + if allow_missing_keys: + missing_keys.append(name) + continue + elif tf_model._keys_to_ignore_on_load_missing is not None: + # authorized missing keys don't have to be loaded + if any(re.search(pat, name) is not None for pat in tf_model._keys_to_ignore_on_load_missing): + continue + raise AttributeError(f"{name} not found in PyTorch model") + state_dict_name = tf_keys_to_pt_keys[name] + if is_safetensor_archive: + array = pt_state_dict.get_tensor(state_dict_name) + else: + array = pt_state_dict[state_dict_name] + try: + array = apply_transpose(transpose, array, symbolic_weight.shape) + except tf.errors.InvalidArgumentError as e: + if not ignore_mismatched_sizes: + error_msg = str(e) + error_msg += ( + "\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method." + ) + raise tf.errors.InvalidArgumentError(error_msg) + else: + mismatched_keys.append((name, array.shape, symbolic_weight.shape)) + continue + + tf_loaded_numel += tensor_size(array) + + symbolic_weight.assign(tf.cast(array, symbolic_weight.dtype)) + del array # Immediately free memory to keep peak usage as low as possible + all_pytorch_weights.discard(name) + + logger.info(f"Loaded {tf_loaded_numel:,} parameters in the TF 2.0 model.") + + unexpected_keys = list(all_pytorch_weights) + + if tf_model._keys_to_ignore_on_load_missing is not None: + for pat in tf_model._keys_to_ignore_on_load_missing: + missing_keys = [k for k in missing_keys if re.search(pat, k) is None] + if tf_model._keys_to_ignore_on_load_unexpected is not None: + for pat in tf_model._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + + if len(unexpected_keys) > 0: + logger.warning( + "Some weights of the PyTorch model were not used when initializing the TF 2.0 model" + f" {tf_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" + f" {tf_model.__class__.__name__} from a PyTorch model trained on another task or with another architecture" + " (e.g. initializing a TFBertForSequenceClassification model from a BertForPreTraining model).\n- This IS" + f" NOT expected if you are initializing {tf_model.__class__.__name__} from a PyTorch model that you expect" + " to be exactly identical (e.g. initializing a TFBertForSequenceClassification model from a" + " BertForSequenceClassification model)." 
+ ) + else: + logger.warning(f"All PyTorch model weights were used when initializing {tf_model.__class__.__name__}.\n") + if len(missing_keys) > 0: + logger.warning( + f"Some weights or buffers of the TF 2.0 model {tf_model.__class__.__name__} were not initialized from the" + f" PyTorch model and are newly initialized: {missing_keys}\nYou should probably TRAIN this model on a" + " down-stream task to be able to use it for predictions and inference." + ) + else: + logger.warning( + f"All the weights of {tf_model.__class__.__name__} were initialized from the PyTorch model.\n" + "If your task is similar to the task the model of the checkpoint was trained on, " + f"you can already use {tf_model.__class__.__name__} for predictions without further training." + ) + + if len(mismatched_keys) > 0: + mismatched_warning = "\n".join( + [ + f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" + for key, shape1, shape2 in mismatched_keys + ] + ) + logger.warning( + f"Some weights of {tf_model.__class__.__name__} were not initialized from the model checkpoint" + f" are newly initialized because the shapes did not" + f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able" + " to use it for predictions and inference." + ) + + if output_loading_info: + loading_info = { + "missing_keys": missing_keys, + "unexpected_keys": unexpected_keys, + "mismatched_keys": mismatched_keys, + } + return tf_model, loading_info + + return tf_model + + +##################### +# TF 2.0 => PyTorch # +##################### + + +def load_tf2_checkpoint_in_pytorch_model( + pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False, output_loading_info=False +): + """ + Load TF 2.0 HDF5 checkpoint in a PyTorch model We use HDF5 to easily do transfer learning (see + https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357). + """ + try: + import tensorflow as tf # noqa: F401 + import torch # noqa: F401 + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see " + "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." 
+ ) + raise + + import transformers + + from .modeling_tf_utils import load_tf_weights + + logger.info(f"Loading TensorFlow weights from {tf_checkpoint_path}") + + # Instantiate and load the associated TF 2.0 model + tf_model_class_name = "TF" + pt_model.__class__.__name__ # Add "TF" at the beginning + tf_model_class = getattr(transformers, tf_model_class_name) + tf_model = tf_model_class(pt_model.config) + + if tf_inputs is None: + tf_inputs = tf_model.dummy_inputs + + if tf_inputs is not None: + tf_model(tf_inputs, training=False) # Make sure model is built + + load_tf_weights(tf_model, tf_checkpoint_path) + + return load_tf2_model_in_pytorch_model( + pt_model, tf_model, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info + ) + + +def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False, output_loading_info=False): + """Load TF 2.0 model in a pytorch model""" + weights = tf_model.weights + + return load_tf2_weights_in_pytorch_model( + pt_model, weights, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info + ) + + +def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False, output_loading_info=False): + """Load TF2.0 symbolic weights in a PyTorch model""" + try: + import tensorflow as tf # noqa: F401 + import torch # noqa: F401 + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see " + "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." + ) + raise + + tf_state_dict = {tf_weight.name: tf_weight.numpy() for tf_weight in tf_weights} + return load_tf2_state_dict_in_pytorch_model( + pt_model, tf_state_dict, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info + ) + + +def load_tf2_state_dict_in_pytorch_model(pt_model, tf_state_dict, allow_missing_keys=False, output_loading_info=False): + import torch + + new_pt_params_dict = {} + current_pt_params_dict = dict(pt_model.named_parameters()) + + # Make sure we are able to load PyTorch base models as well as derived models (with heads) + # TF models always have a prefix, some of PyTorch models (base ones) don't + start_prefix_to_remove = "" + if not any(s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()): + start_prefix_to_remove = pt_model.base_model_prefix + "." 
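For orientation, here is a hedged usage sketch of the TF-to-PyTorch direction implemented in this block. The checkpoint path is a placeholder, both PyTorch and TensorFlow need to be installed, and the PyTorch class must have a TF counterpart whose name is obtained by prepending `TF` (true for BERT).

```python
# Hypothetical sketch: load a TF 2.0 HDF5 checkpoint into a PyTorch model.
# "tf_model.h5" is a placeholder path, not a real artifact.
from transformers import BertConfig, BertForSequenceClassification

config = BertConfig()
pt_model = BertForSequenceClassification(config)

pt_model, loading_info = load_tf2_checkpoint_in_pytorch_model(
    pt_model, "tf_model.h5", allow_missing_keys=True, output_loading_info=True
)
print(loading_info["missing_keys"])
print(loading_info["unexpected_keys"])
```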
+ + # Build a map from potential PyTorch weight names to TF 2.0 Variables + tf_weights_map = {} + for name, tf_weight in tf_state_dict.items(): + pt_name, transpose = convert_tf_weight_name_to_pt_weight_name( + name, start_prefix_to_remove=start_prefix_to_remove, tf_weight_shape=tf_weight.shape + ) + tf_weights_map[pt_name] = (tf_weight, transpose) + + all_tf_weights = set(tf_weights_map.keys()) + loaded_pt_weights_data_ptr = {} + missing_keys_pt = [] + for pt_weight_name, pt_weight in current_pt_params_dict.items(): + # Handle PyTorch shared weight ()not duplicated in TF 2.0 + if pt_weight.data_ptr() in loaded_pt_weights_data_ptr: + new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()] + continue + + pt_weight_name_to_check = pt_weight_name + # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 + key_components = pt_weight_name.split(".") + name = None + if key_components[-3::2] == ["parametrizations", "original0"]: + name = key_components[-2] + "_g" + elif key_components[-3::2] == ["parametrizations", "original1"]: + name = key_components[-2] + "_v" + if name is not None: + key_components = key_components[:-3] + [name] + pt_weight_name_to_check = ".".join(key_components) + + # Find associated numpy array in pytorch model state dict + if pt_weight_name_to_check not in tf_weights_map: + if allow_missing_keys: + missing_keys_pt.append(pt_weight_name) + continue + + raise AttributeError(f"{pt_weight_name} not found in TF 2.0 model") + + array, transpose = tf_weights_map[pt_weight_name_to_check] + + array = apply_transpose(transpose, array, pt_weight.shape, pt_to_tf=False) + + if numpy.isscalar(array): + array = numpy.array(array) + if not is_torch_tensor(array) and not is_numpy_array(array): + array = array.numpy() + if is_numpy_array(array): + # Convert to torch tensor + array = torch.from_numpy(array) + + new_pt_params_dict[pt_weight_name] = array + loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = array + all_tf_weights.discard(pt_weight_name) + + missing_keys, unexpected_keys = pt_model.load_state_dict(new_pt_params_dict, strict=False) + missing_keys += missing_keys_pt + + # Some models may have keys that are not in the state by design, removing them before needlessly warning + # the user. + if pt_model._keys_to_ignore_on_load_missing is not None: + for pat in pt_model._keys_to_ignore_on_load_missing: + missing_keys = [k for k in missing_keys if re.search(pat, k) is None] + + if pt_model._keys_to_ignore_on_load_unexpected is not None: + for pat in pt_model._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + + if len(unexpected_keys) > 0: + logger.warning( + "Some weights of the TF 2.0 model were not used when initializing the PyTorch model" + f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" + f" {pt_model.__class__.__name__} from a TF 2.0 model trained on another task or with another architecture" + " (e.g. initializing a BertForSequenceClassification model from a TFBertForPreTraining model).\n- This IS" + f" NOT expected if you are initializing {pt_model.__class__.__name__} from a TF 2.0 model that you expect" + " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a" + " TFBertForSequenceClassification model)." 
+ ) + else: + logger.warning(f"All TF 2.0 model weights were used when initializing {pt_model.__class__.__name__}.\n") + if len(missing_keys) > 0: + logger.warning( + f"Some weights of {pt_model.__class__.__name__} were not initialized from the TF 2.0 model and are newly" + f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to" + " use it for predictions and inference." + ) + else: + logger.warning( + f"All the weights of {pt_model.__class__.__name__} were initialized from the TF 2.0 model.\n" + "If your task is similar to the task the model of the checkpoint was trained on, " + f"you can already use {pt_model.__class__.__name__} for predictions without further training." + ) + + logger.info(f"Weights or buffers not loaded from TF 2.0 model: {all_tf_weights}") + + if output_loading_info: + loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys} + return pt_model, loading_info + + return pt_model diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/modeling_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/modeling_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..263ae5d2f988cff5ddcce7037e5338858851340f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/modeling_utils.py @@ -0,0 +1,4703 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
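As a counterpart to the sketch above, the PyTorch-to-TF direction from `modeling_tf_pytorch_utils.py` can be exercised as follows; the checkpoint path is a placeholder and the example assumes `torch`, `tensorflow`, and `safetensors` are available.

```python
# Hypothetical sketch: load a PyTorch checkpoint into a TF 2.0 model using the
# helper defined earlier. "pytorch_model.bin" is a placeholder path.
from transformers import BertConfig, TFBertForSequenceClassification

config = BertConfig()
tf_model = TFBertForSequenceClassification(config)

tf_model = load_pytorch_checkpoint_in_tf2_model(
    tf_model, "pytorch_model.bin", allow_missing_keys=True
)
```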
+import collections +import copy +import functools +import gc +import importlib.metadata +import inspect +import itertools +import json +import os +import re +import shutil +import tempfile +import warnings +from contextlib import contextmanager +from dataclasses import dataclass +from functools import partial, wraps +from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from zipfile import is_zipfile + +import torch +from packaging import version +from torch import Tensor, nn +from torch.nn import CrossEntropyLoss, Identity +from torch.utils.checkpoint import checkpoint + +from .activations import get_activation +from .configuration_utils import PretrainedConfig +from .dynamic_module_utils import custom_object_save +from .generation import GenerationConfig, GenerationMixin +from .integrations import PeftAdapterMixin, deepspeed_config, is_deepspeed_zero3_enabled +from .pytorch_utils import ( # noqa: F401 + Conv1D, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + id_tensor_storage, + is_torch_greater_or_equal_than_1_13, + prune_conv1d_layer, + prune_layer, + prune_linear_layer, +) +from .quantizers import AutoHfQuantizer, HfQuantizer +from .quantizers.quantizers_utils import get_module_from_name +from .safetensors_conversion import auto_conversion +from .utils import ( + ADAPTER_SAFE_WEIGHTS_NAME, + ADAPTER_WEIGHTS_NAME, + CONFIG_NAME, + DUMMY_INPUTS, + FLAX_WEIGHTS_NAME, + SAFE_WEIGHTS_INDEX_NAME, + SAFE_WEIGHTS_NAME, + TF2_WEIGHTS_NAME, + TF_WEIGHTS_NAME, + WEIGHTS_INDEX_NAME, + WEIGHTS_NAME, + ContextManagers, + ModelOutput, + PushToHubMixin, + cached_file, + copy_func, + download_url, + extract_commit_hash, + has_file, + is_accelerate_available, + is_bitsandbytes_available, + is_flash_attn_2_available, + is_offline_mode, + is_optimum_available, + is_peft_available, + is_remote_url, + is_safetensors_available, + is_torch_sdpa_available, + is_torch_xla_available, + logging, + replace_return_docstrings, + strtobool, +) +from .utils.hub import convert_file_size_to_int, create_and_tag_model_card, get_checkpoint_shard_files +from .utils.import_utils import ( + ENV_VARS_TRUE_VALUES, + is_sagemaker_mp_enabled, + is_torch_fx_proxy, + is_torchdynamo_compiling, +) +from .utils.quantization_config import BitsAndBytesConfig, QuantizationMethod + + +XLA_USE_BF16 = os.environ.get("XLA_USE_BF16", "0").upper() +XLA_DOWNCAST_BF16 = os.environ.get("XLA_DOWNCAST_BF16", "0").upper() + +if is_accelerate_available(): + from accelerate import dispatch_model, infer_auto_device_map, init_empty_weights + from accelerate.hooks import add_hook_to_module + from accelerate.utils import ( + check_tied_parameters_on_same_device, + find_tied_parameters, + get_balanced_memory, + get_max_memory, + load_offloaded_weights, + offload_weight, + save_offload_index, + set_module_tensor_to_device, + ) + +if is_safetensors_available(): + from safetensors import safe_open + from safetensors.torch import load_file as safe_load_file + from safetensors.torch import save_file as safe_save_file + +logger = logging.get_logger(__name__) + + +_init_weights = True + + +def is_fsdp_enabled(): + return ( + torch.distributed.is_available() + and torch.distributed.is_initialized() + and strtobool(os.environ.get("ACCELERATE_USE_FSDP", "False")) == 1 + and strtobool(os.environ.get("FSDP_CPU_RAM_EFFICIENT_LOADING", "False")) == 1 + ) + + +def is_local_dist_rank_0(): + return ( + torch.distributed.is_available() + and torch.distributed.is_initialized() + and int(os.environ.get("LOCAL_RANK", -1)) == 0 + ) + + +if 
is_sagemaker_mp_enabled(): + import smdistributed.modelparallel.torch as smp + from smdistributed.modelparallel import __version__ as SMP_VERSION + + IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10") +else: + IS_SAGEMAKER_MP_POST_1_10 = False + +if is_peft_available(): + from .utils import find_adapter_config_file + +TORCH_INIT_FUNCTIONS = { + "uniform_": nn.init.uniform_, + "normal_": nn.init.normal_, + "trunc_normal_": nn.init.trunc_normal_, + "constant_": nn.init.constant_, + "xavier_uniform_": nn.init.xavier_uniform_, + "xavier_normal_": nn.init.xavier_normal_, + "kaiming_uniform_": nn.init.kaiming_uniform_, + "kaiming_normal_": nn.init.kaiming_normal_, + "uniform": nn.init.uniform, + "normal": nn.init.normal, + "xavier_uniform": nn.init.xavier_uniform, + "xavier_normal": nn.init.xavier_normal, + "kaiming_uniform": nn.init.kaiming_uniform, + "kaiming_normal": nn.init.kaiming_normal, +} + + +@contextmanager +def no_init_weights(_enable=True): + """ + Context manager to globally disable weight initialization to speed up loading large models. + + TODO(Patrick): Delete safety argument `_enable=True` at next major version. . + """ + global _init_weights + old_init_weights = _init_weights + + if _enable: + _init_weights = False + + def _skip_init(*args, **kwargs): + pass + + # # Save the original initialization functions + for name, init_func in TORCH_INIT_FUNCTIONS.items(): + setattr(torch.nn.init, name, _skip_init) + try: + yield + finally: + _init_weights = old_init_weights + if _enable: + # # Restore the original initialization functions + for name, init_func in TORCH_INIT_FUNCTIONS.items(): + setattr(torch.nn.init, name, init_func) + + +def get_parameter_device(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): + try: + return next(parameter.parameters()).device + except StopIteration: + # For nn.DataParallel compatibility in PyTorch 1.5 + + def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: + tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] + return tuples + + gen = parameter._named_members(get_members_fn=find_tensor_attributes) + first_tuple = next(gen) + return first_tuple[1].device + + +def get_first_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): + """ + Returns the first parameter dtype (can be non-floating) or asserts if none were found. + """ + try: + return next(parameter.parameters()).dtype + except StopIteration: + # For nn.DataParallel compatibility in PyTorch > 1.5 + + def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: + tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] + return tuples + + gen = parameter._named_members(get_members_fn=find_tensor_attributes) + first_tuple = next(gen) + return first_tuple[1].dtype + + +def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): + """ + Returns the first found floating dtype in parameters if there is one, otherwise returns the last dtype it found. 
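The `no_init_weights` context manager defined above works by temporarily replacing the `torch.nn.init` functions listed in `TORCH_INIT_FUNCTIONS` with no-ops; a minimal sketch of the effect follows (the layer is arbitrary and only for illustration).

```python
# Minimal sketch: inside no_init_weights(), torch.nn.init.* become no-ops, so a
# freshly constructed module skips its (potentially expensive) weight initialization.
import torch.nn as nn

with no_init_weights():
    layer = nn.Linear(4, 4)  # reset_parameters()'s kaiming_uniform_/uniform_ calls are skipped

layer2 = nn.Linear(4, 4)     # outside the context manager, initialization runs normally
```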
+ """ + last_dtype = None + for t in parameter.parameters(): + last_dtype = t.dtype + if t.is_floating_point(): + # Adding fix for https://github.com/pytorch/xla/issues/4152 + # Fixes issue where the model code passes a value that is out of range for XLA_USE_BF16=1 + # and XLA_DOWNCAST_BF16=1 so the conversion would cast it to -inf + # NOTE: `is_torch_xla_available()` is checked last as it induces a graph break in torch dynamo + if XLA_USE_BF16 in ENV_VARS_TRUE_VALUES and is_torch_xla_available(): + return torch.bfloat16 + if XLA_DOWNCAST_BF16 in ENV_VARS_TRUE_VALUES and is_torch_xla_available(): + if t.dtype == torch.float: + return torch.bfloat16 + if t.dtype == torch.double: + return torch.float32 + return t.dtype + + if last_dtype is not None: + # if no floating dtype was found return whatever the first dtype is + return last_dtype + + # For nn.DataParallel compatibility in PyTorch > 1.5 + def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: + tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] + return tuples + + gen = parameter._named_members(get_members_fn=find_tensor_attributes) + last_tuple = None + for tuple in gen: + last_tuple = tuple + if tuple[1].is_floating_point(): + return tuple[1].dtype + + if last_tuple is not None: + # fallback to the last dtype + return last_tuple[1].dtype + + # fallback to buffer dtype + for t in parameter.buffers(): + last_dtype = t.dtype + if t.is_floating_point(): + return t.dtype + return last_dtype + + +def get_state_dict_float_dtype(state_dict): + """ + Returns the first found floating dtype in `state_dict` or asserts if none were found. + """ + for t in state_dict.values(): + if t.is_floating_point(): + return t.dtype + + raise ValueError("couldn't find any floating point dtypes in state_dict") + + +def get_state_dict_dtype(state_dict): + """ + Returns the first found floating dtype in `state_dict` if there is one, otherwise returns the first dtype. + """ + for t in state_dict.values(): + if t.is_floating_point(): + return t.dtype + + # if no floating dtype was found return whatever the first dtype is + else: + return next(state_dict.values()).dtype + + +def dtype_byte_size(dtype): + """ + Returns the size (in bytes) occupied by one parameter of type `dtype`. + + Example: + + ```py + >>> dtype_byte_size(torch.float32) + 4 + ``` + """ + if dtype == torch.bool: + return 1 / 8 + bit_search = re.search(r"[^\d](\d+)$", str(dtype)) + if bit_search is None: + raise ValueError(f"`dtype` is not a valid dtype: {dtype}.") + bit_size = int(bit_search.groups()[0]) + return bit_size // 8 + + +def shard_checkpoint( + state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB", weights_name: str = WEIGHTS_NAME +): + """ + Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a + given size. + + The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no + optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the + limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], + [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. + + + + If one of the model's weight is bigger than `max_shard_size`, it will end up in its own sub-checkpoint which will + have a size greater than `max_shard_size`. 
+ + + + Args: + state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save. + max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): + The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit + (like `"5MB"`). + weights_name (`str`, *optional*, defaults to `"pytorch_model.bin"`): + The name of the model save file. + """ + max_shard_size = convert_file_size_to_int(max_shard_size) + + sharded_state_dicts = [{}] + last_block_size = 0 + total_size = 0 + storage_id_to_block = {} + + for key, weight in state_dict.items(): + # when bnb serialization is used the weights in the state dict can be strings + # check: https://github.com/huggingface/transformers/pull/24416 for more details + if isinstance(weight, str): + continue + else: + storage_id = id_tensor_storage(weight) + + # If a `weight` shares the same underlying storage as another tensor, we put `weight` in the same `block` + if storage_id in storage_id_to_block: + block_id = storage_id_to_block[storage_id] + sharded_state_dicts[block_id][key] = weight + continue + + weight_size = weight.numel() * dtype_byte_size(weight.dtype) + + # If this weight is going to tip up over the maximal size, we split, but only if we have put at least one + # weight in the current shard. + if last_block_size + weight_size > max_shard_size and len(sharded_state_dicts[-1]) > 0: + sharded_state_dicts.append({}) + last_block_size = 0 + + sharded_state_dicts[-1][key] = weight + last_block_size += weight_size + total_size += weight_size + storage_id_to_block[storage_id] = len(sharded_state_dicts) - 1 + + # If we only have one shard, we return it + if len(sharded_state_dicts) == 1: + return {weights_name: sharded_state_dicts[0]}, None + + # Otherwise, let's build the index + weight_map = {} + shards = {} + for idx, shard in enumerate(sharded_state_dicts): + shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin") + shard_file = shard_file.replace( + ".safetensors", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors" + ) + shards[shard_file] = shard + for key in shard.keys(): + weight_map[key] = shard_file + + # Add the metadata + metadata = {"total_size": total_size} + index = {"metadata": metadata, "weight_map": weight_map} + return shards, index + + +def load_sharded_checkpoint(model, folder, strict=True, prefer_safe=True): + """ + This is the same as + [`torch.nn.Module.load_state_dict`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict) + but for a sharded checkpoint. + + This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being + loaded in the model. + + Args: + model (`torch.nn.Module`): The model in which to load the checkpoint. + folder (`str` or `os.PathLike`): A path to a folder containing the sharded checkpoint. + strict (`bool`, *optional`, defaults to `True`): + Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint. + prefer_safe (`bool`, *optional*, defaults to `False`) + If both safetensors and PyTorch save files are present in checkpoint and `prefer_safe` is True, the + safetensors files will be loaded. Otherwise, PyTorch files are always loaded when possible. 
+ + Returns: + `NamedTuple`: A named tuple with `missing_keys` and `unexpected_keys` fields + - `missing_keys` is a list of str containing the missing keys + - `unexpected_keys` is a list of str containing the unexpected keys + """ + # Load the index + index_file = os.path.join(folder, WEIGHTS_INDEX_NAME) + safe_index_file = os.path.join(folder, SAFE_WEIGHTS_INDEX_NAME) + + index_present = os.path.isfile(index_file) + safe_index_present = os.path.isfile(safe_index_file) + + if not index_present and not (safe_index_present and is_safetensors_available()): + filenames = ( + (WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_INDEX_NAME) if is_safetensors_available() else (WEIGHTS_INDEX_NAME,) + ) + raise ValueError(f"Can't find a checkpoint index ({' or '.join(filenames)}) in {folder}.") + + load_safe = False + if safe_index_present: + if prefer_safe: + if is_safetensors_available(): + load_safe = True # load safe due to preference + else: + logger.warning( + f"Cannot load sharded checkpoint at {folder} safely since safetensors is not installed!" + ) + elif not index_present: + load_safe = True # load safe since we have no other choice + + load_index = safe_index_file if load_safe else index_file + + with open(load_index, "r", encoding="utf-8") as f: + index = json.load(f) + + shard_files = list(set(index["weight_map"].values())) + + # If strict=True, error before loading any of the state dicts. + loaded_keys = index["weight_map"].keys() + model_keys = model.state_dict().keys() + missing_keys = [key for key in model_keys if key not in loaded_keys] + unexpected_keys = [key for key in loaded_keys if key not in model_keys] + if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0): + error_message = f"Error(s) in loading state_dict for {model.__class__.__name__}" + if len(missing_keys) > 0: + str_missing_keys = ",".join([f'"{k}"' for k in missing_keys]) + error_message += f"\nMissing key(s): {str_missing_keys}." + if len(unexpected_keys) > 0: + str_unexpected_keys = ",".join([f'"{k}"' for k in unexpected_keys]) + error_message += f"\nMissing key(s): {str_unexpected_keys}." + raise RuntimeError(error_message) + + weights_only_kwarg = {"weights_only": True} if is_torch_greater_or_equal_than_1_13 else {} + loader = safe_load_file if load_safe else partial(torch.load, map_location="cpu", **weights_only_kwarg) + + for shard_file in shard_files: + state_dict = loader(os.path.join(folder, shard_file)) + model.load_state_dict(state_dict, strict=False) + + # Make sure memory is freed before we load the next state dict. + del state_dict + gc.collect() + + # Return the same thing as PyTorch load_state_dict function. + return torch.nn.modules.module._IncompatibleKeys(missing_keys, unexpected_keys) + + +def load_state_dict(checkpoint_file: Union[str, os.PathLike], is_quantized: bool = False): + """ + Reads a PyTorch checkpoint file, returning properly formatted errors if they arise. + """ + if checkpoint_file.endswith(".safetensors") and is_safetensors_available(): + # Check format of the archive + with safe_open(checkpoint_file, framework="pt") as f: + metadata = f.metadata() + if metadata.get("format") not in ["pt", "tf", "flax", "mlx"]: + raise OSError( + f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure " + "you save your model with the `save_pretrained` method." 
+ ) + return safe_load_file(checkpoint_file) + try: + if ( + (is_deepspeed_zero3_enabled() and torch.distributed.is_initialized() and torch.distributed.get_rank() > 0) + or (is_fsdp_enabled() and not is_local_dist_rank_0()) + ) and not is_quantized: + map_location = "meta" + else: + map_location = "cpu" + extra_args = {} + # mmap can only be used with files serialized with zipfile-based format. + if ( + isinstance(checkpoint_file, str) + and map_location != "meta" + and version.parse(torch.__version__) >= version.parse("2.1.0") + and is_zipfile(checkpoint_file) + ): + extra_args = {"mmap": True} + weights_only_kwarg = {"weights_only": True} if is_torch_greater_or_equal_than_1_13 else {} + return torch.load( + checkpoint_file, + map_location=map_location, + **weights_only_kwarg, + **extra_args, + ) + except Exception as e: + try: + with open(checkpoint_file) as f: + if f.read(7) == "version": + raise OSError( + "You seem to have cloned a repository without having git-lfs installed. Please install " + "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder " + "you cloned." + ) + else: + raise ValueError( + f"Unable to locate the file {checkpoint_file} which is necessary to load this pretrained " + "model. Make sure you have saved the model properly." + ) from e + except (UnicodeDecodeError, ValueError): + raise OSError( + f"Unable to load weights from pytorch checkpoint file for '{checkpoint_file}' " + f"at '{checkpoint_file}'. " + "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True." + ) + + +def set_initialized_submodules(model, state_dict_keys): + """ + Sets the `_is_hf_initialized` flag in all submodules of a given model when all its weights are in the loaded state + dict. + """ + not_initialized_submodules = {} + for module_name, module in model.named_modules(): + loaded_keys = {k.replace(f"{module_name}.", "") for k in state_dict_keys if k.startswith(f"{module_name}.")} + if loaded_keys.issuperset(module.state_dict()): + module._is_hf_initialized = True + else: + not_initialized_submodules[module_name] = module + return not_initialized_submodules + + +def _load_state_dict_into_model(model_to_load, state_dict, start_prefix): + # Convert old format to new format if needed from a PyTorch state_dict + old_keys = [] + new_keys = [] + for key in state_dict.keys(): + new_key = None + if "gamma" in key: + new_key = key.replace("gamma", "weight") + if "beta" in key: + new_key = key.replace("beta", "bias") + if new_key: + old_keys.append(key) + new_keys.append(new_key) + for old_key, new_key in zip(old_keys, new_keys): + state_dict[new_key] = state_dict.pop(old_key) + + # copy state_dict so _load_from_state_dict can modify it + metadata = getattr(state_dict, "_metadata", None) + state_dict = state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + error_msgs = [] + + # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants + # so we need to apply the function recursively. + def load(module: nn.Module, state_dict, prefix=""): + local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) + args = (state_dict, prefix, local_metadata, True, [], [], error_msgs) + # Parameters of module and children will start with prefix. 
We can exit early if there are none in this + # state_dict + if len([key for key in state_dict if key.startswith(prefix)]) > 0: + if is_deepspeed_zero3_enabled(): + import deepspeed + + # In sharded models, each shard has only part of the full state_dict, so only gather + # parameters that are in the current state_dict. + named_parameters = dict(module.named_parameters(prefix=prefix[:-1], recurse=False)) + params_to_gather = [named_parameters[k] for k in state_dict.keys() if k in named_parameters] + if len(params_to_gather) > 0: + # because zero3 puts placeholders in model params, this context + # manager gathers (unpartitions) the params of the current layer, then loads from + # the state dict and then re-partitions them again + with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=0): + if torch.distributed.get_rank() == 0: + module._load_from_state_dict(*args) + else: + module._load_from_state_dict(*args) + + for name, child in module._modules.items(): + if child is not None: + load(child, state_dict, prefix + name + ".") + + load(model_to_load, state_dict, prefix=start_prefix) + # Delete `state_dict` so it could be collected by GC earlier. Note that `state_dict` is a copy of the argument, so + # it's safe to delete it. + del state_dict + + return error_msgs + + +def find_submodule_and_param_name(model, long_key, start_prefix): + """ + A helper util to find the last sub-module and the param/buffer name. If `start_prefix` is supplied it'll be removed + from the start of the key + """ + + if len(start_prefix) > 0 and long_key.startswith(start_prefix): + long_key = ".".join(long_key.split(".")[1:]) + + split_key = long_key.split(".") + submodule = model + while len(split_key) > 1: + if hasattr(submodule, split_key[0]): + submodule = getattr(submodule, split_key[0]) + del split_key[0] + else: + submodule = None + break + if submodule == model: + submodule = None + return submodule, split_key[0] + + +def _move_model_to_meta(model, loaded_state_dict_keys, start_prefix): + """ + Moves `loaded_state_dict_keys` in model to meta device which frees up the memory taken by those params. + + `start_prefix` is used for models which insert their name into model keys, e.g. `bert` in + `bert.pooler.dense.weight` + + """ + + # dematerialize param storage for keys that are going to be replaced by state_dict, by + # putting those on the meta device + for k in loaded_state_dict_keys: + submodule, param_name = find_submodule_and_param_name(model, k, start_prefix) + if submodule is not None: + # selectively switch to the meta device only those params/buffers that will + # be next replaced from state_dict. This a complex way to do p.to_("meta") + # since we have no in-place to_ for tensors. 
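The traversal performed by `find_submodule_and_param_name` above is easiest to see on a toy module; the class below is invented purely for illustration.

```python
import torch.nn as nn

# Toy model, made up only to exercise the helper defined above.
class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Sequential(nn.Linear(4, 4))

model = Toy()
submodule, param_name = find_submodule_and_param_name(model, "encoder.0.weight", start_prefix="")
print(type(submodule).__name__, param_name)  # Linear weight
```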
+ new_val = getattr(submodule, param_name) + if isinstance(new_val, torch.nn.Parameter): + # isinstance returns False for Params on meta device, so switch after the check + new_val = torch.nn.Parameter(new_val.to("meta")) + else: + new_val = new_val.to("meta") + setattr(submodule, param_name, new_val) + + +def _load_state_dict_into_meta_model( + model, + state_dict, + loaded_state_dict_keys, # left for now but could be removed, see below + start_prefix, + expected_keys, + device_map=None, + offload_folder=None, + offload_index=None, + state_dict_folder=None, + state_dict_index=None, + dtype=None, + hf_quantizer=None, + is_safetensors=False, + keep_in_fp32_modules=None, + unexpected_keys=None, # passing `unexpected` for cleanup from quantization items +): + """ + This is somewhat similar to `_load_state_dict_into_model`, but deals with a model that has some or all of its + params on a `meta` device. It replaces the model params with the data from the `state_dict`, while moving the + params back to the normal device, but only for `loaded_state_dict_keys`. + + `start_prefix` is used for models which insert their name into model keys, e.g. `bert` in + `bert.pooler.dense.weight` + + """ + + # XXX: remaining features to implement to be fully compatible with _load_state_dict_into_model + # - deepspeed zero 3 support + # - need to copy metadata if any - see _load_state_dict_into_model + # - handling error_msgs - mimicking the error handling in module._load_from_state_dict() + # - Is there a situation where some keys aren't in `loaded_state_dict_keys` and in which case + # they won't get loaded. + + error_msgs = [] + + old_keys = [] + new_keys = [] + is_quantized = hf_quantizer is not None + for key in state_dict.keys(): + new_key = None + if "gamma" in key: + new_key = key.replace("gamma", "weight") + if "beta" in key: + new_key = key.replace("beta", "bias") + if new_key: + old_keys.append(key) + new_keys.append(new_key) + for old_key, new_key in zip(old_keys, new_keys): + state_dict[new_key] = state_dict.pop(old_key) + + for param_name, param in state_dict.items(): + # First part of the test is always true as load_state_dict_keys always contains state_dict keys. + if param_name not in loaded_state_dict_keys or param_name not in expected_keys: + continue + + if param_name.startswith(start_prefix): + param_name = param_name[len(start_prefix) :] + + module_name = param_name + set_module_kwargs = {} + + # We convert floating dtypes to the `dtype` passed. We want to keep the buffers/params + # in int/uint/bool and not cast them. + if dtype is not None and torch.is_floating_point(param): + if ( + keep_in_fp32_modules is not None + and any( + module_to_keep_in_fp32 in param_name.split(".") for module_to_keep_in_fp32 in keep_in_fp32_modules + ) + and dtype == torch.float16 + ): + param = param.to(torch.float32) + + # For backward compatibility with older versions of `accelerate` + # TODO: @sgugger replace this check with version check at the next `accelerate` release + if "dtype" in list(inspect.signature(set_module_tensor_to_device).parameters): + set_module_kwargs["dtype"] = torch.float32 + else: + param = param.to(dtype) + + # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model, and which + # uses `param.copy_(input_param)` that preserves the contiguity of the parameter in the model. 
+ # Reference: https://github.com/pytorch/pytorch/blob/db79ceb110f6646523019a59bbd7b838f43d4a86/torch/nn/modules/module.py#L2040C29-L2040C29 + old_param = model + splits = param_name.split(".") + for split in splits: + old_param = getattr(old_param, split) + if old_param is None: + break + + if old_param is not None: + if dtype is None: + param = param.to(old_param.dtype) + + if old_param.is_contiguous(): + param = param.contiguous() + + set_module_kwargs["value"] = param + + if device_map is None: + param_device = "cpu" + else: + # find next higher level module that is defined in device_map: + # bert.lm_head.weight -> bert.lm_head -> bert -> '' + while len(module_name) > 0 and module_name not in device_map: + module_name = ".".join(module_name.split(".")[:-1]) + if module_name == "" and "" not in device_map: + # TODO: group all errors and raise at the end. + raise ValueError(f"{param_name} doesn't have any device set.") + param_device = device_map[module_name] + + if param_device == "disk": + if not is_safetensors: + offload_index = offload_weight(param, param_name, offload_folder, offload_index) + elif param_device == "cpu" and state_dict_index is not None: + state_dict_index = offload_weight(param, param_name, state_dict_folder, state_dict_index) + elif ( + not is_quantized + or (not hf_quantizer.requires_parameters_quantization) + or ( + not hf_quantizer.check_quantized_param( + model, param, param_name, state_dict, param_device=param_device, device_map=device_map + ) + ) + ): + # For backward compatibility with older versions of `accelerate` and for non-quantized params + set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs) + else: + hf_quantizer.create_quantized_param(model, param, param_name, param_device, state_dict, unexpected_keys) + # For quantized modules with FSDP/DeepSpeed Stage 3, we need to quantize the parameter on the GPU + # and then cast it to CPU to avoid excessive memory usage on each GPU + # in comparison to the sharded model across GPUs. + if is_fsdp_enabled() or is_deepspeed_zero3_enabled(): + module, tensor_name = get_module_from_name(model, param_name) + value = getattr(module, tensor_name) + value = type(value)(value.data.to("cpu"), **value.__dict__) + setattr(module, tensor_name, value) + # TODO: consider removing used param_parts from state_dict before return + + return error_msgs, offload_index, state_dict_index + + +def _add_variant(weights_name: str, variant: Optional[str] = None) -> str: + if variant is not None: + splits = weights_name.split(".") + splits = splits[:-1] + [variant] + splits[-1:] + weights_name = ".".join(splits) + + return weights_name + + +class ModuleUtilsMixin: + """ + A few utilities for `torch.nn.Modules`, to be used as a mixin. 
+ """ + + @staticmethod + def _hook_rss_memory_pre_forward(module, *args, **kwargs): + try: + import psutil + except ImportError: + raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") + + process = psutil.Process(os.getpid()) + mem = process.memory_info() + module.mem_rss_pre_forward = mem.rss + return None + + @staticmethod + def _hook_rss_memory_post_forward(module, *args, **kwargs): + try: + import psutil + except ImportError: + raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") + + process = psutil.Process(os.getpid()) + mem = process.memory_info() + module.mem_rss_post_forward = mem.rss + mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward + module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0) + return None + + def add_memory_hooks(self): + """ + Add a memory hook before and after each sub-module forward pass to record increase in memory consumption. + + Increase in memory consumption is stored in a `mem_rss_diff` attribute for each module and can be reset to zero + with `model.reset_memory_hooks_state()`. + """ + for module in self.modules(): + module.register_forward_pre_hook(self._hook_rss_memory_pre_forward) + module.register_forward_hook(self._hook_rss_memory_post_forward) + self.reset_memory_hooks_state() + + def reset_memory_hooks_state(self): + """ + Reset the `mem_rss_diff` attribute of each module (see [`~modeling_utils.ModuleUtilsMixin.add_memory_hooks`]). + """ + for module in self.modules(): + module.mem_rss_diff = 0 + module.mem_rss_post_forward = 0 + module.mem_rss_pre_forward = 0 + + @property + def device(self) -> torch.device: + """ + `torch.device`: The device on which the module is (assuming that all the module parameters are on the same + device). + """ + return get_parameter_device(self) + + @property + def dtype(self) -> torch.dtype: + """ + `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). + """ + return get_parameter_dtype(self) + + def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor: + """ + Invert an attention mask (e.g., switches 0. and 1.). + + Args: + encoder_attention_mask (`torch.Tensor`): An attention mask. + + Returns: + `torch.Tensor`: The inverted attention mask. + """ + if encoder_attention_mask.dim() == 3: + encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] + if encoder_attention_mask.dim() == 2: + encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] + # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition + # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow + # /transformer/transformer_layers.py#L270 + # encoder_extended_attention_mask = (encoder_extended_attention_mask == + # encoder_extended_attention_mask.transpose(-1, -2)) + encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility + encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * torch.finfo(self.dtype).min + + return encoder_extended_attention_mask + + @staticmethod + def create_extended_attention_mask_for_decoder(input_shape, attention_mask, device=None): + if device is not None: + warnings.warn( + "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning + ) + else: + device = attention_mask.device + batch_size, seq_length = input_shape + seq_ids = torch.arange(seq_length, device=device) + causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] + # in case past_key_values are used we need to add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] + causal_mask = torch.cat( + [ + torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), + causal_mask, + ], + axis=-1, + ) + + extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] + return extended_attention_mask + + def get_extended_attention_mask( + self, attention_mask: Tensor, input_shape: Tuple[int], device: torch.device = None, dtype: torch.float = None + ) -> Tensor: + """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + + Arguments: + attention_mask (`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (`Tuple[int]`): + The shape of the input to the model. + + Returns: + `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. + """ + if dtype is None: + dtype = self.dtype + + if not (attention_mask.dim() == 2 and self.config.is_decoder): + # show warning only if it won't be shown in `create_extended_attention_mask_for_decoder` + if device is not None: + warnings.warn( + "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning + ) + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
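+        # Illustrative shape note (added for clarity, not part of the original comment): a 2D padding
+        # mask of shape [batch_size, seq_length] becomes [batch_size, 1, 1, seq_length] below, while a
+        # 3D mask of shape [batch_size, from_seq_length, to_seq_length] becomes
+        # [batch_size, 1, from_seq_length, to_seq_length], so it broadcasts across all attention heads.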
+ if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder: + extended_attention_mask = ModuleUtilsMixin.create_extended_attention_mask_for_decoder( + input_shape, attention_mask, device + ) + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})" + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and the dtype's smallest value for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to(dtype=dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(dtype).min + return extended_attention_mask + + def get_head_mask( + self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False + ) -> Tensor: + """ + Prepare the head mask if needed. + + Args: + head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*): + The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard). + num_hidden_layers (`int`): + The number of hidden layers in the model. + is_attention_chunked (`bool`, *optional*, defaults to `False`): + Whether or not the attentions scores are computed by chunks or not. + + Returns: + `torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with + `[None]` for each layer. + """ + if head_mask is not None: + head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) + if is_attention_chunked is True: + head_mask = head_mask.unsqueeze(-1) + else: + head_mask = [None] * num_hidden_layers + + return head_mask + + def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): + """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]""" + if head_mask.dim() == 1: + head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) + head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1) + elif head_mask.dim() == 2: + head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer + assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}" + head_mask = head_mask.to(dtype=self.dtype) # switch to float if need + fp16 compatibility + return head_mask + + def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int: + """ + Get number of (optionally, trainable or non-embeddings) parameters in the module. + + Args: + only_trainable (`bool`, *optional*, defaults to `False`): + Whether or not to return only the number of trainable parameters + + exclude_embeddings (`bool`, *optional*, defaults to `False`): + Whether or not to return only the number of non-embeddings parameters + + Returns: + `int`: The number of parameters. 
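+
+        Example (an illustrative sketch; the checkpoint name is only a stand-in):
+
+        ```python
+        from transformers import AutoModel
+
+        model = AutoModel.from_pretrained("google-bert/bert-base-cased")
+        model.num_parameters()  # all parameters
+        model.num_parameters(only_trainable=True)  # trainable parameters only
+        model.num_parameters(exclude_embeddings=True)  # skip nn.Embedding weight matrices
+        ```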
+ """ + + if exclude_embeddings: + embedding_param_names = [ + f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding) + ] + total_parameters = [ + parameter for name, parameter in self.named_parameters() if name not in embedding_param_names + ] + else: + total_parameters = list(self.parameters()) + + total_numel = [] + is_loaded_in_4bit = getattr(self, "is_loaded_in_4bit", False) + + if is_loaded_in_4bit: + if is_bitsandbytes_available(): + import bitsandbytes as bnb + else: + raise ValueError( + "bitsandbytes is not installed but it seems that the model has been loaded in 4bit precision, something went wrong" + " make sure to install bitsandbytes with `pip install bitsandbytes`. You also need a GPU. " + ) + + for param in total_parameters: + if param.requires_grad or not only_trainable: + # For 4bit models, we need to multiply the number of parameters by 2 as half of the parameters are + # used for the 4bit quantization (uint8 tensors are stored) + if is_loaded_in_4bit and isinstance(param, bnb.nn.Params4bit): + total_numel.append( + param.numel() * 2 * self.hf_quantizer.quantization_config.bnb_4bit_quant_storage.itemsize + ) + else: + total_numel.append(param.numel()) + + return sum(total_numel) + + def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int: + """ + Helper function to estimate the total number of tokens from the model inputs. + + Args: + inputs (`dict`): The model inputs. + + Returns: + `int`: The total number of tokens. + """ + if not hasattr(self, "warnings_issued"): + self.warnings_issued = {} + if self.main_input_name in input_dict: + return input_dict[self.main_input_name].numel() + elif "estimate_tokens" not in self.warnings_issued: + logger.warning( + "Could not estimate the number of tokens of the input, floating-point operations will not be computed" + ) + self.warnings_issued["estimate_tokens"] = True + return 0 + + def floating_point_ops( + self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True + ) -> int: + """ + Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a + batch with this transformer model. Default approximation neglects the quadratic dependency on the number of + tokens (valid if `12 * d_model << sequence_length`) as laid out in [this + paper](https://arxiv.org/pdf/2001.08361.pdf) section 2.1. Should be overridden for transformers with parameter + re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths. + + Args: + batch_size (`int`): + The batch size for the forward pass. + + sequence_length (`int`): + The number of tokens in each line of the batch. + + exclude_embeddings (`bool`, *optional*, defaults to `True`): + Whether or not to count embedding and softmax operations. + + Returns: + `int`: The number of floating-point operations. + """ + + return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings) + + +class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin, PeftAdapterMixin): + r""" + Base class for all models. + + [`PreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading, + downloading and saving models as well as a few methods common to all models to: + + - resize the input embeddings, + - prune heads in the self-attention heads. 
+ + Class attributes (overridden by derived classes): + + - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class + for this model architecture. + - **load_tf_weights** (`Callable`) -- A python *method* for loading a TensorFlow checkpoint in a PyTorch model, + taking as arguments: + + - **model** ([`PreTrainedModel`]) -- An instance of the model on which to load the TensorFlow checkpoint. + - **config** ([`PreTrainedConfig`]) -- An instance of the configuration associated to the model. + - **path** (`str`) -- A path to the TensorFlow checkpoint. + + - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived + classes of the same architecture adding modules on top of the base model. + - **is_parallelizable** (`bool`) -- A flag indicating whether this model supports model parallelization. + - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP + models, `pixel_values` for vision models and `input_values` for speech models). + """ + + config_class = None + base_model_prefix = "" + main_input_name = "input_ids" + model_tags = None + + _auto_class = None + _no_split_modules = None + _skip_keys_device_placement = None + _keep_in_fp32_modules = None + + # a list of `re` patterns of `state_dict` keys that should be removed from the list of missing + # keys we find (keys inside the model but not in the checkpoint) and avoid unnecessary warnings. + _keys_to_ignore_on_load_missing = None + # a list of `re` patterns of `state_dict` keys that should be removed from the list of + # unexpected keys we find (keys inside the checkpoint but not the model) and avoid unnecessary + # warnings. + _keys_to_ignore_on_load_unexpected = None + # a list of `state_dict` keys to ignore when saving the model (useful for keys that aren't + # trained, but which are either deterministic or tied variables) + _keys_to_ignore_on_save = None + # a list of `state_dict` keys that are potentially tied to another key in the state_dict. + _tied_weights_keys = None + + is_parallelizable = False + supports_gradient_checkpointing = False + + # Flash Attention 2 support + _supports_flash_attn_2 = False + + # SDPA support + _supports_sdpa = False + + # Has support for a `Cache` instance as `past_key_values` + _supports_cache_class = False + + @property + def dummy_inputs(self) -> Dict[str, torch.Tensor]: + """ + `Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network. + """ + return {"input_ids": torch.tensor(DUMMY_INPUTS)} + + @property + def framework(self) -> str: + """ + :str: Identifies that this is a PyTorch model. + """ + return "pt" + + def __init__(self, config: PretrainedConfig, *inputs, **kwargs): + super().__init__() + if not isinstance(config, PretrainedConfig): + raise ValueError( + f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " + "`PretrainedConfig`. 
To create a model from a pretrained model use " + f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + # Save config and origin of the pretrained weights if given in model + config = self._autoset_attn_implementation( + config, torch_dtype=torch.get_default_dtype(), check_device_map=False + ) + self.config = config + + self.name_or_path = config.name_or_path + self.warnings_issued = {} + self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None + # Overwrite the class attribute to make it an instance attribute, so models like + # `InstructBlipForConditionalGeneration` can dynamically update it without modifying the class attribute + # when a different component (e.g. language_model) is used. + self._keep_in_fp32_modules = copy.copy(self.__class__._keep_in_fp32_modules) + + def post_init(self): + """ + A method executed at the end of each Transformer model initialization, to execute code that needs the model's + modules properly initialized (such as weight initialization). + """ + self.init_weights() + self._backward_compatibility_gradient_checkpointing() + + def _backward_compatibility_gradient_checkpointing(self): + if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False): + self.gradient_checkpointing_enable() + # Remove the attribute now that is has been consumed, so it's no saved in the config. + delattr(self.config, "gradient_checkpointing") + + def add_model_tags(self, tags: Union[List[str], str]) -> None: + r""" + Add custom tags into the model that gets pushed to the Hugging Face Hub. Will + not overwrite existing tags in the model. + + Args: + tags (`Union[List[str], str]`): + The desired tags to inject in the model + + Examples: + + ```python + from transformers import AutoModel + + model = AutoModel.from_pretrained("google-bert/bert-base-cased") + + model.add_model_tags(["custom", "custom-bert"]) + + # Push the model to your namespace with the name "my-custom-bert". + model.push_to_hub("my-custom-bert") + ``` + """ + if isinstance(tags, str): + tags = [tags] + + if self.model_tags is None: + self.model_tags = [] + + for tag in tags: + if tag not in self.model_tags: + self.model_tags.append(tag) + + @classmethod + def _from_config(cls, config, **kwargs): + """ + All context managers that the model should be initialized under go here. + + Args: + torch_dtype (`torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model under this dtype. + """ + torch_dtype = kwargs.pop("torch_dtype", None) + use_flash_attention_2 = kwargs.pop("use_flash_attention_2", False) + + # override default dtype if needed + dtype_orig = None + if torch_dtype is not None: + dtype_orig = cls._set_default_torch_dtype(torch_dtype) + + config = copy.deepcopy(config) # We do not want to modify the config inplace in _from_config. 
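+        # Illustrative note (added for clarity, not part of the original code): a call such as
+        # `MyModel._from_config(config, torch_dtype=torch.bfloat16, attn_implementation="sdpa")`
+        # (with `MyModel` a hypothetical subclass) pops `attn_implementation` below and resolves it
+        # through `_autoset_attn_implementation` before the model is instantiated.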
+ config._attn_implementation = kwargs.pop("attn_implementation", None) + config = cls._autoset_attn_implementation( + config, + use_flash_attention_2=use_flash_attention_2, + check_device_map=False, + torch_dtype=torch_dtype, + ) + + if is_deepspeed_zero3_enabled(): + import deepspeed + + logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model") + # this immediately partitions the model across all gpus, to avoid the overhead in time + # and memory copying it on CPU or each GPU first + with deepspeed.zero.Init(config_dict_or_path=deepspeed_config()): + model = cls(config, **kwargs) + else: + model = cls(config, **kwargs) + + # restore default dtype if it was modified + if dtype_orig is not None: + torch.set_default_dtype(dtype_orig) + + return model + + @classmethod + def _autoset_attn_implementation( + cls, + config, + use_flash_attention_2: bool = False, + torch_dtype: Optional[torch.dtype] = None, + device_map: Optional[Union[str, Dict[str, int]]] = None, + check_device_map: bool = True, + ): + """ + Automatically checks and dispatches to a default attention implementation. In order of priority: + 1. An implementation specified in `config._attn_implementation` (due for example to the argument attn_implementation="sdpa" in from_pretrained). + 2. DEPRECATED: if use_flash_attention_2 is set to `True` and `flash_attn` is available, flash attention. (`LlamaFlashAttention` for example) + 3. SDPA implementation, if available and supported by the model type. (`LlamaSdpaAttention` for example) + 4. The default model's implementation otherwise (`LlamaAttention` for example) . + """ + # Here we use config._attn_implementation_internal to check whether the attention implementation was explicitely set by the user. + # The property `PretrainedConfig._attn_implementation` is never `None`, for backward compatibility (always fall back on "eager"). + # The `hasattr` here is used as some Transformers tests for some reason do not call PretrainedConfig __init__ (e.g. test_no_super_init_config_and_model) + requested_attn_implementation = None + if hasattr(config, "_attn_implementation_internal") and config._attn_implementation_internal is not None: + if config._attn_implementation != "flash_attention_2" and use_flash_attention_2: + raise ValueError( + f'Both attn_implementation="{config._attn_implementation}" and `use_flash_attention_2=True` were used when loading the model, which are not compatible.' + ' We recommend to just use `attn_implementation="flash_attention_2"` when loading the model.' + ) + + if config._attn_implementation not in ["eager", "sdpa", "flash_attention_2"]: + message = f'Specified `attn_implementation="{config._attn_implementation}"` is not supported. The only possible arguments are `attn_implementation="eager"` (manual attention implementation)' + if cls._supports_flash_attn_2: + message += ', `"attn_implementation=flash_attention_2"` (implementation using flash attention 2)' + if cls._supports_sdpa: + message += ', `"attn_implementation=sdpa"` (implementation using torch.nn.functional.scaled_dot_product_attention)' + raise ValueError(message + ".") + + # If a config is passed with a preset attn_implementation, we skip the automatic dispatch and use the user-provided config, with hard checks that the requested attention implementation is available. 
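+            # Illustrative note (added for clarity): loading with, e.g.,
+            # `AutoModel.from_pretrained(..., attn_implementation="sdpa")` ends up here with
+            # `config._attn_implementation_internal == "sdpa"`, so the explicit user choice is
+            # hard-checked below instead of being auto-dispatched.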
+ requested_attn_implementation = config._attn_implementation_internal + + if use_flash_attention_2: + logger.warning_once( + 'The model was loaded with use_flash_attention_2=True, which is deprecated and may be removed in a future release. Please use `attn_implementation="flash_attention_2"` instead.' + ) + config._attn_implementation = "flash_attention_2" + + if config._attn_implementation == "flash_attention_2": + cls._check_and_enable_flash_attn_2( + config, + torch_dtype=torch_dtype, + device_map=device_map, + hard_check_only=False, + check_device_map=check_device_map, + ) + elif requested_attn_implementation in [None, "sdpa"] and not is_torch_xla_available(): + # use_flash_attention_2 takes priority over SDPA, hence SDPA treated in this elif. + config = cls._check_and_enable_sdpa( + config, + hard_check_only=False if requested_attn_implementation is None else True, + ) + else: + config._attn_implementation = "eager" + + return config + + @classmethod + def _set_default_torch_dtype(cls, dtype: torch.dtype) -> torch.dtype: + """ + Change the default dtype and return the previous one. This is needed when wanting to instantiate the model + under specific dtype. + + Args: + dtype (`torch.dtype`): + a floating dtype to set to. + + Returns: + `torch.dtype`: the original `dtype` that can be used to restore `torch.set_default_dtype(dtype)` if it was + modified. If it wasn't, returns `None`. + + Note `set_default_dtype` currently only works with floating-point types and asserts if for example, + `torch.int64` is passed. So if a non-float `dtype` is passed this functions will throw an exception. + """ + if not dtype.is_floating_point: + raise ValueError( + f"Can't instantiate {cls.__name__} model under dtype={dtype} since it is not a floating point dtype" + ) + + logger.info(f"Instantiating {cls.__name__} model under default dtype {dtype}.") + dtype_orig = torch.get_default_dtype() + torch.set_default_dtype(dtype) + return dtype_orig + + @property + def base_model(self) -> nn.Module: + """ + `torch.nn.Module`: The main body of the model. + """ + return getattr(self, self.base_model_prefix, self) + + @classmethod + def can_generate(cls) -> bool: + """ + Returns whether this model can generate sequences with `.generate()`. + + Returns: + `bool`: Whether this model can generate sequences with `.generate()`. + """ + # Detects whether `prepare_inputs_for_generation` has been overwritten, which is a requirement for generation. + # Alternativelly, the model can also have a custom `generate` function. + if "GenerationMixin" in str(cls.prepare_inputs_for_generation) and "GenerationMixin" in str(cls.generate): + return False + return True + + @classmethod + def _check_and_enable_flash_attn_2( + cls, + config, + torch_dtype: Optional[torch.dtype] = None, + device_map: Optional[Union[str, Dict[str, int]]] = None, + check_device_map: bool = True, + hard_check_only: bool = False, + ) -> PretrainedConfig: + """ + Checks the availability of Flash Attention 2 and compatibility with the current model. + + If all checks pass and `hard_check_only` is False, the method will set the config attribute `attn_implementation` to "flash_attention_2" so that the model can initialize the correct attention module. + """ + if not cls._supports_flash_attn_2: + raise ValueError( + f"{cls.__name__} does not support Flash Attention 2.0 yet. 
Please request to add support where"
+                f" the model is hosted, on its model hub page: https://huggingface.co/{config._name_or_path}/discussions/new"
+                " or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new"
+            )
+
+        if not is_flash_attn_2_available():
+            preface = "FlashAttention2 has been toggled on, but it cannot be used due to the following error:"
+            install_message = "Please refer to the documentation of https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-2 to install Flash Attention 2."
+
+            if importlib.util.find_spec("flash_attn") is None:
+                raise ImportError(f"{preface} the package flash_attn seems to be not installed. {install_message}")
+
+            flash_attention_version = version.parse(importlib.metadata.version("flash_attn"))
+            if torch.version.cuda:
+                if flash_attention_version < version.parse("2.1.0"):
+                    raise ImportError(
+                        f"{preface} you need flash_attn package version to be greater than or equal to 2.1.0. Detected version {flash_attention_version}. {install_message}"
+                    )
+                else:
+                    raise ImportError(f"{preface} Flash Attention 2 is not available. {install_message}")
+            elif torch.version.hip:
+                if flash_attention_version < version.parse("2.0.4"):
+                    raise ImportError(
+                        f"{preface} you need flash_attn package version to be greater than or equal to 2.0.4. Make sure to have that version installed - detected version {flash_attention_version}. {install_message}"
+                    )
+                else:
+                    raise ImportError(f"{preface} Flash Attention 2 is not available. {install_message}")
+
+        _is_bettertransformer = getattr(cls, "use_bettertransformer", False)
+
+        if _is_bettertransformer:
+            raise ValueError(
+                "Flash Attention 2 and BetterTransformer API are not compatible. Please make sure to disable BetterTransformers by doing model.reverse_bettertransformer()"
+            )
+
+        if torch_dtype is None:
+            logger.warning_once(
+                "You are attempting to use Flash Attention 2.0 without specifying a torch dtype. This might lead to unexpected behaviour"
+            )
+        elif torch_dtype is not None and torch_dtype not in [torch.float16, torch.bfloat16]:
+            logger.warning_once(
+                "Flash Attention 2.0 only supports torch.float16 and torch.bfloat16 dtypes, but"
+                f" the current dtype in {cls.__name__} is {torch_dtype}. You should run training or inference using Automatic Mixed-Precision via the `with torch.autocast(device_type='torch_device'):` context manager,"
+                ' or load the model with the `torch_dtype` argument. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="flash_attention_2", torch_dtype=torch.float16)`'
+            )
+
+        # The check `torch.empty(0).device.type != "cuda"` is needed as the model may be initialized after `torch.set_default_device` has been called,
+        # or the model may be initialized under the context manager `with torch.device("cuda"):`.
+        if check_device_map and device_map is None and torch.empty(0).device.type != "cuda":
+            if torch.cuda.is_available():
+                logger.warning_once(
+                    "You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU"
+                    " after initializing it on CPU with `model.to('cuda')`."
+                )
+            else:
+                raise ValueError(
+                    "You are attempting to use Flash Attention 2.0 with a model not initialized on GPU and with no GPU available. "
+                    "This is not supported yet. Please make sure to have access to a GPU and either initialise the model on a GPU by passing a device_map "
+                    "or initialising the model on CPU and then moving it to GPU."
+                )
+        elif (
+            check_device_map
+            and device_map is not None
+            and isinstance(device_map, dict)
+            and ("cpu" in device_map.values() or "disk" in device_map.values())
+        ):
+            raise ValueError(
+                "You are attempting to use Flash Attention 2.0 with a model dispatched on CPU or disk. This is not supported. Please make sure to "
+                "initialise the model on a GPU by passing a device_map that contains only GPU devices as keys."
+            )
+        if not hard_check_only:
+            config._attn_implementation = "flash_attention_2"
+        return config
+
+    @classmethod
+    def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False) -> PretrainedConfig:
+        """
+        Checks the availability of SDPA for a given model.
+
+        If all checks pass and `hard_check_only` is False, the method will set the config attribute `_attn_implementation` to "sdpa" so that the model can initialize the correct attention module.
+        """
+        if hard_check_only:
+            if not cls._supports_sdpa:
+                raise ValueError(
+                    f"{cls.__name__} does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet."
+                    " Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe"
+                    ' this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation="eager"` meanwhile. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="eager")`'
+                )
+            if not is_torch_sdpa_available():
+                raise ImportError(
+                    "PyTorch SDPA requirements in Transformers are not met. Please install torch>=2.1.1."
+                )
+
+        if not is_torch_sdpa_available() or not cls._supports_sdpa:
+            return config
+
+        _is_bettertransformer = getattr(cls, "use_bettertransformer", False)
+        if _is_bettertransformer:
+            return config
+
+        if not hard_check_only:
+            config._attn_implementation = "sdpa"
+        return config
+
+    def enable_input_require_grads(self):
+        """
+        Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping
+        the model weights fixed.
+        """
+
+        def make_inputs_require_grads(module, input, output):
+            output.requires_grad_(True)
+
+        self._require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
+
+    def disable_input_require_grads(self):
+        """
+        Removes the `_require_grads_hook`.
+        """
+        self._require_grads_hook.remove()
+
+    def get_input_embeddings(self) -> nn.Module:
+        """
+        Returns the model's input embeddings.
+
+        Returns:
+            `nn.Module`: A torch module mapping vocabulary to hidden states.
+        """
+        base_model = getattr(self, self.base_model_prefix, self)
+        if base_model is not self:
+            return base_model.get_input_embeddings()
+        else:
+            raise NotImplementedError
+
+    def set_input_embeddings(self, value: nn.Module):
+        """
+        Set model's input embeddings.
+
+        Args:
+            value (`nn.Module`): A module mapping vocabulary to hidden states.
+        """
+        base_model = getattr(self, self.base_model_prefix, self)
+        if base_model is not self:
+            base_model.set_input_embeddings(value)
+        else:
+            raise NotImplementedError
+
+    def get_output_embeddings(self) -> nn.Module:
+        """
+        Returns the model's output embeddings.
+
+        Returns:
+            `nn.Module`: A torch module mapping hidden states to vocabulary.
+        """
+        return None  # Overwrite for models with output embeddings
+
+    def _init_weights(self, module):
+        """
+        Initialize the weights. 
This method should be overridden by derived class and is + the only initialization method that will be called when loading a checkpoint + using `from_pretrained`. Any attempt to initialize outside of this function + will be useless as the torch.nn.init function are all replaced with skip. + """ + pass + + def _initialize_weights(self, module): + """ + Initialize the weights if they are not already initialized. + """ + if getattr(module, "_is_hf_initialized", False): + return + self._init_weights(module) + module._is_hf_initialized = True + + def tie_weights(self): + """ + Tie the weights between the input embeddings and the output embeddings. + + If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the + weights instead. + """ + if getattr(self.config, "tie_word_embeddings", True): + output_embeddings = self.get_output_embeddings() + if output_embeddings is not None: + self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) + + if getattr(self.config, "is_encoder_decoder", False) and getattr(self.config, "tie_encoder_decoder", False): + if hasattr(self, self.base_model_prefix): + self = getattr(self, self.base_model_prefix) + self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix) + + for module in self.modules(): + if hasattr(module, "_tie_weights"): + module._tie_weights() + + @staticmethod + def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str): + uninitialized_encoder_weights: List[str] = [] + if decoder.__class__ != encoder.__class__: + logger.info( + f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder" + " weights are correctly initialized." + ) + + def tie_encoder_to_decoder_recursively( + decoder_pointer: nn.Module, + encoder_pointer: nn.Module, + module_name: str, + uninitialized_encoder_weights: List[str], + depth=0, + ): + assert isinstance(decoder_pointer, nn.Module) and isinstance( + encoder_pointer, nn.Module + ), f"{decoder_pointer} and {encoder_pointer} have to be of type nn.Module" + if hasattr(decoder_pointer, "weight"): + assert hasattr(encoder_pointer, "weight") + encoder_pointer.weight = decoder_pointer.weight + if hasattr(decoder_pointer, "bias"): + assert hasattr(encoder_pointer, "bias") + encoder_pointer.bias = decoder_pointer.bias + return + + encoder_modules = encoder_pointer._modules + decoder_modules = decoder_pointer._modules + if len(decoder_modules) > 0: + assert ( + len(encoder_modules) > 0 + ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}" + + all_encoder_weights = {module_name + "/" + sub_name for sub_name in encoder_modules.keys()} + encoder_layer_pos = 0 + for name, module in decoder_modules.items(): + if name.isdigit(): + encoder_name = str(int(name) + encoder_layer_pos) + decoder_name = name + if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len( + encoder_modules + ) != len(decoder_modules): + # this can happen if the name corresponds to the position in a list module list of layers + # in this case the decoder has added a cross-attention that the encoder does not have + # thus skip this step and subtract one layer pos from encoder + encoder_layer_pos -= 1 + continue + elif name not in encoder_modules: + continue + elif depth > 500: + raise ValueError( + "Max depth of recursive function `tie_encoder_to_decoder` reached. 
It seems that there is" + " a circular dependency between two or more `nn.Modules` of your model." + ) + else: + decoder_name = encoder_name = name + tie_encoder_to_decoder_recursively( + decoder_modules[decoder_name], + encoder_modules[encoder_name], + module_name + "/" + name, + uninitialized_encoder_weights, + depth=depth + 1, + ) + all_encoder_weights.remove(module_name + "/" + encoder_name) + + uninitialized_encoder_weights += list(all_encoder_weights) + + # tie weights recursively + tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights) + if len(uninitialized_encoder_weights) > 0: + logger.warning( + f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}" + ) + + def _tie_or_clone_weights(self, output_embeddings, input_embeddings): + """Tie or clone module weights depending of whether we are using TorchScript or not""" + if self.config.torchscript: + output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone()) + else: + output_embeddings.weight = input_embeddings.weight + + if getattr(output_embeddings, "bias", None) is not None: + output_embeddings.bias.data = nn.functional.pad( + output_embeddings.bias.data, + ( + 0, + output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0], + ), + "constant", + 0, + ) + if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"): + output_embeddings.out_features = input_embeddings.num_embeddings + + def _get_no_split_modules(self, device_map: str): + """ + Get the modules of the model that should not be spit when using device_map. We iterate through the modules to + get the underlying `_no_split_modules`. + + Args: + device_map (`str`): + The device map value. Options are ["auto", "balanced", "balanced_low_0", "sequential"] + + Returns: + `List[str]`: List of modules that should not be split + """ + _no_split_modules = set() + modules_to_check = [self] + while len(modules_to_check) > 0: + module = modules_to_check.pop(-1) + # if the module does not appear in _no_split_modules, we also check the children + if module.__class__.__name__ not in _no_split_modules: + if isinstance(module, PreTrainedModel): + if module._no_split_modules is None: + raise ValueError( + f"{module.__class__.__name__} does not support `device_map='{device_map}'`. To implement support, the model " + "class needs to implement the `_no_split_modules` attribute." + ) + else: + _no_split_modules = _no_split_modules | set(module._no_split_modules) + modules_to_check += list(module.children()) + return list(_no_split_modules) + + def resize_token_embeddings( + self, new_num_tokens: Optional[int] = None, pad_to_multiple_of: Optional[int] = None + ) -> nn.Embedding: + """ + Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. + + Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. + + Arguments: + new_num_tokens (`int`, *optional*): + The new number of tokens in the embedding matrix. Increasing the size will add newly initialized + vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just + returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything. + pad_to_multiple_of (`int`, *optional*): + If set will pad the embedding matrix to a multiple of the provided value.If `new_num_tokens` is set to + `None` will just pad the embedding to a multiple of `pad_to_multiple_of`. 
+ + This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability + `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more + details about this, or help on choosing the correct value for resizing, refer to this guide: + https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc + + Return: + `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model. + """ + model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of) + if new_num_tokens is None and pad_to_multiple_of is None: + return model_embeds + + # Update base model and current model config + self.config.vocab_size = model_embeds.weight.shape[0] + self.vocab_size = model_embeds.weight.shape[0] + + # Tie weights again if needed + self.tie_weights() + + return model_embeds + + def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None): + old_embeddings = self.get_input_embeddings() + new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of) + if hasattr(old_embeddings, "_hf_hook"): + hook = old_embeddings._hf_hook + add_hook_to_module(new_embeddings, hook) + old_embeddings_requires_grad = old_embeddings.weight.requires_grad + new_embeddings.requires_grad_(old_embeddings_requires_grad) + self.set_input_embeddings(new_embeddings) + is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None + + # Update new_num_tokens with the actual size of new_embeddings + if pad_to_multiple_of is not None: + if is_deepspeed_zero3_enabled() and not is_quantized: + import deepspeed + + with deepspeed.zero.GatheredParameters(new_embeddings.weight, modifier_rank=None): + new_num_tokens = new_embeddings.weight.shape[0] + else: + new_num_tokens = new_embeddings.weight.shape[0] + + # if word embeddings are not tied, make sure that lm head is resized as well + if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings: + old_lm_head = self.get_output_embeddings() + new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens) + if hasattr(old_lm_head, "_hf_hook"): + hook = old_lm_head._hf_hook + add_hook_to_module(new_lm_head, hook) + old_lm_head_requires_grad = old_lm_head.weight.requires_grad + new_lm_head.requires_grad_(old_lm_head_requires_grad) + self.set_output_embeddings(new_lm_head) + + return self.get_input_embeddings() + + def _get_resized_embeddings( + self, + old_embeddings: nn.Embedding, + new_num_tokens: Optional[int] = None, + pad_to_multiple_of: Optional[int] = None, + ) -> nn.Embedding: + """ + Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly + initialized vectors at the end. Reducing the size will remove vectors from the end + + Args: + old_embeddings (`torch.nn.Embedding`): + Old embeddings to be resized. + new_num_tokens (`int`, *optional*): + New number of tokens in the embedding matrix. + + Increasing the size will add newly initialized vectors at the end. Reducing the size will remove + vectors from the end. If not provided or `None`, just returns a pointer to the input tokens + `torch.nn.Embedding` module of the model without doing anything. + pad_to_multiple_of (`int`, *optional*): + If set will pad the embedding matrix to a multiple of the provided value. If `new_num_tokens` is set to + `None` will just pad the embedding to a multiple of `pad_to_multiple_of`. 
+
+                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+                `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more
+                details about this, or help on choosing the correct value for resizing, refer to this guide:
+                https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc
+
+
+        Return:
+            `torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
+            `new_num_tokens` is `None`
+        """
+
+        if pad_to_multiple_of is not None:
+            if not isinstance(pad_to_multiple_of, int):
+                raise ValueError(
+                    f"Asking to pad the embedding matrix to a multiple of `{pad_to_multiple_of}`, which is not an integer. Please make sure to pass an integer"
+                )
+            if new_num_tokens is None:
+                new_num_tokens = old_embeddings.weight.shape[0]
+            new_num_tokens = ((new_num_tokens + pad_to_multiple_of - 1) // pad_to_multiple_of) * pad_to_multiple_of
+        else:
+            logger.info(
+                "You are resizing the embedding layer without providing a `pad_to_multiple_of` parameter. This means that the new embedding"
+                f" dimension will be {new_num_tokens}. This might induce some performance reduction as *Tensor Cores* will not be available."
+                " For more details about this, or help on choosing the correct value for resizing, refer to this guide:"
+                " https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc"
+            )
+
+        if new_num_tokens is None:
+            return old_embeddings
+
+        is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None
+        if is_deepspeed_zero3_enabled() and not is_quantized:
+            import deepspeed
+
+            with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None):
+                old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
+        else:
+            old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
+
+        if old_num_tokens == new_num_tokens and not is_deepspeed_zero3_enabled():
+            return old_embeddings
+
+        if not isinstance(old_embeddings, nn.Embedding):
+            raise TypeError(
+                f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}. You"
+                " should either use a different resize function or make sure that `old_embeddings` are an instance of"
+                f" {nn.Embedding}."
+            )
+
+        # Build new embeddings
+
+        # When using DeepSpeed ZeRO-3, we shouldn't create new embeddings with DeepSpeed init
+        # because the shape of the new embedding layer is used across various modeling files
+        # as well as to update config vocab size. Shape will be 0 when using DeepSpeed init leading
+        # to errors when training. 
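+        # Illustrative sizing note (added for clarity, not part of the original code): with
+        # old_num_tokens = 30522 and pad_to_multiple_of = 64, new_num_tokens is rounded up to
+        # 30528 above, so the embedding created below has shape (30528, old_embedding_dim).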
+ new_embeddings = nn.Embedding( + new_num_tokens, + old_embedding_dim, + device=old_embeddings.weight.device, + dtype=old_embeddings.weight.dtype, + ) + + # initialize all new embeddings (in particular added tokens) + self._init_weights(new_embeddings) + + # Copy token embeddings from the previous weights + + # numbers of tokens to copy + n = min(old_num_tokens, new_num_tokens) + + if is_deepspeed_zero3_enabled() and not is_quantized: + import deepspeed + + params = [old_embeddings.weight, new_embeddings.weight] + with deepspeed.zero.GatheredParameters(params, modifier_rank=0): + new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :] + else: + new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :] + + return new_embeddings + + def _get_resized_lm_head( + self, old_lm_head: nn.Linear, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False + ) -> nn.Linear: + """ + Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized + vectors at the end. Reducing the size will remove vectors from the end + + Args: + old_lm_head (`torch.nn.Linear`): + Old lm head liner layer to be resized. + new_num_tokens (`int`, *optional*): + New number of tokens in the linear matrix. + + Increasing the size will add newly initialized vectors at the end. Reducing the size will remove + vectors from the end. If not provided or `None`, just returns a pointer to the input tokens + `torch.nn.Linear` module of the model without doing anything. transposed (`bool`, *optional*, defaults + to `False`): Whether `old_lm_head` is transposed or not. If True `old_lm_head.size()` is `lm_head_dim, + vocab_size` else `vocab_size, lm_head_dim`. + + Return: + `torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if `new_num_tokens` is + `None` + """ + if new_num_tokens is None: + return old_lm_head + + is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None + if is_deepspeed_zero3_enabled() and not is_quantized: + import deepspeed + + with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=None): + old_num_tokens, old_lm_head_dim = ( + old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size() + ) + else: + old_num_tokens, old_lm_head_dim = ( + old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size() + ) + + if old_num_tokens == new_num_tokens and not is_deepspeed_zero3_enabled(): + return old_lm_head + + if not isinstance(old_lm_head, nn.Linear): + raise TypeError( + f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. You" + " should either use a different resize function or make sure that `old_lm_head` are an instance of" + f" {nn.Linear}." + ) + + # Build new lm head + new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim) + has_new_lm_head_bias = old_lm_head.bias is not None + + # When using DeepSpeed ZeRO-3, we shouldn't create new embeddings with DeepSpeed init + # because the shape of the new embedding layer is used across various modeling files + # as well as to update config vocab size. Shape will be 0 when using DeepSpeed init leading + # to errors when training. 
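+        # Illustrative shape note (added for clarity, not part of the original code): in the usual
+        # non-transposed case new_lm_head_shape is (old_lm_head_dim, new_num_tokens), so the
+        # nn.Linear created below has a weight of shape (new_num_tokens, old_lm_head_dim),
+        # i.e. one row per vocabulary token.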
+ new_lm_head = nn.Linear( + *new_lm_head_shape, + bias=has_new_lm_head_bias, + device=old_lm_head.weight.device, + dtype=old_lm_head.weight.dtype, + ) + + # initialize new lm head (in particular added tokens) + self._init_weights(new_lm_head) + + num_tokens_to_copy = min(old_num_tokens, new_num_tokens) + + if is_deepspeed_zero3_enabled() and not is_quantized: + import deepspeed + + params = [old_lm_head.weight, old_lm_head.bias, new_lm_head.weight, new_lm_head.bias] + with deepspeed.zero.GatheredParameters(params, modifier_rank=0): + self._copy_lm_head_original_to_resized( + new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias + ) + else: + self._copy_lm_head_original_to_resized( + new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias + ) + + return new_lm_head + + def _copy_lm_head_original_to_resized( + self, new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias + ): + # Copy old lm head weights to new lm head + if not transposed: + new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :] + else: + new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy] + + # Copy bias weights to new lm head + if has_new_lm_head_bias: + new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy] + + def resize_position_embeddings(self, new_num_position_embeddings: int): + raise NotImplementedError( + f"`resize_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should " + f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`" + ) + + def get_position_embeddings(self) -> Union[nn.Embedding, Tuple[nn.Embedding]]: + raise NotImplementedError( + f"`get_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should " + f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`" + ) + + def init_weights(self): + """ + If needed prunes and maybe initializes weights. If using a custom `PreTrainedModel`, you need to implement any + initialization logic in `_init_weights`. + """ + # Prune heads if needed + if self.config.pruned_heads: + self.prune_heads(self.config.pruned_heads) + + if _init_weights: + # Initialize weights + self.apply(self._initialize_weights) + + # Tie weights should be skipped when not initializing all weights + # since from_pretrained(...) calls tie weights anyways + self.tie_weights() + + def prune_heads(self, heads_to_prune: Dict[int, List[int]]): + """ + Prunes heads of the base model. + + Arguments: + heads_to_prune (`Dict[int, List[int]]`): + Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads + to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on + layer 1 and heads 2 and 3 on layer 2. + """ + # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads + for layer, heads in heads_to_prune.items(): + union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads) + self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON + + self.base_model._prune_heads(heads_to_prune) + + def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None): + """ + Activates gradient checkpointing for the current model. 
+ + Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint + activations". + + We pass the `__call__` method of the modules instead of `forward` because `__call__` attaches all the hooks of + the module. https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2 + + Args: + gradient_checkpointing_kwargs (dict, *optional*): + Additional keyword arguments passed along to the `torch.utils.checkpoint.checkpoint` function. + """ + if not self.supports_gradient_checkpointing: + raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.") + + if gradient_checkpointing_kwargs is None: + gradient_checkpointing_kwargs = {"use_reentrant": True} + + gradient_checkpointing_func = functools.partial(checkpoint, **gradient_checkpointing_kwargs) + + # For old GC format (transformers < 4.35.0) for models that live on the Hub + # we will fall back to the overwritten `_set_gradient_checkpointing` method + _is_using_old_format = "value" in inspect.signature(self._set_gradient_checkpointing).parameters + + if not _is_using_old_format: + self._set_gradient_checkpointing(enable=True, gradient_checkpointing_func=gradient_checkpointing_func) + else: + self.apply(partial(self._set_gradient_checkpointing, value=True)) + logger.warn( + "You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it)." + "Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model." + ) + + if getattr(self, "_hf_peft_config_loaded", False): + # When using PEFT + gradient checkpointing + Trainer we need to make sure the input has requires_grad=True + # we do it also on PEFT: https://github.com/huggingface/peft/blob/85013987aa82aa1af3da1236b6902556ce3e483e/src/peft/peft_model.py#L334 + # When training with PEFT, only LoRA layers will have requires grad set to True, but the output of frozen layers need to propagate + # the gradients to make sure the gradient flows. + self.enable_input_require_grads() + + def _set_gradient_checkpointing(self, enable: bool = True, gradient_checkpointing_func: Callable = checkpoint): + is_gradient_checkpointing_set = False + + # Apply it on the top-level module in case the top-level modules supports it + # for example, LongT5Stack inherits from `PreTrainedModel`. + if hasattr(self, "gradient_checkpointing"): + self._gradient_checkpointing_func = gradient_checkpointing_func + self.gradient_checkpointing = enable + is_gradient_checkpointing_set = True + + for module in self.modules(): + if hasattr(module, "gradient_checkpointing"): + module._gradient_checkpointing_func = gradient_checkpointing_func + module.gradient_checkpointing = enable + is_gradient_checkpointing_set = True + + if not is_gradient_checkpointing_set: + raise ValueError( + f"{self.__class__.__name__} is not compatible with gradient checkpointing. Make sure all the architecture support it by setting a boolean attribute" + " `gradient_checkpointing` to modules of the model that uses checkpointing." + ) + + def gradient_checkpointing_disable(self): + """ + Deactivates gradient checkpointing for the current model. + + Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint + activations". 
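A minimal sketch of toggling gradient checkpointing through the public API above; the checkpoint name is illustrative, and `use_reentrant=False` is just one possible `torch.utils.checkpoint` option (the default, as shown above, is `{"use_reentrant": True}`):

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("gpt2")

# Trade compute for memory during training; the kwargs are forwarded to
# torch.utils.checkpoint.checkpoint.
model.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False})
assert model.is_gradient_checkpointing

# Turn it back off for inference.
model.gradient_checkpointing_disable()
```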
+ """
+ if self.supports_gradient_checkpointing:
+ # For old GC format (transformers < 4.35.0) for models that live on the Hub
+ # we will fall back to the overwritten `_set_gradient_checkpointing` method
+ _is_using_old_format = "value" in inspect.signature(self._set_gradient_checkpointing).parameters
+ if not _is_using_old_format:
+ self._set_gradient_checkpointing(enable=False)
+ else:
+ logger.warning(
+ "You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it). "
+ "Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model."
+ )
+ self.apply(partial(self._set_gradient_checkpointing, value=False))
+
+ if getattr(self, "_hf_peft_config_loaded", False):
+ self.disable_input_require_grads()
+
+ @property
+ def is_gradient_checkpointing(self) -> bool:
+ """
+ Whether gradient checkpointing is activated for this model or not.
+
+ Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint
+ activations".
+ """
+ return any(hasattr(m, "gradient_checkpointing") and m.gradient_checkpointing for m in self.modules())
+
+ def save_pretrained(
+ self,
+ save_directory: Union[str, os.PathLike],
+ is_main_process: bool = True,
+ state_dict: Optional[dict] = None,
+ save_function: Callable = torch.save,
+ push_to_hub: bool = False,
+ max_shard_size: Union[int, str] = "5GB",
+ safe_serialization: bool = True,
+ variant: Optional[str] = None,
+ token: Optional[Union[str, bool]] = None,
+ save_peft_format: bool = True,
+ **kwargs,
+ ):
+ """
+ Save a model and its configuration file to a directory, so that it can be re-loaded using the
+ [`~PreTrainedModel.from_pretrained`] class method.
+
+ Arguments:
+ save_directory (`str` or `os.PathLike`):
+ Directory to which to save. Will be created if it doesn't exist.
+ is_main_process (`bool`, *optional*, defaults to `True`):
+ Whether the process calling this is the main process or not. Useful in distributed training (e.g. on
+ TPUs) when you need to call this function on all processes. In this case, set `is_main_process=True`
+ only on the main process to avoid race conditions.
+ state_dict (nested dictionary of `torch.Tensor`):
+ The state dictionary of the model to save. Will default to `self.state_dict()`, but can be used to only
+ save parts of the model or if special precautions need to be taken when recovering the state dictionary
+ of a model (like when using model parallelism).
+ save_function (`Callable`):
+ The function to use to save the state dictionary. Useful in distributed training (e.g. on TPUs) when
+ one needs to replace `torch.save` with another method.
+ push_to_hub (`bool`, *optional*, defaults to `False`):
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
+ namespace).
+ max_shard_size (`int` or `str`, *optional*, defaults to `"5GB"`):
+ The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be smaller
+ than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`).
+ We default it to 5GB so that models can run easily on free-tier Google Colab instances
+ without CPU OOM issues. 
+ + + + If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard + which will be bigger than `max_shard_size`. + + + + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + variant (`str`, *optional*): + If specified, weights are saved in the format pytorch_model..bin. + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use + the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). + save_peft_format (`bool`, *optional*, defaults to `True`): + For backward compatibility with PEFT library, in case adapter weights are attached to the model, all + keys of the state dict of adapters needs to be pre-pended with `base_model.model`. Advanced users can + disable this behaviours by setting `save_peft_format` to `False`. + kwargs (`Dict[str, Any]`, *optional*): + Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. + """ + use_auth_token = kwargs.pop("use_auth_token", None) + ignore_metadata_errors = kwargs.pop("ignore_metadata_errors", False) + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + token = use_auth_token + + if token is not None: + kwargs["token"] = token + + _hf_peft_config_loaded = getattr(self, "_hf_peft_config_loaded", False) + + hf_quantizer = getattr(self, "hf_quantizer", None) + quantization_serializable = ( + hf_quantizer is not None and isinstance(hf_quantizer, HfQuantizer) and hf_quantizer.is_serializable + ) + + if hf_quantizer is not None and not _hf_peft_config_loaded and not quantization_serializable: + raise ValueError( + f"The model is quantized with {hf_quantizer.quantization_config.quant_method} and is not serializable - check out the warnings from" + " the logger on the traceback to understand the reason why the quantized model is not serializable." + ) + + if "save_config" in kwargs: + warnings.warn( + "`save_config` is deprecated and will be removed in v5 of Transformers. Use `is_main_process` instead." + ) + is_main_process = kwargs.pop("save_config") + if safe_serialization and not is_safetensors_available(): + raise ImportError("`safe_serialization` requires the `safetensors library: `pip install safetensors`.") + + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + + os.makedirs(save_directory, exist_ok=True) + + if push_to_hub: + commit_message = kwargs.pop("commit_message", None) + repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) + repo_id = self._create_repo(repo_id, **kwargs) + files_timestamps = self._get_files_timestamps(save_directory) + + # Only save the model itself if we are using distributed training + model_to_save = unwrap_model(self) + + # save the string version of dtype to the config, e.g. 
convert torch.float32 => "float32" + # we currently don't use this setting automatically, but may start to use with v5 + dtype = get_parameter_dtype(model_to_save) + model_to_save.config.torch_dtype = str(dtype).split(".")[1] + + # Attach architecture to the config + model_to_save.config.architectures = [model_to_save.__class__.__name__] + + # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be + # loaded from the Hub. + if self._auto_class is not None: + custom_object_save(self, save_directory, config=self.config) + + # Save the config + if is_main_process: + if not _hf_peft_config_loaded: + model_to_save.config.save_pretrained(save_directory) + if self.can_generate(): + # generation config built from the model config + the model config holds generation kwargs -> generate + # may revert to legacy behavior if the two don't match + if ( + model_to_save.generation_config._from_model_config + and model_to_save.config._has_non_default_generation_parameters() + ): + new_generation_config = GenerationConfig.from_model_config(model_to_save.config) + if new_generation_config != model_to_save.generation_config: + logger.warning( + "Your generation config was originally created from the model config, but the model " + "config has changed since then. Unless you pass the `generation_config` argument to this " + "model's `generate` calls, they will revert to the legacy behavior where the base " + "`generate` parameterization is loaded from the model config instead. " + "To avoid this behavior and this warning, we recommend you to overwrite the generation " + "config model attribute before calling the model's `save_pretrained`, preferably also " + "removing any generation kwargs from the model config. This warning will be raised to an " + "exception in v4.41." + ) + model_to_save.generation_config.save_pretrained(save_directory) + + if _hf_peft_config_loaded: + logger.info( + "Detected adapters on the model, saving the model in the PEFT format, only adapter weights will be saved." + ) + state_dict = model_to_save.get_adapter_state_dict() + + if save_peft_format: + logger.info( + "To match the expected format of the PEFT library, all keys of the state dict of adapters will be pre-pended with `base_model.model`." + ) + peft_state_dict = {} + for key, value in state_dict.items(): + peft_state_dict[f"base_model.model.{key}"] = value + state_dict = peft_state_dict + + active_adapter = self.active_adapters() + + if len(active_adapter) > 1: + raise ValueError( + "Multiple active adapters detected, saving multiple active adapters is not supported yet. You can save adapters separately one by one " + "by iteratively calling `model.set_adapter(adapter_name)` then `model.save_pretrained(...)`" + ) + active_adapter = active_adapter[0] + + current_peft_config = self.peft_config[active_adapter] + current_peft_config.save_pretrained(save_directory) + + # Save the model + if state_dict is None: + state_dict = model_to_save.state_dict() + + # Translate state_dict from smp to hf if saving with smp >= 1.10 + if IS_SAGEMAKER_MP_POST_1_10: + for smp_to_hf, _ in smp.state.module_manager.translate_functions: + state_dict = smp_to_hf(state_dict) + + # Handle the case where some state_dict keys shouldn't be saved + if self._keys_to_ignore_on_save is not None: + for ignore_key in self._keys_to_ignore_on_save: + if ignore_key in state_dict.keys(): + del state_dict[ignore_key] + if safe_serialization: + # Safetensors does not allow tensor aliasing. 
+ # We're going to remove aliases before saving + ptrs = collections.defaultdict(list) + for name, tensor in state_dict.items(): + # Sometimes in the state_dict we have non-tensor objects. + # e.g. in bitsandbytes we have some `str` objects in the state_dict + if isinstance(tensor, torch.Tensor): + ptrs[id_tensor_storage(tensor)].append(name) + else: + # In the non-tensor case, fall back to the pointer of the object itself + ptrs[id(tensor)].append(name) + + # These are all the pointers of shared tensors. + shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} + warn_names = set() + for names in shared_ptrs.values(): + # Removing the keys which are declared as known duplicates on + # load. This allows to make sure the name which is kept is consistent. + if self._tied_weights_keys is not None: + found = 0 + for name in sorted(names): + matches_pattern = any(re.search(pat, name) for pat in self._tied_weights_keys) + if matches_pattern and name in state_dict: + found += 1 + if found < len(names): + del state_dict[name] + + # When not all duplicates have been cleaned, still remove those keys, but put a clear warning. + # If the link between tensors was done at runtime then `from_pretrained` will not get + # the key back leading to random tensor. A proper warning will be shown + # during reload (if applicable), but since the file is not necessarily compatible with + # the config, better show a proper warning. + found = 0 + for name in names: + if name in state_dict: + found += 1 + if found > 1: + del state_dict[name] + warn_names.add(name) + if len(warn_names) > 0: + logger.warning_once( + f"Removed shared tensor {warn_names} while saving. This should be OK, but check by verifying that you don't receive any warning while reloading", + ) + + # Shard the model if it is too big. + if not _hf_peft_config_loaded: + weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME + weights_name = _add_variant(weights_name, variant) + else: + weights_name = ADAPTER_SAFE_WEIGHTS_NAME if safe_serialization else ADAPTER_WEIGHTS_NAME + + shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name=weights_name) + + # Clean the folder from a previous save + for filename in os.listdir(save_directory): + full_filename = os.path.join(save_directory, filename) + # If we have a shard file that is not going to be replaced, we delete it, but only from the main process + # in distributed settings to avoid race conditions. + weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "") + + # make sure that file to be deleted matches format of sharded file, e.g. pytorch_model-00001-of-00005 + filename_no_suffix = filename.replace(".bin", "").replace(".safetensors", "") + reg = re.compile(r"(.*?)-\d{5}-of-\d{5}") + + if ( + filename.startswith(weights_no_suffix) + and os.path.isfile(full_filename) + and filename not in shards.keys() + and is_main_process + and reg.fullmatch(filename_no_suffix) is not None + ): + os.remove(full_filename) + + # Save the model + for shard_file, shard in shards.items(): + if safe_serialization: + # At some point we will need to deal better with save_function (used for TPU and other distributed + # joyfulness), but for now this enough. 
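For context on the de-aliasing step above, here is a simplified stand-in for the storage-based grouping: `id_tensor_storage` also keys on the device and the real code additionally handles non-tensor entries. The toy module and names below are illustrative only:

```python
import collections

from torch import nn

# Toy module with a tied weight: the same Parameter is used by two submodules,
# which is exactly the aliasing safetensors refuses to serialize.
class Tied(nn.Module):
    def __init__(self):
        super().__init__()
        self.embed = nn.Embedding(10, 4)
        self.lm_head = nn.Linear(4, 10, bias=False)
        self.lm_head.weight = self.embed.weight  # tie the two weights

model = Tied()

# Group state_dict entries that share the same underlying data pointer.
ptrs = collections.defaultdict(list)
for name, tensor in model.state_dict().items():
    ptrs[tensor.data_ptr()].append(name)

shared = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}
print(shared)  # one group containing 'embed.weight' and 'lm_head.weight'
```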
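From the caller's side, the whole flow above reduces to a single call; the directory name and shard size here are illustrative:

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("gpt2")

# With the defaults, weights go to model.safetensors, or to a set of sharded files
# plus an index once they exceed `max_shard_size`.
model.save_pretrained("./my-gpt2", safe_serialization=True, max_shard_size="500MB")

# The folder round-trips through from_pretrained.
reloaded = AutoModelForCausalLM.from_pretrained("./my-gpt2")
```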
+ safe_save_file(shard, os.path.join(save_directory, shard_file), metadata={"format": "pt"}) + else: + save_function(shard, os.path.join(save_directory, shard_file)) + + if index is None: + path_to_weights = os.path.join(save_directory, weights_name) + logger.info(f"Model weights saved in {path_to_weights}") + else: + save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME + save_index_file = os.path.join(save_directory, _add_variant(save_index_file, variant)) + # Save the index as well + with open(save_index_file, "w", encoding="utf-8") as f: + content = json.dumps(index, indent=2, sort_keys=True) + "\n" + f.write(content) + logger.info( + f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be " + f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the " + f"index located at {save_index_file}." + ) + + if push_to_hub: + # Eventually create an empty model card + model_card = create_and_tag_model_card( + repo_id, self.model_tags, token=token, ignore_metadata_errors=ignore_metadata_errors + ) + + # Update model card if needed: + model_card.save(os.path.join(save_directory, "README.md")) + + self._upload_modified_files( + save_directory, + repo_id, + files_timestamps, + commit_message=commit_message, + token=token, + ) + + @wraps(PushToHubMixin.push_to_hub) + def push_to_hub(self, *args, **kwargs): + tags = self.model_tags if self.model_tags is not None else [] + + tags_kwargs = kwargs.get("tags", []) + if isinstance(tags_kwargs, str): + tags_kwargs = [tags_kwargs] + + for tag in tags_kwargs: + if tag not in tags: + tags.append(tag) + + if tags: + kwargs["tags"] = tags + return super().push_to_hub(*args, **kwargs) + + def get_memory_footprint(self, return_buffers=True): + r""" + Get the memory footprint of a model. This will return the memory footprint of the current model in bytes. + Useful to benchmark the memory footprint of the current model and design some tests. Solution inspired from the + PyTorch discussions: https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2 + + Arguments: + return_buffers (`bool`, *optional*, defaults to `True`): + Whether to return the size of the buffer tensors in the computation of the memory footprint. Buffers + are tensors that do not require gradients and not registered as parameters. E.g. mean and std in batch + norm layers. Please see: https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2 + """ + mem = sum([param.nelement() * param.element_size() for param in self.parameters()]) + if return_buffers: + mem_bufs = sum([buf.nelement() * buf.element_size() for buf in self.buffers()]) + mem = mem + mem_bufs + return mem + + @wraps(torch.nn.Module.cuda) + def cuda(self, *args, **kwargs): + # Checks if the model has been loaded in 8-bit + if getattr(self, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES: + raise ValueError( + "Calling `cuda()` is not supported for `4-bit` or `8-bit` quantized models. Please use the model as it is, since the" + " model has already been set to the correct devices and casted to the correct `dtype`." + ) + else: + return super().cuda(*args, **kwargs) + + @wraps(torch.nn.Module.to) + def to(self, *args, **kwargs): + # Checks if the model has been loaded in 8-bit + if getattr(self, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES: + raise ValueError( + "`.to` is not supported for `4-bit` or `8-bit` bitsandbytes models. 
Please use the model as it is, since the" + " model has already been set to the correct devices and casted to the correct `dtype`." + ) + elif getattr(self, "quantization_method", None) == QuantizationMethod.GPTQ: + # For GPTQ models, we prevent users from casting the model to another dytpe to restrict unwanted behaviours. + # the correct API should be to load the model with the desired dtype directly through `from_pretrained`. + dtype_present_in_args = False + + if "dtype" not in kwargs: + for arg in args: + if isinstance(arg, torch.dtype): + dtype_present_in_args = True + break + else: + dtype_present_in_args = True + + if dtype_present_in_args: + raise ValueError( + "You cannot cast a GPTQ model in a new `dtype`. Make sure to load the model using `from_pretrained` using the desired" + " `dtype` by passing the correct `torch_dtype` argument." + ) + return super().to(*args, **kwargs) + + def half(self, *args): + # Checks if the model is quantized + if getattr(self, "is_quantized", False): + raise ValueError( + "`.half()` is not supported for quantized model. Please use the model as it is, since the" + " model has already been casted to the correct `dtype`." + ) + else: + return super().half(*args) + + def float(self, *args): + # Checks if the model is quantized + if getattr(self, "is_quantized", False): + raise ValueError( + "`.float()` is not supported for quantized model. Please use the model as it is, since the" + " model has already been casted to the correct `dtype`." + ) + else: + return super().float(*args) + + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], + *model_args, + config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None, + cache_dir: Optional[Union[str, os.PathLike]] = None, + ignore_mismatched_sizes: bool = False, + force_download: bool = False, + local_files_only: bool = False, + token: Optional[Union[str, bool]] = None, + revision: str = "main", + use_safetensors: bool = None, + **kwargs, + ): + r""" + Instantiate a pretrained pytorch model from a pre-trained model configuration. + + The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train + the model, you should first set it back in training mode with `model.train()`. + + The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come + pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning + task. + + The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those + weights are discarded. + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. + - A path to a *directory* containing model weights saved using + [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In + this case, `from_tf` should be set to `True` and a configuration object should be provided as + `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a + PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. + - A path or url to a model folder containing a *flax checkpoint file* in *.msgpack* format (e.g, + `./flax_model/` containing `flax_model.msgpack`). 
In this case, `from_flax` should be set to + `True`. + - `None` if you are both providing the configuration and state dictionary (resp. with keyword + arguments `config` and `state_dict`). + model_args (sequence of positional arguments, *optional*): + All remaining positional arguments will be passed to the underlying model's `__init__` method. + config (`Union[PretrainedConfig, str, os.PathLike]`, *optional*): + Can be either: + + - an instance of a class derived from [`PretrainedConfig`], + - a string or path valid as input to [`~PretrainedConfig.from_pretrained`]. + + Configuration for the model to use instead of an automatically loaded configuration. Configuration can + be automatically loaded when: + + - The model is a model provided by the library (loaded with the *model id* string of a pretrained + model). + - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the + save directory. + - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a + configuration JSON file named *config.json* is found in the directory. + state_dict (`Dict[str, torch.Tensor]`, *optional*): + A state dictionary to use instead of a state dictionary loaded from saved weights file. + + This option can be used if you want to create a model from a pretrained configuration but load your own + weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and + [`~PreTrainedModel.from_pretrained`] is not a simpler option. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the + standard cache should not be used. + from_tf (`bool`, *optional*, defaults to `False`): + Load the model weights from a TensorFlow checkpoint save file (see docstring of + `pretrained_model_name_or_path` argument). + from_flax (`bool`, *optional*, defaults to `False`): + Load the model weights from a Flax checkpoint save file (see docstring of + `pretrained_model_name_or_path` argument). + ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): + Whether or not to raise an error if some of the weights from the checkpoint do not have the same size + as the weights of the model (if for instance, you are instantiating a model with 10 labels from a + checkpoint with 3 labels). + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files. Will attempt to resume the download if such a + file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only(`bool`, *optional*, defaults to `False`): + Whether or not to only look at local files (i.e., do not try to download the model). + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use + the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). 
+ revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + + + + To test a pull request you made on the Hub, you can pass `revision="refs/pr/". + + + + mirror (`str`, *optional*): + Mirror source to accelerate downloads in China. If you are from China and have an accessibility + problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. + Please refer to the mirror site for more information. + _fast_init(`bool`, *optional*, defaults to `True`): + Whether or not to disable fast initialization. + + + + One should only disable *_fast_init* to ensure backwards compatibility with `transformers.__version__ < + 4.6.0` for seeded model initialization. This argument will be removed at the next major version. See + [pull request 11471](https://github.com/huggingface/transformers/pull/11471) for more information. + + + attn_implementation (`str`, *optional*): + The attention implementation to use in the model (if relevant). Can be any of `"eager"` (manual implementation of the attention), `"sdpa"` (using [`F.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html)), or `"flash_attention_2"` (using [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention)). By default, if available, SDPA will be used for torch>=2.1.1. The default is otherwise the manual `"eager"` implementation. + + > Parameters for big model inference + + low_cpu_mem_usage(`bool`, *optional*): + Tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + This is an experimental feature and a subject to change at any moment. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model under a specific `dtype`. The different options + are: + + 1. `torch.float16` or `torch.bfloat16` or `torch.float`: load in a specified + `dtype`, ignoring the model's `config.torch_dtype` if one exists. If not specified + - the model will get loaded in `torch.float` (fp32). + + 2. `"auto"` - A `torch_dtype` entry in the `config.json` file of the model will be + attempted to be used. If this entry isn't found then next check the `dtype` of the first weight in + the checkpoint that's of a floating point type and use that as `dtype`. This will load the model + using the `dtype` it was saved in at the end of the training. It can't be used as an indicator of how + the model was trained. Since it could be trained in one of half precision dtypes, but saved in fp32. + + + + For some models the `dtype` they were trained in is unknown - you may try to check the model's paper or + reach out to the authors and ask them to add this information to the model's card and to insert the + `torch_dtype` entry in `config.json` on the hub. + + + + device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*): + A map that specifies where each submodule should go. It doesn't need to be refined to each + parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the + same device. 
If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank + like `1`) on which the model will be allocated, the device map will map the entire model to this + device. Passing `device_map = 0` means put the whole model on GPU 0. + + To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier to maximum memory. Will default to the maximum memory available for each + GPU and the available CPU RAM if unset. + offload_folder (`str` or `os.PathLike`, *optional*): + If the `device_map` contains any value `"disk"`, the folder where we will offload weights. + offload_state_dict (`bool`, *optional*): + If `True`, will temporarily offload the CPU state dict to the hard drive to avoid getting out of CPU + RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to + `True` when there is some disk offload. + offload_buffers (`bool`, *optional*): + Whether or not to offload the buffers with the model parameters. + quantization_config (`Union[QuantizationConfigMixin,Dict]`, *optional*): + A dictionary of configuration parameters or a QuantizationConfigMixin object for quantization (e.g + bitsandbytes, gptq). There may be other quantization-related kwargs, including `load_in_4bit` and + `load_in_8bit`, which are parsed by QuantizationConfigParser. Supported only for bitsandbytes + quantizations and not preferred. consider inserting all such arguments into quantization_config + instead. + subfolder (`str`, *optional*, defaults to `""`): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can + specify the folder name here. + variant (`str`, *optional*): + If specified load weights from `variant` filename, *e.g.* pytorch_model..bin. `variant` is + ignored when using `from_tf` or `from_flax`. + use_safetensors (`bool`, *optional*, defaults to `None`): + Whether or not to use `safetensors` checkpoints. Defaults to `None`. If not specified and `safetensors` + is not installed, it will be set to `False`. + + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., + `output_attentions=True`). Behaves differently depending on whether a `config` is provided or + automatically loaded: + + - If a configuration is provided with `config`, `**kwargs` will be directly passed to the + underlying model's `__init__` method (we assume all relevant updates to the configuration have + already been done) + - If a configuration is not provided, `kwargs` will be first passed to the configuration class + initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that + corresponds to a configuration attribute will be used to override said attribute with the + supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute + will be passed to the underlying model's `__init__` function. + + + + Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to + use this method in a firewalled environment. 
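In addition to the examples that follow, a short sketch of two of the options documented above: big-model loading with an automatic device map, and reusing a checkpoint whose head size does not match. Repo ids and the offload path are placeholders:

```python
from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification

# Let Accelerate spread the layers over the available devices and pick the dtype
# stored in the checkpoint.
model = AutoModelForCausalLM.from_pretrained(
    "some-org/some-large-model",   # hypothetical repo id
    device_map="auto",
    torch_dtype="auto",
    offload_folder="./offload",    # only used if some modules are mapped to "disk"
)

# Reuse a checkpoint fine-tuned with a different number of labels and reinitialize
# only the mismatched classification head instead of raising.
classifier = AutoModelForSequenceClassification.from_pretrained(
    "some-org/sentiment-model-3-labels",   # hypothetical repo id
    num_labels=10,
    ignore_mismatched_sizes=True,
)
```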
+ + + + Examples: + + ```python + >>> from transformers import BertConfig, BertModel + + >>> # Download model and configuration from huggingface.co and cache. + >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased") + >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). + >>> model = BertModel.from_pretrained("./test/saved_model/") + >>> # Update configuration during loading. + >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased", output_attentions=True) + >>> assert model.config.output_attentions == True + >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable). + >>> config = BertConfig.from_json_file("./tf_model/my_tf_model_config.json") + >>> model = BertModel.from_pretrained("./tf_model/my_tf_checkpoint.ckpt.index", from_tf=True, config=config) + >>> # Loading from a Flax checkpoint file instead of a PyTorch model (slower) + >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased", from_flax=True) + ``` + + * `low_cpu_mem_usage` algorithm: + + This is an experimental function that loads the model using ~1x model size CPU memory + + Here is how it works: + + 1. save which state_dict keys we have + 2. drop state_dict before the model is created, since the latter takes 1x model size CPU memory + 3. after the model has been instantiated switch to the meta device all params/buffers that + are going to be replaced from the loaded state_dict + 4. load state_dict 2nd time + 5. replace the params/buffers from the state_dict + + Currently, it can't handle deepspeed ZeRO stage 3 and ignores loading errors + + """ + state_dict = kwargs.pop("state_dict", None) + from_tf = kwargs.pop("from_tf", False) + from_flax = kwargs.pop("from_flax", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + output_loading_info = kwargs.pop("output_loading_info", False) + use_auth_token = kwargs.pop("use_auth_token", None) + trust_remote_code = kwargs.pop("trust_remote_code", None) + _ = kwargs.pop("mirror", None) + from_pipeline = kwargs.pop("_from_pipeline", None) + from_auto_class = kwargs.pop("_from_auto", False) + _fast_init = kwargs.pop("_fast_init", True) + torch_dtype = kwargs.pop("torch_dtype", None) + low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", None) + device_map = kwargs.pop("device_map", None) + max_memory = kwargs.pop("max_memory", None) + offload_folder = kwargs.pop("offload_folder", None) + offload_state_dict = kwargs.pop("offload_state_dict", False) + offload_buffers = kwargs.pop("offload_buffers", False) + load_in_8bit = kwargs.pop("load_in_8bit", False) + load_in_4bit = kwargs.pop("load_in_4bit", False) + quantization_config = kwargs.pop("quantization_config", None) + subfolder = kwargs.pop("subfolder", "") + commit_hash = kwargs.pop("_commit_hash", None) + variant = kwargs.pop("variant", None) + adapter_kwargs = kwargs.pop("adapter_kwargs", {}) + adapter_name = kwargs.pop("adapter_name", "default") + use_flash_attention_2 = kwargs.pop("use_flash_attention_2", False) + + if is_fsdp_enabled(): + low_cpu_mem_usage = True + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
+ ) + token = use_auth_token + + if token is not None and adapter_kwargs is not None and "token" not in adapter_kwargs: + adapter_kwargs["token"] = token + + if use_safetensors is None and not is_safetensors_available(): + use_safetensors = False + if trust_remote_code is True: + logger.warning( + "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is" + " ignored." + ) + + if commit_hash is None: + if not isinstance(config, PretrainedConfig): + # We make a call to the config file first (which may be absent) to get the commit hash as soon as possible + resolved_config_file = cached_file( + pretrained_model_name_or_path, + CONFIG_NAME, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + _raise_exceptions_for_gated_repo=False, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + ) + commit_hash = extract_commit_hash(resolved_config_file, commit_hash) + else: + commit_hash = getattr(config, "_commit_hash", None) + + if is_peft_available(): + _adapter_model_path = adapter_kwargs.pop("_adapter_model_path", None) + + if _adapter_model_path is None: + _adapter_model_path = find_adapter_config_file( + pretrained_model_name_or_path, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + _commit_hash=commit_hash, + **adapter_kwargs, + ) + if _adapter_model_path is not None and os.path.isfile(_adapter_model_path): + with open(_adapter_model_path, "r", encoding="utf-8") as f: + _adapter_model_path = pretrained_model_name_or_path + pretrained_model_name_or_path = json.load(f)["base_model_name_or_path"] + else: + _adapter_model_path = None + + # change device_map into a map if we passed an int, a str or a torch.device + if isinstance(device_map, torch.device): + device_map = {"": device_map} + elif isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: + try: + device_map = {"": torch.device(device_map)} + except RuntimeError: + raise ValueError( + "When passing device_map as a string, the value needs to be a device name (e.g. cpu, cuda:0) or " + f"'auto', 'balanced', 'balanced_low_0', 'sequential' but found {device_map}." + ) + elif isinstance(device_map, int): + if device_map < 0: + raise ValueError( + "You can't pass device_map as a negative int. If you want to put the model on the cpu, pass device_map = 'cpu' " + ) + else: + device_map = {"": device_map} + + if device_map is not None: + if low_cpu_mem_usage is None: + low_cpu_mem_usage = True + elif not low_cpu_mem_usage: + raise ValueError("Passing along a `device_map` requires `low_cpu_mem_usage=True`") + + if low_cpu_mem_usage: + if is_deepspeed_zero3_enabled(): + raise ValueError( + "DeepSpeed Zero-3 is not compatible with `low_cpu_mem_usage=True` or with passing a `device_map`." + ) + elif not is_accelerate_available(): + raise ImportError( + "Using `low_cpu_mem_usage=True` or a `device_map` requires Accelerate: `pip install accelerate`" + ) + + # handling bnb config from kwargs, remove after `load_in_{4/8}bit` deprecation. + if load_in_4bit or load_in_8bit: + if quantization_config is not None: + raise ValueError( + "You can't pass `load_in_4bit`or `load_in_8bit` as a kwarg when passing " + "`quantization_config` argument at the same time." 
+ ) + + # preparing BitsAndBytesConfig from kwargs + config_dict = {k: v for k, v in kwargs.items() if k in inspect.signature(BitsAndBytesConfig).parameters} + config_dict = {**config_dict, "load_in_4bit": load_in_4bit, "load_in_8bit": load_in_8bit} + quantization_config, kwargs = BitsAndBytesConfig.from_dict( + config_dict=config_dict, return_unused_kwargs=True, **kwargs + ) + logger.warning( + "The `load_in_4bit` and `load_in_8bit` arguments are deprecated and will be removed in the future versions. " + "Please, pass a `BitsAndBytesConfig` object in `quantization_config` argument instead." + ) + + from_pt = not (from_tf | from_flax) + + user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class} + if from_pipeline is not None: + user_agent["using_pipeline"] = from_pipeline + + if is_offline_mode() and not local_files_only: + logger.info("Offline mode: forcing local_files_only=True") + local_files_only = True + + # Load config if we don't provide a configuration + if not isinstance(config, PretrainedConfig): + config_path = config if config is not None else pretrained_model_name_or_path + config, model_kwargs = cls.config_class.from_pretrained( + config_path, + cache_dir=cache_dir, + return_unused_kwargs=True, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + _from_auto=from_auto_class, + _from_pipeline=from_pipeline, + **kwargs, + ) + else: + # In case one passes a config to `from_pretrained` + "attn_implementation" + # override the `_attn_implementation` attribute to `attn_implementation` of the kwargs + # Please see: https://github.com/huggingface/transformers/issues/28038 + + # Overwrite `config._attn_implementation` by the one from the kwargs --> in auto-factory + # we pop attn_implementation from the kwargs but this handles the case where users + # passes manually the config to `from_pretrained`. + config = copy.deepcopy(config) + + kwarg_attn_imp = kwargs.pop("attn_implementation", None) + if kwarg_attn_imp is not None and config._attn_implementation != kwarg_attn_imp: + config._attn_implementation = kwarg_attn_imp + model_kwargs = kwargs + + pre_quantized = getattr(config, "quantization_config", None) is not None + if pre_quantized or quantization_config is not None: + if pre_quantized: + config.quantization_config = AutoHfQuantizer.merge_quantization_configs( + config.quantization_config, quantization_config + ) + else: + config.quantization_config = quantization_config + hf_quantizer = AutoHfQuantizer.from_config(config.quantization_config, pre_quantized=pre_quantized) + else: + hf_quantizer = None + + if hf_quantizer is not None: + hf_quantizer.validate_environment( + torch_dtype=torch_dtype, from_tf=from_tf, from_flax=from_flax, device_map=device_map + ) + torch_dtype = hf_quantizer.update_torch_dtype(torch_dtype) + device_map = hf_quantizer.update_device_map(device_map) + + # Force-set to `True` for more mem efficiency + if low_cpu_mem_usage is None: + low_cpu_mem_usage = True + logger.warning("`low_cpu_mem_usage` was None, now set to True since model is quantized.") + is_quantized = hf_quantizer is not None + + # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the + # index of the files. 
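Instead of the deprecated `load_in_4bit`/`load_in_8bit` kwargs handled above, the preferred path is an explicit quantization config. The repo id is a placeholder, and the `bitsandbytes` package plus a CUDA device are assumed:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    "some-org/some-large-model",   # hypothetical repo id
    quantization_config=bnb_config,
    device_map="auto",
)
```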
+ is_sharded = False + sharded_metadata = None + # Load model + loading_info = None + + # Keep in fp32 modules + keep_in_fp32_modules = None + use_keep_in_fp32_modules = False + + if pretrained_model_name_or_path is not None: + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + is_local = os.path.isdir(pretrained_model_name_or_path) + if is_local: + if from_tf and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index") + ): + # Load from a TF 1.0 checkpoint in priority if from_tf + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index") + elif from_tf and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME) + ): + # Load from a TF 2.0 checkpoint in priority if from_tf + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME) + elif from_flax and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) + ): + # Load from a Flax checkpoint in priority if from_flax + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) + elif use_safetensors is not False and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant)) + ): + # Load from a safetensors checkpoint + archive_file = os.path.join( + pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant) + ) + elif use_safetensors is not False and os.path.isfile( + os.path.join( + pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant) + ) + ): + # Load from a sharded safetensors checkpoint + archive_file = os.path.join( + pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant) + ) + is_sharded = True + elif os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant)) + ): + # Load from a PyTorch checkpoint + archive_file = os.path.join( + pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant) + ) + elif os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant)) + ): + # Load from a sharded PyTorch checkpoint + archive_file = os.path.join( + pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant) + ) + is_sharded = True + # At this stage we don't have a weight file so we will raise an error. + elif os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index") + ) or os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME)): + raise EnvironmentError( + f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory" + f" {pretrained_model_name_or_path} but there is a file for TensorFlow weights. Use" + " `from_tf=True` to load this model from those weights." + ) + elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)): + raise EnvironmentError( + f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory" + f" {pretrained_model_name_or_path} but there is a file for Flax weights. Use `from_flax=True`" + " to load this model from those weights." + ) + elif use_safetensors: + raise EnvironmentError( + f"Error no file named {_add_variant(SAFE_WEIGHTS_NAME, variant)} found in directory" + f" {pretrained_model_name_or_path}." 
+ ) + else: + raise EnvironmentError( + f"Error no file named {_add_variant(WEIGHTS_NAME, variant)}, {TF2_WEIGHTS_NAME}," + f" {TF_WEIGHTS_NAME + '.index'} or {FLAX_WEIGHTS_NAME} found in directory" + f" {pretrained_model_name_or_path}." + ) + elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)): + archive_file = pretrained_model_name_or_path + is_local = True + elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path + ".index")): + if not from_tf: + raise ValueError( + f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set " + "from_tf to True to load from this checkpoint." + ) + archive_file = os.path.join(subfolder, pretrained_model_name_or_path + ".index") + is_local = True + elif is_remote_url(pretrained_model_name_or_path): + filename = pretrained_model_name_or_path + resolved_archive_file = download_url(pretrained_model_name_or_path) + else: + # set correct filename + if from_tf: + filename = TF2_WEIGHTS_NAME + elif from_flax: + filename = FLAX_WEIGHTS_NAME + elif use_safetensors is not False: + filename = _add_variant(SAFE_WEIGHTS_NAME, variant) + else: + filename = _add_variant(WEIGHTS_NAME, variant) + + try: + # Load from URL or cache if already cached + cached_file_kwargs = { + "cache_dir": cache_dir, + "force_download": force_download, + "proxies": proxies, + "resume_download": resume_download, + "local_files_only": local_files_only, + "token": token, + "user_agent": user_agent, + "revision": revision, + "subfolder": subfolder, + "_raise_exceptions_for_gated_repo": False, + "_raise_exceptions_for_missing_entries": False, + "_commit_hash": commit_hash, + } + resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) + + # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None + # result when internet is up, the repo and revision exist, but the file does not. + if resolved_archive_file is None and filename == _add_variant(SAFE_WEIGHTS_NAME, variant): + # Maybe the checkpoint is sharded, we try to grab the index name in this case. + resolved_archive_file = cached_file( + pretrained_model_name_or_path, + _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant), + **cached_file_kwargs, + ) + if resolved_archive_file is not None: + is_sharded = True + elif use_safetensors: + if revision == "main": + resolved_archive_file, revision, is_sharded = auto_conversion( + pretrained_model_name_or_path, **cached_file_kwargs + ) + cached_file_kwargs["revision"] = revision + if resolved_archive_file is None: + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named" + f" {_add_variant(SAFE_WEIGHTS_NAME, variant)} or {_add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)} " + "and thus cannot be loaded with `safetensors`. Please make sure that the model has " + "been saved with `safe_serialization=True` or do not set `use_safetensors=True`." + ) + else: + # This repo has no safetensors file of any kind, we switch to PyTorch. + filename = _add_variant(WEIGHTS_NAME, variant) + resolved_archive_file = cached_file( + pretrained_model_name_or_path, filename, **cached_file_kwargs + ) + if resolved_archive_file is None and filename == _add_variant(WEIGHTS_NAME, variant): + # Maybe the checkpoint is sharded, we try to grab the index name in this case. 
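A hypothetical helper that summarizes the local-directory resolution order implemented above (TF and Flax branches omitted): safetensors first, then its sharded index, then the pickle-based `.bin` file, then its sharded index. Filenames follow the usual transformers conventions:

```python
import os
from typing import Optional

def pick_local_weights_file(folder: str, variant: Optional[str] = None) -> str:
    def add_variant(name: str) -> str:
        # e.g. "pytorch_model.bin" with variant "fp16" -> "pytorch_model.fp16.bin"
        if variant is None:
            return name
        stem, ext = name.rsplit(".", 1)
        return f"{stem}.{variant}.{ext}"

    for name in (
        add_variant("model.safetensors"),
        add_variant("model.safetensors.index.json"),
        add_variant("pytorch_model.bin"),
        add_variant("pytorch_model.bin.index.json"),
    ):
        if os.path.isfile(os.path.join(folder, name)):
            return name
    raise EnvironmentError(f"No weights file found in {folder}")
```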
+ resolved_archive_file = cached_file( + pretrained_model_name_or_path, + _add_variant(WEIGHTS_INDEX_NAME, variant), + **cached_file_kwargs, + ) + if resolved_archive_file is not None: + is_sharded = True + if resolved_archive_file is None: + # Otherwise, maybe there is a TF or Flax model file. We try those to give a helpful error + # message. + has_file_kwargs = { + "revision": revision, + "proxies": proxies, + "token": token, + } + if has_file(pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **has_file_kwargs): + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named" + f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file for TensorFlow weights." + " Use `from_tf=True` to load this model from those weights." + ) + elif has_file(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME, **has_file_kwargs): + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named" + f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file for Flax weights. Use" + " `from_flax=True` to load this model from those weights." + ) + elif variant is not None and has_file( + pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs + ): + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named" + f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file without the variant" + f" {variant}. Use `variant=None` to load this model from those weights." + ) + else: + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named" + f" {_add_variant(WEIGHTS_NAME, variant)}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or" + f" {FLAX_WEIGHTS_NAME}." + ) + except EnvironmentError: + # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted + # to the original exception. + raise + except Exception as e: + # For any other exception, we throw a generic error. + raise EnvironmentError( + f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it" + " from 'https://huggingface.co/models', make sure you don't have a local directory with the" + f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" + f" directory containing a file named {_add_variant(WEIGHTS_NAME, variant)}," + f" {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or {FLAX_WEIGHTS_NAME}." + ) from e + + if is_local: + logger.info(f"loading weights file {archive_file}") + resolved_archive_file = archive_file + else: + logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}") + else: + resolved_archive_file = None + + # We'll need to download and cache each checkpoint shard if the checkpoint is sharded. + if is_sharded: + # rsolved_archive_file becomes a list of files that point to the different checkpoint shards in this case. 
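For reference, a sketch of what the sharded-checkpoint index resolved here typically contains: a `weight_map` from parameter name to the shard file holding it, plus size metadata. All names and sizes below are made up:

```python
index = {
    "metadata": {"total_size": 26_953_662_464},
    "weight_map": {
        "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
        "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
        # ... one entry per parameter ...
        "lm_head.weight": "model-00003-of-00003.safetensors",
    },
}

# The loader downloads/caches every distinct shard referenced by weight_map.
shards = sorted(set(index["weight_map"].values()))
print(shards)
```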
+ resolved_archive_file, sharded_metadata = get_checkpoint_shard_files( + pretrained_model_name_or_path, + resolved_archive_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + revision=revision, + subfolder=subfolder, + _commit_hash=commit_hash, + ) + + if ( + is_safetensors_available() + and isinstance(resolved_archive_file, str) + and resolved_archive_file.endswith(".safetensors") + ): + with safe_open(resolved_archive_file, framework="pt") as f: + metadata = f.metadata() + + if metadata.get("format") == "pt": + pass + elif metadata.get("format") == "tf": + from_tf = True + logger.info("A TensorFlow safetensors file is being loaded in a PyTorch model.") + elif metadata.get("format") == "flax": + from_flax = True + logger.info("A Flax safetensors file is being loaded in a PyTorch model.") + elif metadata.get("format") == "mlx": + # This is a mlx file, we assume weights are compatible with pt + pass + else: + raise ValueError( + f"Incompatible safetensors file. File metadata is not ['pt', 'tf', 'flax', 'mlx'] but {metadata.get('format')}" + ) + + from_pt = not (from_tf | from_flax) + + # load pt weights early so that we know which dtype to init the model under + if from_pt: + if not is_sharded and state_dict is None: + # Time to load the checkpoint + state_dict = load_state_dict(resolved_archive_file) + + # set dtype to instantiate the model under: + # 1. If torch_dtype is not None, we use that dtype + # 2. If torch_dtype is "auto", we auto-detect dtype from the loaded state_dict, by checking its first + # weights entry that is of a floating type - we assume all floating dtype weights are of the same dtype + # we also may have config.torch_dtype available, but we won't rely on it till v5 + dtype_orig = None + + if torch_dtype is not None: + if isinstance(torch_dtype, str): + if torch_dtype == "auto": + if hasattr(config, "torch_dtype") and config.torch_dtype is not None: + torch_dtype = config.torch_dtype + logger.info(f"Will use torch_dtype={torch_dtype} as defined in model's config object") + else: + if is_sharded and "dtype" in sharded_metadata: + torch_dtype = sharded_metadata["dtype"] + elif not is_sharded: + torch_dtype = get_state_dict_dtype(state_dict) + else: + one_state_dict = load_state_dict(resolved_archive_file[0]) + torch_dtype = get_state_dict_dtype(one_state_dict) + del one_state_dict # free CPU memory + logger.info( + "Since the `torch_dtype` attribute can't be found in model's config object, " + "will use torch_dtype={torch_dtype} as derived from model's weights" + ) + else: + raise ValueError( + f'`torch_dtype` can be either `torch.dtype` or `"auto"`, but received {torch_dtype}' + ) + dtype_orig = cls._set_default_torch_dtype(torch_dtype) + + # Check if `_keep_in_fp32_modules` is not None + use_keep_in_fp32_modules = (cls._keep_in_fp32_modules is not None) and ( + (torch_dtype == torch.float16) or hasattr(hf_quantizer, "use_keep_in_fp32_modules") + ) + + if is_sharded: + loaded_state_dict_keys = sharded_metadata["all_checkpoint_keys"] + else: + loaded_state_dict_keys = list(state_dict.keys()) + if low_cpu_mem_usage or (use_keep_in_fp32_modules and is_accelerate_available()): + # In case some weights need to be kept in float32 and accelerate is not installed, + # we later on want to take the path where state_dict is not None, that is the one + # that do not require accelerate. 
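The framework check above relies on the file-level safetensors metadata; a minimal sketch of reading it directly (the path is a placeholder):

```python
from safetensors import safe_open

with safe_open("model.safetensors", framework="pt") as f:
    metadata = f.metadata() or {}

# "pt" for files written by save_pretrained(safe_serialization=True); "tf", "flax"
# and "mlx" are the other values accepted by the loader above.
print(metadata.get("format"))
```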
+ state_dict = None + + config.name_or_path = pretrained_model_name_or_path + + # Instantiate model. + init_contexts = [no_init_weights(_enable=_fast_init)] + + if is_deepspeed_zero3_enabled() and not is_quantized: + import deepspeed + + logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model") + init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config())] + init_contexts + elif low_cpu_mem_usage: + init_contexts.append(init_empty_weights()) + + config = copy.deepcopy(config) # We do not want to modify the config inplace in from_pretrained. + config = cls._autoset_attn_implementation( + config, use_flash_attention_2=use_flash_attention_2, torch_dtype=torch_dtype, device_map=device_map + ) + + with ContextManagers(init_contexts): + # Let's make sure we don't run the init function of buffer modules + model = cls(config, *model_args, **model_kwargs) + + # make sure we use the model's config since the __init__ call might have copied it + config = model.config + + # Check first if we are `from_pt` + if use_keep_in_fp32_modules: + if is_accelerate_available() and not is_deepspeed_zero3_enabled(): + low_cpu_mem_usage = True + keep_in_fp32_modules = model._keep_in_fp32_modules + else: + keep_in_fp32_modules = [] + + if hf_quantizer is not None: + hf_quantizer.preprocess_model( + model=model, device_map=device_map, keep_in_fp32_modules=keep_in_fp32_modules + ) + + # We store the original dtype for quantized models as we cannot easily retrieve it + # once the weights have been quantized + # Note that once you have loaded a quantized model, you can't change its dtype so this will + # remain a single source of truth + config._pre_quantization_dtype = torch_dtype + + if isinstance(device_map, str): + special_dtypes = {} + + if hf_quantizer is not None: + special_dtypes.update(hf_quantizer.get_special_dtypes_update(model, torch_dtype)) + + special_dtypes.update( + { + name: torch.float32 + for name, _ in model.named_parameters() + if any(m in name for m in keep_in_fp32_modules) + } + ) + + target_dtype = torch_dtype + + if hf_quantizer is not None: + target_dtype = hf_quantizer.adjust_target_dtype(target_dtype) + + no_split_modules = model._get_no_split_modules(device_map) + if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: + raise ValueError( + "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " + "'sequential'." + ) + + device_map_kwargs = {"no_split_module_classes": no_split_modules} + if "special_dtypes" in inspect.signature(infer_auto_device_map).parameters: + device_map_kwargs["special_dtypes"] = special_dtypes + elif len(special_dtypes) > 0: + logger.warning( + "This model has some weights that should be kept in higher precision, you need to upgrade " + "`accelerate` to properly deal with them (`pip install --upgrade accelerate`)." + ) + if device_map != "sequential": + max_memory = get_balanced_memory( + model, + dtype=target_dtype, + low_zero=(device_map == "balanced_low_0"), + max_memory=max_memory, + **device_map_kwargs, + ) + else: + max_memory = get_max_memory(max_memory) + if hf_quantizer is not None: + max_memory = hf_quantizer.adjust_max_memory(max_memory) + device_map_kwargs["max_memory"] = max_memory + + # Make sure tied weights are tied before creating the device map. 
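+ # (if they were left untied, `infer_auto_device_map` could place the two copies of a shared parameter
+ # on different devices)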
+ model.tie_weights() + device_map = infer_auto_device_map(model, dtype=target_dtype, **device_map_kwargs) + + if hf_quantizer is not None: + hf_quantizer.validate_environment(device_map=device_map) + + elif device_map is not None: + model.tie_weights() + tied_params = find_tied_parameters(model) + # check if we don't have tied param in different devices + check_tied_parameters_on_same_device(tied_params, device_map) + + if from_tf: + if resolved_archive_file.endswith(".index"): + # Load from a TensorFlow 1.X checkpoint - provided by original authors + model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index' + else: + # Load from our TensorFlow 2.0 checkpoints + try: + from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model + + model, loading_info = load_tf2_checkpoint_in_pytorch_model( + model, resolved_archive_file, allow_missing_keys=True, output_loading_info=True + ) + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed." + " Please see https://pytorch.org/ and https://www.tensorflow.org/install/ for installation" + " instructions." + ) + raise + elif from_flax: + try: + from .modeling_flax_pytorch_utils import load_flax_checkpoint_in_pytorch_model + + model = load_flax_checkpoint_in_pytorch_model(model, resolved_archive_file) + except ImportError: + logger.error( + "Loading a Flax model in PyTorch, requires both PyTorch and Flax to be installed. Please see" + " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for" + " installation instructions." + ) + raise + elif from_pt: + # restore default dtype + if dtype_orig is not None: + torch.set_default_dtype(dtype_orig) + ( + model, + missing_keys, + unexpected_keys, + mismatched_keys, + offload_index, + error_msgs, + ) = cls._load_pretrained_model( + model, + state_dict, + loaded_state_dict_keys, # XXX: rename? + resolved_archive_file, + pretrained_model_name_or_path, + ignore_mismatched_sizes=ignore_mismatched_sizes, + sharded_metadata=sharded_metadata, + _fast_init=_fast_init, + low_cpu_mem_usage=low_cpu_mem_usage, + device_map=device_map, + offload_folder=offload_folder, + offload_state_dict=offload_state_dict, + dtype=torch_dtype, + hf_quantizer=hf_quantizer, + keep_in_fp32_modules=keep_in_fp32_modules, + ) + + # make sure token embedding weights are still tied if needed + model.tie_weights() + + # Set model in evaluation mode to deactivate DropOut modules by default + model.eval() + + # If it is a model with generation capabilities, attempt to load the generation config + if model.can_generate() and pretrained_model_name_or_path is not None: + try: + model.generation_config = GenerationConfig.from_pretrained( + pretrained_model_name_or_path, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + _from_auto=from_auto_class, + _from_pipeline=from_pipeline, + **kwargs, + ) + except OSError: + logger.info( + "Generation config file not found, using a generation config created from the model config." 
+ ) + pass + + # Dispatch model with hooks on all devices if necessary + if device_map is not None: + device_map_kwargs = { + "device_map": device_map, + "offload_dir": offload_folder, + "offload_index": offload_index, + "offload_buffers": offload_buffers, + } + if "skip_keys" in inspect.signature(dispatch_model).parameters: + device_map_kwargs["skip_keys"] = model._skip_keys_device_placement + if not is_fsdp_enabled() and not is_deepspeed_zero3_enabled(): + dispatch_model(model, **device_map_kwargs) + + if hf_quantizer is not None: + hf_quantizer.postprocess_model(model) + model.hf_quantizer = hf_quantizer + + if _adapter_model_path is not None: + model.load_adapter( + _adapter_model_path, + adapter_name=adapter_name, + token=token, + adapter_kwargs=adapter_kwargs, + ) + + if output_loading_info: + if loading_info is None: + loading_info = { + "missing_keys": missing_keys, + "unexpected_keys": unexpected_keys, + "mismatched_keys": mismatched_keys, + "error_msgs": error_msgs, + } + return model, loading_info + + return model + + @classmethod + def _load_pretrained_model( + cls, + model, + state_dict, + loaded_keys, + resolved_archive_file, + pretrained_model_name_or_path, + ignore_mismatched_sizes=False, + sharded_metadata=None, + _fast_init=True, + low_cpu_mem_usage=False, + device_map=None, + offload_folder=None, + offload_state_dict=None, + dtype=None, + hf_quantizer=None, + keep_in_fp32_modules=None, + ): + is_safetensors = False + is_quantized = hf_quantizer is not None + + if device_map is not None and "disk" in device_map.values(): + archive_file = ( + resolved_archive_file[0] if isinstance(resolved_archive_file, (list, tuple)) else resolved_archive_file + ) + is_safetensors = archive_file.endswith(".safetensors") + if offload_folder is None and not is_safetensors: + raise ValueError( + "The current `device_map` had weights offloaded to the disk. Please provide an `offload_folder`" + " for them. Alternatively, make sure you have `safetensors` installed if the model you are using" + " offers the weights in this format." + ) + if offload_folder is not None: + os.makedirs(offload_folder, exist_ok=True) + if offload_state_dict is None: + offload_state_dict = True + + is_sharded_safetensors = is_safetensors and sharded_metadata is not None + + # tie the model weights before retrieving the state_dict + model.tie_weights() + + # Retrieve missing & unexpected_keys + model_state_dict = model.state_dict() + expected_keys = list(model_state_dict.keys()) + prefix = model.base_model_prefix + + def _fix_key(key): + if "beta" in key: + return key.replace("beta", "bias") + if "gamma" in key: + return key.replace("gamma", "weight") + return key + + original_loaded_keys = loaded_keys + loaded_keys = [_fix_key(key) for key in loaded_keys] + + if len(prefix) > 0: + has_prefix_module = any(s.startswith(prefix) for s in loaded_keys) + expects_prefix_module = any(s.startswith(prefix) for s in expected_keys) + else: + has_prefix_module = False + expects_prefix_module = False + + # key re-naming operations are never done on the keys + # that are loaded, but always on the keys of the newly initialized model + remove_prefix_from_model = not has_prefix_module and expects_prefix_module + add_prefix_to_model = has_prefix_module and not expects_prefix_module + + if remove_prefix_from_model: + _prefix = f"{prefix}." 
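+ # e.g. a bare base-model checkpoint (keys like `encoder.layer.0...`) loaded into a model with a head
+ # (keys like `bert.encoder.layer.0...`): the `bert.` prefix is stripped from the expected keys so the
+ # two key sets can be compared.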
+ expected_keys_not_prefixed = [s for s in expected_keys if not s.startswith(_prefix)] + expected_keys = [s[len(_prefix) :] if s.startswith(_prefix) else s for s in expected_keys] + elif add_prefix_to_model: + expected_keys = [".".join([prefix, s]) for s in expected_keys] + + missing_keys = sorted(set(expected_keys) - set(loaded_keys)) + unexpected_keys = set(loaded_keys) - set(expected_keys) + # Remove nonpersistent buffers from unexpected keys: they are not in the state dict but will be in the model + # buffers + model_buffers = {n for n, _ in model.named_buffers()} + if remove_prefix_from_model: + model_buffers = {key[len(_prefix) :] if key.startswith(_prefix) else key for key in model_buffers} + elif add_prefix_to_model: + model_buffers = {".".join([prefix, key]) for key in model_buffers} + unexpected_keys = sorted(unexpected_keys - model_buffers) + + model.tie_weights() + if device_map is None and not is_fsdp_enabled() and not is_deepspeed_zero3_enabled(): + ptrs = collections.defaultdict(list) + for name, tensor in model.state_dict().items(): + id_tensor = id_tensor_storage(tensor) + ptrs[id_tensor].append(name) + + # These are all the pointers of shared tensors. + tied_params = [names for _, names in ptrs.items() if len(names) > 1] + else: + # id function doesn't work for meta tensor so we need this function + tied_params = find_tied_parameters(model) + + for group in tied_params: + if remove_prefix_from_model: + group = [key[len(_prefix) :] if key.startswith(_prefix) else key for key in group] + elif add_prefix_to_model: + group = [".".join([prefix, key]) for key in group] + missing_in_group = [k for k in missing_keys if k in group] + if len(missing_in_group) > 0 and len(missing_in_group) < len(group): + missing_keys = [k for k in missing_keys if k not in missing_in_group] + + # Some models may have keys that are not in the state by design, removing them before needlessly warning + # the user. + if cls._keys_to_ignore_on_load_missing is not None: + for pat in cls._keys_to_ignore_on_load_missing: + missing_keys = [k for k in missing_keys if re.search(pat, k) is None] + + if cls._keys_to_ignore_on_load_unexpected is not None: + for pat in cls._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + + if hf_quantizer is not None: + missing_keys = hf_quantizer.update_missing_keys(model, missing_keys, prefix) + + # retrieve weights on meta device and put them back on CPU. 
+ # This is not ideal in terms of memory, but if we don't do that not, we can't initialize them in the next step + if low_cpu_mem_usage: + for key in missing_keys: + if key in list(model_state_dict.keys()): + key = key + elif f"{prefix}.{key}" in list(model_state_dict.keys()): + key = f"{prefix}.{key}" + elif key.startswith(prefix) and ".".join(key.split(".")[1:]) in list(model_state_dict.keys()): + key = ".".join(key.split(".")[1:]) + param = model_state_dict[key] + + # upcast in fp32 if any + target_dtype = dtype + if ( + keep_in_fp32_modules is not None + and dtype == torch.float16 + and any( + module_to_keep_in_fp32 in key.split(".") for module_to_keep_in_fp32 in keep_in_fp32_modules + ) + ): + target_dtype = torch.float32 + + if param.device == torch.device("meta"): + value = torch.empty(*param.size(), dtype=target_dtype) + if ( + not is_quantized + or getattr(hf_quantizer, "requires_parameters_quantization", False) + or not hf_quantizer.check_quantized_param( + model, param_value=value, param_name=key, state_dict={} + ) + ): + set_module_tensor_to_device(model, key, "cpu", value) + else: + hf_quantizer.create_quantized_param(model, value, key, "cpu", state_dict, unexpected_keys) + + # retrieve uninitialized modules and initialize before maybe overriding that with the pretrained weights. + if _fast_init: + if not ignore_mismatched_sizes: + if remove_prefix_from_model: + _loaded_keys = [f"{prefix}.{k}" for k in loaded_keys] + elif add_prefix_to_model: + _loaded_keys = [k[len(prefix) + 1 :] for k in loaded_keys] + else: + _loaded_keys = loaded_keys + not_initialized_submodules = set_initialized_submodules(model, _loaded_keys) + # If we're about to tie the output embeds to the input embeds we don't need to init them + if hasattr(model.config, "tie_word_embeddings") and model.config.tie_word_embeddings: + output_embeddings = model.get_output_embeddings() + if output_embeddings is not None: + # Still need to initialize if there is a bias term since biases are not tied. + if not hasattr(output_embeddings, "bias") or output_embeddings.bias is None: + output_embeddings._is_hf_initialized = True + else: + not_initialized_submodules = dict(model.named_modules()) + # This will only initialize submodules that are not marked as initialized by the line above. + if is_deepspeed_zero3_enabled() and not is_quantized: + import deepspeed + + not_initialized_parameters = list( + set( + itertools.chain.from_iterable( + submodule.parameters(recurse=False) for submodule in not_initialized_submodules.values() + ) + ) + ) + with deepspeed.zero.GatheredParameters(not_initialized_parameters, modifier_rank=0): + model.apply(model._initialize_weights) + else: + model.apply(model._initialize_weights) + + # Set some modules to fp32 if any + if keep_in_fp32_modules is not None: + for name, param in model.named_parameters(): + if any(module_to_keep_in_fp32 in name.split(".") for module_to_keep_in_fp32 in keep_in_fp32_modules): + # param = param.to(torch.float32) does not work here as only in the local scope. + param.data = param.data.to(torch.float32) + + # Make sure we are able to load base models as well as derived models (with heads) + start_prefix = "" + model_to_load = model + if len(cls.base_model_prefix) > 0 and not hasattr(model, cls.base_model_prefix) and has_prefix_module: + start_prefix = cls.base_model_prefix + "." 
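+ # Conversely, if the checkpoint only contains the base model but the model being instantiated has a
+ # head, load the weights directly into the base-model submodule.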
+ if len(cls.base_model_prefix) > 0 and hasattr(model, cls.base_model_prefix) and not has_prefix_module: + model_to_load = getattr(model, cls.base_model_prefix) + base_model_expected_keys = list(model_to_load.state_dict().keys()) + if any(key in expected_keys_not_prefixed and key not in base_model_expected_keys for key in loaded_keys): + raise ValueError( + "The state dictionary of the model you are trying to load is corrupted. Are you sure it was " + "properly saved?" + ) + if device_map is not None: + device_map = {k.replace(f"{cls.base_model_prefix}.", ""): v for k, v in device_map.items()} + + def _find_mismatched_keys( + state_dict, + model_state_dict, + loaded_keys, + add_prefix_to_model, + remove_prefix_from_model, + ignore_mismatched_sizes, + ): + mismatched_keys = [] + if ignore_mismatched_sizes: + for checkpoint_key in loaded_keys: + # If the checkpoint is sharded, we may not have the key here. + if checkpoint_key not in state_dict: + continue + model_key = checkpoint_key + if remove_prefix_from_model: + # The model key starts with `prefix` but `checkpoint_key` doesn't so we add it. + model_key = f"{prefix}.{checkpoint_key}" + elif add_prefix_to_model: + # The model key doesn't start with `prefix` but `checkpoint_key` does so we remove it. + model_key = ".".join(checkpoint_key.split(".")[1:]) + + if ( + model_key in model_state_dict + and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape + ): + if ( + state_dict[checkpoint_key].shape[-1] == 1 + and state_dict[checkpoint_key].numel() * 2 == model_state_dict[model_key].numel() + ): + # This skips size mismatches for 4-bit weights. Two 4-bit values share an 8-bit container, causing size differences. + # Without matching with module type or paramter type it seems like a practical way to detect valid 4bit weights. + pass + else: + mismatched_keys.append( + (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape) + ) + del state_dict[checkpoint_key] + return mismatched_keys + + if resolved_archive_file is not None: + folder = os.path.sep.join(resolved_archive_file[0].split(os.path.sep)[:-1]) + else: + folder = None + if device_map is not None and is_safetensors: + param_device_map = expand_device_map(device_map, original_loaded_keys, start_prefix) + str_dtype = str(dtype).replace("torch.", "") if dtype is not None else "float32" + if sharded_metadata is None: + archive_file = ( + resolved_archive_file[0] + if isinstance(resolved_archive_file, (list, tuple)) + else resolved_archive_file + ) + weight_map = {p: archive_file for p in original_loaded_keys} + else: + weight_map = {p: os.path.join(folder, f) for p, f in sharded_metadata["weight_map"].items()} + offload_index = { + p[len(start_prefix) :]: {"safetensors_file": f, "weight_name": p, "dtype": str_dtype} + for p, f in weight_map.items() + if p.startswith(start_prefix) and param_device_map[p[len(start_prefix) :]] == "disk" + } + + if state_dict is not None: + # Whole checkpoint + mismatched_keys = _find_mismatched_keys( + state_dict, + model_state_dict, + original_loaded_keys, + add_prefix_to_model, + remove_prefix_from_model, + ignore_mismatched_sizes, + ) + error_msgs = _load_state_dict_into_model(model_to_load, state_dict, start_prefix) + offload_index = None + else: + # Sharded checkpoint or whole but low_cpu_mem_usage==True + + # This should always be a list but, just to be sure. 
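+ # (a single non-sharded weights file loaded with `low_cpu_mem_usage=True` reaches this branch as a plain string)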
+ if not isinstance(resolved_archive_file, list): + resolved_archive_file = [resolved_archive_file] + + error_msgs = [] + mismatched_keys = [] + if not is_safetensors: + offload_index = {} if device_map is not None and "disk" in device_map.values() else None + if offload_state_dict: + state_dict_folder = tempfile.mkdtemp() + state_dict_index = {} + else: + state_dict_folder = None + state_dict_index = None + + if is_sharded_safetensors: + disk_only_shard_files = get_disk_only_shard_files( + device_map, sharded_metadata=sharded_metadata, start_prefix=start_prefix + ) + disk_only_shard_files = [os.path.join(folder, f) for f in disk_only_shard_files] + else: + disk_only_shard_files = [] + + if len(resolved_archive_file) > 1: + resolved_archive_file = logging.tqdm(resolved_archive_file, desc="Loading checkpoint shards") + for shard_file in resolved_archive_file: + # Skip the load for shards that only contain disk-offloaded weights when using safetensors for the offload. + if shard_file in disk_only_shard_files: + continue + state_dict = load_state_dict(shard_file, is_quantized=is_quantized) + + # Mistmatched keys contains tuples key/shape1/shape2 of weights in the checkpoint that have a shape not + # matching the weights in the model. + mismatched_keys += _find_mismatched_keys( + state_dict, + model_state_dict, + original_loaded_keys, + add_prefix_to_model, + remove_prefix_from_model, + ignore_mismatched_sizes, + ) + if low_cpu_mem_usage: + if is_fsdp_enabled() and not is_local_dist_rank_0() and not is_quantized: + for key, param in model_to_load.state_dict().items(): + if param.device == torch.device("meta"): + set_module_tensor_to_device( + model_to_load, key, "cpu", torch.empty(*param.size(), dtype=dtype) + ) + else: + new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model( + model_to_load, + state_dict, + loaded_keys, + start_prefix, + expected_keys, + device_map=device_map, + offload_folder=offload_folder, + offload_index=offload_index, + state_dict_folder=state_dict_folder, + state_dict_index=state_dict_index, + dtype=dtype, + hf_quantizer=hf_quantizer, + is_safetensors=is_safetensors, + keep_in_fp32_modules=keep_in_fp32_modules, + unexpected_keys=unexpected_keys, + ) + error_msgs += new_error_msgs + else: + error_msgs += _load_state_dict_into_model(model_to_load, state_dict, start_prefix) + + # force memory release + del state_dict + gc.collect() + + if offload_index is not None and len(offload_index) > 0: + if model != model_to_load: + # We need to add the prefix of the base model + prefix = cls.base_model_prefix + if not is_safetensors: + for weight_name in offload_index: + shutil.move( + os.path.join(offload_folder, f"{weight_name}.dat"), + os.path.join(offload_folder, f"{prefix}.{weight_name}.dat"), + ) + offload_index = {f"{prefix}.{key}": value for key, value in offload_index.items()} + if not is_safetensors: + save_offload_index(offload_index, offload_folder) + offload_index = None + + if offload_state_dict: + # Load back temporarily offloaded state dict + load_offloaded_weights(model_to_load, state_dict_index, state_dict_folder) + shutil.rmtree(state_dict_folder) + + if len(error_msgs) > 0: + error_msg = "\n\t".join(error_msgs) + if "size mismatch" in error_msg: + error_msg += ( + "\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method." 
+ ) + raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}") + + if len(unexpected_keys) > 0: + archs = [] if model.config.architectures is None else model.config.architectures + warner = logger.warning if model.__class__.__name__ in archs else logger.info + warner( + f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" + f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" + f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" + " with another architecture (e.g. initializing a BertForSequenceClassification model from a" + " BertForPreTraining model).\n- This IS NOT expected if you are initializing" + f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical" + " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." + ) + else: + logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") + if len(missing_keys) > 0: + logger.warning( + f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" + f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" + " TRAIN this model on a down-stream task to be able to use it for predictions and inference." + ) + elif len(mismatched_keys) == 0: + logger.info( + f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" + f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" + f" was trained on, you can already use {model.__class__.__name__} for predictions without further" + " training." + ) + if len(mismatched_keys) > 0: + mismatched_warning = "\n".join( + [ + f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" + for key, shape1, shape2 in mismatched_keys + ] + ) + logger.warning( + f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" + f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not" + f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able" + " to use it for predictions and inference." + ) + + return model, missing_keys, unexpected_keys, mismatched_keys, offload_index, error_msgs + + def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False): + module_keys = {".".join(key.split(".")[:-1]) for key in names} + + # torch.nn.ParameterList is a special case where two parameter keywords + # are appended to the module name, *e.g.* bert.special_embeddings.0 + module_keys = module_keys.union( + {".".join(key.split(".")[:-2]) for key in names if len(key) > 0 and key[-1].isdigit()} + ) + + retrieved_modules = [] + # retrieve all modules that has at least one missing weight name + for name, module in self.named_modules(): + if remove_prefix: + _prefix = f"{self.base_model_prefix}." 
+ name = name[len(_prefix) :] if name.startswith(_prefix) else name + elif add_prefix: + name = ".".join([self.base_model_prefix, name]) if len(name) > 0 else self.base_model_prefix + + if name in module_keys: + retrieved_modules.append(module) + + return retrieved_modules + + @staticmethod + def _load_pretrained_model_low_mem( + model, loaded_state_dict_keys, resolved_archive_file, start_prefix="", hf_quantizer=None + ): + """ + This is an experimental function that loads the model using ~1.x model size CPU memory + + Before you call it do: + + 1. save which state_dict keys are available + 2. drop state_dict before model is created, since the latter takes 1x model size memory + + Here then we continue: + + 3. switch to the meta device all params/buffers that are going to be replaced from the loaded state_dict + 4. load state_dict 2nd time + 5. replace the params/buffers from the state_dict + + Currently, it doesn't handle missing_keys, unexpected_keys, mismatched_keys. It can't handle deepspeed. To + handle bitsandbytes, needs non-empty hf_quantizer argument. + """ + + _move_model_to_meta(model, loaded_state_dict_keys, start_prefix) + state_dict = load_state_dict(resolved_archive_file) + expected_keys = loaded_state_dict_keys # plug for missing expected_keys. TODO: replace with proper keys + error_msgs = _load_state_dict_into_meta_model( + model, + state_dict, + loaded_state_dict_keys, + start_prefix, + expected_keys=expected_keys, + hf_quantizer=hf_quantizer, + ) + return error_msgs + + @classmethod + def register_for_auto_class(cls, auto_class="AutoModel"): + """ + Register this class with a given auto class. This should only be used for custom models as the ones in the + library are already mapped with an auto class. + + + + This API is experimental and may have some slight breaking changes in the next releases. + + + + Args: + auto_class (`str` or `type`, *optional*, defaults to `"AutoModel"`): + The auto class to register this new model with. + """ + if not isinstance(auto_class, str): + auto_class = auto_class.__name__ + + import transformers.models.auto as auto_module + + if not hasattr(auto_module, auto_class): + raise ValueError(f"{auto_class} is not a valid auto class.") + + cls._auto_class = auto_class + + def to_bettertransformer(self) -> "PreTrainedModel": + """ + Converts the model to use [PyTorch's native attention + implementation](https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html), integrated to + Transformers through [Optimum library](https://huggingface.co/docs/optimum/bettertransformer/overview). Only a + subset of all Transformers models are supported. + + PyTorch's attention fastpath allows to speed up inference through kernel fusions and the use of [nested + tensors](https://pytorch.org/docs/stable/nested.html). Detailed benchmarks can be found in [this blog + post](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2). + + Returns: + [`PreTrainedModel`]: The model converted to BetterTransformer. + """ + if not is_optimum_available(): + raise ImportError("The package `optimum` is required to use Better Transformer.") + + from optimum.version import __version__ as optimum_version + + if version.parse(optimum_version) < version.parse("1.7.0"): + raise ImportError( + f"Please install optimum>=1.7.0 to use Better Transformer. The version {optimum_version} was found." 
+ ) + + from optimum.bettertransformer import BetterTransformer + + return BetterTransformer.transform(self) + + def reverse_bettertransformer(self): + """ + Reverts the transformation from [`~PreTrainedModel.to_bettertransformer`] so that the original modeling is + used, for example in order to save the model. + + Returns: + [`PreTrainedModel`]: The model converted back to the original modeling. + """ + if not is_optimum_available(): + raise ImportError("The package `optimum` is required to use Better Transformer.") + + from optimum.version import __version__ as optimum_version + + if version.parse(optimum_version) < version.parse("1.7.0"): + raise ImportError( + f"Please install optimum>=1.7.0 to use Better Transformer. The version {optimum_version} was found." + ) + + from optimum.bettertransformer import BetterTransformer + + return BetterTransformer.reverse(self) + + def warn_if_padding_and_no_attention_mask(self, input_ids, attention_mask): + """ + Shows a one-time warning if the input_ids appear to contain padding and no attention mask was given. + """ + + # Skip the check during tracing. + if is_torch_fx_proxy(input_ids) or torch.jit.is_tracing() or is_torchdynamo_compiling(): + return + + if (attention_mask is not None) or (self.config.pad_token_id is None): + return + + # Check only the first and last input IDs to reduce overhead. + if self.config.pad_token_id in input_ids[:, [-1, 0]]: + warn_string = ( + "We strongly recommend passing in an `attention_mask` since your input_ids may be padded. See " + "https://huggingface.co/docs/transformers/troubleshooting" + "#incorrect-output-when-padding-tokens-arent-masked." + ) + + # If the pad token is equal to either BOS, EOS, or SEP, we do not know whether the user should use an + # attention_mask or not. In this case, we should still show a warning because this is a rare case. + if ( + (self.config.bos_token_id is not None and self.config.bos_token_id == self.config.pad_token_id) + or (self.config.eos_token_id is not None and self.config.eos_token_id == self.config.pad_token_id) + or (self.config.sep_token_id is not None and self.config.sep_token_id == self.config.pad_token_id) + ): + warn_string += ( + f"\nYou may ignore this warning if your `pad_token_id` ({self.config.pad_token_id}) is identical " + f"to the `bos_token_id` ({self.config.bos_token_id}), `eos_token_id` ({self.config.eos_token_id}), " + f"or the `sep_token_id` ({self.config.sep_token_id}), and your input is not padded." + ) + + logger.warning_once(warn_string) + + @property + def _is_quantized_training_enabled(self): + warnings.warn( + "`_is_quantized_training_enabled` is going to be deprecated in transformers 4.39.0. Please use `model.hf_quantizer.is_trainable` instead", + FutureWarning, + ) + + if not hasattr(self, "hf_quantizer"): + return False + + return self.hf_quantizer.is_trainable + + +PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub) +if PreTrainedModel.push_to_hub.__doc__ is not None: + PreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format( + object="model", object_class="AutoModel", object_files="model file" + ) + + +class PoolerStartLogits(nn.Module): + """ + Compute SQuAD start logits from sequence hidden states. + + Args: + config ([`PretrainedConfig`]): + The config used by the model, will be used to grab the `hidden_size` of the model. 
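+
+ Example (a minimal, illustrative sketch; it only assumes a config that provides `hidden_size`):
+
+ ```python
+ config = PretrainedConfig(hidden_size=768)
+ pooler = PoolerStartLogits(config)
+ hidden_states = torch.rand(2, 16, 768)  # (batch_size, seq_len, hidden_size)
+ start_logits = pooler(hidden_states)  # shape (batch_size, seq_len)
+ ```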
+ """ + + def __init__(self, config: PretrainedConfig): + super().__init__() + self.dense = nn.Linear(config.hidden_size, 1) + + def forward( + self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None + ) -> torch.FloatTensor: + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): + The final hidden states of the model. + p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): + Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token + should be masked. + + Returns: + `torch.FloatTensor`: The start logits for SQuAD. + """ + x = self.dense(hidden_states).squeeze(-1) + + if p_mask is not None: + if get_parameter_dtype(self) == torch.float16: + x = x * (1 - p_mask) - 65500 * p_mask + else: + x = x * (1 - p_mask) - 1e30 * p_mask + + return x + + +class PoolerEndLogits(nn.Module): + """ + Compute SQuAD end logits from sequence hidden states. + + Args: + config ([`PretrainedConfig`]): + The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps` + to use. + """ + + def __init__(self, config: PretrainedConfig): + super().__init__() + self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) + self.activation = nn.Tanh() + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dense_1 = nn.Linear(config.hidden_size, 1) + + def forward( + self, + hidden_states: torch.FloatTensor, + start_states: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + p_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): + The final hidden states of the model. + start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*): + The hidden states of the first tokens for the labeled span. + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + The position of the first token for the labeled span. + p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): + Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token + should be masked. + + + + One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides + `start_states`. + + + + Returns: + `torch.FloatTensor`: The end logits for SQuAD. + """ + assert ( + start_states is not None or start_positions is not None + ), "One of start_states, start_positions should be not None" + if start_positions is not None: + slen, hsz = hidden_states.shape[-2:] + start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) + start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz) + start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz) + + x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1)) + x = self.activation(x) + x = self.LayerNorm(x) + x = self.dense_1(x).squeeze(-1) + + if p_mask is not None: + if get_parameter_dtype(self) == torch.float16: + x = x * (1 - p_mask) - 65500 * p_mask + else: + x = x * (1 - p_mask) - 1e30 * p_mask + + return x + + +class PoolerAnswerClass(nn.Module): + """ + Compute SQuAD 2.0 answer class from classification and start tokens hidden states. 
+ + Args: + config ([`PretrainedConfig`]): + The config used by the model, will be used to grab the `hidden_size` of the model. + """ + + def __init__(self, config): + super().__init__() + self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) + self.activation = nn.Tanh() + self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False) + + def forward( + self, + hidden_states: torch.FloatTensor, + start_states: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + cls_index: Optional[torch.LongTensor] = None, + ) -> torch.FloatTensor: + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): + The final hidden states of the model. + start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*): + The hidden states of the first tokens for the labeled span. + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + The position of the first token for the labeled span. + cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Position of the CLS token for each sentence in the batch. If `None`, takes the last token. + + + + One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides + `start_states`. + + + + Returns: + `torch.FloatTensor`: The SQuAD 2.0 answer class. + """ + # No dependency on end_feature so that we can obtain one single `cls_logits` for each sample. + hsz = hidden_states.shape[-1] + assert ( + start_states is not None or start_positions is not None + ), "One of start_states, start_positions should be not None" + if start_positions is not None: + start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) + start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz) + + if cls_index is not None: + cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) + cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz) + else: + cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz) + + x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1)) + x = self.activation(x) + x = self.dense_1(x).squeeze(-1) + + return x + + +@dataclass +class SquadHeadOutput(ModelOutput): + """ + Base class for outputs of question answering models using a [`~modeling_utils.SQuADHead`]. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided): + Classification loss as the sum of start token, end token (and is_impossible if provided) classification + losses. + start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): + Log probabilities for the top config.start_n_top start token possibilities (beam-search). + start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): + Indices for the top config.start_n_top start token possibilities (beam-search). + end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): + Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities + (beam-search). 
+ end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): + Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). + cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided): + Log probabilities for the `is_impossible` label of the answers. + + """ + + loss: Optional[torch.FloatTensor] = None + start_top_log_probs: Optional[torch.FloatTensor] = None + start_top_index: Optional[torch.LongTensor] = None + end_top_log_probs: Optional[torch.FloatTensor] = None + end_top_index: Optional[torch.LongTensor] = None + cls_logits: Optional[torch.FloatTensor] = None + + +class SQuADHead(nn.Module): + r""" + A SQuAD head inspired by XLNet. + + Args: + config ([`PretrainedConfig`]): + The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps` + to use. + """ + + def __init__(self, config): + super().__init__() + self.start_n_top = config.start_n_top + self.end_n_top = config.end_n_top + + self.start_logits = PoolerStartLogits(config) + self.end_logits = PoolerEndLogits(config) + self.answer_class = PoolerAnswerClass(config) + + @replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig) + def forward( + self, + hidden_states: torch.FloatTensor, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + cls_index: Optional[torch.LongTensor] = None, + is_impossible: Optional[torch.LongTensor] = None, + p_mask: Optional[torch.FloatTensor] = None, + return_dict: bool = False, + ) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]: + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): + Final hidden states of the model on the sequence tokens. + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Positions of the first token for the labeled span. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Positions of the last token for the labeled span. + cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Position of the CLS token for each sentence in the batch. If `None`, takes the last token. + is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Whether the question has a possible answer in the paragraph or not. + p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): + Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token + should be masked. + return_dict (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+ + Returns: + """ + start_logits = self.start_logits(hidden_states, p_mask=p_mask) + + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, let's remove the dimension added by batch splitting + for x in (start_positions, end_positions, cls_index, is_impossible): + if x is not None and x.dim() > 1: + x.squeeze_(-1) + + # during training, compute the end logits based on the ground truth of the start position + end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) + + loss_fct = CrossEntropyLoss() + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if cls_index is not None and is_impossible is not None: + # Predict answerability from the representation of CLS and START + cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) + loss_fct_cls = nn.BCEWithLogitsLoss() + cls_loss = loss_fct_cls(cls_logits, is_impossible) + + # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss + total_loss += cls_loss * 0.5 + + return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,) + + else: + # during inference, compute the end logits based on beam search + bsz, slen, hsz = hidden_states.size() + start_log_probs = nn.functional.softmax(start_logits, dim=-1) # shape (bsz, slen) + + start_top_log_probs, start_top_index = torch.topk( + start_log_probs, self.start_n_top, dim=-1 + ) # shape (bsz, start_n_top) + start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) + start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) + start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) + + hidden_states_expanded = hidden_states.unsqueeze(2).expand_as( + start_states + ) # shape (bsz, slen, start_n_top, hsz) + p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None + end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) + end_log_probs = nn.functional.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) + + end_top_log_probs, end_top_index = torch.topk( + end_log_probs, self.end_n_top, dim=1 + ) # shape (bsz, end_n_top, start_n_top) + end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) + end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) + + start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs) + cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index) + + if not return_dict: + return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + else: + return SquadHeadOutput( + start_top_log_probs=start_top_log_probs, + start_top_index=start_top_index, + end_top_log_probs=end_top_log_probs, + end_top_index=end_top_index, + cls_logits=cls_logits, + ) + + +class SequenceSummary(nn.Module): + r""" + Compute a single vector summary of a sequence hidden states. + + Args: + config ([`PretrainedConfig`]): + The config used by the model. Relevant arguments in the config class of the model are (refer to the actual + config class of your model for the default values it uses): + + - **summary_type** (`str`) -- The method to use to make this summary. 
Accepted values are: + + - `"last"` -- Take the last token hidden state (like XLNet) + - `"first"` -- Take the first token hidden state (like Bert) + - `"mean"` -- Take the mean of all tokens hidden states + - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) + - `"attn"` -- Not implemented now, use multi-head attention + + - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. + - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes + (otherwise to `config.hidden_size`). + - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, + another string or `None` will add no activation. + - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. + - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation. + """ + + def __init__(self, config: PretrainedConfig): + super().__init__() + + self.summary_type = getattr(config, "summary_type", "last") + if self.summary_type == "attn": + # We should use a standard multi-head attention module with absolute positional embedding for that. + # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 + # We can probably just use the multi-head attention module of PyTorch >=1.1.0 + raise NotImplementedError + + self.summary = Identity() + if hasattr(config, "summary_use_proj") and config.summary_use_proj: + if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: + num_classes = config.num_labels + else: + num_classes = config.hidden_size + self.summary = nn.Linear(config.hidden_size, num_classes) + + activation_string = getattr(config, "summary_activation", None) + self.activation: Callable = get_activation(activation_string) if activation_string else Identity() + + self.first_dropout = Identity() + if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0: + self.first_dropout = nn.Dropout(config.summary_first_dropout) + + self.last_dropout = Identity() + if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: + self.last_dropout = nn.Dropout(config.summary_last_dropout) + + def forward( + self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None + ) -> torch.FloatTensor: + """ + Compute a single vector summary of a sequence hidden states. + + Args: + hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`): + The hidden states of the last layer. + cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*): + Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token. + + Returns: + `torch.FloatTensor`: The summary of the sequence hidden states. 
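+
+ Example (a minimal, illustrative sketch; it assumes a config that carries only the summary attributes used here):
+
+ ```python
+ config = PretrainedConfig(hidden_size=768, summary_type="mean", summary_use_proj=False)
+ summary = SequenceSummary(config)
+ hidden_states = torch.rand(2, 16, 768)  # (batch_size, seq_len, hidden_size)
+ pooled = summary(hidden_states)  # shape (batch_size, hidden_size)
+ ```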
+ """ + if self.summary_type == "last": + output = hidden_states[:, -1] + elif self.summary_type == "first": + output = hidden_states[:, 0] + elif self.summary_type == "mean": + output = hidden_states.mean(dim=1) + elif self.summary_type == "cls_index": + if cls_index is None: + cls_index = torch.full_like( + hidden_states[..., :1, :], + hidden_states.shape[-2] - 1, + dtype=torch.long, + ) + else: + cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) + cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) + # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states + output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) + elif self.summary_type == "attn": + raise NotImplementedError + + output = self.first_dropout(output) + output = self.summary(output) + output = self.activation(output) + output = self.last_dropout(output) + + return output + + +def unwrap_model(model: nn.Module) -> nn.Module: + """ + Recursively unwraps a model from potential containers (as used in distributed training). + + Args: + model (`torch.nn.Module`): The model to unwrap. + """ + # since there could be multiple levels of wrapping, unwrap recursively + if hasattr(model, "module"): + return unwrap_model(model.module) + else: + return model + + +def expand_device_map(device_map, param_names, start_prefix): + """ + Expand a device map to return the correspondance parameter name to device. + """ + new_device_map = {} + param_names = [p[len(start_prefix) :] for p in param_names if p.startswith(start_prefix)] + for module, device in device_map.items(): + new_device_map.update( + {p: device for p in param_names if p == module or p.startswith(f"{module}.") or module == ""} + ) + return new_device_map + + +def get_disk_only_shard_files(device_map, sharded_metadata, start_prefix): + """ + Returns the list of shard files containing only weights offloaded to disk. + """ + + weight_map = { + p[len(start_prefix) :]: v for p, v in sharded_metadata["weight_map"].items() if p.startswith(start_prefix) + } + files_content = collections.defaultdict(list) + for weight_name, filename in weight_map.items(): + while len(weight_name) > 0 and weight_name not in device_map: + weight_name = ".".join(weight_name.split(".")[:-1]) + files_content[filename].append(device_map[weight_name]) + + return [fname for fname, devices in files_content.items() if set(devices) == {"disk"}] diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pytorch_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/pytorch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cab0b0d4aec72babb54fbd8edac5df83285abe06 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/pytorch_utils.py @@ -0,0 +1,296 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import inspect +from typing import Callable, List, Optional, Set, Tuple, Union + +import torch +from packaging import version +from safetensors.torch import storage_ptr, storage_size +from torch import nn + +from .utils import is_torch_xla_available, logging + + +ALL_LAYERNORM_LAYERS = [nn.LayerNorm] + +logger = logging.get_logger(__name__) + +parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version) + +is_torch_greater_or_equal_than_2_2 = parsed_torch_version_base >= version.parse("2.2") +is_torch_greater_or_equal_than_2_1 = parsed_torch_version_base >= version.parse("2.1") +is_torch_greater_or_equal_than_2_0 = parsed_torch_version_base >= version.parse("2.0") +is_torch_greater_or_equal_than_1_13 = parsed_torch_version_base >= version.parse("1.13") +is_torch_greater_or_equal_than_1_12 = parsed_torch_version_base >= version.parse("1.12") + + +def softmax_backward_data(parent, grad_output, output, dim, self): + """ + A function that calls the internal `_softmax_backward_data` PyTorch method and that adjusts the arguments according + to the torch version detected. + """ + + from torch import _softmax_backward_data + + return _softmax_backward_data(grad_output, output, parent.dim, self.dtype) + + +def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int = 0) -> nn.Linear: + """ + Prune a linear layer to keep only entries in index. + + Used to remove heads. + + Args: + layer (`torch.nn.Linear`): The layer to prune. + index (`torch.LongTensor`): The indices to keep in the layer. + dim (`int`, *optional*, defaults to 0): The dimension on which to keep the indices. + + Returns: + `torch.nn.Linear`: The pruned layer as a new layer with `requires_grad=True`. + """ + index = index.to(layer.weight.device) + W = layer.weight.index_select(dim, index).clone().detach() + if layer.bias is not None: + if dim == 1: + b = layer.bias.clone().detach() + else: + b = layer.bias[index].clone().detach() + new_size = list(layer.weight.size()) + new_size[dim] = len(index) + new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device) + new_layer.weight.requires_grad = False + new_layer.weight.copy_(W.contiguous()) + new_layer.weight.requires_grad = True + if layer.bias is not None: + new_layer.bias.requires_grad = False + new_layer.bias.copy_(b.contiguous()) + new_layer.bias.requires_grad = True + return new_layer + + +class Conv1D(nn.Module): + """ + 1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2). + + Basically works like a linear layer but the weights are transposed. + + Args: + nf (`int`): The number of output features. + nx (`int`): The number of input features. + """ + + def __init__(self, nf, nx): + super().__init__() + self.nf = nf + self.weight = nn.Parameter(torch.empty(nx, nf)) + self.bias = nn.Parameter(torch.zeros(nf)) + nn.init.normal_(self.weight, std=0.02) + + def forward(self, x): + size_out = x.size()[:-1] + (self.nf,) + x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) + x = x.view(size_out) + return x + + +def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D: + """ + Prune a Conv1D layer to keep only entries in index. A Conv1D work as a Linear layer (see e.g. BERT) but the weights + are transposed. + + Used to remove heads. + + Args: + layer ([`~pytorch_utils.Conv1D`]): The layer to prune. + index (`torch.LongTensor`): The indices to keep in the layer. 
+ dim (`int`, *optional*, defaults to 1): The dimension on which to keep the indices. + + Returns: + [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`. + """ + index = index.to(layer.weight.device) + W = layer.weight.index_select(dim, index).clone().detach() + if dim == 0: + b = layer.bias.clone().detach() + else: + b = layer.bias[index].clone().detach() + new_size = list(layer.weight.size()) + new_size[dim] = len(index) + new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device) + new_layer.weight.requires_grad = False + new_layer.weight.copy_(W.contiguous()) + new_layer.weight.requires_grad = True + new_layer.bias.requires_grad = False + new_layer.bias.copy_(b.contiguous()) + new_layer.bias.requires_grad = True + return new_layer + + +def prune_layer( + layer: Union[nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None +) -> Union[nn.Linear, Conv1D]: + """ + Prune a Conv1D or linear layer to keep only entries in index. + + Used to remove heads. + + Args: + layer (`Union[torch.nn.Linear, Conv1D]`): The layer to prune. + index (`torch.LongTensor`): The indices to keep in the layer. + dim (`int`, *optional*): The dimension on which to keep the indices. + + Returns: + `torch.nn.Linear` or [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`. + """ + if isinstance(layer, nn.Linear): + return prune_linear_layer(layer, index, dim=0 if dim is None else dim) + elif isinstance(layer, Conv1D): + return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim) + else: + raise ValueError(f"Can't prune layer of class {layer.__class__}") + + +def apply_chunking_to_forward( + forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors +) -> torch.Tensor: + """ + This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size` over the dimension + `chunk_dim`. It then applies a layer `forward_fn` to each chunk independently to save memory. + + If the `forward_fn` is independent across the `chunk_dim` this function will yield the same result as directly + applying `forward_fn` to `input_tensors`. + + Args: + forward_fn (`Callable[..., torch.Tensor]`): + The forward function of the model. + chunk_size (`int`): + The chunk size of a chunked tensor: `num_chunks = len(input_tensors[0]) / chunk_size`. + chunk_dim (`int`): + The dimension over which the `input_tensors` should be chunked. + input_tensors (`Tuple[torch.Tensor]`): + The input tensors of `forward_fn` which will be chunked + + Returns: + `torch.Tensor`: A tensor with the same shape as the `forward_fn` would have given if applied`. 
+ + + Examples: + + ```python + # rename the usual forward() fn to forward_chunk() + def forward_chunk(self, hidden_states): + hidden_states = self.decoder(hidden_states) + return hidden_states + + + # implement a chunked forward function + def forward(self, hidden_states): + return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states) + ```""" + + assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors" + + # inspect.signature exist since python 3.5 and is a python method -> no problem with backward compatibility + num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters) + if num_args_in_forward_chunk_fn != len(input_tensors): + raise ValueError( + f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input " + "tensors are given" + ) + + if chunk_size > 0: + tensor_shape = input_tensors[0].shape[chunk_dim] + for input_tensor in input_tensors: + if input_tensor.shape[chunk_dim] != tensor_shape: + raise ValueError( + f"All input tenors have to be of the same shape: {tensor_shape}, " + f"found shape {input_tensor.shape[chunk_dim]}" + ) + + if input_tensors[0].shape[chunk_dim] % chunk_size != 0: + raise ValueError( + f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk " + f"size {chunk_size}" + ) + + num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size + + # chunk input tensor into tuples + input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors) + # apply forward fn to every tuple + output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks)) + # concatenate output at same dimension + return torch.cat(output_chunks, dim=chunk_dim) + + return forward_fn(*input_tensors) + + +def find_pruneable_heads_and_indices( + heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int] +) -> Tuple[Set[int], torch.LongTensor]: + """ + Finds the heads and their indices taking `already_pruned_heads` into account. + + Args: + heads (`List[int]`): List of the indices of heads to prune. + n_heads (`int`): The number of heads in the model. + head_size (`int`): The size of each head. + already_pruned_heads (`Set[int]`): A set of already pruned heads. + + Returns: + `Tuple[Set[int], torch.LongTensor]`: A tuple with the indices of heads to prune taking `already_pruned_heads` + into account and the indices of rows/columns to keep in the layer weight. + """ + mask = torch.ones(n_heads, head_size) + heads = set(heads) - already_pruned_heads # Convert to set and remove already pruned heads + for head in heads: + # Compute how many pruned heads are before the head and move the index accordingly + head = head - sum(1 if h < head else 0 for h in already_pruned_heads) + mask[head] = 0 + mask = mask.view(-1).contiguous().eq(1) + index: torch.LongTensor = torch.arange(len(mask))[mask].long() + return heads, index + + +def meshgrid( + *tensors: Union[torch.Tensor, List[torch.Tensor]], indexing: Optional[str] = None +) -> Tuple[torch.Tensor, ...]: + """ + Wrapper around torch.meshgrid to avoid warning messages about the introduced `indexing` argument. + + Reference: https://pytorch.org/docs/1.13/generated/torch.meshgrid.html + """ + return torch.meshgrid(*tensors, indexing=indexing) + + +def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]: + """ + Unique identifier to a tensor storage. 
Multiple different tensors can share the same underlying storage. For + example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is + guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with + non-overlapping lifetimes may have the same id. + """ + if tensor.device.type == "xla" and is_torch_xla_available(): + # NOTE: xla tensors dont have storage + # use some other unique id to distinguish. + # this is a XLA tensor, it must be created using torch_xla's + # device. So the following import is safe: + import torch_xla + + unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor) + else: + unique_id = storage_ptr(tensor) + + return tensor.device, unique_id, storage_size(tensor) diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/safetensors_conversion.py b/env-llmeval/lib/python3.10/site-packages/transformers/safetensors_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..46de0e5755fdf0ab4a29a629f861fa55ce0c28d6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/safetensors_conversion.py @@ -0,0 +1,107 @@ +import json +import uuid +from typing import Optional + +import requests +from huggingface_hub import Discussion, HfApi, get_repo_discussions + +from .utils import cached_file, logging + + +logger = logging.get_logger(__name__) + + +def previous_pr(api: HfApi, model_id: str, pr_title: str, token: str) -> Optional["Discussion"]: + main_commit = api.list_repo_commits(model_id, token=token)[0].commit_id + for discussion in get_repo_discussions(repo_id=model_id, token=token): + if discussion.title == pr_title and discussion.status == "open" and discussion.is_pull_request: + commits = api.list_repo_commits(model_id, revision=discussion.git_reference, token=token) + + if main_commit == commits[1].commit_id: + return discussion + return None + + +def spawn_conversion(token: str, private: bool, model_id: str): + logger.info("Attempting to convert .bin model on the fly to safetensors.") + + safetensors_convert_space_url = "https://safetensors-convert.hf.space" + sse_url = f"{safetensors_convert_space_url}/queue/join" + sse_data_url = f"{safetensors_convert_space_url}/queue/data" + + # The `fn_index` is necessary to indicate to gradio that we will use the `run` method of the Space. + hash_data = {"fn_index": 1, "session_hash": str(uuid.uuid4())} + + def start(_sse_connection, payload): + for line in _sse_connection.iter_lines(): + line = line.decode() + if line.startswith("data:"): + resp = json.loads(line[5:]) + logger.debug(f"Safetensors conversion status: {resp['msg']}") + if resp["msg"] == "queue_full": + raise ValueError("Queue is full! 
Please try again.") + elif resp["msg"] == "send_data": + event_id = resp["event_id"] + response = requests.post( + sse_data_url, + stream=True, + params=hash_data, + json={"event_id": event_id, **payload, **hash_data}, + ) + response.raise_for_status() + elif resp["msg"] == "process_completed": + return + + with requests.get(sse_url, stream=True, params=hash_data) as sse_connection: + data = {"data": [model_id, private, token]} + try: + logger.debug("Spawning safetensors automatic conversion.") + start(sse_connection, data) + except Exception as e: + logger.warning(f"Error during conversion: {repr(e)}") + + +def get_conversion_pr_reference(api: HfApi, model_id: str, **kwargs): + private = api.model_info(model_id).private + + logger.info("Attempting to create safetensors variant") + pr_title = "Adding `safetensors` variant of this model" + token = kwargs.get("token") + + # This looks into the current repo's open PRs to see if a PR for safetensors was already open. If so, it + # returns it. It checks that the PR was opened by the bot and not by another user so as to prevent + # security breaches. + pr = previous_pr(api, model_id, pr_title, token=token) + + if pr is None or (not private and pr.author != "SFConvertBot"): + spawn_conversion(token, private, model_id) + pr = previous_pr(api, model_id, pr_title, token=token) + else: + logger.info("Safetensors PR exists") + + sha = f"refs/pr/{pr.num}" + + return sha + + +def auto_conversion(pretrained_model_name_or_path: str, **cached_file_kwargs): + api = HfApi(token=cached_file_kwargs.get("token")) + sha = get_conversion_pr_reference(api, pretrained_model_name_or_path, **cached_file_kwargs) + + if sha is None: + return None, None + cached_file_kwargs["revision"] = sha + del cached_file_kwargs["_commit_hash"] + + # This is an additional HEAD call that could be removed if we could infer sharded/non-sharded from the PR + # description. + sharded = api.file_exists( + pretrained_model_name_or_path, + "model.safetensors.index.json", + revision=sha, + token=cached_file_kwargs.get("token"), + ) + filename = "model.safetensors.index.json" if sharded else "model.safetensors" + + resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) + return resolved_archive_file, sha, sharded diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/time_series_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/time_series_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..02eddd72cebd3562702cb1ea9439f313bc01642a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/time_series_utils.py @@ -0,0 +1,225 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Time series distributional output classes and utilities. 
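+
+A minimal usage sketch (the feature size, batch size and target values below are illustrative only):
+
+    import torch
+
+    from transformers.time_series_utils import StudentTOutput
+
+    distr_output = StudentTOutput(dim=1)
+    projection = distr_output.get_parameter_projection(in_features=32)
+    # the projection maps raw features to (df, loc, scale), already constrained to valid domains
+    df, loc, scale = projection(torch.randn(8, 32))
+    distribution = distr_output.distribution((df, loc, scale))
+    nll = -distribution.log_prob(torch.randn(8)).mean()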
+""" +from typing import Callable, Dict, Optional, Tuple + +import torch +from torch import nn +from torch.distributions import ( + AffineTransform, + Distribution, + Independent, + NegativeBinomial, + Normal, + StudentT, + TransformedDistribution, +) + + +class AffineTransformed(TransformedDistribution): + def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0): + self.scale = 1.0 if scale is None else scale + self.loc = 0.0 if loc is None else loc + + super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)]) + + @property + def mean(self): + """ + Returns the mean of the distribution. + """ + return self.base_dist.mean * self.scale + self.loc + + @property + def variance(self): + """ + Returns the variance of the distribution. + """ + return self.base_dist.variance * self.scale**2 + + @property + def stddev(self): + """ + Returns the standard deviation of the distribution. + """ + return self.variance.sqrt() + + +class ParameterProjection(nn.Module): + def __init__( + self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs + ) -> None: + super().__init__(**kwargs) + self.args_dim = args_dim + self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()]) + self.domain_map = domain_map + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]: + params_unbounded = [proj(x) for proj in self.proj] + + return self.domain_map(*params_unbounded) + + +class LambdaLayer(nn.Module): + def __init__(self, function): + super().__init__() + self.function = function + + def forward(self, x, *args): + return self.function(x, *args) + + +class DistributionOutput: + distribution_class: type + in_features: int + args_dim: Dict[str, int] + + def __init__(self, dim: int = 1) -> None: + self.dim = dim + self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim} + + def _base_distribution(self, distr_args): + if self.dim == 1: + return self.distribution_class(*distr_args) + else: + return Independent(self.distribution_class(*distr_args), 1) + + def distribution( + self, + distr_args, + loc: Optional[torch.Tensor] = None, + scale: Optional[torch.Tensor] = None, + ) -> Distribution: + distr = self._base_distribution(distr_args) + if loc is None and scale is None: + return distr + else: + return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim) + + @property + def event_shape(self) -> Tuple: + r""" + Shape of each individual event contemplated by the distributions that this object constructs. + """ + return () if self.dim == 1 else (self.dim,) + + @property + def event_dim(self) -> int: + r""" + Number of event dimensions, i.e., length of the `event_shape` tuple, of the distributions that this object + constructs. + """ + return len(self.event_shape) + + @property + def value_in_support(self) -> float: + r""" + A float that will have a valid numeric value when computing the log-loss of the corresponding distribution. By + default 0.0. This value will be used when padding data series. + """ + return 0.0 + + def get_parameter_projection(self, in_features: int) -> nn.Module: + r""" + Return the parameter projection layer that maps the input to the appropriate parameters of the distribution. + """ + return ParameterProjection( + in_features=in_features, + args_dim=self.args_dim, + domain_map=LambdaLayer(self.domain_map), + ) + + def domain_map(self, *args: torch.Tensor): + r""" + Converts arguments to the right shape and domain. 
The domain depends on the type of distribution, while the + correct shape is obtained by reshaping the trailing axis in such a way that the returned tensors define a + distribution of the right event_shape. + """ + raise NotImplementedError() + + @staticmethod + def squareplus(x: torch.Tensor) -> torch.Tensor: + r""" + Helper to map inputs to the positive orthant by applying the square-plus operation. Reference: + https://twitter.com/jon_barron/status/1387167648669048833 + """ + return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0 + + +class StudentTOutput(DistributionOutput): + """ + Student-T distribution output class. + """ + + args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} + distribution_class: type = StudentT + + @classmethod + def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor): + scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps) + df = 2.0 + cls.squareplus(df) + return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1) + + +class NormalOutput(DistributionOutput): + """ + Normal distribution output class. + """ + + args_dim: Dict[str, int] = {"loc": 1, "scale": 1} + distribution_class: type = Normal + + @classmethod + def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor): + scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps) + return loc.squeeze(-1), scale.squeeze(-1) + + +class NegativeBinomialOutput(DistributionOutput): + """ + Negative Binomial distribution output class. + """ + + args_dim: Dict[str, int] = {"total_count": 1, "logits": 1} + distribution_class: type = NegativeBinomial + + @classmethod + def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor): + total_count = cls.squareplus(total_count) + return total_count.squeeze(-1), logits.squeeze(-1) + + def _base_distribution(self, distr_args) -> Distribution: + total_count, logits = distr_args + if self.dim == 1: + return self.distribution_class(total_count=total_count, logits=logits) + else: + return Independent(self.distribution_class(total_count=total_count, logits=logits), 1) + + # Overwrites the parent class method. We cannot scale using the affine + # transformation since negative binomial should return integers. Instead + # we scale the parameters. + def distribution( + self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None + ) -> Distribution: + total_count, logits = distr_args + + if scale is not None: + # See scaling property of Gamma. + logits += scale.log() + + return self._base_distribution((total_count, logits)) diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py b/env-llmeval/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..b1daa1ec1be92fb52df0e62d11456632067abb03 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py @@ -0,0 +1,846 @@ +# coding=utf-8 +# Copyright 2020 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +""" + Tokenization classes for fast tokenizers (provided by HuggingFace's tokenizers library). For slow (python) tokenizers + see tokenization_utils.py +""" +import copy +import json +import os +from collections import defaultdict +from typing import Any, Dict, List, Optional, Tuple, Union + +import tokenizers.pre_tokenizers as pre_tokenizers_fast +from tokenizers import Encoding as EncodingFast +from tokenizers import Tokenizer as TokenizerFast +from tokenizers.decoders import Decoder as DecoderFast +from tokenizers.trainers import BpeTrainer, UnigramTrainer, WordLevelTrainer, WordPieceTrainer + +from .convert_slow_tokenizer import convert_slow_tokenizer +from .tokenization_utils import PreTrainedTokenizer +from .tokenization_utils_base import ( + INIT_TOKENIZER_DOCSTRING, + AddedToken, + BatchEncoding, + PreTokenizedInput, + PreTokenizedInputPair, + PreTrainedTokenizerBase, + SpecialTokensMixin, + TextInput, + TextInputPair, + TruncationStrategy, +) +from .utils import PaddingStrategy, add_end_docstrings, logging + + +logger = logging.get_logger(__name__) + +# Fast tokenizers (provided by HuggingFace tokenizer's library) can be saved in a single file +TOKENIZER_FILE = "tokenizer.json" +SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json" +TOKENIZER_CONFIG_FILE = "tokenizer_config.json" + +# Slow tokenizers have an additional added tokens files +ADDED_TOKENS_FILE = "added_tokens.json" + +INIT_TOKENIZER_DOCSTRING += """ + tokenizer_object ([`tokenizers.Tokenizer`]): + A [`tokenizers.Tokenizer`] object from 🤗 tokenizers to instantiate from. See [Using tokenizers from 🤗 + tokenizers](../fast_tokenizers) for more information. + tokenizer_file ([`str`]): + A path to a local JSON file representing a previously serialized [`tokenizers.Tokenizer`] object from 🤗 + tokenizers. +""" + +MODEL_TO_TRAINER_MAPPING = { + "BPE": BpeTrainer, + "Unigram": UnigramTrainer, + "WordLevel": WordLevelTrainer, + "WordPiece": WordPieceTrainer, +} + +VOCAB_FILES_NAMES = {"tokenizer_file": TOKENIZER_FILE} + + +@add_end_docstrings(INIT_TOKENIZER_DOCSTRING) +class PreTrainedTokenizerFast(PreTrainedTokenizerBase): + """ + Base class for all fast tokenizers (wrapping HuggingFace tokenizers library). + + Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`]. + + Handles all the shared methods for tokenization and special tokens, as well as methods for + downloading/caching/loading pretrained tokenizers, as well as adding tokens to the vocabulary. + + This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the + specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...). + """ + + vocab_files_names = VOCAB_FILES_NAMES + slow_tokenizer_class: PreTrainedTokenizer = None + + def __init__(self, *args, **kwargs): + tokenizer_object = kwargs.pop("tokenizer_object", None) + slow_tokenizer = kwargs.pop("__slow_tokenizer", None) + fast_tokenizer_file = kwargs.pop("tokenizer_file", None) + from_slow = kwargs.pop("from_slow", False) + added_tokens_decoder = kwargs.pop("added_tokens_decoder", {}) + + if from_slow and slow_tokenizer is None and self.slow_tokenizer_class is None: + raise ValueError( + "Cannot instantiate this tokenizer from a slow version. If it's based on sentencepiece, make sure you " + "have sentencepiece installed." 
+ ) + + if tokenizer_object is not None: + fast_tokenizer = copy.deepcopy(tokenizer_object) + elif fast_tokenizer_file is not None and not from_slow: + # We have a serialization from tokenizers which let us directly build the backend + fast_tokenizer = TokenizerFast.from_file(fast_tokenizer_file) + elif slow_tokenizer is not None: + # We need to convert a slow tokenizer to build the backend + fast_tokenizer = convert_slow_tokenizer(slow_tokenizer) + elif self.slow_tokenizer_class is not None: + # We need to create and convert a slow tokenizer to build the backend + slow_tokenizer = self.slow_tokenizer_class(*args, **kwargs) + fast_tokenizer = convert_slow_tokenizer(slow_tokenizer) + else: + raise ValueError( + "Couldn't instantiate the backend tokenizer from one of: \n" + "(1) a `tokenizers` library serialization file, \n" + "(2) a slow tokenizer instance to convert or \n" + "(3) an equivalent slow tokenizer class to instantiate and convert. \n" + "You need to have sentencepiece installed to convert a slow tokenizer to a fast one." + ) + + self._tokenizer = fast_tokenizer + + if slow_tokenizer is not None: + kwargs.update(slow_tokenizer.init_kwargs) + + self._decode_use_source_tokenizer = False + + _truncation = self._tokenizer.truncation + + if _truncation is not None: + self._tokenizer.enable_truncation(**_truncation) + kwargs.setdefault("max_length", _truncation["max_length"]) + kwargs.setdefault("truncation_side", _truncation["direction"]) + kwargs.setdefault("stride", _truncation["stride"]) + kwargs.setdefault("truncation_strategy", _truncation["strategy"]) + else: + self._tokenizer.no_truncation() + + _padding = self._tokenizer.padding + if _padding is not None: + self._tokenizer.enable_padding(**_padding) + kwargs.setdefault("pad_token", _padding["pad_token"]) + kwargs.setdefault("pad_token_type_id", _padding["pad_type_id"]) + kwargs.setdefault("padding_side", _padding["direction"]) + kwargs.setdefault("max_length", _padding["length"]) + kwargs.setdefault("pad_to_multiple_of", _padding["pad_to_multiple_of"]) + + # We call this after having initialized the backend tokenizer because we update it. + super().__init__(**kwargs) + + # The following logic will be replace with a single add_tokens once a fix is pushed to tokenizers + # allows converting a slow -> fast, non-legacy: if the `tokenizer.json` does not have all the added tokens + # uses the information stored in `added_tokens_decoder`. + # this is costly for fast tokenizers as we re-compute the regex again. But not all tokens are added tokens + tokens_to_add = [ + token + for index, token in sorted(added_tokens_decoder.items(), key=lambda x: x[0]) + if token not in self.added_tokens_decoder + ] + encoder = list(self.added_tokens_encoder.keys()) + [str(token) for token in tokens_to_add] + # if some of the special tokens are strings, we check if we don't already have a token + tokens_to_add += [ + token for token in self.all_special_tokens_extended if token not in encoder and token not in tokens_to_add + ] + if len(tokens_to_add) > 0: + # super hack: if a token.special is set, tokenizer ignores it for now so FIXME @ArthurZ + # Accumulate added tokens into batches of special/non-special tokens, because calling add_tokens() for + # individual tokens would repeatedly rebuild a trie, which can be slow. 
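+            # Walk the tokens in insertion order and flush the accumulated run each time the
+            # "special" flag flips, so consecutive tokens sharing the flag are added in a single call.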
+ is_last_special = None + tokens = [] + special_tokens = self.all_special_tokens + for token in tokens_to_add: + is_special = ( + (token.special or str(token) in special_tokens) + if isinstance(token, AddedToken) + else str(token) in special_tokens + ) + if is_last_special is None or is_last_special == is_special: + tokens.append(token) + else: + self._add_tokens(tokens, special_tokens=is_last_special) + tokens = [token] + is_last_special = is_special + if tokens: + self._add_tokens(tokens, special_tokens=is_last_special) + + @property + def is_fast(self) -> bool: + return True + + @property + def can_save_slow_tokenizer(self) -> bool: + """ + `bool`: Whether or not the slow tokenizer can be saved. Usually for sentencepiece based slow tokenizer, this + can only be `True` if the original `"sentencepiece.model"` was not deleted. + """ + return True + + @property + def vocab_size(self) -> int: + """ + `int`: Size of the base vocabulary (without the added tokens). + """ + return self._tokenizer.get_vocab_size(with_added_tokens=False) + + def get_vocab(self) -> Dict[str, int]: + return self._tokenizer.get_vocab(with_added_tokens=True) + + @property + def vocab(self) -> Dict[str, int]: + return self.get_vocab() + + @property + def added_tokens_encoder(self) -> Dict[str, int]: + """ + Returns the sorted mapping from string to index. The added tokens encoder is cached for performance + optimisation in `self._added_tokens_encoder` for the slow tokenizers. + """ + return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])} + + @property + def added_tokens_decoder(self) -> Dict[int, AddedToken]: + """ + Returns the added tokens in the vocabulary as a dictionary of index to AddedToken. + + Returns: + `Dict[str, int]`: The added tokens. + """ + return self._tokenizer.get_added_tokens_decoder() + + def get_added_vocab(self) -> Dict[str, int]: + """ + Returns the added tokens in the vocabulary as a dictionary of token to index. + + Returns: + `Dict[str, int]`: The added tokens. + """ + return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])} + + def __len__(self) -> int: + """ + Size of the full vocabulary with the added tokens. + """ + return self._tokenizer.get_vocab_size(with_added_tokens=True) + + @property + def backend_tokenizer(self) -> TokenizerFast: + """ + `tokenizers.implementations.BaseTokenizer`: The Rust tokenizer used as a backend. + """ + return self._tokenizer + + @property + def decoder(self) -> DecoderFast: + """ + `tokenizers.decoders.Decoder`: The Rust decoder for this tokenizer. + """ + return self._tokenizer.decoder + + def _convert_encoding( + self, + encoding: EncodingFast, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + ) -> Tuple[Dict[str, Any], List[EncodingFast]]: + """ + Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict and a list + of encodings, take care of building a batch from overflowing tokens. + + Overflowing tokens are converted to additional examples (like batches) so the output values of the dict are + lists (overflows) of lists (tokens). 
+ + Output shape: (overflows, sequence length) + """ + if return_token_type_ids is None: + return_token_type_ids = "token_type_ids" in self.model_input_names + if return_attention_mask is None: + return_attention_mask = "attention_mask" in self.model_input_names + + if return_overflowing_tokens and encoding.overflowing is not None: + encodings = [encoding] + encoding.overflowing + else: + encodings = [encoding] + + encoding_dict = defaultdict(list) + for e in encodings: + encoding_dict["input_ids"].append(e.ids) + + if return_token_type_ids: + encoding_dict["token_type_ids"].append(e.type_ids) + if return_attention_mask: + encoding_dict["attention_mask"].append(e.attention_mask) + if return_special_tokens_mask: + encoding_dict["special_tokens_mask"].append(e.special_tokens_mask) + if return_offsets_mapping: + encoding_dict["offset_mapping"].append(e.offsets) + if return_length: + encoding_dict["length"].append(len(e.ids)) + + return encoding_dict, encodings + + def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]: + """ + Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the + vocabulary. + + Args: + tokens (`str` or `List[str]`): One or several token(s) to convert to token id(s). + + Returns: + `int` or `List[int]`: The token id or list of token ids. + """ + if tokens is None: + return None + + if isinstance(tokens, str): + return self._convert_token_to_id_with_added_voc(tokens) + + return [self._convert_token_to_id_with_added_voc(token) for token in tokens] + + def _convert_token_to_id_with_added_voc(self, token: str) -> int: + index = self._tokenizer.token_to_id(token) + if index is None: + return self.unk_token_id + return index + + def _convert_id_to_token(self, index: int) -> Optional[str]: + return self._tokenizer.id_to_token(int(index)) + + def _add_tokens(self, new_tokens: List[Union[str, AddedToken]], special_tokens=False) -> int: + if special_tokens: + return self._tokenizer.add_special_tokens(new_tokens) + + return self._tokenizer.add_tokens(new_tokens) + + def num_special_tokens_to_add(self, pair: bool = False) -> int: + """ + Returns the number of added tokens when encoding a sequence with special tokens. + + + + This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put + this inside your training loop. + + + + Args: + pair (`bool`, *optional*, defaults to `False`): + Whether the number of added tokens should be computed in the case of a sequence pair or a single + sequence. + + Returns: + `int`: Number of special tokens added to sequences. + """ + return self._tokenizer.num_special_tokens_to_add(pair) + + def convert_ids_to_tokens( + self, ids: Union[int, List[int]], skip_special_tokens: bool = False + ) -> Union[str, List[str]]: + """ + Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and + added tokens. + + Args: + ids (`int` or `List[int]`): + The token id (or token ids) to convert to tokens. + skip_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not to remove special tokens in the decoding. + + Returns: + `str` or `List[str]`: The decoded token(s). 
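+
+        Example (a minimal sketch; the tokenizer file name and the tokens are purely illustrative):
+
+        ```python
+        tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json")
+        ids = tokenizer.convert_tokens_to_ids(["hello", "world"])
+        tokens = tokenizer.convert_ids_to_tokens(ids, skip_special_tokens=True)
+        ```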
+ """ + if isinstance(ids, int): + return self._tokenizer.id_to_token(ids) + tokens = [] + for index in ids: + index = int(index) + if skip_special_tokens and index in self.all_special_ids: + continue + tokens.append(self._tokenizer.id_to_token(index)) + return tokens + + def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]: + return self.encode_plus(text=text, text_pair=pair, add_special_tokens=add_special_tokens, **kwargs).tokens() + + def set_truncation_and_padding( + self, + padding_strategy: PaddingStrategy, + truncation_strategy: TruncationStrategy, + max_length: int, + stride: int, + pad_to_multiple_of: Optional[int], + ): + """ + Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers + library) and restore the tokenizer settings afterwards. + + The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer set a + padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed + section. + + Args: + padding_strategy ([`~utils.PaddingStrategy`]): + The kind of padding that will be applied to the input + truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`]): + The kind of truncation that will be applied to the input + max_length (`int`): + The maximum size of a sequence. + stride (`int`): + The stride to use when handling overflow. + pad_to_multiple_of (`int`, *optional*): + If set will pad the sequence to a multiple of the provided value. This is especially useful to enable + the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). + """ + _truncation = self._tokenizer.truncation + _padding = self._tokenizer.padding + # Set truncation and padding on the backend tokenizer + if truncation_strategy == TruncationStrategy.DO_NOT_TRUNCATE: + if _truncation is not None: + self._tokenizer.no_truncation() + else: + target = { + "max_length": max_length, + "stride": stride, + "strategy": truncation_strategy.value, + "direction": self.truncation_side, + } + + # _truncation might contain more keys that the target `transformers` + # supports. Use only the target keys to trigger `enable_truncation`. + # This should enable this code to works on various `tokenizers` + # targets. 
+ if _truncation is None: + current = None + else: + current = {k: _truncation.get(k, None) for k in target} + + if current != target: + self._tokenizer.enable_truncation(**target) + + if padding_strategy == PaddingStrategy.DO_NOT_PAD: + if _padding is not None: + self._tokenizer.no_padding() + else: + length = max_length if padding_strategy == PaddingStrategy.MAX_LENGTH else None + target = { + "length": length, + "direction": self.padding_side, + "pad_id": self.pad_token_id, + "pad_token": self.pad_token, + "pad_type_id": self.pad_token_type_id, + "pad_to_multiple_of": pad_to_multiple_of, + } + if _padding != target: + self._tokenizer.enable_padding(**target) + + def _batch_encode_plus( + self, + batch_text_or_text_pairs: Union[ + List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair] + ], + add_special_tokens: bool = True, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + max_length: Optional[int] = None, + stride: int = 0, + is_split_into_words: bool = False, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[str] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + ) -> BatchEncoding: + if not isinstance(batch_text_or_text_pairs, (tuple, list)): + raise TypeError( + f"batch_text_or_text_pairs has to be a list or a tuple (got {type(batch_text_or_text_pairs)})" + ) + + # Set the truncation and padding strategy and restore the initial configuration + self.set_truncation_and_padding( + padding_strategy=padding_strategy, + truncation_strategy=truncation_strategy, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + ) + + encodings = self._tokenizer.encode_batch( + batch_text_or_text_pairs, + add_special_tokens=add_special_tokens, + is_pretokenized=is_split_into_words, + ) + + # Convert encoding to dict + # `Tokens` has type: Tuple[ + # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]], + # List[EncodingFast] + # ] + # with nested dimensions corresponding to batch, overflows, sequence length + tokens_and_encodings = [ + self._convert_encoding( + encoding=encoding, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + ) + for encoding in encodings + ] + + # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension + # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length) + # (we say ~ because the number of overflow varies with the example in the batch) + # + # To match each overflowing sample with the original sample in the batch + # we add an overflow_to_sample_mapping array (see below) + sanitized_tokens = {} + for key in tokens_and_encodings[0][0].keys(): + stack = [e for item, _ in tokens_and_encodings for e in item[key]] + sanitized_tokens[key] = stack + sanitized_encodings = [e for _, item in tokens_and_encodings for e in item] + + # If returning overflowing tokens, we need to return a mapping + # from the batch idx to the 
original sample + if return_overflowing_tokens: + overflow_to_sample_mapping = [] + for i, (toks, _) in enumerate(tokens_and_encodings): + overflow_to_sample_mapping += [i] * len(toks["input_ids"]) + sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping + + for input_ids in sanitized_tokens["input_ids"]: + self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose) + return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors) + + def _encode_plus( + self, + text: Union[TextInput, PreTokenizedInput], + text_pair: Optional[Union[TextInput, PreTokenizedInput]] = None, + add_special_tokens: bool = True, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + max_length: Optional[int] = None, + stride: int = 0, + is_split_into_words: bool = False, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[bool] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs, + ) -> BatchEncoding: + batched_input = [(text, text_pair)] if text_pair else [text] + batched_output = self._batch_encode_plus( + batched_input, + is_split_into_words=is_split_into_words, + add_special_tokens=add_special_tokens, + padding_strategy=padding_strategy, + truncation_strategy=truncation_strategy, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + + # Return tensor is None, then we can remove the leading batch axis + # Overflowing tokens are returned as a batch of output so we keep them in this case + if return_tensors is None and not return_overflowing_tokens: + batched_output = BatchEncoding( + { + key: value[0] if len(value) > 0 and isinstance(value[0], list) else value + for key, value in batched_output.items() + }, + batched_output.encodings, + ) + + self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose) + + return batched_output + + def convert_tokens_to_string(self, tokens: List[str]) -> str: + return self.backend_tokenizer.decoder.decode(tokens) + + def _decode( + self, + token_ids: Union[int, List[int]], + skip_special_tokens: bool = False, + clean_up_tokenization_spaces: bool = None, + **kwargs, + ) -> str: + self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False) + + if isinstance(token_ids, int): + token_ids = [token_ids] + text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens) + + clean_up_tokenization_spaces = ( + clean_up_tokenization_spaces + if clean_up_tokenization_spaces is not None + else self.clean_up_tokenization_spaces + ) + if clean_up_tokenization_spaces: + clean_text = self.clean_up_tokenization(text) + return clean_text + else: + return text + + def _save_pretrained( + self, + save_directory: Union[str, os.PathLike], + file_names: Tuple[str], + legacy_format: Optional[bool] = None, + filename_prefix: Optional[str] = None, + 
) -> Tuple[str]: + """ + Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens as well as in a unique JSON + file containing {config + vocab + added-tokens}. + """ + save_directory = str(save_directory) + + if self.slow_tokenizer_class is None and legacy_format is True: + raise ValueError( + "Your tokenizer does not have a legacy version defined and therefore cannot register this version. You" + " might consider leaving the legacy_format at `None` or setting it to `False`." + ) + + save_slow = ( + (legacy_format is None or legacy_format is True) + and self.slow_tokenizer_class is not None + and self.can_save_slow_tokenizer + ) + save_fast = legacy_format is None or legacy_format is False + + if save_slow: + added_tokens_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE + ) + # make sure to be foward compatible + added_vocab = {tok: index for tok, index in self.added_tokens_encoder.items() if index >= self.vocab_size} + if added_vocab: + with open(added_tokens_file, "w", encoding="utf-8") as f: + out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n" + f.write(out_str) + + vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix) + file_names = file_names + vocab_files + (added_tokens_file,) + + if save_fast: + tokenizer_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_FILE + ) + self.backend_tokenizer.save(tokenizer_file) + file_names = file_names + (tokenizer_file,) + + return file_names + + def train_new_from_iterator( + self, + text_iterator, + vocab_size, + length=None, + new_special_tokens=None, + special_tokens_map=None, + **kwargs, + ): + """ + Trains a tokenizer on a new corpus with the same defaults (in terms of special tokens or tokenization pipeline) + as the current one. + + Args: + text_iterator (generator of `List[str]`): + The training corpus. Should be a generator of batches of texts, for instance a list of lists of texts + if you have everything in memory. + vocab_size (`int`): + The size of the vocabulary you want for your tokenizer. + length (`int`, *optional*): + The total number of sequences in the iterator. This is used to provide meaningful progress tracking + new_special_tokens (list of `str` or `AddedToken`, *optional*): + A list of new special tokens to add to the tokenizer you are training. + special_tokens_map (`Dict[str, str]`, *optional*): + If you want to rename some of the special tokens this tokenizer uses, pass along a mapping old special + token name to new special token name in this argument. + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the trainer from the 🤗 Tokenizers library. + + Returns: + [`PreTrainedTokenizerFast`]: A new tokenizer of the same type as the original one, trained on + `text_iterator`. 
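+
+        Example (a minimal sketch; `old_tokenizer`, the corpus and the vocabulary size are placeholders):
+
+        ```python
+        corpus = (["some example text", "more example text"] for _ in range(100))
+        new_tokenizer = old_tokenizer.train_new_from_iterator(corpus, vocab_size=32000)
+        ```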
+ + """ + tokenizer_json = json.loads(self._tokenizer.to_str()) + # Remove added tokens for now (uses IDs of tokens) + added_tokens = tokenizer_json.pop("added_tokens") + # Remove post processor for now (uses IDs of tokens) + post_processor = tokenizer_json.pop("post_processor") + + unk_token = None + # Remove vocab + if tokenizer_json["model"]["type"] == "BPE": + tokenizer_json["model"]["vocab"] = {} + tokenizer_json["model"]["merges"] = [] + elif tokenizer_json["model"]["type"] == "Unigram": + if tokenizer_json["model"]["unk_id"] is not None: + unk_id = tokenizer_json["model"]["unk_id"] + unk_token = tokenizer_json["model"]["vocab"][unk_id][0] + if special_tokens_map is not None and unk_token in special_tokens_map: + unk_token = special_tokens_map[unk_token] + tokenizer_json["model"]["unk_id"] = 0 + tokenizer_json["model"]["vocab"] = [[unk_token, 0.0]] + elif tokenizer_json["model"]["type"] in ["WordLevel", "WordPiece"]: + tokenizer_json["model"]["vocab"] = {} + else: + raise ValueError( + f"This method does not support this type of tokenizer (found {tokenizer_json['model']['type']}) " + "only BPE, Unigram, WordLevel and WordPiece." + ) + + if ( + special_tokens_map is not None + and "unk_token" in tokenizer_json["model"] + and tokenizer_json["model"]["unk_token"] in special_tokens_map + ): + tokenizer_json["model"]["unk_token"] = special_tokens_map[tokenizer_json["model"]["unk_token"]] + + tokenizer = TokenizerFast.from_str(json.dumps(tokenizer_json)) + + # Get the special tokens from the current tokenizer if none are specified. + special_tokens = [] + for added_token in added_tokens: + special = added_token.pop("special", None) + _ = added_token.pop("id", None) + if tokenizer_json["model"]["type"] != "Unigram" and not special: + continue + if special_tokens_map is not None and added_token["content"] in special_tokens_map: + added_token["content"] = special_tokens_map[added_token["content"]] + special_tokens.append(AddedToken(**added_token)) + + if new_special_tokens is not None: + special_tokens.extend(new_special_tokens) + + # Trainer needs to know the end of word / continuing subword thingies in BPE + if ( + tokenizer_json["model"]["type"] == "BPE" + and "continuing_subword_prefix" not in kwargs + and tokenizer_json["model"]["continuing_subword_prefix"] is not None + ): + kwargs["continuing_subword_prefix"] = tokenizer_json["model"]["continuing_subword_prefix"] + if ( + tokenizer_json["model"]["type"] == "BPE" + and "end_of_word_suffix" not in kwargs + and tokenizer_json["model"]["end_of_word_suffix"] is not None + ): + kwargs["end_of_word_suffix"] = tokenizer_json["model"]["end_of_word_suffix"] + if tokenizer_json["model"]["type"] == "Unigram" and unk_token is not None: + kwargs["unk_token"] = unk_token + if tokenizer_json["pre_tokenizer"] is not None and tokenizer_json["pre_tokenizer"]["type"] == "ByteLevel": + kwargs["initial_alphabet"] = pre_tokenizers_fast.ByteLevel.alphabet() + + trainer_class = MODEL_TO_TRAINER_MAPPING[tokenizer_json["model"]["type"]] + trainer = trainer_class(vocab_size=vocab_size, special_tokens=special_tokens, **kwargs) + tokenizer.train_from_iterator(text_iterator, length=length, trainer=trainer) + + if post_processor is not None: + trained_tokenizer_json = json.loads(tokenizer.to_str()) + # Almost done, we just have to adjust the token IDs in the post processor + if "special_tokens" in post_processor: + for key in post_processor["special_tokens"]: + tokens = post_processor["special_tokens"][key]["tokens"] + if special_tokens_map is not None: + tokens = 
[special_tokens_map.get(token, token) for token in tokens] + post_processor["special_tokens"][key]["tokens"] = tokens + post_processor["special_tokens"][key]["ids"] = [tokenizer.token_to_id(token) for token in tokens] + + for special_token in ["cls", "sep"]: + if special_token in post_processor: + token, _ = post_processor[special_token] + if special_tokens_map is not None and token in special_tokens_map: + token = special_tokens_map[token] + token_id = tokenizer.token_to_id(token) + post_processor[special_token] = [token, token_id] + + trained_tokenizer_json["post_processor"] = post_processor + tokenizer = TokenizerFast.from_str(json.dumps(trained_tokenizer_json)) + + kwargs = self.init_kwargs.copy() + # Map pad/cls/mask token at the Transformers level + special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy() + special_tokens_list.remove("additional_special_tokens") + for token in special_tokens_list: + # Get the private one to avoid unnecessary warnings. + if getattr(self, f"_{token}") is not None: + special_token = getattr(self, token) + if special_tokens_map is not None and special_token in special_tokens_map: + special_token = special_tokens_map[special_token] + + special_token_full = getattr(self, f"_{token}") + if isinstance(special_token_full, AddedToken): + # Create an added token with the same parameters except the content + kwargs[token] = AddedToken( + special_token, + single_word=special_token_full.single_word, + lstrip=special_token_full.lstrip, + rstrip=special_token_full.rstrip, + normalized=special_token_full.normalized, + special=True, + ) + else: + kwargs[token] = special_token + + additional_special_tokens = self.additional_special_tokens + if new_special_tokens is not None: + additional_special_tokens.extend(new_special_tokens) + if len(additional_special_tokens) > 0: + kwargs["additional_special_tokens"] = additional_special_tokens + + return self.__class__(tokenizer_object=tokenizer, **kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/trainer.py b/env-llmeval/lib/python3.10/site-packages/transformers/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..ac014c672e73e31826f4a8cccc0a4334c530e828 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/trainer.py @@ -0,0 +1,4325 @@ +# coding=utf-8 +# Copyright 2020-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task. 
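+
+A minimal usage sketch (the model, datasets and tokenizer below are placeholders created elsewhere):
+
+    from transformers import Trainer, TrainingArguments
+
+    training_args = TrainingArguments(output_dir="out", num_train_epochs=1, per_device_train_batch_size=8)
+    trainer = Trainer(
+        model=model,
+        args=training_args,
+        train_dataset=train_dataset,
+        eval_dataset=eval_dataset,
+        tokenizer=tokenizer,
+    )
+    trainer.train()
+    metrics = trainer.evaluate()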
+""" + +import contextlib +import copy +import functools +import glob +import importlib.metadata +import inspect +import math +import os +import random +import re +import shutil +import sys +import tempfile +import time +import warnings +from collections.abc import Mapping +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union + + +# Integrations must be imported before ML frameworks: +# isort: off +from .integrations import ( + get_reporting_integration_callbacks, + hp_params, +) + +# isort: on + +import huggingface_hub.utils as hf_hub_utils +import numpy as np +import torch +import torch.distributed as dist +from huggingface_hub import ModelCard, create_repo, upload_folder +from packaging import version +from torch import nn +from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler + +from . import __version__ +from .configuration_utils import PretrainedConfig +from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator +from .debug_utils import DebugOption, DebugUnderflowOverflow +from .hyperparameter_search import ALL_HYPERPARAMETER_SEARCH_BACKENDS, default_hp_search_backend +from .integrations.deepspeed import deepspeed_init, deepspeed_load_checkpoint, is_deepspeed_available +from .integrations.tpu import tpu_spmd_dataloader +from .modelcard import TrainingSummary +from .modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model +from .models.auto.modeling_auto import ( + MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, + MODEL_MAPPING_NAMES, +) +from .optimization import Adafactor, get_scheduler +from .pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13 +from .tokenization_utils_base import PreTrainedTokenizerBase +from .trainer_callback import ( + CallbackHandler, + DefaultFlowCallback, + PrinterCallback, + ProgressCallback, + TrainerCallback, + TrainerControl, + TrainerState, +) +from .trainer_pt_utils import ( + DistributedTensorGatherer, + IterableDatasetShard, + LabelSmoother, + LayerWiseDummyOptimizer, + LengthGroupedSampler, + SequentialDistributedSampler, + distributed_broadcast_scalars, + distributed_concat, + find_batch_size, + get_dataloader_sampler, + get_model_param_count, + get_module_class_from_name, + get_parameter_names, + nested_concat, + nested_detach, + nested_numpify, + nested_xla_mesh_reduce, + reissue_pt_warnings, + remove_dummy_checkpoint, +) +from .trainer_utils import ( + PREFIX_CHECKPOINT_DIR, + BestRun, + EvalLoopOutput, + EvalPrediction, + HPSearchBackend, + HubStrategy, + IntervalStrategy, + PredictionOutput, + RemoveColumnsCollator, + TrainerMemoryTracker, + TrainOutput, + check_target_module_exists, + default_compute_objective, + denumpify_detensorize, + enable_full_determinism, + find_executable_batch_size, + get_last_checkpoint, + has_length, + neftune_post_forward_hook, + number_of_arguments, + seed_worker, + set_seed, + speed_metrics, +) +from .training_args import OptimizerNames, ParallelMode, TrainingArguments +from .utils import ( + ADAPTER_CONFIG_NAME, + ADAPTER_SAFE_WEIGHTS_NAME, + ADAPTER_WEIGHTS_NAME, + CONFIG_NAME, + SAFE_WEIGHTS_INDEX_NAME, + SAFE_WEIGHTS_NAME, + WEIGHTS_INDEX_NAME, + WEIGHTS_NAME, + PushInProgress, + PushToHubMixin, + can_return_loss, + find_labels, + is_accelerate_available, + is_apex_available, + is_bitsandbytes_available, + is_datasets_available, + is_galore_torch_available, + is_in_notebook, + is_ipex_available, + is_peft_available, + is_safetensors_available, + 
is_sagemaker_dp_enabled, + is_sagemaker_mp_enabled, + is_torch_compile_available, + is_torch_neuroncore_available, + is_torch_npu_available, + is_torch_xla_available, + logging, + strtobool, +) +from .utils.quantization_config import QuantizationMethod + + +DEFAULT_CALLBACKS = [DefaultFlowCallback] +DEFAULT_PROGRESS_CALLBACK = ProgressCallback + +if is_in_notebook(): + from .utils.notebook import NotebookProgressCallback + + DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback + +if is_apex_available(): + from apex import amp + +if is_datasets_available(): + import datasets + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + import torch_xla.debug.metrics as met + import torch_xla.distributed.spmd as xs + import torch_xla.runtime as xr + + +if is_sagemaker_mp_enabled(): + import smdistributed.modelparallel.torch as smp + from smdistributed.modelparallel import __version__ as SMP_VERSION + + IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10") + + from .trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat +else: + IS_SAGEMAKER_MP_POST_1_10 = False + + +if is_safetensors_available(): + import safetensors.torch + +if is_peft_available(): + from peft import PeftModel + + +if is_accelerate_available(): + from accelerate import Accelerator, skip_first_batches + from accelerate import __version__ as accelerate_version + from accelerate.utils import ( + DistributedDataParallelKwargs, + DistributedType, + GradientAccumulationPlugin, + load_fsdp_model, + load_fsdp_optimizer, + save_fsdp_model, + save_fsdp_optimizer, + ) + + DATA_SAMPLERS = [RandomSampler] + if version.parse(accelerate_version) > version.parse("0.23.0"): + from accelerate.data_loader import SeedableRandomSampler + + DATA_SAMPLERS += [SeedableRandomSampler] + + if is_deepspeed_available(): + from accelerate.utils import DeepSpeedSchedulerWrapper + + +def _is_peft_model(model): + if is_peft_available(): + classes_to_check = (PeftModel,) if is_peft_available() else () + # Here we also check if the model is an instance of `PeftMixedModel` introduced in peft>=0.7.0: https://github.com/huggingface/transformers/pull/28321 + if version.parse(importlib.metadata.version("peft")) >= version.parse("0.7.0"): + from peft import PeftMixedModel + + classes_to_check = (*classes_to_check, PeftMixedModel) + return isinstance(model, classes_to_check) + return False + + +def _get_fsdp_ckpt_kwargs(): + # TODO: @AjayP13, @younesbelkada replace this check with version check at the next `accelerate` release + if is_accelerate_available() and "adapter_only" in list(inspect.signature(save_fsdp_model).parameters): + return {"adapter_only": True} + else: + return {} + + +if TYPE_CHECKING: + import optuna + + +logger = logging.get_logger(__name__) + + +# Name of the files used for checkpointing +TRAINING_ARGS_NAME = "training_args.bin" +TRAINER_STATE_NAME = "trainer_state.json" +OPTIMIZER_NAME = "optimizer.pt" +OPTIMIZER_NAME_BIN = "optimizer.bin" +SCHEDULER_NAME = "scheduler.pt" +SCALER_NAME = "scaler.pt" +FSDP_MODEL_NAME = "pytorch_model_fsdp" + + +class Trainer: + """ + Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers. + + Args: + model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*): + The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed. + + + + [`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. 
You can still use + your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers + models. + + + + args ([`TrainingArguments`], *optional*): + The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the + `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided. + data_collator (`DataCollator`, *optional*): + The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will + default to [`default_data_collator`] if no `tokenizer` is provided, an instance of + [`DataCollatorWithPadding`] otherwise. + train_dataset (`torch.utils.data.Dataset` or `torch.utils.data.IterableDataset`, *optional*): + The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. + + Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a + distributed fashion, your iterable dataset should either use a internal attribute `generator` that is a + `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will + manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally + sets the seed of the RNGs used. + eval_dataset (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`]), *optional*): + The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each + dataset prepending the dictionary key to the metric name. + tokenizer ([`PreTrainedTokenizerBase`], *optional*): + The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the + maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an + interrupted training or reuse the fine-tuned model. + model_init (`Callable[[], PreTrainedModel]`, *optional*): + A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start + from a new instance of the model as given by this function. + + The function may have zero argument, or a single one containing the optuna/Ray Tune/SigOpt trial object, to + be able to choose different architectures according to hyper parameters (such as layer count, sizes of + inner layers, dropout probabilities etc). + compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*): + The function that will be used to compute metrics at evaluation. Must take a [`EvalPrediction`] and return + a dictionary string to metric values. + callbacks (List of [`TrainerCallback`], *optional*): + A list of callbacks to customize the training loop. Will add those to the list of default callbacks + detailed in [here](callback). + + If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method. + optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*, defaults to `(None, None)`): + A tuple containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your + model and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`. 
+ preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*): + A function that preprocess the logits right before caching them at each evaluation step. Must take two + tensors, the logits and the labels, and return the logits once processed as desired. The modifications made + by this function will be reflected in the predictions received by `compute_metrics`. + + Note that the labels (second parameter) will be `None` if the dataset does not have them. + + Important attributes: + + - **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`] + subclass. + - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the + original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`, + the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner + model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`. + - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from + data parallelism, this means some of the model layers are split on different GPUs). + - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set + to `False` if model parallel or deepspeed is used, or if the default + `TrainingArguments.place_model_on_device` is overridden to return `False` . + - **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while + in `train`) + + """ + + # Those are used as methods of the Trainer in examples. + from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state + + def __init__( + self, + model: Union[PreTrainedModel, nn.Module] = None, + args: TrainingArguments = None, + data_collator: Optional[DataCollator] = None, + train_dataset: Optional[Dataset] = None, + eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, + tokenizer: Optional[PreTrainedTokenizerBase] = None, + model_init: Optional[Callable[[], PreTrainedModel]] = None, + compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, + callbacks: Optional[List[TrainerCallback]] = None, + optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), + preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, + ): + if args is None: + output_dir = "tmp_trainer" + logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.") + args = TrainingArguments(output_dir=output_dir) + self.args = args + # Seed must be set before instantiating the model when using model + enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed) + self.hp_name = None + self.deepspeed = None + self.is_in_train = False + + self.create_accelerator_and_postprocess() + + # memory metrics - must set up as early as possible + self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics) + self._memory_tracker.start() + + # set the correct log level depending on the node + log_level = args.get_process_log_level() + logging.set_verbosity(log_level) + + # force device and distributed setup init explicitly + args._setup_devices + + if model is None: + if model_init is not None: + self.model_init = model_init + model = self.call_model_init() + else: + raise 
RuntimeError("`Trainer` requires either a `model` or `model_init` argument") + else: + if model_init is not None: + warnings.warn( + "`Trainer` requires either a `model` or `model_init` argument, but not both. `model_init` will" + " overwrite your model when calling the `train` method. This will become a fatal error in the next" + " release.", + FutureWarning, + ) + self.model_init = model_init + + if model.__class__.__name__ in MODEL_MAPPING_NAMES: + raise ValueError( + f"The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only " + "computes hidden states and does not accept any labels. You should choose a model with a head " + "suitable for your task like any of the `AutoModelForXxx` listed at " + "https://huggingface.co/docs/transformers/model_doc/auto" + ) + + if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel: + self.is_model_parallel = True + else: + self.is_model_parallel = False + + if getattr(model, "hf_device_map", None) is not None: + devices = [device for device in set(model.hf_device_map.values()) if device not in ["cpu", "disk"]] + if len(devices) > 1: + self.is_model_parallel = True + elif len(devices) == 1: + self.is_model_parallel = self.args.device != torch.device(devices[0]) + else: + self.is_model_parallel = False + + # warn users + if self.is_model_parallel: + logger.info( + "You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set" + " to `True` to avoid any unexpected behavior such as device placement mismatching." + ) + + _is_quantized_and_base_model = getattr(model, "is_quantized", False) and not getattr( + model, "_hf_peft_config_loaded", False + ) + _quantization_method_supports_training = ( + getattr(model, "hf_quantizer", None) is not None and model.hf_quantizer.is_trainable + ) + + # Filter out quantized + compiled models + if _is_quantized_and_base_model and hasattr(model, "_orig_mod"): + raise ValueError( + "You cannot fine-tune quantized model with `torch.compile()` make sure to pass a non-compiled model when fine-tuning a quantized model with PEFT" + ) + + # At this stage the model is already loaded + if _is_quantized_and_base_model and not _is_peft_model(model): + raise ValueError( + "You cannot perform fine-tuning on purely quantized models. Please attach trainable adapters on top of" + " the quantized model to correctly perform fine-tuning. Please see: https://huggingface.co/docs/transformers/peft" + " for more details" + ) + elif _is_quantized_and_base_model and not _quantization_method_supports_training: + raise ValueError( + f"The model you are trying to fine-tune is quantized with {model.hf_quantizer.quantization_config.quant_method}" + " but that quantization method do not support training. Please open an issue on GitHub: https://github.com/huggingface/transformers" + f" to request the support for training support for {model.hf_quantizer.quantization_config.quant_method}" + ) + + self.is_fsdp_xla_enabled = args.fsdp_config["xla"] + if len(args.fsdp) > 0: + if self.is_deepspeed_enabled: + raise ValueError( + "Using --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags." + ) + if not args.fsdp_config["xla"] and args.parallel_mode != ParallelMode.DISTRIBUTED: + raise ValueError("Using fsdp only works in distributed training.") + + # one place to sort out whether to place the model on device or not + # postpone switching model to cuda when: + # 1. 
MP - since we are trying to fit a much bigger than 1 gpu model + # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway, + # and we only use deepspeed for training at the moment + # 3. full bf16 or fp16 eval - since the model needs to be cast to the right dtype first + # 4. FSDP - same as MP + self.place_model_on_device = args.place_model_on_device + if ( + self.is_model_parallel + or self.is_deepspeed_enabled + or ((args.fp16_full_eval or args.bf16_full_eval) and not args.do_train) + or self.is_fsdp_xla_enabled + or self.is_fsdp_enabled + ): + self.place_model_on_device = False + + default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer) + self.data_collator = data_collator if data_collator is not None else default_collator + self.train_dataset = train_dataset + self.eval_dataset = eval_dataset + self.tokenizer = tokenizer + + # Bnb Quantized models doesn't support `.to` operation. + if ( + self.place_model_on_device + and not getattr(model, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES + ): + self._move_model_to_device(model, args.device) + + # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs + if self.is_model_parallel: + self.args._n_gpu = 1 + + # later use `self.model is self.model_wrapped` to check if it's wrapped or not + self.model_wrapped = model + self.model = model + + self.neftune_noise_alpha = args.neftune_noise_alpha + + self.compute_metrics = compute_metrics + self.preprocess_logits_for_metrics = preprocess_logits_for_metrics + self.optimizer, self.lr_scheduler = optimizers + if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None): + raise RuntimeError( + "Passing a `model_init` is incompatible with providing the `optimizers` argument. " + "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." + ) + if is_torch_xla_available() and self.optimizer is not None: + for param in self.model.parameters(): + model_device = param.device + break + for param_group in self.optimizer.param_groups: + if len(param_group["params"]) > 0: + optimizer_device = param_group["params"][0].device + break + if model_device != optimizer_device: + raise ValueError( + "The model and the optimizer parameters are not on the same device, which probably means you" + " created an optimizer around your model **before** putting on the device and passing it to the" + " `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and" + " `model.to(xm.xla_device())` is performed before the optimizer creation in your script." + ) + if (self.is_deepspeed_enabled or self.is_fsdp_xla_enabled or self.is_fsdp_enabled) and ( + self.optimizer is not None or self.lr_scheduler is not None + ): + raise RuntimeError( + "Passing `optimizers` is not allowed if Deepspeed or PyTorch FSDP is enabled. " + "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." + ) + default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to) + callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks + self.callback_handler = CallbackHandler( + callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler + ) + self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK) + + # Will be set to True by `self._setup_loggers()` on first call to `self.log()`. 
+ self._loggers_initialized = False + + # Create distant repo and output directory if needed + self.hub_model_id = None + if self.args.push_to_hub: + self.init_hf_repo() + if self.args.should_save: + os.makedirs(self.args.output_dir, exist_ok=True) + + if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)): + raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).") + + if args.max_steps > 0: + logger.info("max_steps is given, it will override any value given in num_train_epochs") + + if train_dataset is not None and not has_length(train_dataset) and args.max_steps <= 0: + raise ValueError( + "The train_dataset does not implement __len__, max_steps has to be specified. " + "The number of steps needs to be known in advance for the learning rate scheduler." + ) + + if ( + train_dataset is not None + and isinstance(train_dataset, torch.utils.data.IterableDataset) + and args.group_by_length + ): + raise ValueError("the `--group_by_length` option is only available for `Dataset`, not `IterableDataset") + + self._signature_columns = None + + # Mixed precision setup + self.use_apex = False + self.use_cpu_amp = False + + # Mixed precision setup for SageMaker Model Parallel + if is_sagemaker_mp_enabled(): + # BF16 + model parallelism in SageMaker: currently not supported, raise an error + if args.bf16: + raise ValueError("SageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead ") + + if IS_SAGEMAKER_MP_POST_1_10: + # When there's mismatch between SMP config and trainer argument, use SMP config as truth + if args.fp16 != smp.state.cfg.fp16: + logger.warning( + f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, " + f"but FP16 provided in trainer argument is {args.fp16}, " + f"setting to {smp.state.cfg.fp16}" + ) + args.fp16 = smp.state.cfg.fp16 + else: + # smp < 1.10 does not support fp16 in trainer. + if hasattr(smp.state.cfg, "fp16"): + logger.warning( + f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, " + "but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer." + ) + if (args.fp16 or args.bf16) and args.half_precision_backend == "auto": + if args.device == torch.device("cpu"): + if args.fp16: + raise ValueError("Tried to use `fp16` but it is not supported on cpu") + else: + args.half_precision_backend = "cpu_amp" + logger.info(f"Using {args.half_precision_backend} half precision backend") + + if (args.fp16 or args.bf16) and not (self.is_deepspeed_enabled or is_sagemaker_mp_enabled()): + # deepspeed and SageMaker Model Parallel manage their own half precision + if args.half_precision_backend == "cpu_amp": + self.use_cpu_amp = True + self.amp_dtype = torch.bfloat16 + elif args.half_precision_backend == "apex": + if not is_apex_available(): + raise ImportError( + "Using FP16 with APEX but APEX is not installed, please refer to" + " https://www.github.com/nvidia/apex." 
+ ) + self.use_apex = True + + # Label smoothing + if self.args.label_smoothing_factor != 0: + self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor) + else: + self.label_smoother = None + + self.state = TrainerState( + is_local_process_zero=self.is_local_process_zero(), + is_world_process_zero=self.is_world_process_zero(), + ) + + self.control = TrainerControl() + # Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then + # returned to 0 every time flos need to be logged + self.current_flos = 0 + self.hp_search_backend = None + default_label_names = find_labels(self.model.__class__) + self.label_names = default_label_names if self.args.label_names is None else self.args.label_names + self.can_return_loss = can_return_loss(self.model.__class__) + self.control = self.callback_handler.on_init_end(self.args, self.state, self.control) + + # Internal variables to help with automatic batch size reduction + self._train_batch_size = args.train_batch_size + self._created_lr_scheduler = False + + # very last + self._memory_tracker.stop_and_update_metrics() + + # torch.compile + if args.torch_compile and not is_torch_compile_available(): + raise RuntimeError("Using torch.compile requires PyTorch 2.0 or higher.") + + self.is_fsdp_xla_v2_enabled = args.fsdp_config.get("xla_fsdp_v2", False) + if self.is_fsdp_xla_v2_enabled: + # Prepare the SPMD mesh that is going to be used by the data loader and the FSDPv2 wrapper. + # Tensor axis is just a placeholder where it will not be used in FSDPv2. + num_devices = xr.global_runtime_device_count() + xs.set_global_mesh(xs.Mesh(np.array(range(num_devices)), (num_devices, 1), axis_names=("fsdp", "tensor"))) + + def _activate_neftune(self, model): + r""" + Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper: + https://arxiv.org/abs/2310.05914 + """ + unwrapped_model = unwrap_model(model) + + if _is_peft_model(unwrapped_model): + embeddings = unwrapped_model.base_model.model.get_input_embeddings() + else: + embeddings = unwrapped_model.get_input_embeddings() + + del unwrapped_model + + embeddings.neftune_noise_alpha = self.neftune_noise_alpha + hook_handle = embeddings.register_forward_hook(neftune_post_forward_hook) + self.neftune_hook_handle = hook_handle + return model + + def _deactivate_neftune(self, model): + """ + Deactivates the neftune method. Make sure to call `_activate_neftune` first. + """ + if not hasattr(self, "neftune_hook_handle"): + raise ValueError("Neftune is not activated make sure to call `trainer._activate_neftune()` first") + + unwrapped_model = unwrap_model(model) + + if _is_peft_model(unwrapped_model): + embeddings = unwrapped_model.base_model.model.get_input_embeddings() + else: + embeddings = unwrapped_model.get_input_embeddings() + + self.neftune_hook_handle.remove() + del embeddings.neftune_noise_alpha, unwrapped_model + + def add_callback(self, callback): + """ + Add a callback to the current list of [`~transformers.TrainerCallback`]. + + Args: + callback (`type` or [`~transformers.TrainerCallback`]): + A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the + first case, will instantiate a member of that class. + """ + self.callback_handler.add_callback(callback) + + def pop_callback(self, callback): + """ + Remove a callback from the current list of [`~transformers.TrainerCallback`] and returns it. + + If the callback is not found, returns `None` (and no error is raised). 
+ + Args: + callback (`type` or [`~transformers.TrainerCallback`]): + A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the + first case, will pop the first member of that class found in the list of callbacks. + + Returns: + [`~transformers.TrainerCallback`]: The callback removed, if found. + """ + return self.callback_handler.pop_callback(callback) + + def remove_callback(self, callback): + """ + Remove a callback from the current list of [`~transformers.TrainerCallback`]. + + Args: + callback (`type` or [`~transformers.TrainerCallback`]): + A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the + first case, will remove the first member of that class found in the list of callbacks. + """ + self.callback_handler.remove_callback(callback) + + def _move_model_to_device(self, model, device): + model = model.to(device) + # Moving a model to an XLA device disconnects the tied weights, so we have to retie them. + if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, "tie_weights"): + model.tie_weights() + + def _set_signature_columns_if_needed(self): + if self._signature_columns is None: + # Inspect model forward signature to keep only the arguments it accepts. + model_to_inspect = self.model + if _is_peft_model(self.model): + if hasattr(self.model, "get_base_model"): + model_to_inspect = self.model.get_base_model() + else: + # PeftMixedModel do not provide a `get_base_model` method + model_to_inspect = self.model.base_model.model + signature = inspect.signature(model_to_inspect.forward) + self._signature_columns = list(signature.parameters.keys()) + # Labels may be named label or label_ids, the default data collator handles that. + self._signature_columns += list(set(["label", "label_ids"] + self.label_names)) + + def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None): + if not self.args.remove_unused_columns: + return dataset + self._set_signature_columns_if_needed() + signature_columns = self._signature_columns + + ignored_columns = list(set(dataset.column_names) - set(signature_columns)) + if len(ignored_columns) > 0: + dset_description = "" if description is None else f"in the {description} set" + logger.info( + f"The following columns {dset_description} don't have a corresponding argument in " + f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}." + f" If {', '.join(ignored_columns)} are not expected by `{self.model.__class__.__name__}.forward`, " + " you can safely ignore this message." 
+ ) + + columns = [k for k in signature_columns if k in dataset.column_names] + + if version.parse(datasets.__version__) < version.parse("1.4.0"): + dataset.set_format( + type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"] + ) + return dataset + else: + return dataset.remove_columns(ignored_columns) + + def _get_collator_with_removed_columns( + self, data_collator: Callable, description: Optional[str] = None + ) -> Callable: + """Wrap the data collator in a callable removing unused columns.""" + if not self.args.remove_unused_columns: + return data_collator + self._set_signature_columns_if_needed() + signature_columns = self._signature_columns + + remove_columns_collator = RemoveColumnsCollator( + data_collator=data_collator, + signature_columns=signature_columns, + logger=logger, + description=description, + model_name=self.model.__class__.__name__, + ) + return remove_columns_collator + + def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]: + if self.train_dataset is None or not has_length(self.train_dataset): + return None + + # Build the sampler. + if self.args.group_by_length: + if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset): + lengths = ( + self.train_dataset[self.args.length_column_name] + if self.args.length_column_name in self.train_dataset.column_names + else None + ) + else: + lengths = None + model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None + return LengthGroupedSampler( + self.args.train_batch_size * self.args.gradient_accumulation_steps, + dataset=self.train_dataset, + lengths=lengths, + model_input_name=model_input_name, + ) + + else: + return RandomSampler(self.train_dataset) + + def get_train_dataloader(self) -> DataLoader: + """ + Returns the training [`~torch.utils.data.DataLoader`]. + + Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed + training if necessary) otherwise. + + Subclass and override this method if you want to inject some custom behavior. 
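+
+        Example (a minimal, illustrative sketch of overriding this method; `sample_weights` is a
+        user-supplied sequence of per-example weights and is not a `Trainer` attribute):
+
+        ```python
+        from torch.utils.data import DataLoader, WeightedRandomSampler
+
+        class WeightedTrainer(Trainer):
+            def get_train_dataloader(self) -> DataLoader:
+                # Sample training examples proportionally to the (assumed) user-provided weights.
+                sampler = WeightedRandomSampler(sample_weights, num_samples=len(self.train_dataset))
+                return DataLoader(
+                    self.train_dataset,
+                    batch_size=self._train_batch_size,
+                    sampler=sampler,
+                    collate_fn=self.data_collator,
+                )
+        ```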
+ """ + if self.train_dataset is None: + raise ValueError("Trainer: training requires a train_dataset.") + + train_dataset = self.train_dataset + data_collator = self.data_collator + if is_datasets_available() and isinstance(train_dataset, datasets.Dataset): + train_dataset = self._remove_unused_columns(train_dataset, description="training") + else: + data_collator = self._get_collator_with_removed_columns(data_collator, description="training") + + dataloader_params = { + "batch_size": self._train_batch_size, + "collate_fn": data_collator, + "num_workers": self.args.dataloader_num_workers, + "pin_memory": self.args.dataloader_pin_memory, + "persistent_workers": self.args.dataloader_persistent_workers, + } + + if not isinstance(train_dataset, torch.utils.data.IterableDataset): + dataloader_params["sampler"] = self._get_train_sampler() + dataloader_params["drop_last"] = self.args.dataloader_drop_last + dataloader_params["worker_init_fn"] = seed_worker + dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor + + return self.accelerator.prepare(DataLoader(train_dataset, **dataloader_params)) + + def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]: + # Deprecated code + if self.args.use_legacy_prediction_loop: + if is_torch_xla_available(): + return SequentialDistributedSampler( + eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal() + ) + elif is_sagemaker_mp_enabled(): + return SequentialDistributedSampler( + eval_dataset, + num_replicas=smp.dp_size(), + rank=smp.dp_rank(), + batch_size=self.args.per_device_eval_batch_size, + ) + else: + return SequentialSampler(eval_dataset) + + if self.args.world_size <= 1: + return SequentialSampler(eval_dataset) + else: + return None + + def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader: + """ + Returns the evaluation [`~torch.utils.data.DataLoader`]. + + Subclass and override this method if you want to inject some custom behavior. + + Args: + eval_dataset (`torch.utils.data.Dataset`, *optional*): + If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted + by the `model.forward()` method are automatically removed. It must implement `__len__`. 
+ """ + if eval_dataset is None and self.eval_dataset is None: + raise ValueError("Trainer: evaluation requires an eval_dataset.") + + # If we have persistent workers, don't do a fork bomb especially as eval datasets + # don't change during training + if hasattr(self, "_eval_dataloader") and self.args.dataloader_persistent_workers: + return self.accelerator.prepare(self._eval_dataloader) + eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset + data_collator = self.data_collator + + if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset): + eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation") + else: + data_collator = self._get_collator_with_removed_columns(data_collator, description="evaluation") + + dataloader_params = { + "batch_size": self.args.eval_batch_size, + "collate_fn": data_collator, + "num_workers": self.args.dataloader_num_workers, + "pin_memory": self.args.dataloader_pin_memory, + "persistent_workers": self.args.dataloader_persistent_workers, + } + + if not isinstance(eval_dataset, torch.utils.data.IterableDataset): + dataloader_params["sampler"] = self._get_eval_sampler(eval_dataset) + dataloader_params["drop_last"] = self.args.dataloader_drop_last + dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor + + # accelerator.free_memory() will destroy the references, so + # we need to store the non-prepared version + eval_dataloader = DataLoader(eval_dataset, **dataloader_params) + if self.args.dataloader_persistent_workers: + self._eval_dataloader = eval_dataloader + + return self.accelerator.prepare(eval_dataloader) + + def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader: + """ + Returns the test [`~torch.utils.data.DataLoader`]. + + Subclass and override this method if you want to inject some custom behavior. + + Args: + test_dataset (`torch.utils.data.Dataset`, *optional*): + The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. It must implement `__len__`. + """ + data_collator = self.data_collator + + if is_datasets_available() and isinstance(test_dataset, datasets.Dataset): + test_dataset = self._remove_unused_columns(test_dataset, description="test") + else: + data_collator = self._get_collator_with_removed_columns(data_collator, description="test") + + dataloader_params = { + "batch_size": self.args.eval_batch_size, + "collate_fn": data_collator, + "num_workers": self.args.dataloader_num_workers, + "pin_memory": self.args.dataloader_pin_memory, + "persistent_workers": self.args.dataloader_persistent_workers, + } + + if not isinstance(test_dataset, torch.utils.data.IterableDataset): + dataloader_params["sampler"] = self._get_eval_sampler(test_dataset) + dataloader_params["drop_last"] = self.args.dataloader_drop_last + dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor + + # We use the same batch_size as for eval. + return self.accelerator.prepare(DataLoader(test_dataset, **dataloader_params)) + + def create_optimizer_and_scheduler(self, num_training_steps: int): + """ + Setup the optimizer and the learning rate scheduler. + + We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the + Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or + `create_scheduler`) in a subclass. 
+ """ + self.create_optimizer() + if IS_SAGEMAKER_MP_POST_1_10 and smp.state.cfg.fp16: + # If smp >= 1.10 and fp16 is enabled, we unwrap the optimizer + optimizer = self.optimizer.optimizer + else: + optimizer = self.optimizer + self.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer) + + def get_decay_parameter_names(self, model) -> List[str]: + """ + Get all parameter names that weight decay will be applied to + + Note that some models implement their own layernorm instead of calling nn.LayerNorm, weight decay could still + apply to those modules since this function only filter out instance of nn.LayerNorm + """ + decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS) + decay_parameters = [name for name in decay_parameters if "bias" not in name] + return decay_parameters + + def create_optimizer(self): + """ + Setup the optimizer. + + We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the + Trainer's init through `optimizers`, or subclass and override this method in a subclass. + """ + opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model + + if self.optimizer is None: + decay_parameters = self.get_decay_parameter_names(opt_model) + optimizer_grouped_parameters = [ + { + "params": [ + p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad) + ], + "weight_decay": self.args.weight_decay, + }, + { + "params": [ + p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad) + ], + "weight_decay": 0.0, + }, + ] + + optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args, opt_model) + + # Overwrite `params` in case it's created by `get_optimizer_cls_and_kwargs` + # e.g. for GaLore optimizer. + if "params" in optimizer_kwargs: + optimizer_grouped_parameters = optimizer_kwargs.pop("params") + + # For layer-wise dummy optimizers we overwrite optimizer_grouped_parameters with `optimizer_dict` + # to avoid arguments conflicts. + if "optimizer_dict" in optimizer_kwargs: + optimizer_grouped_parameters = optimizer_kwargs.pop("optimizer_dict") + + self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) + if optimizer_cls.__name__ == "Adam8bit": + import bitsandbytes + + manager = bitsandbytes.optim.GlobalOptimManager.get_instance() + + skipped = 0 + for module in opt_model.modules(): + if isinstance(module, nn.Embedding): + skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) + logger.info(f"skipped {module}: {skipped/2**20}M params") + manager.register_module_override(module, "weight", {"optim_bits": 32}) + logger.debug(f"bitsandbytes: will optimize {module} in fp32") + logger.info(f"skipped: {skipped/2**20}M params") + + if is_sagemaker_mp_enabled(): + self.optimizer = smp.DistributedOptimizer(self.optimizer) + + return self.optimizer + + @staticmethod + def get_optimizer_cls_and_kwargs( + args: TrainingArguments, model: Optional[PreTrainedModel] = None + ) -> Tuple[Any, Any]: + """ + Returns the optimizer class and optimizer parameters based on the training arguments. + + Args: + args (`transformers.training_args.TrainingArguments`): + The training arguments for the training session. 
+ + """ + + # parse args.optim_args + optim_args = {} + if args.optim_args: + for mapping in args.optim_args.replace(" ", "").split(","): + key, value = mapping.split("=") + optim_args[key] = value + + optimizer_kwargs = {"lr": args.learning_rate} + + adam_kwargs = { + "betas": (args.adam_beta1, args.adam_beta2), + "eps": args.adam_epsilon, + } + if args.optim == OptimizerNames.ADAFACTOR: + optimizer_cls = Adafactor + optimizer_kwargs.update({"scale_parameter": False, "relative_step": False}) + elif args.optim == OptimizerNames.ADAMW_HF: + from .optimization import AdamW + + optimizer_cls = AdamW + optimizer_kwargs.update(adam_kwargs) + elif args.optim in [OptimizerNames.ADAMW_TORCH, OptimizerNames.ADAMW_TORCH_FUSED]: + from torch.optim import AdamW + + optimizer_cls = AdamW + optimizer_kwargs.update(adam_kwargs) + if args.optim == OptimizerNames.ADAMW_TORCH_FUSED: + optimizer_kwargs.update({"fused": True}) + elif args.optim == OptimizerNames.ADAMW_TORCH_XLA: + try: + from torch_xla.amp.syncfree import AdamW + + optimizer_cls = AdamW + optimizer_kwargs.update(adam_kwargs) + except ImportError: + raise ValueError("Trainer failed to import syncfree AdamW from torch_xla.") + elif args.optim == OptimizerNames.ADAMW_TORCH_NPU_FUSED: + try: + from torch_npu.optim import NpuFusedAdamW + + optimizer_cls = NpuFusedAdamW + optimizer_kwargs.update(adam_kwargs) + except ImportError: + raise ValueError("Trainer failed to import FusedAdamW from torch_npu.") + elif args.optim == OptimizerNames.ADAMW_APEX_FUSED: + try: + from apex.optimizers import FusedAdam + + optimizer_cls = FusedAdam + optimizer_kwargs.update(adam_kwargs) + except ImportError: + raise ValueError("Trainer tried to instantiate apex FusedAdam but apex is not installed!") + elif args.optim in [ + OptimizerNames.ADAMW_BNB, + OptimizerNames.ADAMW_8BIT, + OptimizerNames.PAGED_ADAMW, + OptimizerNames.PAGED_ADAMW_8BIT, + OptimizerNames.LION, + OptimizerNames.LION_8BIT, + OptimizerNames.PAGED_LION, + OptimizerNames.PAGED_LION_8BIT, + OptimizerNames.RMSPROP_BNB, + OptimizerNames.RMSPROP_8BIT, + OptimizerNames.RMSPROP_32BIT, + ]: + try: + from bitsandbytes.optim import AdamW, Lion, RMSprop + + is_paged = False + optim_bits = 32 + optimizer_cls = None + additional_optim_kwargs = adam_kwargs + if "paged" in args.optim: + is_paged = True + if "8bit" in args.optim: + optim_bits = 8 + if "adam" in args.optim: + optimizer_cls = AdamW + elif "lion" in args.optim: + optimizer_cls = Lion + additional_optim_kwargs = {"betas": (args.adam_beta1, args.adam_beta2)} + elif "rmsprop" in args.optim: + optimizer_cls = RMSprop + # Above we pass all `adam_kwargs` to the optimizer, here + # we only pass `optim_args` which can be passed by the user. + additional_optim_kwargs = optim_args + + bnb_kwargs = {"optim_bits": optim_bits} + if "rmsprop" not in args.optim: + bnb_kwargs["is_paged"] = is_paged + + optimizer_kwargs.update(additional_optim_kwargs) + optimizer_kwargs.update(bnb_kwargs) + except ImportError: + raise ValueError("Trainer tried to instantiate bnb optimizer but bnb is not installed!") + if is_bitsandbytes_available() and version.parse( + importlib.metadata.version("bitsandbytes") + ) < version.parse("0.41.1"): + logger.warning( + "You are using 8-bit optimizers with a version of `bitsandbytes` < 0.41.1. " + "It is recommended to update your version as a major bug has been fixed in 8-bit optimizers." 
+ ) + elif args.optim == OptimizerNames.ADAMW_ANYPRECISION: + try: + from torchdistx.optimizers import AnyPrecisionAdamW + + optimizer_cls = AnyPrecisionAdamW + optimizer_kwargs.update(adam_kwargs) + + # TODO Change dtypes back to M=FP32, Var = BF16, Kahan = False once they can be cast together in torchdistx. + optimizer_kwargs.update( + { + "use_kahan_summation": strtobool(optim_args.get("use_kahan_summation", "False")), + "momentum_dtype": getattr(torch, optim_args.get("momentum_dtype", "float32")), + "variance_dtype": getattr(torch, optim_args.get("variance_dtype", "float32")), + "compensation_buffer_dtype": getattr( + torch, optim_args.get("compensation_buffer_dtype", "bfloat16") + ), + } + ) + except ImportError: + raise ValueError("Please install https://github.com/pytorch/torchdistx") + elif args.optim == OptimizerNames.SGD: + optimizer_cls = torch.optim.SGD + elif args.optim == OptimizerNames.ADAGRAD: + optimizer_cls = torch.optim.Adagrad + elif args.optim == OptimizerNames.RMSPROP: + optimizer_cls = torch.optim.RMSprop + elif args.optim in [ + OptimizerNames.GALORE_ADAMW, + OptimizerNames.GALORE_ADAMW_8BIT, + OptimizerNames.GALORE_ADAFACTOR, + OptimizerNames.GALORE_ADAMW_LAYERWISE, + OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE, + OptimizerNames.GALORE_ADAFACTOR_LAYERWISE, + ]: + if not is_galore_torch_available(): + raise ImportError( + "You need to install `galore_torch` in order to use GaLore optimizers" + " install it with `pip install git+https://github.com/jiaweizzhao/GaLore`" + ) + from galore_torch import GaLoreAdafactor, GaLoreAdamW, GaLoreAdamW8bit + + is_layerwise = args.optim.lower().endswith("layerwise") + if is_layerwise and args.parallel_mode == ParallelMode.DISTRIBUTED: + raise NotImplementedError("Layer-wise GaLore does not support DDP at this time") + + optimizer_mapping = { + OptimizerNames.GALORE_ADAMW: GaLoreAdamW, + OptimizerNames.GALORE_ADAMW_8BIT: GaLoreAdamW8bit, + OptimizerNames.GALORE_ADAFACTOR: GaLoreAdafactor, + OptimizerNames.GALORE_ADAMW_LAYERWISE: GaLoreAdamW, + OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE: GaLoreAdamW8bit, + OptimizerNames.GALORE_ADAFACTOR_LAYERWISE: GaLoreAdafactor, + } + + optimizer_cls = optimizer_mapping[args.optim] + + if args.optim_target_modules is None: + raise ValueError( + "You need to define a `optim_target_modules` in order to properly use GaLore optimizers" + ) + + if not isinstance(args.optim_target_modules, (list, str)): + raise ValueError( + f"`optim_target_modules` has to be a list of strings, a string corresponding to a regex, or a specific module or 'all-linear', you passed {args.optim_target_modules}" + ) + + if model is None: + raise ValueError("You need to pass a model in order to correctly initialize a GaLore optimizer.") + + logger.warning( + "Activated GaLoRE fine-tuning, depending on your model size and hardware, the training might take a while before starting. Please be patient !" + ) + + all_linear = ( + isinstance(args.optim_target_modules, str) + and args.optim_target_modules.replace("_", "-") == "all-linear" + ) + + galore_params = [] + galore_params_names = [] + for module_name, module in model.named_modules(): + target_module_exists, is_regex = check_target_module_exists( + args.optim_target_modules, module_name, return_is_regex=True + ) + + if not isinstance(module, nn.Linear): + # Warn in case we match but it's not a linear layer + if target_module_exists and not is_regex: + logger.warning( + f"{module_name} has been matched but ignored as GaLore only supports linear layers. 
Please double check your `optim_target_modules`!" + ) + + continue + + if not target_module_exists and not all_linear: + continue + + galore_params.append(module.weight) + galore_params_names.append(module_name + ".weight") + + if len(galore_params) == 0: + raise ValueError( + f"None of the target modules were found! ({args.optim_target_modules}). Please make sure to pass a valid `target_modules`." + ) + + non_galore_params = [p for n, p in model.named_parameters() if n not in galore_params_names] + + galore_optim_kwargs = { + "rank": int(optim_args.pop("rank", 128)), + "update_proj_gap": int(optim_args.pop("update_proj_gap", 200)), + "scale": float(optim_args.pop("scale", 0.25)), + "proj_type": optim_args.pop("proj_type", "std"), + } + + # The default args are from the official repository: https://github.com/jiaweizzhao/GaLore + param_groups = [ + {"params": non_galore_params}, + {"params": galore_params, **galore_optim_kwargs}, + ] + + if is_layerwise: + # For layer-wise optimizers, the optimization step is done through post accumulation + # gradient hooks. The trick is to first attach these hooks to the model parameters then + # create a dummy optimizer that will perform no-ops in the Trainer. + # See the original implementation or the nice implementation from @hiyouga + # here: https://github.com/hiyouga/LLaMA-Factory/commit/8664262cde3919e10eaecbd66e8c5d356856362e#diff-ebe08ab14496dfb9e06075f0fdd36799ef6d1535cc4dd4715b74c4e3e06fe3ba + if args.gradient_accumulation_steps != 1: + raise ValueError("Layerwise GaLoRE optimizer do not support gradient accumulation !") + + optimizer_dict = {} + for param in non_galore_params: + param_groups = [{"params": [param]}] + optimizer_dict[param] = optimizer_cls(param_groups, **optimizer_kwargs) + for param in galore_params: + param_groups = [{"params": [param], **galore_optim_kwargs}] + optimizer_dict[param] = optimizer_cls(param_groups, **optimizer_kwargs) + + def optimizer_hook(param): + if param.grad is not None: + optimizer_dict[param].step() + optimizer_dict[param].zero_grad() + + for param in model.parameters(): + if param.requires_grad: + param.register_post_accumulate_grad_hook(optimizer_hook) + + optimizer_cls = LayerWiseDummyOptimizer + optimizer_kwargs.update({"optimizer_dict": optimizer_dict}) + + optimizer_kwargs.update({"params": param_groups}) + + if args.optim == OptimizerNames.GALORE_ADAFACTOR: + optimizer_kwargs.update({"scale_parameter": False, "relative_step": False}) + else: + raise ValueError(f"Trainer cannot instantiate unsupported optimizer: {args.optim}") + return optimizer_cls, optimizer_kwargs + + def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None): + """ + Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or + passed as an argument. + + Args: + num_training_steps (int): The number of training steps to do. + """ + if self.lr_scheduler is None: + self.lr_scheduler = get_scheduler( + self.args.lr_scheduler_type, + optimizer=self.optimizer if optimizer is None else optimizer, + num_warmup_steps=self.args.get_warmup_steps(num_training_steps), + num_training_steps=num_training_steps, + scheduler_specific_kwargs=self.args.lr_scheduler_kwargs, + ) + self._created_lr_scheduler = True + return self.lr_scheduler + + def num_examples(self, dataloader: DataLoader) -> int: + """ + Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. 
When + dataloader.dataset does not exist or has no length, estimates as best it can + """ + try: + dataset = dataloader.dataset + # Special case for IterableDatasetShard, we need to dig deeper + if isinstance(dataset, IterableDatasetShard): + return len(dataloader.dataset.dataset) + return len(dataloader.dataset) + except (NameError, AttributeError, TypeError): # no dataset or length, estimate by length of dataloader + return len(dataloader) * self.args.per_device_train_batch_size + + def num_tokens(self, train_dl: DataLoader, max_steps: Optional[int] = None) -> int: + """ + Helper to get number of tokens in a [`~torch.utils.data.DataLoader`] by enumerating dataloader. + """ + train_tokens = 0 + try: + for step, batch in enumerate(train_dl): + tokens = batch["input_ids"].numel() + if max_steps is not None: + return tokens * max_steps + train_tokens += tokens + return train_tokens + except KeyError: + logger.warning("Cannot get num_tokens from dataloader") + return train_tokens + + def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]): + """HP search setup code""" + self._trial = trial + + if self.hp_search_backend is None or trial is None: + return + if self.hp_search_backend == HPSearchBackend.OPTUNA: + params = self.hp_space(trial) + elif self.hp_search_backend == HPSearchBackend.RAY: + params = trial + params.pop("wandb", None) + elif self.hp_search_backend == HPSearchBackend.SIGOPT: + params = {k: int(v) if isinstance(v, str) else v for k, v in trial.assignments.items()} + elif self.hp_search_backend == HPSearchBackend.WANDB: + params = trial + + for key, value in params.items(): + if not hasattr(self.args, key): + logger.warning( + f"Trying to set {key} in the hyperparameter search but there is no corresponding field in" + " `TrainingArguments`." 
+ ) + continue + old_attr = getattr(self.args, key, None) + # Casting value to the proper type + if old_attr is not None: + value = type(old_attr)(value) + + setattr(self.args, key, value) + if self.hp_search_backend == HPSearchBackend.OPTUNA: + logger.info(f"Trial: {trial.params}") + if self.hp_search_backend == HPSearchBackend.SIGOPT: + logger.info(f"SigOpt Assignments: {trial.assignments}") + if self.hp_search_backend == HPSearchBackend.WANDB: + logger.info(f"W&B Sweep parameters: {trial}") + if self.is_deepspeed_enabled: + if self.args.deepspeed is None: + raise ValueError("For sweeps with deepspeed, `args.deepspeed` must be set") + # Rebuild the deepspeed config to reflect the updated training parameters + from accelerate.utils import DeepSpeedPlugin + + from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig + + self.args.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.args.deepspeed) + self.args.hf_deepspeed_config.trainer_config_process(self.args) + self.args.deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.args.hf_deepspeed_config) + + self.create_accelerator_and_postprocess() + + def _report_to_hp_search(self, trial: Union["optuna.Trial", Dict[str, Any]], step: int, metrics: Dict[str, float]): + if self.hp_search_backend is None or trial is None: + return + metrics = metrics.copy() + self.objective = self.compute_objective(metrics) + if self.hp_search_backend == HPSearchBackend.OPTUNA: + import optuna + + if not trial.study._is_multi_objective(): + trial.report(self.objective, step) + if trial.should_prune(): + self.callback_handler.on_train_end(self.args, self.state, self.control) + raise optuna.TrialPruned() + elif self.hp_search_backend == HPSearchBackend.RAY: + import ray.train + + with tempfile.TemporaryDirectory() as temp_checkpoint_dir: + checkpoint = None + if self.control.should_save: + self._tune_save_checkpoint(checkpoint_dir=temp_checkpoint_dir) + checkpoint = ray.train.Checkpoint.from_directory(temp_checkpoint_dir) + metrics["objective"] = self.objective + ray.train.report(metrics, checkpoint=checkpoint) + + def _tune_save_checkpoint(self, checkpoint_dir: str): + output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}") + self.save_model(output_dir, _internal_call=True) + if self.args.should_save: + self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME)) + torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) + torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) + + def call_model_init(self, trial=None): + model_init_argcount = number_of_arguments(self.model_init) + if model_init_argcount == 0: + model = self.model_init() + elif model_init_argcount == 1: + model = self.model_init(trial) + else: + raise RuntimeError("model_init should have 0 or 1 argument.") + + if model is None: + raise RuntimeError("model_init should not return None.") + + return model + + def torch_jit_model_eval(self, model, dataloader, training=False): + if not training: + if dataloader is None: + logger.warning("failed to use PyTorch jit mode due to current dataloader is none.") + return model + example_batch = next(iter(dataloader)) + example_batch = self._prepare_inputs(example_batch) + try: + jit_model = copy.copy(model) + jit_model.eval() + original_forward = jit_model.__dict__.pop("_original_forward", None) + # remove mixed precision hooks from the model + if original_forward: + jit_model.forward = original_forward + with 
self.accelerator.autocast(cache_enabled=False), torch.no_grad(): + if version.parse(version.parse(torch.__version__).base_version) >= version.parse("2.0.0"): + if isinstance(example_batch, dict): + jit_model = torch.jit.trace(jit_model, example_kwarg_inputs=example_batch, strict=False) + else: + jit_model = torch.jit.trace( + jit_model, + example_kwarg_inputs={key: example_batch[key] for key in example_batch}, + strict=False, + ) + else: + jit_inputs = [] + for key in example_batch: + example_tensor = torch.ones_like(example_batch[key]) + jit_inputs.append(example_tensor) + jit_inputs = tuple(jit_inputs) + jit_model = torch.jit.trace(jit_model, jit_inputs, strict=False) + jit_model = torch.jit.freeze(jit_model) + with torch.no_grad(): + jit_model(**example_batch) + jit_model(**example_batch) + model = jit_model + self.use_cpu_amp = False + except (RuntimeError, TypeError, ValueError, NameError, IndexError) as e: + logger.warning(f"failed to use PyTorch jit mode due to: {e}.") + + return model + + def ipex_optimize_model(self, model, training=False, dtype=torch.float32): + if not is_ipex_available(): + raise ImportError( + "Using IPEX but IPEX is not installed or IPEX's version does not match current PyTorch, please refer" + " to https://github.com/intel/intel-extension-for-pytorch." + ) + + import intel_extension_for_pytorch as ipex + + if not training: + model.eval() + dtype = torch.bfloat16 if not self.is_in_train and self.args.bf16_full_eval else dtype + # conv_bn_folding is disabled as it fails in symbolic tracing, resulting in ipex warnings + model = ipex.optimize(model, dtype=dtype, level="O1", conv_bn_folding=False, inplace=not self.is_in_train) + else: + if not model.training: + model.train() + model, self.optimizer = ipex.optimize( + model, dtype=dtype, optimizer=self.optimizer, inplace=True, level="O1" + ) + + return model + + def _wrap_model(self, model, training=True, dataloader=None): + if self.args.use_ipex: + dtype = torch.bfloat16 if self.use_cpu_amp else torch.float32 + model = self.ipex_optimize_model(model, training, dtype=dtype) + + if is_sagemaker_mp_enabled(): + # Wrapping the base model twice in a DistributedModel will raise an error. + if isinstance(self.model_wrapped, smp.model.DistributedModel): + return self.model_wrapped + return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps) + + # train/eval could be run multiple-times - if already wrapped, don't re-wrap it again + if unwrap_model(model) is not model: + return model + + # Mixed precision training with apex (torch < 1.6) + if self.use_apex and training: + model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level) + + # Multi-gpu training (should be after apex fp16 initialization) / 8bit models does not support DDP + if self.args.n_gpu > 1 and not getattr(model, "is_loaded_in_8bit", False): + model = nn.DataParallel(model) + + if self.args.jit_mode_eval: + start_time = time.time() + model = self.torch_jit_model_eval(model, dataloader, training) + self.jit_compilation_time = round(time.time() - start_time, 4) + + # Note: in torch.distributed mode, there's no point in wrapping the model + # inside a DistributedDataParallel as we'll be under `no_grad` anyways. 
+ if not training: + return model + + # Distributed training (should be after apex fp16 initialization) + # Distributed training using PyTorch FSDP + if self.is_fsdp_xla_enabled: + try: + from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP + from torch_xla.distributed.fsdp import checkpoint_module + from torch_xla.distributed.fsdp.wrap import ( + size_based_auto_wrap_policy, + transformer_auto_wrap_policy, + ) + + if self.is_fsdp_xla_v2_enabled: + from torch_xla.experimental.spmd_fully_sharded_data_parallel import ( + SpmdFullyShardedDataParallel as FSDPv2, + ) + except ImportError: + raise ImportError("Missing XLA FSDP related module; please make sure to use torch-xla >= 2.0.") + auto_wrap_policy = None + auto_wrapper_callable = None + default_transformer_cls_names_to_wrap = getattr(model, "_no_split_modules", None) + fsdp_transformer_layer_cls_to_wrap = self.args.fsdp_config.get( + "transformer_layer_cls_to_wrap", default_transformer_cls_names_to_wrap + ) + + if self.args.fsdp_config["min_num_params"] > 0: + auto_wrap_policy = functools.partial( + size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["min_num_params"] + ) + elif fsdp_transformer_layer_cls_to_wrap is not None: + transformer_cls_to_wrap = set() + for layer_class in fsdp_transformer_layer_cls_to_wrap: + transformer_cls = get_module_class_from_name(model, layer_class) + if transformer_cls is None: + raise Exception("Could not find the transformer layer class to wrap in the model.") + else: + transformer_cls_to_wrap.add(transformer_cls) + + auto_wrap_policy = functools.partial( + transformer_auto_wrap_policy, + # Transformer layer class to wrap + transformer_layer_cls=transformer_cls_to_wrap, + ) + fsdp_kwargs = self.args.xla_fsdp_config + if self.args.fsdp_config["xla_fsdp_grad_ckpt"]: + # Apply gradient checkpointing to auto-wrapped sub-modules if specified + def auto_wrapper_callable(m, *args, **kwargs): + target_cls = FSDP if not self.is_fsdp_xla_v2_enabled else FSDPv2 + return target_cls(checkpoint_module(m), *args, **kwargs) + + # Wrap the base model with an outer FSDP wrapper + if self.is_fsdp_xla_v2_enabled: + + def shard_output(output, mesh): + from .modeling_outputs import CausalLMOutputWithPast + + real_output = None + if isinstance(output, torch.Tensor): + real_output = output + elif isinstance(output, tuple): + real_output = output[0] + elif isinstance(output, CausalLMOutputWithPast): + real_output = output.logits + + if real_output is None: + raise ValueError("Something went wrong, the output of the model shouldn't be `None`") + xs.mark_sharding(real_output, mesh, ("fsdp", None, None)) + + self.model = model = FSDPv2( + model, + shard_output=shard_output, + auto_wrap_policy=auto_wrap_policy, + auto_wrapper_callable=auto_wrapper_callable, + ) + else: + self.model = model = FSDP( + model, + auto_wrap_policy=auto_wrap_policy, + auto_wrapper_callable=auto_wrapper_callable, + **fsdp_kwargs, + ) + + # Patch `xm.optimizer_step` should not reduce gradients in this case, + # as FSDP does not need gradient reduction over sharded parameters. 
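+            # With FSDP the gradients are already reduce-scattered across shards during the backward pass,
+            # so the replacement below only calls `optimizer.step()` (plus an optional `xm.mark_step()`
+            # barrier) instead of the gradient all-reduce that `xm.optimizer_step` performs by default.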
+ def patched_optimizer_step(optimizer, barrier=False, optimizer_args={}): + loss = optimizer.step(**optimizer_args) + if barrier: + xm.mark_step() + return loss + + xm.optimizer_step = patched_optimizer_step + elif is_sagemaker_dp_enabled(): + model = nn.parallel.DistributedDataParallel( + model, device_ids=[int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))] + ) + elif self.args.parallel_mode == ParallelMode.DISTRIBUTED: + if is_torch_neuroncore_available(): + return model + kwargs = {} + if self.args.ddp_find_unused_parameters is not None: + kwargs["find_unused_parameters"] = self.args.ddp_find_unused_parameters + elif isinstance(model, PreTrainedModel): + # find_unused_parameters breaks checkpointing as per + # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021 + kwargs["find_unused_parameters"] = not model.is_gradient_checkpointing + else: + kwargs["find_unused_parameters"] = True + + if self.args.ddp_bucket_cap_mb is not None: + kwargs["bucket_cap_mb"] = self.args.ddp_bucket_cap_mb + + if self.args.ddp_broadcast_buffers is not None: + kwargs["broadcast_buffers"] = self.args.ddp_broadcast_buffers + + self.accelerator.ddp_handler = DistributedDataParallelKwargs(**kwargs) + + return model + + def train( + self, + resume_from_checkpoint: Optional[Union[str, bool]] = None, + trial: Union["optuna.Trial", Dict[str, Any]] = None, + ignore_keys_for_eval: Optional[List[str]] = None, + **kwargs, + ): + """ + Main training entry point. + + Args: + resume_from_checkpoint (`str` or `bool`, *optional*): + If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a + `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance + of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here. + trial (`optuna.Trial` or `Dict[str, Any]`, *optional*): + The trial run or the hyperparameter dictionary for hyperparameter search. + ignore_keys_for_eval (`List[str]`, *optional*) + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions for evaluation during the training. + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments used to hide deprecated arguments + """ + if resume_from_checkpoint is False: + resume_from_checkpoint = None + + # memory metrics - must set up as early as possible + self._memory_tracker.start() + + args = self.args + + self.is_in_train = True + + # Attach NEFTune hooks if necessary + if self.neftune_noise_alpha is not None: + self.model = self._activate_neftune(self.model) + + # do_train is not a reliable argument, as it might not be set and .train() still called, so + # the following is a workaround: + if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train: + self._move_model_to_device(self.model, args.device) + + if "model_path" in kwargs: + resume_from_checkpoint = kwargs.pop("model_path") + warnings.warn( + "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` " + "instead.", + FutureWarning, + ) + if len(kwargs) > 0: + raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.") + # This might change the seed so needs to run first. 
+ self._hp_search_setup(trial) + self._train_batch_size = self.args.train_batch_size + + # Model re-init + model_reloaded = False + if self.model_init is not None: + # Seed must be set before instantiating the model when using model_init. + enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed) + self.model = self.call_model_init(trial) + model_reloaded = True + # Reinitializes optimizer and scheduler + self.optimizer, self.lr_scheduler = None, None + + # Load potential model checkpoint + if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint: + resume_from_checkpoint = get_last_checkpoint(args.output_dir) + if resume_from_checkpoint is None: + raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})") + + if resume_from_checkpoint is not None: + if not is_sagemaker_mp_enabled() and not self.is_deepspeed_enabled and not self.is_fsdp_enabled: + self._load_from_checkpoint(resume_from_checkpoint) + # In case of repeating the find_executable_batch_size, set `self._train_batch_size` properly + state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) + if state.train_batch_size is not None: + self._train_batch_size = state.train_batch_size + + # If model was re-initialized, put it on the right device and update self.model_wrapped + if model_reloaded: + if self.place_model_on_device: + self._move_model_to_device(self.model, args.device) + self.model_wrapped = self.model + + inner_training_loop = find_executable_batch_size( + self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size + ) + if args.push_to_hub: + try: + # Disable progress bars when uploading models during checkpoints to avoid polluting stdout + hf_hub_utils.disable_progress_bars() + return inner_training_loop( + args=args, + resume_from_checkpoint=resume_from_checkpoint, + trial=trial, + ignore_keys_for_eval=ignore_keys_for_eval, + ) + finally: + hf_hub_utils.enable_progress_bars() + else: + return inner_training_loop( + args=args, + resume_from_checkpoint=resume_from_checkpoint, + trial=trial, + ignore_keys_for_eval=ignore_keys_for_eval, + ) + + def _inner_training_loop( + self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None + ): + self.accelerator.free_memory() + self._train_batch_size = batch_size + if self.args.auto_find_batch_size: + if self.state.train_batch_size != self._train_batch_size: + from accelerate.utils import release_memory + + (self.model_wrapped,) = release_memory(self.model_wrapped) + self.model_wrapped = self.model + + # Check for DeepSpeed *after* the intial pass and modify the config + if self.is_deepspeed_enabled: + # Temporarily unset `self.args.train_batch_size` + original_bs = self.args.per_device_train_batch_size + self.args.per_device_train_batch_size = self._train_batch_size // max(1, self.args.n_gpu) + self.propagate_args_to_deepspeed(True) + self.args.per_device_train_batch_size = original_bs + self.state.train_batch_size = self._train_batch_size + logger.debug(f"Currently training with a batch size of: {self._train_batch_size}") + # Data loader and number of training steps + train_dataloader = self.get_train_dataloader() + if self.is_fsdp_xla_v2_enabled: + train_dataloader = tpu_spmd_dataloader(train_dataloader) + + # Setting up training control variables: + # number of training epochs: num_train_epochs + # number of training steps per epoch: num_update_steps_per_epoch + # total number of training steps to 
execute: max_steps + total_train_batch_size = self._train_batch_size * args.gradient_accumulation_steps * args.world_size + + len_dataloader = None + num_train_tokens = None + if has_length(train_dataloader): + len_dataloader = len(train_dataloader) + num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps + num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) + num_examples = self.num_examples(train_dataloader) + if args.max_steps > 0: + max_steps = args.max_steps + num_train_epochs = args.max_steps // num_update_steps_per_epoch + int( + args.max_steps % num_update_steps_per_epoch > 0 + ) + # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's + # the best we can do. + num_train_samples = args.max_steps * total_train_batch_size + if args.include_tokens_per_second: + num_train_tokens = ( + self.num_tokens(train_dataloader, args.max_steps) * args.gradient_accumulation_steps + ) + else: + max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch) + num_train_epochs = math.ceil(args.num_train_epochs) + num_train_samples = self.num_examples(train_dataloader) * args.num_train_epochs + if args.include_tokens_per_second: + num_train_tokens = self.num_tokens(train_dataloader) * args.num_train_epochs + elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size + max_steps = args.max_steps + # Setting a very large number of epochs so we go as many times as necessary over the iterator. + num_train_epochs = sys.maxsize + num_update_steps_per_epoch = max_steps + num_examples = total_train_batch_size * args.max_steps + num_train_samples = args.max_steps * total_train_batch_size + if args.include_tokens_per_second: + num_train_tokens = self.num_tokens(train_dataloader, args.max_steps) * args.gradient_accumulation_steps + else: + raise ValueError( + "args.max_steps must be set to a positive value if dataloader does not have a length, was" + f" {args.max_steps}" + ) + + if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug: + if self.args.n_gpu > 1: + # nn.DataParallel(model) replicates the model, creating new variables and module + # references registered here no longer work on other gpus, breaking the module + raise ValueError( + "Currently --debug underflow_overflow is not supported under DP. Please use DDP" + " (torchrun or torch.distributed.launch (deprecated))." 
+ ) + else: + debug_overflow = DebugUnderflowOverflow(self.model) # noqa + + delay_optimizer_creation = is_sagemaker_mp_enabled() or self.is_fsdp_xla_enabled or self.is_fsdp_enabled + + # We need to reset the scheduler, as its parameters may be different on subsequent calls + if self._created_lr_scheduler: + self.lr_scheduler = None + self._created_lr_scheduler = False + + if self.is_deepspeed_enabled: + self.optimizer, self.lr_scheduler = deepspeed_init(self, num_training_steps=max_steps) + + if not delay_optimizer_creation: + self.create_optimizer_and_scheduler(num_training_steps=max_steps) + + self.state = TrainerState() + self.state.is_hyper_param_search = trial is not None + self.state.train_batch_size = self._train_batch_size + + # Compute absolute values for logging, eval, and save if given as ratio + if args.logging_steps is not None: + if args.logging_steps < 1: + self.state.logging_steps = math.ceil(max_steps * args.logging_steps) + else: + self.state.logging_steps = args.logging_steps + if args.eval_steps is not None: + if args.eval_steps < 1: + self.state.eval_steps = math.ceil(max_steps * args.eval_steps) + else: + self.state.eval_steps = args.eval_steps + if args.save_steps is not None: + if args.save_steps < 1: + self.state.save_steps = math.ceil(max_steps * args.save_steps) + else: + self.state.save_steps = args.save_steps + + # Activate gradient checkpointing if needed + if args.gradient_checkpointing: + if args.gradient_checkpointing_kwargs is None: + gradient_checkpointing_kwargs = {} + else: + gradient_checkpointing_kwargs = args.gradient_checkpointing_kwargs + + self.model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs) + + model = self._wrap_model(self.model_wrapped) + + # as the model is wrapped, don't use `accelerator.prepare` + # this is for unhandled cases such as + # FSDP-XLA, SageMaker MP/DP, DataParallel, IPEX + use_accelerator_prepare = True if model is self.model else False + + if delay_optimizer_creation: + if use_accelerator_prepare: + self._fsdp_qlora_plugin_updates() + self.model = self.accelerator.prepare(self.model) + self.create_optimizer_and_scheduler(num_training_steps=max_steps) + + # prepare using `accelerator` prepare + if use_accelerator_prepare: + self.model.train() + if hasattr(self.lr_scheduler, "step"): + if self.use_apex: + model = self.accelerator.prepare(self.model) + else: + model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer) + else: + # to handle cases wherein we pass "DummyScheduler" such as when it is specified in DeepSpeed config. 
+ model, self.optimizer, self.lr_scheduler = self.accelerator.prepare( + self.model, self.optimizer, self.lr_scheduler + ) + + if self.is_fsdp_enabled: + self.model = self.model_wrapped = model + + # for the rest of this function `model` is the outside model, whether it was wrapped or not + if model is not self.model: + self.model_wrapped = model + + # backward compatibility + if self.is_deepspeed_enabled: + self.deepspeed = self.model_wrapped + + # ckpt loading + if resume_from_checkpoint is not None: + if self.is_deepspeed_enabled: + deepspeed_load_checkpoint( + self.model_wrapped, resume_from_checkpoint, load_module_strict=not _is_peft_model(self.model) + ) + elif is_sagemaker_mp_enabled() or self.is_fsdp_enabled: + self._load_from_checkpoint(resume_from_checkpoint, self.model_wrapped) + + # Check if saved optimizer or scheduler states exist + self._load_optimizer_and_scheduler(resume_from_checkpoint) + + # important: at this point: + # self.model is the Transformers Model + # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), + # FSDP(Transformers Model), Dynamo Optimized Module(Transformers Model) etc. + + # Train! + logger.info("***** Running training *****") + logger.info(f" Num examples = {num_examples:,}") + logger.info(f" Num Epochs = {num_train_epochs:,}") + logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size:,}") + if self.args.per_device_train_batch_size != self._train_batch_size: + logger.info(f" Training with DataParallel so batch size has been adjusted to: {self._train_batch_size:,}") + logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size:,}") + logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {max_steps:,}") + logger.info(f" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}") + + self.state.epoch = 0 + start_time = time.time() + epochs_trained = 0 + steps_trained_in_current_epoch = 0 + steps_trained_progress_bar = None + + # Check if continuing training from a checkpoint + if resume_from_checkpoint is not None and os.path.isfile( + os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME) + ): + self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) + epochs_trained = self.state.global_step // num_update_steps_per_epoch + if not args.ignore_data_skip: + steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch) + steps_trained_in_current_epoch *= args.gradient_accumulation_steps + else: + steps_trained_in_current_epoch = 0 + + logger.info(" Continuing training from checkpoint, will skip to saved global_step") + logger.info(f" Continuing training from epoch {epochs_trained}") + logger.info(f" Continuing training from global step {self.state.global_step}") + if not args.ignore_data_skip: + logger.info( + f" Will skip the first {epochs_trained} epochs then the first" + f" {steps_trained_in_current_epoch} batches in the first epoch." + ) + + # Update the references + self.callback_handler.model = self.model + self.callback_handler.optimizer = self.optimizer + self.callback_handler.lr_scheduler = self.lr_scheduler + self.callback_handler.train_dataloader = train_dataloader + if self.hp_name is not None and self._trial is not None: + # use self._trial because the SigOpt/Optuna hpo only call `_hp_search_setup(trial)` instead of passing trial + # parameter to Train when using DDP. 
+ self.state.trial_name = self.hp_name(self._trial) + if trial is not None: + assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial + self.state.trial_params = hp_params(assignments) + else: + self.state.trial_params = None + # This should be the same if the state has been saved but in case the training arguments changed, it's safer + # to set this after the load. + self.state.max_steps = max_steps + self.state.num_train_epochs = num_train_epochs + self.state.is_local_process_zero = self.is_local_process_zero() + self.state.is_world_process_zero = self.is_world_process_zero() + + # tr_loss is a tensor to avoid synchronization of TPUs through .item() + tr_loss = torch.tensor(0.0).to(args.device) + # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses + self._total_loss_scalar = 0.0 + self._globalstep_last_logged = self.state.global_step + model.zero_grad() + grad_norm: Optional[float] = None + + self.control = self.callback_handler.on_train_begin(args, self.state, self.control) + + # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point. + if not args.ignore_data_skip: + for epoch in range(epochs_trained): + sampler = get_dataloader_sampler(train_dataloader) + sampler_kinds = [RandomSampler] + if version.parse(accelerate_version) > version.parse("0.23.0"): + sampler_kinds.append(SeedableRandomSampler) + is_random_sampler = isinstance(sampler, tuple(sampler_kinds)) + if not is_random_sampler: + # We just need to begin an iteration to create the randomization of the sampler. + for _ in train_dataloader: + break + else: + # Otherwise we need to call the whooooole sampler cause there is some random operation added + # AT THE VERY END! + sampler = sampler if sampler is not None else [] + _ = list(sampler) + + total_batched_samples = 0 + for epoch in range(epochs_trained, num_train_epochs): + epoch_iterator = train_dataloader + if hasattr(epoch_iterator, "set_epoch"): + epoch_iterator.set_epoch(epoch) + + # Reset the past mems state at the beginning of each epoch if necessary. + if args.past_index >= 0: + self._past = None + + steps_in_epoch = ( + len(epoch_iterator) + if len_dataloader is not None + else args.max_steps * args.gradient_accumulation_steps + ) + self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) + + if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0: + self._load_rng_state(resume_from_checkpoint) + + rng_to_sync = False + steps_skipped = 0 + if steps_trained_in_current_epoch > 0: + epoch_iterator = skip_first_batches(epoch_iterator, steps_trained_in_current_epoch) + steps_skipped = steps_trained_in_current_epoch + steps_trained_in_current_epoch = 0 + rng_to_sync = True + + step = -1 + for step, inputs in enumerate(epoch_iterator): + total_batched_samples += 1 + + if self.args.include_num_input_tokens_seen: + main_input_name = getattr(self.model, "main_input_name", "input_ids") + if main_input_name not in inputs: + logger.warning( + "Tried to track the number of tokens seen, however the current model is " + "not configured properly to know what item is the input. To fix this, add " + "a `main_input_name` attribute to the model class you are using." 
+ ) + else: + self.state.num_input_tokens_seen += self.accelerator.gather(inputs[main_input_name]).numel() + if rng_to_sync: + self._load_rng_state(resume_from_checkpoint) + rng_to_sync = False + + # Skip past any already trained steps if resuming training + if steps_trained_in_current_epoch > 0: + steps_trained_in_current_epoch -= 1 + if steps_trained_progress_bar is not None: + steps_trained_progress_bar.update(1) + if steps_trained_in_current_epoch == 0: + self._load_rng_state(resume_from_checkpoint) + continue + elif steps_trained_progress_bar is not None: + steps_trained_progress_bar.close() + steps_trained_progress_bar = None + + if step % args.gradient_accumulation_steps == 0: + self.control = self.callback_handler.on_step_begin(args, self.state, self.control) + + with self.accelerator.accumulate(model): + tr_loss_step = self.training_step(model, inputs) + + if ( + args.logging_nan_inf_filter + and not is_torch_xla_available() + and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step)) + ): + # if loss is nan or inf simply add the average of previous logged losses + tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged) + else: + if tr_loss.device != tr_loss_step.device: + raise ValueError( + f"Calculated loss must be on the original device: {tr_loss.device} but device in use is {tr_loss_step.device}" + ) + tr_loss += tr_loss_step + + self.current_flos += float(self.floating_point_ops(inputs)) + + is_last_step_and_steps_less_than_grad_acc = ( + steps_in_epoch <= args.gradient_accumulation_steps and (step + 1) == steps_in_epoch + ) + + if ( + total_batched_samples % args.gradient_accumulation_steps == 0 + or + # last step in epoch but step is always smaller than gradient_accumulation_steps + is_last_step_and_steps_less_than_grad_acc + ): + # the `or` condition of `is_last_step_and_steps_less_than_grad_acc` is not covered + # in accelerate. So, explicitly enable sync gradients to True in that case. 
+ if is_last_step_and_steps_less_than_grad_acc: + self.accelerator.gradient_state._set_sync_gradients(True) + + # Gradient clipping + if args.max_grad_norm is not None and args.max_grad_norm > 0: + # deepspeed does its own clipping + + if is_sagemaker_mp_enabled() and args.fp16: + _grad_norm = self.optimizer.clip_master_grads(args.max_grad_norm) + elif self.use_apex: + # Revert to normal clipping otherwise, handling Apex or full precision + _grad_norm = nn.utils.clip_grad_norm_( + amp.master_params(self.optimizer), + args.max_grad_norm, + ) + else: + _grad_norm = self.accelerator.clip_grad_norm_( + model.parameters(), + args.max_grad_norm, + ) + + if ( + is_accelerate_available() + and self.accelerator.distributed_type == DistributedType.DEEPSPEED + ): + grad_norm = model.get_global_grad_norm() + # In some cases the grad norm may not return a float + if hasattr(grad_norm, "item"): + grad_norm = grad_norm.item() + else: + grad_norm = _grad_norm + + # Optimizer step + self.optimizer.step() + optimizer_was_run = not self.accelerator.optimizer_step_was_skipped + if optimizer_was_run: + # Delay optimizer scheduling until metrics are generated + if not isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): + self.lr_scheduler.step() + + model.zero_grad() + self.state.global_step += 1 + self.state.epoch = epoch + (step + 1 + steps_skipped) / steps_in_epoch + self.control = self.callback_handler.on_step_end(args, self.state, self.control) + + self._maybe_log_save_evaluate(tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval) + else: + self.control = self.callback_handler.on_substep_end(args, self.state, self.control) + + if self.control.should_epoch_stop or self.control.should_training_stop: + # PyTorch/XLA relies on the data loader to insert the mark_step for + # each step. Since we are breaking the loop early, we need to manually + # insert the mark_step here. + if is_torch_xla_available(): + xm.mark_step() + break + if step < 0: + logger.warning( + "There seems to be not a single sample in your epoch_iterator, stopping training at step" + f" {self.state.global_step}! This is expected if you're using an IterableDataset and set" + f" num_steps ({max_steps}) higher than the number of available samples." + ) + self.control.should_training_stop = True + + self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) + self._maybe_log_save_evaluate(tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval) + + if DebugOption.TPU_METRICS_DEBUG in self.args.debug: + if is_torch_xla_available(): + # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) + xm.master_print(met.metrics_report()) + else: + logger.warning( + "You enabled PyTorch/XLA debug metrics but you don't have a TPU " + "configured. Check your training configuration if this is unexpected." + ) + if self.control.should_training_stop: + break + + if args.past_index and hasattr(self, "_past"): + # Clean the state at the end of training + delattr(self, "_past") + + logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n") + if args.load_best_model_at_end and self.state.best_model_checkpoint is not None: + # Wait for everyone to get here so we are sure the model has been saved by process 0. 
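+            # The barrier below matches the backend in use (XLA rendezvous, torch.distributed barrier, or
+            # SageMaker Model Parallel barrier) so that `_load_best_model` only runs once the checkpoint written
+            # by the main process is complete.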
+ if is_torch_xla_available(): + xm.rendezvous("load_best_model_at_end") + elif args.parallel_mode == ParallelMode.DISTRIBUTED: + dist.barrier() + elif is_sagemaker_mp_enabled(): + smp.barrier() + + self._load_best_model() + + # add remaining tr_loss + self._total_loss_scalar += tr_loss.item() + effective_global_step = max(self.state.global_step, 0.001) # Avoid ZeroDivisionError + train_loss = self._total_loss_scalar / effective_global_step + + metrics = speed_metrics( + "train", + start_time, + num_samples=num_train_samples, + num_steps=self.state.max_steps, + num_tokens=num_train_tokens, + ) + self.store_flos() + metrics["total_flos"] = self.state.total_flos + metrics["train_loss"] = train_loss + + self.is_in_train = False + + self._memory_tracker.stop_and_update_metrics(metrics) + + self.log(metrics) + + run_dir = self._get_output_dir(trial) + checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) + + # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint and process allowed to save. + if self.args.should_save and self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1: + for checkpoint in checkpoints_sorted: + if not os.path.samefile(checkpoint, self.state.best_model_checkpoint): + logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") + shutil.rmtree(checkpoint) + + self.control = self.callback_handler.on_train_end(args, self.state, self.control) + + # Wait for the checkpoint to be uploaded. + self._finish_current_push() + + # After training we make sure to retrieve back the original forward pass method + # for the embedding layer by removing the forward post hook. + if self.neftune_noise_alpha is not None: + self._deactivate_neftune(self.model) + + return TrainOutput(self.state.global_step, train_loss, metrics) + + def _get_output_dir(self, trial): + if self.hp_search_backend is not None and trial is not None: + if self.hp_search_backend == HPSearchBackend.OPTUNA: + run_id = trial.number + elif self.hp_search_backend == HPSearchBackend.RAY: + import ray.train + + run_id = ray.train.get_context().get_trial_id() + elif self.hp_search_backend == HPSearchBackend.SIGOPT: + run_id = trial.id + elif self.hp_search_backend == HPSearchBackend.WANDB: + import wandb + + run_id = wandb.run.id + run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}" + run_dir = os.path.join(self.args.output_dir, run_name) + else: + run_dir = self.args.output_dir + return run_dir + + def _load_from_checkpoint(self, resume_from_checkpoint, model=None): + if model is None: + model = self.model + + config_file = os.path.join(resume_from_checkpoint, CONFIG_NAME) + adapter_weights_file = os.path.join(resume_from_checkpoint, ADAPTER_WEIGHTS_NAME) + adapter_safe_weights_file = os.path.join(resume_from_checkpoint, ADAPTER_SAFE_WEIGHTS_NAME) + weights_file = os.path.join(resume_from_checkpoint, WEIGHTS_NAME) + weights_index_file = os.path.join(resume_from_checkpoint, WEIGHTS_INDEX_NAME) + safe_weights_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_NAME) + safe_weights_index_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_INDEX_NAME) + is_fsdp_ckpt = os.path.isdir(resume_from_checkpoint) and ( + # this checks the FSDP state dict when `SHARDED_STATE_DICT` is used + any( + FSDP_MODEL_NAME in folder_name + for folder_name in os.listdir(resume_from_checkpoint) + if os.path.isdir(os.path.join(resume_from_checkpoint, folder_name)) + ) + # this checks the FSDP 
state dict when `FULL_STATE_DICT` is used + or os.path.isfile(os.path.join(resume_from_checkpoint, f"{FSDP_MODEL_NAME}.bin")) + ) + + if is_fsdp_ckpt and not self.is_fsdp_enabled: + raise ValueError(f"Checkpoint found at {resume_from_checkpoint} is only supported when using PyTorch FSDP") + + if not ( + any( + os.path.isfile(f) + for f in [ + weights_file, + safe_weights_file, + weights_index_file, + safe_weights_index_file, + adapter_weights_file, + adapter_safe_weights_file, + ] + ) + or is_fsdp_ckpt + ): + raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}") + + logger.info(f"Loading model from {resume_from_checkpoint}.") + + if os.path.isfile(config_file): + config = PretrainedConfig.from_json_file(config_file) + checkpoint_version = config.transformers_version + if checkpoint_version is not None and checkpoint_version != __version__: + logger.warning( + f"You are resuming training from a checkpoint trained with {checkpoint_version} of " + f"Transformers but your current version is {__version__}. This is not recommended and could " + "yield to errors or unwanted behaviors." + ) + + if os.path.isfile(weights_file) or os.path.isfile(safe_weights_file) or is_fsdp_ckpt: + weights_only_kwarg = {"weights_only": True} if is_torch_greater_or_equal_than_1_13 else {} + # If the model is on the GPU, it still works! + if is_sagemaker_mp_enabled(): + if os.path.isfile(os.path.join(resume_from_checkpoint, "user_content.pt")): + # If the 'user_content.pt' file exists, load with the new smp api. + # Checkpoint must have been saved with the new smp api. + smp.resume_from_checkpoint( + path=resume_from_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False + ) + else: + # If the 'user_content.pt' file does NOT exist, load with the old smp api. + # Checkpoint must have been saved with the old smp api. + if hasattr(self.args, "fp16") and self.args.fp16 is True: + logger.warning( + "Enabling FP16 and loading from smp < 1.10 checkpoint together is not suppported." + ) + state_dict = torch.load( + weights_file, + map_location="cpu", + **weights_only_kwarg, + ) + # Required for smp to not auto-translate state_dict from hf to smp (is already smp). + state_dict["_smp_is_partial"] = False + load_result = model.load_state_dict(state_dict, strict=True) + # release memory + del state_dict + elif self.is_fsdp_enabled: + load_fsdp_model( + self.accelerator.state.fsdp_plugin, + self.accelerator, + model, + resume_from_checkpoint, + **_get_fsdp_ckpt_kwargs(), + ) + else: + # We load the model state dict on the CPU to avoid an OOM error. + if self.args.save_safetensors and os.path.isfile(safe_weights_file): + state_dict = safetensors.torch.load_file(safe_weights_file, device="cpu") + else: + state_dict = torch.load( + weights_file, + map_location="cpu", + **weights_only_kwarg, + ) + + # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963 + # which takes *args instead of **kwargs + load_result = model.load_state_dict(state_dict, False) + # release memory + del state_dict + self._issue_warnings_after_load(load_result) + + # Load adapters following PR # 24096 + elif _is_peft_model(model): + # If train a model using PEFT & LoRA, assume that adapter have been saved properly. 
+ if hasattr(model, "active_adapter") and hasattr(model, "load_adapter"): + if os.path.exists(resume_from_checkpoint): + model.load_adapter(resume_from_checkpoint, model.active_adapter, is_trainable=True) + else: + logger.warning( + "The intermediate checkpoints of PEFT may not be saved correctly, " + f"consider using a custom callback to save {ADAPTER_WEIGHTS_NAME} in corresponding saving folders. " + "Check some examples here: https://github.com/huggingface/peft/issues/96" + ) + else: + logger.warning("Could not load adapter model, make sure to have `peft>=0.3.0` installed") + else: + # We load the sharded checkpoint + load_result = load_sharded_checkpoint( + model, resume_from_checkpoint, strict=is_sagemaker_mp_enabled(), prefer_safe=self.args.save_safetensors + ) + if not is_sagemaker_mp_enabled(): + self._issue_warnings_after_load(load_result) + + def _load_best_model(self): + logger.info(f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).") + best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME) + best_safe_model_path = os.path.join(self.state.best_model_checkpoint, SAFE_WEIGHTS_NAME) + best_adapter_model_path = os.path.join(self.state.best_model_checkpoint, ADAPTER_WEIGHTS_NAME) + best_safe_adapter_model_path = os.path.join(self.state.best_model_checkpoint, ADAPTER_SAFE_WEIGHTS_NAME) + + model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model + if self.is_deepspeed_enabled: + deepspeed_load_checkpoint( + self.model_wrapped, + self.state.best_model_checkpoint, + load_module_strict=not _is_peft_model(self.model), + ) + elif self.is_fsdp_enabled: + load_result = load_fsdp_model( + self.accelerator.state.fsdp_plugin, + self.accelerator, + model, + self.state.best_model_checkpoint, + **_get_fsdp_ckpt_kwargs(), + ) + elif ( + os.path.exists(best_model_path) + or os.path.exists(best_safe_model_path) + or os.path.exists(best_adapter_model_path) + or os.path.exists(best_safe_adapter_model_path) + ): + has_been_loaded = True + weights_only_kwarg = {"weights_only": True} if is_torch_greater_or_equal_than_1_13 else {} + if is_sagemaker_mp_enabled(): + if os.path.isfile(os.path.join(self.state.best_model_checkpoint, "user_content.pt")): + # If the 'user_content.pt' file exists, load with the new smp api. + # Checkpoint must have been saved with the new smp api. + smp.resume_from_checkpoint( + path=self.state.best_model_checkpoint, + tag=WEIGHTS_NAME, + partial=False, + load_optimizer=False, + ) + else: + # If the 'user_content.pt' file does NOT exist, load with the old smp api. + # Checkpoint must have been saved with the old smp api. + if self.args.save_safetensors and os.path.isfile(best_safe_model_path): + state_dict = safetensors.torch.load_file(best_safe_model_path, device="cpu") + else: + state_dict = torch.load( + best_model_path, + map_location="cpu", + **weights_only_kwarg, + ) + + state_dict["_smp_is_partial"] = False + load_result = model.load_state_dict(state_dict, strict=True) + else: + if _is_peft_model(model): + # If train a model using PEFT & LoRA, assume that adapter have been saved properly. + if hasattr(model, "active_adapter") and hasattr(model, "load_adapter"): + if os.path.exists(best_adapter_model_path) or os.path.exists(best_safe_adapter_model_path): + model.load_adapter(self.state.best_model_checkpoint, model.active_adapter) + # Load_adapter has no return value present, modify it when appropriate. 
+ from torch.nn.modules.module import _IncompatibleKeys + + load_result = _IncompatibleKeys([], []) + else: + logger.warning( + "The intermediate checkpoints of PEFT may not be saved correctly, " + f"consider using a custom callback to save {ADAPTER_WEIGHTS_NAME} in corresponding saving folders. " + "Check some examples here: https://github.com/huggingface/peft/issues/96" + ) + has_been_loaded = False + else: + logger.warning("Could not load adapter model, make sure to have `peft>=0.3.0` installed") + has_been_loaded = False + else: + # We load the model state dict on the CPU to avoid an OOM error. + if self.args.save_safetensors and os.path.isfile(best_safe_model_path): + state_dict = safetensors.torch.load_file(best_safe_model_path, device="cpu") + else: + state_dict = torch.load( + best_model_path, + map_location="cpu", + **weights_only_kwarg, + ) + + # If the model is on the GPU, it still works! + # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963 + # which takes *args instead of **kwargs + load_result = model.load_state_dict(state_dict, False) + if not is_sagemaker_mp_enabled() and has_been_loaded: + self._issue_warnings_after_load(load_result) + elif os.path.exists(os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)): + load_result = load_sharded_checkpoint( + model, self.state.best_model_checkpoint, strict=is_sagemaker_mp_enabled() + ) + if not is_sagemaker_mp_enabled(): + self._issue_warnings_after_load(load_result) + else: + logger.warning( + f"Could not locate the best model at {best_model_path}, if you are running a distributed training " + "on multiple nodes, you should activate `--save_on_each_node`." + ) + + def _issue_warnings_after_load(self, load_result): + if len(load_result.missing_keys) != 0: + if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set( + self.model._keys_to_ignore_on_save + ): + self.model.tie_weights() + else: + logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.") + if len(load_result.unexpected_keys) != 0: + logger.warning( + f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}." 
+ ) + + def _maybe_log_save_evaluate(self, tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval): + if self.control.should_log and self.state.global_step > self._globalstep_last_logged: + if is_torch_xla_available(): + xm.mark_step() + + logs: Dict[str, float] = {} + + # all_gather + mean() to get average loss over all processes + tr_loss_scalar = self._nested_gather(tr_loss).mean().item() + + # reset tr_loss to zero + tr_loss -= tr_loss + + logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4) + if grad_norm is not None: + logs["grad_norm"] = grad_norm.detach().item() if isinstance(grad_norm, torch.Tensor) else grad_norm + logs["learning_rate"] = self._get_learning_rate() + + self._total_loss_scalar += tr_loss_scalar + self._globalstep_last_logged = self.state.global_step + self.store_flos() + + self.log(logs) + + metrics = None + if self.control.should_evaluate: + metrics = self.evaluate(ignore_keys=ignore_keys_for_eval) + self._report_to_hp_search(trial, self.state.global_step, metrics) + + # Run delayed LR scheduler now that metrics are populated + if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): + metric_to_check = self.args.metric_for_best_model + if not metric_to_check.startswith("eval_"): + metric_to_check = f"eval_{metric_to_check}" + self.lr_scheduler.step(metrics[metric_to_check]) + + if self.control.should_save: + self._save_checkpoint(model, trial, metrics=metrics) + self.control = self.callback_handler.on_save(self.args, self.state, self.control) + + def _load_rng_state(self, checkpoint): + # Load RNG states from `checkpoint` + if checkpoint is None: + return + + if self.args.world_size > 1: + process_index = self.args.process_index + rng_file = os.path.join(checkpoint, f"rng_state_{process_index}.pth") + if not os.path.isfile(rng_file): + logger.info( + f"Didn't find an RNG file for process {process_index}, if you are resuming a training that " + "wasn't launched in a distributed fashion, reproducibility is not guaranteed." + ) + return + else: + rng_file = os.path.join(checkpoint, "rng_state.pth") + if not os.path.isfile(rng_file): + logger.info( + "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " + "fashion, reproducibility is not guaranteed." + ) + return + + checkpoint_rng_state = torch.load(rng_file) + random.setstate(checkpoint_rng_state["python"]) + np.random.set_state(checkpoint_rng_state["numpy"]) + torch.random.set_rng_state(checkpoint_rng_state["cpu"]) + if torch.cuda.is_available(): + if self.args.parallel_mode == ParallelMode.DISTRIBUTED: + torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"]) + else: + try: + torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"]) + except Exception as e: + logger.info( + f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}" + "\nThis won't yield the same results as if the training had not been interrupted." + ) + if is_torch_xla_available(): + xm.set_rng_state(checkpoint_rng_state["xla"]) + if is_torch_npu_available(): + if self.args.parallel_mode == ParallelMode.DISTRIBUTED: + torch.npu.random.set_rng_state_all(checkpoint_rng_state["npu"]) + else: + try: + torch.npu.random.set_rng_state(checkpoint_rng_state["npu"]) + except Exception as e: + logger.info( + f"Didn't manage to set back the RNG states of the NPU because of the following error:\n {e}" + "\nThis won't yield the same results as if the training had not been interrupted." 
+ ) + + def _save_checkpoint(self, model, trial, metrics=None): + # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we + # want to save except FullyShardedDDP. + # assert unwrap_model(model) is self.model, "internal model should be a reference to self.model" + + # Save model checkpoint + checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}" + + if self.hp_search_backend is None and trial is None: + self.store_flos() + + run_dir = self._get_output_dir(trial=trial) + output_dir = os.path.join(run_dir, checkpoint_folder) + self.save_model(output_dir, _internal_call=True) + + if not self.args.save_only_model: + # Save optimizer and scheduler + self._save_optimizer_and_scheduler(output_dir) + # Save RNG state + self._save_rng_state(output_dir) + + # Determine the new best metric / best model checkpoint + if metrics is not None and self.args.metric_for_best_model is not None: + metric_to_check = self.args.metric_for_best_model + if not metric_to_check.startswith("eval_"): + metric_to_check = f"eval_{metric_to_check}" + metric_value = metrics[metric_to_check] + + operator = np.greater if self.args.greater_is_better else np.less + if ( + self.state.best_metric is None + or self.state.best_model_checkpoint is None + or operator(metric_value, self.state.best_metric) + ): + self.state.best_metric = metric_value + self.state.best_model_checkpoint = output_dir + + # Save the Trainer state + if self.args.should_save: + self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME)) + + if self.args.push_to_hub: + self._push_from_checkpoint(output_dir) + + # Maybe delete some older checkpoints. + if self.args.should_save: + # Solely rely on numerical checkpoint id for rotation. + # mtime is not reliable especially on some fuse fs in cloud environments. + self._rotate_checkpoints(use_mtime=False, output_dir=run_dir) + + def _save_rng_state(self, output_dir): + # Save RNG state in non-distributed training + rng_states = { + "python": random.getstate(), + "numpy": np.random.get_state(), + "cpu": torch.random.get_rng_state(), + } + if torch.cuda.is_available(): + if self.args.parallel_mode == ParallelMode.DISTRIBUTED: + # In non distributed, we save the global CUDA RNG state (will take care of DataParallel) + rng_states["cuda"] = torch.cuda.random.get_rng_state_all() + else: + rng_states["cuda"] = torch.cuda.random.get_rng_state() + + if is_torch_xla_available(): + rng_states["xla"] = xm.get_rng_state() + + if is_torch_npu_available(): + if self.args.parallel_mode == ParallelMode.DISTRIBUTED: + rng_states["npu"] = torch.npu.random.get_rng_state_all() + else: + rng_states["npu"] = torch.npu.random.get_rng_state() + + # A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may + # not yet exist. 
+ os.makedirs(output_dir, exist_ok=True) + + if self.args.world_size <= 1: + torch.save(rng_states, os.path.join(output_dir, "rng_state.pth")) + else: + torch.save(rng_states, os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth")) + + def _save_optimizer_and_scheduler(self, output_dir): + if is_torch_xla_available(): + xm.rendezvous("saving_optimizer_states") + xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) + with warnings.catch_warnings(record=True) as caught_warnings: + xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) + reissue_pt_warnings(caught_warnings) + elif is_sagemaker_mp_enabled(): + opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False) + smp.barrier() + if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state: + smp.save( + opt_state_dict, + os.path.join(output_dir, OPTIMIZER_NAME), + partial=True, + v3=smp.state.cfg.shard_optimizer_state, + ) + elif self.is_deepspeed_enabled: + # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed + # config `stage3_gather_16bit_weights_on_model_save` is True + accept_exclude_frozen_parameters = "exclude_frozen_parameters" in set( + inspect.signature(self.model_wrapped.save_checkpoint).parameters.keys() + ) + if accept_exclude_frozen_parameters and _is_peft_model(self.model): + self.model_wrapped.save_checkpoint(output_dir, exclude_frozen_parameters=True) + else: + self.model_wrapped.save_checkpoint(output_dir) + elif self.is_fsdp_enabled: + # save fsdp specific ckpt for resuming from ckpt + save_fsdp_model( + self.accelerator.state.fsdp_plugin, self.accelerator, self.model, output_dir, **_get_fsdp_ckpt_kwargs() + ) + save_fsdp_optimizer( + self.accelerator.state.fsdp_plugin, self.accelerator, self.optimizer, self.model, output_dir + ) + elif self.args.should_save: + # deepspeed.save_checkpoint above saves model/optim/sched + torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) + + # Save SCHEDULER & SCALER + is_deepspeed_custom_scheduler = self.is_deepspeed_enabled and not isinstance( + self.lr_scheduler, DeepSpeedSchedulerWrapper + ) + if ( + self.args.should_save + and (not self.is_deepspeed_enabled or is_deepspeed_custom_scheduler) + and not is_torch_xla_available() + ): + with warnings.catch_warnings(record=True) as caught_warnings: + torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) + reissue_pt_warnings(caught_warnings) + + def _load_optimizer_and_scheduler(self, checkpoint): + """If optimizer and scheduler states exist, load them.""" + if checkpoint is None: + return + + if self.is_deepspeed_enabled: + # deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init + if not isinstance(self.lr_scheduler, DeepSpeedSchedulerWrapper): + with warnings.catch_warnings(record=True) as caught_warnings: + self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME))) + reissue_pt_warnings(caught_warnings) + return + + checkpoint_file_exists = ( + glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + "_*") + if is_sagemaker_mp_enabled() + else ( + os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME)) + or os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME_BIN)) + or ( + os.path.isdir(checkpoint) + and any( + OPTIMIZER_NAME_BIN.split(".")[0] in folder_name + for folder_name in os.listdir(checkpoint) + if os.path.isdir(os.path.join(checkpoint, folder_name)) + ) + ) + ) + ) + if checkpoint_file_exists and 
os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)): + # Load in optimizer and scheduler states + if is_torch_xla_available(): + # On TPU we have to take some extra precautions to properly load the states on the right device. + optimizer_state = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location="cpu") + with warnings.catch_warnings(record=True) as caught_warnings: + lr_scheduler_state = torch.load(os.path.join(checkpoint, SCHEDULER_NAME), map_location="cpu") + reissue_pt_warnings(caught_warnings) + + xm.send_cpu_data_to_device(optimizer_state, self.args.device) + xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device) + + self.optimizer.load_state_dict(optimizer_state) + self.lr_scheduler.load_state_dict(lr_scheduler_state) + else: + if is_sagemaker_mp_enabled(): + if os.path.isfile(os.path.join(checkpoint, "user_content.pt")): + # Optimizer checkpoint was saved with smp >= 1.10 + def opt_load_hook(mod, opt): + opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True)) + + else: + # Optimizer checkpoint was saved with smp < 1.10 + def opt_load_hook(mod, opt): + if IS_SAGEMAKER_MP_POST_1_10: + opt.load_state_dict( + smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True, back_compat=True) + ) + else: + opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True)) + + self.model_wrapped.register_post_step_hook(opt_load_hook) + else: + # We use the CPU when training on one GPU to avoid OOM for GPU RAM when training big models. + # In distributed training however, we load directly on each GPU and risk the GPU OOM as it's more + # likely to get OOM on CPU (since we load num_gpu times the optimizer state + map_location = self.args.device if self.args.world_size > 1 else "cpu" + if self.is_fsdp_enabled: + load_fsdp_optimizer( + self.accelerator.state.fsdp_plugin, + self.accelerator, + self.optimizer, + self.model, + checkpoint, + **_get_fsdp_ckpt_kwargs(), + ) + else: + self.optimizer.load_state_dict( + torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location) + ) + with warnings.catch_warnings(record=True) as caught_warnings: + self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME))) + reissue_pt_warnings(caught_warnings) + + def hyperparameter_search( + self, + hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None, + compute_objective: Optional[Callable[[Dict[str, float]], float]] = None, + n_trials: int = 20, + direction: Union[str, List[str]] = "minimize", + backend: Optional[Union["str", HPSearchBackend]] = None, + hp_name: Optional[Callable[["optuna.Trial"], str]] = None, + **kwargs, + ) -> Union[BestRun, List[BestRun]]: + """ + Launch an hyperparameter search using `optuna` or `Ray Tune` or `SigOpt`. The optimized quantity is determined + by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided, + the sum of all metrics otherwise. + + + + To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to + reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to + subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom + optimizer/scheduler. + + + + Args: + hp_space (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*): + A function that defines the hyperparameter search space. 
Will default to
+                [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or
+                [`~trainer_utils.default_hp_space_sigopt`] depending on your backend.
+            compute_objective (`Callable[[Dict[str, float]], float]`, *optional*):
+                A function computing the objective to minimize or maximize from the metrics returned by the `evaluate`
+                method. Will default to [`~trainer_utils.default_compute_objective`].
+            n_trials (`int`, *optional*, defaults to 20):
+                The number of trial runs to test.
+            direction (`str` or `List[str]`, *optional*, defaults to `"minimize"`):
+                For single-objective optimization, `direction` is a `str` and can be `"minimize"` or `"maximize"`:
+                pick `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several
+                metrics. For multi-objective optimization, `direction` is a `List[str]` containing `"minimize"`
+                and/or `"maximize"`, chosen in the same way for each objective.
+            backend (`str` or [`~training_utils.HPSearchBackend`], *optional*):
+                The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending
+                on which one is installed. If all are installed, will default to optuna.
+            hp_name (`Callable[["optuna.Trial"], str]`, *optional*):
+                A function that defines the trial/run name. Will default to None.
+            kwargs (`Dict[str, Any]`, *optional*):
+                Additional keyword arguments passed along to `optuna.create_study` or `ray.tune.run`. For more
+                information see:
+
+                - the documentation of
+                  [optuna.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html)
+                - the documentation of [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run)
+                - the documentation of [sigopt](https://app.sigopt.com/docs/endpoints/experiments/create)
+
+        Returns:
+            [`trainer_utils.BestRun` or `List[trainer_utils.BestRun]`]: All the information about the best run or best
+            runs for multi-objective optimization. Experiment summary can be found in `run_summary` attribute for Ray
+            backend.
+        """
+        if backend is None:
+            backend = default_hp_search_backend()
+        backend = HPSearchBackend(backend)
+        backend_obj = ALL_HYPERPARAMETER_SEARCH_BACKENDS[backend]()
+        backend_obj.ensure_available()
+        self.hp_search_backend = backend
+        if self.model_init is None:
+            raise RuntimeError(
+                "To use hyperparameter search, you need to pass your model through a model_init function."
+            )
+
+        self.hp_space = backend_obj.default_hp_space if hp_space is None else hp_space
+        self.hp_name = hp_name
+        self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
+
+        best_run = backend_obj.run(self, n_trials, direction, **kwargs)
+
+        self.hp_search_backend = None
+        return best_run
+
+    def log(self, logs: Dict[str, float]) -> None:
+        """
+        Log `logs` on the various objects watching training.
+
+        Subclass and override this method to inject custom behavior.
+
+        Args:
+            logs (`Dict[str, float]`):
+                The values to log.
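+
+        For example, `trainer.log({"custom_metric": 0.5})` records the value together with the current epoch and
+        global step in `self.state.log_history` and fires the `on_log` callbacks (the metric name is illustrative).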
+ """ + if self.state.epoch is not None: + logs["epoch"] = round(self.state.epoch, 2) + if self.args.include_num_input_tokens_seen: + logs["num_input_tokens_seen"] = self.state.num_input_tokens_seen + + output = {**logs, **{"step": self.state.global_step}} + self.state.log_history.append(output) + self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs) + + def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]: + """ + Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors. + """ + if isinstance(data, Mapping): + return type(data)({k: self._prepare_input(v) for k, v in data.items()}) + elif isinstance(data, (tuple, list)): + return type(data)(self._prepare_input(v) for v in data) + elif isinstance(data, torch.Tensor): + kwargs = {"device": self.args.device} + if self.is_deepspeed_enabled and (torch.is_floating_point(data) or torch.is_complex(data)): + # NLP models inputs are int/uint and those get adjusted to the right dtype of the + # embedding. Other models such as wav2vec2's inputs are already float and thus + # may need special handling to match the dtypes of the model + kwargs.update({"dtype": self.accelerator.state.deepspeed_plugin.hf_ds_config.dtype()}) + return data.to(**kwargs) + return data + + def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]: + """ + Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and + handling potential state. + """ + inputs = self._prepare_input(inputs) + if len(inputs) == 0: + raise ValueError( + "The batch received was empty, your model won't be able to train on it. Double-check that your " + f"training dataset contains keys expected by the model: {','.join(self._signature_columns)}." + ) + if self.args.past_index >= 0 and self._past is not None: + inputs["mems"] = self._past + + return inputs + + def compute_loss_context_manager(self): + """ + A helper wrapper to group together context managers. + """ + return self.autocast_smart_context_manager() + + def autocast_smart_context_manager(self, cache_enabled: Optional[bool] = True): + """ + A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired + arguments, depending on the situation. + """ + if self.use_cpu_amp: + ctx_manager = torch.cpu.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype) + else: + ctx_manager = contextlib.nullcontext() + + return ctx_manager + + def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: + """ + Perform a training step on a batch of inputs. + + Subclass and override to inject custom behavior. + + Args: + model (`nn.Module`): + The model to train. + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + + The dictionary will be unpacked before being fed to the model. Most models expect the targets under the + argument `labels`. Check your model's documentation for all accepted arguments. + + Return: + `torch.Tensor`: The tensor with training loss on this batch. 
+ """ + model.train() + inputs = self._prepare_inputs(inputs) + + if is_sagemaker_mp_enabled(): + loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps) + return loss_mb.reduce_mean().detach().to(self.args.device) + + with self.compute_loss_context_manager(): + loss = self.compute_loss(model, inputs) + + if self.args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel training + + if self.use_apex: + with amp.scale_loss(loss, self.optimizer) as scaled_loss: + scaled_loss.backward() + else: + self.accelerator.backward(loss) + + return loss.detach() / self.args.gradient_accumulation_steps + + def compute_loss(self, model, inputs, return_outputs=False): + """ + How the loss is computed by Trainer. By default, all models return the loss in the first element. + + Subclass and override for custom behavior. + """ + if self.label_smoother is not None and "labels" in inputs: + labels = inputs.pop("labels") + else: + labels = None + outputs = model(**inputs) + # Save past state if it exists + # TODO: this needs to be fixed and made cleaner later. + if self.args.past_index >= 0: + self._past = outputs[self.args.past_index] + + if labels is not None: + unwrapped_model = unwrap_model(model) + if _is_peft_model(unwrapped_model): + model_name = unwrapped_model.base_model.model._get_name() + else: + model_name = unwrapped_model._get_name() + if model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values(): + loss = self.label_smoother(outputs, labels, shift_labels=True) + else: + loss = self.label_smoother(outputs, labels) + else: + if isinstance(outputs, dict) and "loss" not in outputs: + raise ValueError( + "The model did not return a loss from the inputs, only the following keys: " + f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}." + ) + # We don't use .loss here since the model may return tuples instead of ModelOutput. + loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] + + return (loss, outputs) if return_outputs else loss + + def is_local_process_zero(self) -> bool: + """ + Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several + machines) main process. + """ + return self.args.local_process_index == 0 + + def is_world_process_zero(self) -> bool: + """ + Whether or not this process is the global main process (when training in a distributed fashion on several + machines, this is only going to be `True` for one process). + """ + # Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global + # process index. + if is_sagemaker_mp_enabled(): + return smp.rank() == 0 + else: + return self.args.process_index == 0 + + def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False): + """ + Will save the model, so you can reload it using `from_pretrained()`. + + Will only save from the main process. + """ + + if output_dir is None: + output_dir = self.args.output_dir + + if is_torch_xla_available(): + self._save_tpu(output_dir) + elif is_sagemaker_mp_enabled(): + # Calling the state_dict needs to be done on the wrapped model and on all processes. 
+ os.makedirs(output_dir, exist_ok=True) + state_dict = self.model_wrapped.state_dict() + if self.args.should_save: + self._save(output_dir, state_dict=state_dict) + if IS_SAGEMAKER_MP_POST_1_10: + # 'user_content.pt' indicates model state_dict saved with smp >= 1.10 + Path(os.path.join(output_dir, "user_content.pt")).touch() + elif self.is_fsdp_enabled: + if ("FULL_STATE_DICT" in str(self.accelerator.state.fsdp_plugin.state_dict_type)) and ( + version.parse(accelerate_version) > version.parse("0.24.1") + ): + state_dict = self.accelerator.get_state_dict(self.model) + if self.args.should_save: + self._save(output_dir, state_dict=state_dict) + elif self.is_deepspeed_enabled: + try: + state_dict = self.accelerator.get_state_dict(self.deepspeed) + if self.args.should_save: + self._save(output_dir, state_dict=state_dict) + except ValueError: + logger.warning( + " stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use" + " zero_to_fp32.py to recover weights" + ) + if self.args.should_save: + self._save(output_dir, state_dict={}) + # remove the dummy state_dict + remove_dummy_checkpoint(self.args.should_save, output_dir, [WEIGHTS_NAME, SAFE_WEIGHTS_NAME]) + self.model_wrapped.save_checkpoint(output_dir) + + elif self.args.should_save: + self._save(output_dir) + + # Push to the Hub when `save_model` is called by the user. + if self.args.push_to_hub and not _internal_call: + self.push_to_hub(commit_message="Model save") + + def _save_tpu(self, output_dir: Optional[str] = None): + output_dir = output_dir if output_dir is not None else self.args.output_dir + + logger.info(f"Saving model checkpoint to {output_dir}") + model = self.model + xm.mark_step() + model.to("cpu") + + if xm.is_master_ordinal(): + os.makedirs(output_dir, exist_ok=True) + torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) + + # Save a trained model and configuration using `save_pretrained()`. + # They can then be reloaded using `from_pretrained()` + supported_classes = (PushToHubMixin,) + xm.rendezvous("saving_checkpoint") + if not isinstance(model, supported_classes): + if isinstance(unwrap_model(model), supported_classes): + unwrap_model(model).save_pretrained( + output_dir, + is_main_process=self.args.should_save, + state_dict=model.state_dict(), + save_function=xm.save, + safe_serialization=self.args.save_safetensors, + ) + else: + logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") + state_dict = model.state_dict() + xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) + else: + model.save_pretrained( + output_dir, + is_main_process=self.args.should_save, + save_function=xm.save, + safe_serialization=self.args.save_safetensors, + ) + if self.tokenizer is not None and self.args.should_save: + self.tokenizer.save_pretrained(output_dir) + + # We moved the model from TPU -> CPU for saving the weights. + # Now we should move it back to subsequent compute still works. + model.to(self.args.device) + + def _save(self, output_dir: Optional[str] = None, state_dict=None): + # If we are executing this function, we are the process zero, so we don't check for that. + output_dir = output_dir if output_dir is not None else self.args.output_dir + os.makedirs(output_dir, exist_ok=True) + logger.info(f"Saving model checkpoint to {output_dir}") + + supported_classes = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel) + # Save a trained model and configuration using `save_pretrained()`. 
+ # They can then be reloaded using `from_pretrained()` + if not isinstance(self.model, supported_classes): + if state_dict is None: + state_dict = self.model.state_dict() + + if isinstance(unwrap_model(self.model), supported_classes): + unwrap_model(self.model).save_pretrained( + output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors + ) + else: + logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") + if self.args.save_safetensors: + safetensors.torch.save_file( + state_dict, os.path.join(output_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"} + ) + else: + torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) + else: + self.model.save_pretrained( + output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors + ) + + if self.tokenizer is not None: + self.tokenizer.save_pretrained(output_dir) + + # Good practice: save your training arguments together with the trained model + torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) + + def store_flos(self): + # Storing the number of floating-point operations that went into the model + if self.args.parallel_mode == ParallelMode.DISTRIBUTED: + self.state.total_flos += ( + distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item() + ) + self.current_flos = 0 + else: + self.state.total_flos += self.current_flos + self.current_flos = 0 + + def _sorted_checkpoints( + self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False + ) -> List[str]: + ordering_and_checkpoint_path = [] + + glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)] + + for path in glob_checkpoints: + if use_mtime: + ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) + else: + regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path) + if regex_match is not None and regex_match.groups() is not None: + ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) + + checkpoints_sorted = sorted(ordering_and_checkpoint_path) + checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] + # Make sure we don't delete the best model. + if ( + self.state.best_model_checkpoint is not None + and str(Path(self.state.best_model_checkpoint)) in checkpoints_sorted + ): + best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint))) + for i in range(best_model_index, len(checkpoints_sorted) - 2): + checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i] + return checkpoints_sorted + + def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None: + if self.args.save_total_limit is None or self.args.save_total_limit <= 0: + return + + # Check if we should delete older checkpoint(s) + checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir) + if len(checkpoints_sorted) <= self.args.save_total_limit: + return + + # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which + # we don't do to allow resuming. 
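+        # e.g. with save_total_limit=1, if the best checkpoint is not the most recent one, the limit is
+        # temporarily raised to 2 below so both the best and the latest checkpoint survive rotation.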
+ save_total_limit = self.args.save_total_limit + if ( + self.state.best_model_checkpoint is not None + and self.args.save_total_limit == 1 + and checkpoints_sorted[-1] != self.state.best_model_checkpoint + ): + save_total_limit = 2 + + number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit) + checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] + for checkpoint in checkpoints_to_be_deleted: + logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") + shutil.rmtree(checkpoint, ignore_errors=True) + + def evaluate( + self, + eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, + ignore_keys: Optional[List[str]] = None, + metric_key_prefix: str = "eval", + ) -> Dict[str, float]: + """ + Run evaluation and returns metrics. + + The calling script will be responsible for providing a method to compute metrics, as they are task-dependent + (pass it to the init `compute_metrics` argument). + + You can also subclass and override this method to inject custom behavior. + + Args: + eval_dataset (Union[`Dataset`, Dict[str, `Dataset`]), *optional*): + Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns + not accepted by the `model.forward()` method are automatically removed. If it is a dictionary, it will + evaluate on each dataset, prepending the dictionary key to the metric name. Datasets must implement the + `__len__` method. + + + + If you pass a dictionary with names of datasets as keys and datasets as values, evaluate will run + separate evaluations on each dataset. This can be useful to monitor how training affects other + datasets or simply to get a more fine-grained evaluation. + When used with `load_best_model_at_end`, make sure `metric_for_best_model` references exactly one + of the datasets. If you, for example, pass in `{"data1": data1, "data2": data2}` for two datasets + `data1` and `data2`, you could specify `metric_for_best_model="eval_data1_loss"` for using the + loss on `data1` and `metric_for_best_model="eval_data1_loss"` for the loss on `data2`. + + + + ignore_keys (`List[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + metric_key_prefix (`str`, *optional*, defaults to `"eval"`): + An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named + "eval_bleu" if the prefix is "eval" (default) + + Returns: + A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The + dictionary also contains the epoch number which comes from the training state. 
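+
+ Example (an illustrative sketch; `trainer` stands for an already-instantiated `Trainer` with a `compute_metrics`
+ function, and `data1`/`data2` are two hypothetical evaluation datasets):
+
+ ```python
+ metrics = trainer.evaluate(eval_dataset={"data1": data1, "data2": data2})
+ # every metric key is prefixed with the dataset name, e.g. "eval_data1_loss" and "eval_data2_loss"
+ ```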
+ """ + # handle multipe eval datasets + eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset + if isinstance(eval_dataset, dict): + metrics = {} + for eval_dataset_name, _eval_dataset in eval_dataset.items(): + dataset_metrics = self.evaluate( + eval_dataset=_eval_dataset, + ignore_keys=ignore_keys, + metric_key_prefix=f"{metric_key_prefix}_{eval_dataset_name}", + ) + metrics.update(dataset_metrics) + return metrics + + # memory metrics - must set up as early as possible + self._memory_tracker.start() + + eval_dataloader = self.get_eval_dataloader(eval_dataset) + if self.is_fsdp_xla_v2_enabled: + eval_dataloader = tpu_spmd_dataloader(eval_dataloader) + + start_time = time.time() + + eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop + output = eval_loop( + eval_dataloader, + description="Evaluation", + # No point gathering the predictions if there are no metrics, otherwise we defer to + # self.args.prediction_loss_only + prediction_loss_only=True if self.compute_metrics is None else None, + ignore_keys=ignore_keys, + metric_key_prefix=metric_key_prefix, + ) + + total_batch_size = self.args.eval_batch_size * self.args.world_size + if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: + start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] + output.metrics.update( + speed_metrics( + metric_key_prefix, + start_time, + num_samples=output.num_samples, + num_steps=math.ceil(output.num_samples / total_batch_size), + ) + ) + + self.log(output.metrics) + + if DebugOption.TPU_METRICS_DEBUG in self.args.debug: + # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) + xm.master_print(met.metrics_report()) + + self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics) + + self._memory_tracker.stop_and_update_metrics(output.metrics) + + return output.metrics + + def predict( + self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test" + ) -> PredictionOutput: + """ + Run prediction and returns predictions and potential metrics. + + Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method + will also return metrics, like in `evaluate()`. + + Args: + test_dataset (`Dataset`): + Dataset to run the predictions on. If it is an `datasets.Dataset`, columns not accepted by the + `model.forward()` method are automatically removed. Has to implement the method `__len__` + ignore_keys (`List[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + metric_key_prefix (`str`, *optional*, defaults to `"test"`): + An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named + "test_bleu" if the prefix is "test" (default) + + + + If your predictions or labels have different sequence length (for instance because you're doing dynamic padding + in a token classification task) the predictions will be padded (on the right) to allow for concatenation into + one array. The padding index is -100. + + + + Returns: *NamedTuple* A namedtuple with the following keys: + + - predictions (`np.ndarray`): The predictions on `test_dataset`. + - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some). 
+ - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained + labels). + """ + # memory metrics - must set up as early as possible + self._memory_tracker.start() + + test_dataloader = self.get_test_dataloader(test_dataset) + start_time = time.time() + + eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop + output = eval_loop( + test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix + ) + total_batch_size = self.args.eval_batch_size * self.args.world_size + if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: + start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] + output.metrics.update( + speed_metrics( + metric_key_prefix, + start_time, + num_samples=output.num_samples, + num_steps=math.ceil(output.num_samples / total_batch_size), + ) + ) + + self.control = self.callback_handler.on_predict(self.args, self.state, self.control, output.metrics) + self._memory_tracker.stop_and_update_metrics(output.metrics) + + return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics) + + def evaluation_loop( + self, + dataloader: DataLoader, + description: str, + prediction_loss_only: Optional[bool] = None, + ignore_keys: Optional[List[str]] = None, + metric_key_prefix: str = "eval", + ) -> EvalLoopOutput: + """ + Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. + + Works both with or without labels. + """ + args = self.args + + prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only + + # if eval is called w/o train, handle model prep here + if self.is_deepspeed_enabled and self.deepspeed is None: + _, _ = deepspeed_init(self, num_training_steps=0, inference=True) + + model = self._wrap_model(self.model, training=False, dataloader=dataloader) + + if len(self.accelerator._models) == 0 and model is self.model: + model = ( + self.accelerator.prepare(model) + if self.is_deepspeed_enabled + else self.accelerator.prepare_model(model, evaluation_mode=True) + ) + + if self.is_fsdp_enabled: + self.model = model + + # for the rest of this function `model` is the outside model, whether it was wrapped or not + if model is not self.model: + self.model_wrapped = model + + # backward compatibility + if self.is_deepspeed_enabled: + self.deepspeed = self.model_wrapped + + # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called + # while ``train`` is running, cast it to the right dtype first and then put on device + if not self.is_in_train: + if args.fp16_full_eval: + model = model.to(dtype=torch.float16, device=args.device) + elif args.bf16_full_eval: + model = model.to(dtype=torch.bfloat16, device=args.device) + + batch_size = self.args.eval_batch_size + + logger.info(f"***** Running {description} *****") + if has_length(dataloader): + logger.info(f" Num examples = {self.num_examples(dataloader)}") + else: + logger.info(" Num examples: Unknown") + logger.info(f" Batch size = {batch_size}") + + model.eval() + + self.callback_handler.eval_dataloader = dataloader + # Do this before wrapping. 
+ eval_dataset = getattr(dataloader, "dataset", None) + + if args.past_index >= 0: + self._past = None + + # Initialize containers + # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps) + losses_host = None + preds_host = None + labels_host = None + inputs_host = None + + # losses/preds/labels on CPU (final containers) + all_losses = None + all_preds = None + all_labels = None + all_inputs = None + # Will be useful when we have an iterable dataset so don't know its length. + + observed_num_examples = 0 + # Main evaluation loop + for step, inputs in enumerate(dataloader): + # Update the observed num examples + observed_batch_size = find_batch_size(inputs) + if observed_batch_size is not None: + observed_num_examples += observed_batch_size + # For batch samplers, batch_size is not known by the dataloader in advance. + if batch_size is None: + batch_size = observed_batch_size + + # Prediction step + loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) + main_input_name = getattr(self.model, "main_input_name", "input_ids") + inputs_decode = self._prepare_input(inputs[main_input_name]) if args.include_inputs_for_metrics else None + + if is_torch_xla_available(): + xm.mark_step() + + # Update containers on host + if loss is not None: + losses = self.gather_function((loss.repeat(batch_size))) + losses_host = losses if losses_host is None else nested_concat(losses_host, losses, padding_index=-100) + if labels is not None: + labels = self.accelerator.pad_across_processes(labels, dim=1, pad_index=-100) + if inputs_decode is not None: + inputs_decode = self.accelerator.pad_across_processes(inputs_decode, dim=1, pad_index=-100) + inputs_decode = self.gather_function((inputs_decode)) + inputs_host = ( + inputs_decode + if inputs_host is None + else nested_concat(inputs_host, inputs_decode, padding_index=-100) + ) + if logits is not None: + logits = self.accelerator.pad_across_processes(logits, dim=1, pad_index=-100) + if self.preprocess_logits_for_metrics is not None: + logits = self.preprocess_logits_for_metrics(logits, labels) + logits = self.gather_function((logits)) + preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100) + + if labels is not None: + labels = self.gather_function((labels)) + labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100) + + self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) + + # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. 
+ if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0: + if losses_host is not None: + losses = nested_numpify(losses_host) + all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0) + if preds_host is not None: + logits = nested_numpify(preds_host) + all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) + if inputs_host is not None: + inputs_decode = nested_numpify(inputs_host) + all_inputs = ( + inputs_decode + if all_inputs is None + else nested_concat(all_inputs, inputs_decode, padding_index=-100) + ) + if labels_host is not None: + labels = nested_numpify(labels_host) + all_labels = ( + labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100) + ) + + # Set back to None to begin a new accumulation + losses_host, preds_host, inputs_host, labels_host = None, None, None, None + + # After all calls to `.gather_function`, reset to `gather_for_metrics`: + self.gather_function = self.accelerator.gather_for_metrics + if args.past_index and hasattr(self, "_past"): + # Clean the state at the end of the evaluation loop + delattr(self, "_past") + + # Gather all remaining tensors and put them back on the CPU + if losses_host is not None: + losses = nested_numpify(losses_host) + all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0) + if preds_host is not None: + logits = nested_numpify(preds_host) + all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) + if inputs_host is not None: + inputs_decode = nested_numpify(inputs_host) + all_inputs = ( + inputs_decode if all_inputs is None else nested_concat(all_inputs, inputs_decode, padding_index=-100) + ) + if labels_host is not None: + labels = nested_numpify(labels_host) + all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100) + + # Number of samples + if has_length(eval_dataset): + num_samples = len(eval_dataset) + # The instance check is weird and does not actually check for the type, but whether the dataset has the right + # methods. Therefore we need to make sure it also has the attribute. + elif isinstance(eval_dataset, IterableDatasetShard) and getattr(eval_dataset, "num_examples", 0) > 0: + num_samples = eval_dataset.num_examples + else: + if has_length(dataloader): + num_samples = self.num_examples(dataloader) + else: # both len(dataloader.dataset) and len(dataloader) fail + num_samples = observed_num_examples + if num_samples == 0 and observed_num_examples > 0: + num_samples = observed_num_examples + + # Metrics! 
+ if self.compute_metrics is not None and all_preds is not None and all_labels is not None: + if args.include_inputs_for_metrics: + metrics = self.compute_metrics( + EvalPrediction(predictions=all_preds, label_ids=all_labels, inputs=all_inputs) + ) + else: + metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels)) + else: + metrics = {} + + # To be JSON-serializable, we need to remove numpy types or zero-d tensors + metrics = denumpify_detensorize(metrics) + + if all_losses is not None: + metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item() + if hasattr(self, "jit_compilation_time"): + metrics[f"{metric_key_prefix}_jit_compilation_time"] = self.jit_compilation_time + + # Prefix all keys with metric_key_prefix + '_' + for key in list(metrics.keys()): + if not key.startswith(f"{metric_key_prefix}_"): + metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) + + return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples) + + def _nested_gather(self, tensors, name=None): + """ + Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before + concatenating them to `gathered` + """ + if tensors is None: + return + if is_torch_xla_available(): + if name is None: + name = "nested_gather" + tensors = nested_xla_mesh_reduce(tensors, name) + elif is_sagemaker_mp_enabled(): + tensors = smp_gather(tensors) + elif (self.args.distributed_state is not None and self.args.distributed_state.distributed_type != "NO") or ( + self.args.distributed_state is None and self.args.local_rank != -1 + ): + tensors = distributed_concat(tensors) + return tensors + + def prediction_step( + self, + model: nn.Module, + inputs: Dict[str, Union[torch.Tensor, Any]], + prediction_loss_only: bool, + ignore_keys: Optional[List[str]] = None, + ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: + """ + Perform an evaluation step on `model` using `inputs`. + + Subclass and override to inject custom behavior. + + Args: + model (`nn.Module`): + The model to evaluate. + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + + The dictionary will be unpacked before being fed to the model. Most models expect the targets under the + argument `labels`. Check your model's documentation for all accepted arguments. + prediction_loss_only (`bool`): + Whether or not to return the loss only. + ignore_keys (`List[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + + Return: + Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, + logits and labels (each being optional). + """ + has_labels = False if len(self.label_names) == 0 else all(inputs.get(k) is not None for k in self.label_names) + # For CLIP-like models capable of returning loss values. + # If `return_loss` is not specified or being `None` in `inputs`, we check if the default value of `return_loss` + # is `True` in `model.forward`. 
+ return_loss = inputs.get("return_loss", None) + if return_loss is None: + return_loss = self.can_return_loss + loss_without_labels = True if len(self.label_names) == 0 and return_loss else False + + inputs = self._prepare_inputs(inputs) + if ignore_keys is None: + if hasattr(self.model, "config"): + ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", []) + else: + ignore_keys = [] + + # labels may be popped when computing the loss (label smoothing for instance) so we grab them first. + if has_labels or loss_without_labels: + labels = nested_detach(tuple(inputs.get(name) for name in self.label_names)) + if len(labels) == 1: + labels = labels[0] + else: + labels = None + + with torch.no_grad(): + if is_sagemaker_mp_enabled(): + raw_outputs = smp_forward_only(model, inputs) + if has_labels or loss_without_labels: + if isinstance(raw_outputs, dict): + loss_mb = raw_outputs["loss"] + logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"]) + else: + loss_mb = raw_outputs[0] + logits_mb = raw_outputs[1:] + + loss = loss_mb.reduce_mean().detach().cpu() + logits = smp_nested_concat(logits_mb) + else: + loss = None + if isinstance(raw_outputs, dict): + logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys) + else: + logits_mb = raw_outputs + logits = smp_nested_concat(logits_mb) + else: + if has_labels or loss_without_labels: + with self.compute_loss_context_manager(): + loss, outputs = self.compute_loss(model, inputs, return_outputs=True) + loss = loss.mean().detach() + + if isinstance(outputs, dict): + logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"]) + else: + logits = outputs[1:] + else: + loss = None + with self.compute_loss_context_manager(): + outputs = model(**inputs) + if isinstance(outputs, dict): + logits = tuple(v for k, v in outputs.items() if k not in ignore_keys) + else: + logits = outputs + # TODO: this needs to be fixed and made cleaner later. + if self.args.past_index >= 0: + self._past = outputs[self.args.past_index - 1] + + if prediction_loss_only: + return (loss, None, None) + + logits = nested_detach(logits) + if len(logits) == 1: + logits = logits[0] + + return (loss, logits, labels) + + def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]): + """ + For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point + operations for every backward + forward pass. If using another model, either implement such a method in the + model or subclass and override this method. + + Args: + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + + Returns: + `int`: The number of floating-point operations. + """ + if hasattr(self.model, "floating_point_ops"): + return self.model.floating_point_ops(inputs) + else: + return 0 + + def init_hf_repo(self): + """ + Initializes a git repo in `self.args.hub_model_id`. 
+ """ + # Only on process zero + if not self.is_world_process_zero(): + return + + if self.args.hub_model_id is None: + repo_name = Path(self.args.output_dir).absolute().name + else: + repo_name = self.args.hub_model_id + + repo_url = create_repo(repo_name, token=self.args.hub_token, private=self.args.hub_private_repo, exist_ok=True) + self.hub_model_id = repo_url.repo_id + self.push_in_progress = None + + def create_model_card( + self, + language: Optional[str] = None, + license: Optional[str] = None, + tags: Union[str, List[str], None] = None, + model_name: Optional[str] = None, + finetuned_from: Optional[str] = None, + tasks: Union[str, List[str], None] = None, + dataset_tags: Union[str, List[str], None] = None, + dataset: Union[str, List[str], None] = None, + dataset_args: Union[str, List[str], None] = None, + ): + """ + Creates a draft of a model card using the information available to the `Trainer`. + + Args: + language (`str`, *optional*): + The language of the model (if applicable) + license (`str`, *optional*): + The license of the model. Will default to the license of the pretrained model used, if the original + model given to the `Trainer` comes from a repo on the Hub. + tags (`str` or `List[str]`, *optional*): + Some tags to be included in the metadata of the model card. + model_name (`str`, *optional*): + The name of the model. + finetuned_from (`str`, *optional*): + The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo + of the original model given to the `Trainer` (if it comes from the Hub). + tasks (`str` or `List[str]`, *optional*): + One or several task identifiers, to be included in the metadata of the model card. + dataset_tags (`str` or `List[str]`, *optional*): + One or several dataset tags, to be included in the metadata of the model card. + dataset (`str` or `List[str]`, *optional*): + One or several dataset identifiers, to be included in the metadata of the model card. + dataset_args (`str` or `List[str]`, *optional*): + One or several dataset arguments, to be included in the metadata of the model card. + """ + if not self.is_world_process_zero(): + return + + model_card_filepath = os.path.join(self.args.output_dir, "README.md") + is_peft_library = False + if os.path.exists(model_card_filepath): + library_name = ModelCard.load(model_card_filepath).data.get("library_name") + is_peft_library = library_name == "peft" + + # Append existing tags in `tags` + existing_tags = ModelCard.load(model_card_filepath).data.tags + if tags is not None and existing_tags is not None: + if isinstance(tags, str): + tags = [tags] + for tag in existing_tags: + if tag not in tags: + tags.append(tag) + + training_summary = TrainingSummary.from_trainer( + self, + language=language, + license=license, + tags=tags, + model_name=model_name, + finetuned_from=finetuned_from, + tasks=tasks, + dataset_tags=dataset_tags, + dataset=dataset, + dataset_args=dataset_args, + ) + model_card = training_summary.to_model_card() + with open(model_card_filepath, "w") as f: + f.write(model_card) + + if is_peft_library: + unwrap_model(self.model).create_or_update_model_card(self.args.output_dir) + + def _push_from_checkpoint(self, checkpoint_folder): + # Only push from one node. + if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END: + return + # If we haven't finished the last push, we don't do this one unless args.hub_always_push=True. 
+ if not self.args.hub_always_push and self.push_in_progress is not None and not self.push_in_progress.is_done(): + return + + output_dir = self.args.output_dir + # To avoid a new synchronization of all model weights, we just copy the file from the checkpoint folder + modeling_files = [CONFIG_NAME, WEIGHTS_NAME, SAFE_WEIGHTS_NAME] + if is_peft_available(): + modeling_files.extend([ADAPTER_CONFIG_NAME, ADAPTER_WEIGHTS_NAME, ADAPTER_SAFE_WEIGHTS_NAME]) + for modeling_file in modeling_files: + if os.path.isfile(os.path.join(checkpoint_folder, modeling_file)): + shutil.copy(os.path.join(checkpoint_folder, modeling_file), os.path.join(output_dir, modeling_file)) + # Saving the tokenizer is fast and we don't know how many files it may have spawned, so we resave it to be sure. + if self.tokenizer is not None: + self.tokenizer.save_pretrained(output_dir) + # Same for the training arguments + torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) + + if self.args.save_strategy == IntervalStrategy.STEPS: + commit_message = f"Training in progress, step {self.state.global_step}" + else: + commit_message = f"Training in progress, epoch {int(self.state.epoch)}" + + model_push_job = upload_folder( + repo_id=self.hub_model_id, + folder_path=output_dir, + commit_message=commit_message, + token=self.args.hub_token, + run_as_future=True, + ignore_patterns=["_*", f"{PREFIX_CHECKPOINT_DIR}-*"], + ) + + push_jobs = [model_push_job] + + if self.args.hub_strategy in [HubStrategy.CHECKPOINT, HubStrategy.ALL_CHECKPOINTS]: + path_in_repo = ( + "last-checkpoint" if self.args.hub_strategy == HubStrategy.CHECKPOINT else Path(checkpoint_folder).name + ) + checkpoint_push = upload_folder( + repo_id=self.hub_model_id, + folder_path=checkpoint_folder, + path_in_repo=path_in_repo, + commit_message=commit_message + ", checkpoint", + token=self.args.hub_token, + run_as_future=True, + ) + push_jobs.append(checkpoint_push) + + if self.push_in_progress is None or self.push_in_progress.is_done(): + self.push_in_progress = PushInProgress(push_jobs) + else: + self.push_in_progress.jobs.extend(push_jobs) + + def _finish_current_push(self): + if not hasattr(self, "push_in_progress"): + return + if self.push_in_progress is not None and not self.push_in_progress.is_done(): + logger.info("Waiting for the current checkpoint push to be finished, this might take a couple of minutes.") + self.push_in_progress.wait_until_done() + + def push_to_hub(self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs) -> str: + """ + Upload `self.model` and `self.tokenizer` to the 🤗 model hub on the repo `self.args.hub_model_id`. + + Parameters: + commit_message (`str`, *optional*, defaults to `"End of training"`): + Message to commit while pushing. + blocking (`bool`, *optional*, defaults to `True`): + Whether the function should return only when the `git push` has finished. + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to [`~Trainer.create_model_card`]. + + Returns: + The URL of the repository where the model was pushed if `blocking=False`, or a `Future` object tracking the + progress of the commit if `blocking=True`. 
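+
+ Example (an illustrative sketch; `trainer` stands for an already-trained `Trainer` whose `args.hub_model_id`
+ and Hub credentials are assumed to be configured):
+
+ ```python
+ trainer.push_to_hub(commit_message="End of training", blocking=True)
+ ```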
+ """ + model_name = kwargs.pop("model_name", None) + if model_name is None and self.args.should_save: + if self.args.hub_model_id is None: + model_name = Path(self.args.output_dir).name + else: + model_name = self.args.hub_model_id.split("/")[-1] + + # In case the user calls this method with args.push_to_hub = False + if self.hub_model_id is None: + self.init_hf_repo() + + # Needs to be executed on all processes for TPU training, but will only save on the processed determined by + # self.args.should_save. + self.save_model(_internal_call=True) + + # Only push from one node. + if not self.is_world_process_zero(): + return + + # Add additional tags in the case the model has already some tags and users pass + # "tags" argument to `push_to_hub` so that trainer automatically handles internal tags + # from all models since Trainer does not call `model.push_to_hub`. + if getattr(self.model, "model_tags", None) is not None: + if "tags" not in kwargs: + kwargs["tags"] = [] + + # If it is a string, convert it to a list + if isinstance(kwargs["tags"], str): + kwargs["tags"] = [kwargs["tags"]] + + for model_tag in self.model.model_tags: + if model_tag not in kwargs["tags"]: + kwargs["tags"].append(model_tag) + + self.create_model_card(model_name=model_name, **kwargs) + + # Wait for the current upload to be finished. + self._finish_current_push() + return upload_folder( + repo_id=self.hub_model_id, + folder_path=self.args.output_dir, + commit_message=commit_message, + token=self.args.hub_token, + run_as_future=not blocking, + ignore_patterns=["_*", f"{PREFIX_CHECKPOINT_DIR}-*"], + ) + + # + # Deprecated code + # + + def prediction_loop( + self, + dataloader: DataLoader, + description: str, + prediction_loss_only: Optional[bool] = None, + ignore_keys: Optional[List[str]] = None, + metric_key_prefix: str = "eval", + ) -> EvalLoopOutput: + """ + Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. + + Works both with or without labels. 
+ """ + args = self.args + + if not has_length(dataloader): + raise ValueError("dataloader must implement a working __len__") + + prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only + + # if eval is called w/o train, handle model prep here + if self.is_deepspeed_enabled and self.deepspeed is None: + _, _ = deepspeed_init(self, num_training_steps=0, inference=True) + + model = self._wrap_model(self.model, training=False, dataloader=dataloader) + + if len(self.accelerator._models) == 0 and model is self.model: + model = ( + self.accelerator.prepare(model) + if self.is_deepspeed_enabled + else self.accelerator.prepare_model(model, evaluation_mode=True) + ) + + if self.is_fsdp_enabled: + self.model = model + + # for the rest of this function `model` is the outside model, whether it was wrapped or not + if model is not self.model: + self.model_wrapped = model + + # backward compatibility + if self.is_deepspeed_enabled: + self.deepspeed = self.model_wrapped + + # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called + # while ``train`` is running, cast it to the right dtype first and then put on device + if not self.is_in_train: + if args.fp16_full_eval: + model = model.to(dtype=torch.float16, device=args.device) + elif args.bf16_full_eval: + model = model.to(dtype=torch.bfloat16, device=args.device) + + batch_size = dataloader.batch_size + num_examples = self.num_examples(dataloader) + logger.info(f"***** Running {description} *****") + logger.info(f" Num examples = {num_examples}") + logger.info(f" Batch size = {batch_size}") + losses_host: torch.Tensor = None + preds_host: Union[torch.Tensor, List[torch.Tensor]] = None + labels_host: Union[torch.Tensor, List[torch.Tensor]] = None + inputs_host: Union[torch.Tensor, List[torch.Tensor]] = None + + world_size = max(1, args.world_size) + + eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size) + if not prediction_loss_only: + # The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass + # a batch size to the sampler) + make_multiple_of = None + if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler): + make_multiple_of = dataloader.sampler.batch_size + preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) + labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) + inputs_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) + + model.eval() + + if args.past_index >= 0: + self._past = None + + self.callback_handler.eval_dataloader = dataloader + + for step, inputs in enumerate(dataloader): + loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) + main_input_name = getattr(self.model, "main_input_name", "input_ids") + inputs_decode = self._prepare_input(inputs[main_input_name]) if args.include_inputs_for_metrics else None + + if loss is not None: + losses = loss.repeat(batch_size) + losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0) + if logits is not None: + preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100) + if labels is not None: + labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100) + if 
inputs_decode is not None: + inputs_host = ( + inputs_decode + if inputs_host is None + else nested_concat(inputs_host, inputs_decode, padding_index=-100) + ) + self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) + + # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. + if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0: + eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) + if not prediction_loss_only: + preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) + labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) + inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids")) + + # Set back to None to begin a new accumulation + losses_host, preds_host, labels_host, inputs_host = None, None, None, None + + if args.past_index and hasattr(self, "_past"): + # Clean the state at the end of the evaluation loop + delattr(self, "_past") + + # Gather all remaining tensors and put them back on the CPU + eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) + if not prediction_loss_only: + preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) + labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) + inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids")) + + eval_loss = eval_losses_gatherer.finalize() + preds = preds_gatherer.finalize() if not prediction_loss_only else None + label_ids = labels_gatherer.finalize() if not prediction_loss_only else None + inputs_ids = inputs_gatherer.finalize() if not prediction_loss_only else None + + if self.compute_metrics is not None and preds is not None and label_ids is not None: + if args.include_inputs_for_metrics: + metrics = self.compute_metrics( + EvalPrediction(predictions=preds, label_ids=label_ids, inputs=inputs_ids) + ) + else: + metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) + else: + metrics = {} + + # To be JSON-serializable, we need to remove numpy types or zero-d tensors + metrics = denumpify_detensorize(metrics) + + if eval_loss is not None: + metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item() + + # Prefix all keys with metric_key_prefix + '_' + for key in list(metrics.keys()): + if not key.startswith(f"{metric_key_prefix}_"): + metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) + + return EvalLoopOutput(predictions=preds, label_ids=label_ids, metrics=metrics, num_samples=num_examples) + + def _gather_and_numpify(self, tensors, name): + """ + Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before + concatenating them to `gathered` + """ + if tensors is None: + return + if is_torch_xla_available(): + tensors = nested_xla_mesh_reduce(tensors, name) + elif is_sagemaker_mp_enabled(): + tensors = smp_gather(tensors) + elif self.args.parallel_mode == ParallelMode.DISTRIBUTED: + tensors = distributed_concat(tensors) + + return nested_numpify(tensors) + + def _add_sm_patterns_to_gitignore(self) -> None: + """Add SageMaker Checkpointing patterns to .gitignore file.""" + # Make sure we only do this on the main process + if not self.is_world_process_zero(): + return + + patterns = ["*.sagemaker-uploading", "*.sagemaker-uploaded"] + + # Get current .gitignore content + if 
os.path.exists(os.path.join(self.repo.local_dir, ".gitignore")): + with open(os.path.join(self.repo.local_dir, ".gitignore"), "r") as f: + current_content = f.read() + else: + current_content = "" + + # Add the patterns to .gitignore + content = current_content + for pattern in patterns: + if pattern not in content: + if content.endswith("\n"): + content += pattern + else: + content += f"\n{pattern}" + + # Write the .gitignore file if it has changed + if content != current_content: + with open(os.path.join(self.repo.local_dir, ".gitignore"), "w") as f: + logger.debug(f"Writing .gitignore file. Content: {content}") + f.write(content) + + self.repo.git_add(".gitignore") + + # avoid race condition with git status + time.sleep(0.5) + + if not self.repo.is_repo_clean(): + self.repo.git_commit("Add *.sagemaker patterns to .gitignore.") + self.repo.git_push() + + def create_accelerator_and_postprocess(self): + grad_acc_kwargs = {"num_steps": self.args.gradient_accumulation_steps} + grad_acc_kwargs["sync_with_dataloader"] = False + gradient_accumulation_plugin = GradientAccumulationPlugin(**grad_acc_kwargs) + + # create accelerator object + self.accelerator = Accelerator( + deepspeed_plugin=self.args.deepspeed_plugin, + gradient_accumulation_plugin=gradient_accumulation_plugin, + **self.args.accelerator_config.to_dict(), + ) + # some Trainer classes need to use `gather` instead of `gather_for_metrics`, thus we store a flag + self.gather_function = self.accelerator.gather_for_metrics + + # deepspeed and accelerate flags covering both trainer args and accelerate launcher + self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None + self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None + + # post accelerator creation setup + if self.is_fsdp_enabled: + fsdp_plugin = self.accelerator.state.fsdp_plugin + fsdp_plugin.limit_all_gathers = self.args.fsdp_config.get( + "limit_all_gathers", fsdp_plugin.limit_all_gathers + ) + if is_accelerate_available("0.23.0"): + fsdp_plugin.activation_checkpointing = self.args.fsdp_config.get( + "activation_checkpointing", fsdp_plugin.activation_checkpointing + ) + if fsdp_plugin.activation_checkpointing and self.args.gradient_checkpointing: + raise ValueError( + "The activation_checkpointing in FSDP config and the gradient_checkpointing in training arg " + "can't be set to True simultaneously. Please use FSDP's activation_checkpointing logic " + "when using FSDP." 
+ ) + + if self.is_deepspeed_enabled and getattr(self.args, "hf_deepspeed_config", None) is None: + self.propagate_args_to_deepspeed() + + # `save_only_model` can't be used with DeepSpeed/FSDP along with `load_best_model_at_end` + if ( + self.args.save_only_model + and (self.is_deepspeed_enabled or self.is_fsdp_enabled) + and self.args.load_best_model_at_end + ): + wrapper = "DeepSpeed" if self.is_deepspeed_enabled else "FSDP" + raise ValueError(f"{wrapper} can't be used with `save_only_model` along with `load_best_model_at_end`.") + + # `auto_find_batch_size` isn't yet supported with DeepSpeed/FSDP + if (self.is_deepspeed_enabled or self.is_fsdp_enabled) and self.args.auto_find_batch_size: + wrapper = "DeepSpeed" if self.is_deepspeed_enabled else "FSDP" + raise NotImplementedError(f"`{wrapper}` doesn't support `auto_find_batch_size`.") + + def propagate_args_to_deepspeed(self, auto_find_batch_size=False): + """ + Sets values in the deepspeed plugin based on the Trainer args + """ + from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig + + ds_plugin = self.accelerator.state.deepspeed_plugin + + ds_plugin.hf_ds_config = HfTrainerDeepSpeedConfig(ds_plugin.hf_ds_config.config) + ds_plugin.deepspeed_config = ds_plugin.hf_ds_config.config + ds_plugin.hf_ds_config.trainer_config_process(self.args, auto_find_batch_size) + + def _fsdp_qlora_plugin_updates(self): + if self.is_fsdp_enabled and _is_peft_model(self.model): + from peft import LoraConfig + from peft.utils.other import fsdp_auto_wrap_policy + + if isinstance(self.model.active_peft_config, LoraConfig): + fsdp_plugin = self.accelerator.state.fsdp_plugin + fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(self.model) + if ( + getattr(self.model, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES + and self.model.hf_quantizer.quantization_config.bnb_4bit_quant_storage.is_floating_point + and version.parse(accelerate_version) > version.parse("0.27.0") + ): + fsdp_plugin.set_mixed_precision( + self.model.hf_quantizer.quantization_config.bnb_4bit_quant_storage, override=True + ) diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/trainer_seq2seq.py b/env-llmeval/lib/python3.10/site-packages/transformers/trainer_seq2seq.py new file mode 100644 index 0000000000000000000000000000000000000000..b6bce1b57d5e2a6524bf878a3b7930e8ead44b0e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/trainer_seq2seq.py @@ -0,0 +1,367 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import warnings +from copy import deepcopy +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from torch import nn +from torch.utils.data import Dataset + +from .generation.configuration_utils import GenerationConfig +from .integrations.deepspeed import is_deepspeed_zero3_enabled +from .trainer import Trainer +from .utils import logging + + +if TYPE_CHECKING: + from .data.data_collator import DataCollator + from .modeling_utils import PreTrainedModel + from .tokenization_utils_base import PreTrainedTokenizerBase + from .trainer_callback import TrainerCallback + from .trainer_utils import EvalPrediction, PredictionOutput + from .training_args import TrainingArguments + + +logger = logging.get_logger(__name__) + + +class Seq2SeqTrainer(Trainer): + def __init__( + self, + model: Union["PreTrainedModel", nn.Module] = None, + args: "TrainingArguments" = None, + data_collator: Optional["DataCollator"] = None, + train_dataset: Optional[Dataset] = None, + eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, + tokenizer: Optional["PreTrainedTokenizerBase"] = None, + model_init: Optional[Callable[[], "PreTrainedModel"]] = None, + compute_metrics: Optional[Callable[["EvalPrediction"], Dict]] = None, + callbacks: Optional[List["TrainerCallback"]] = None, + optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), + preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, + ): + super().__init__( + model=model, + args=args, + data_collator=data_collator, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + tokenizer=tokenizer, + model_init=model_init, + compute_metrics=compute_metrics, + callbacks=callbacks, + optimizers=optimizers, + preprocess_logits_for_metrics=preprocess_logits_for_metrics, + ) + + # Override self.model.generation_config if a GenerationConfig is specified in args. + # Priority: args.generation_config > model.generation_config > default GenerationConfig. + if self.args.generation_config is not None: + gen_config = self.load_generation_config(self.args.generation_config) + self.model.generation_config = gen_config + + @staticmethod + def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> GenerationConfig: + """ + Loads a `~generation.GenerationConfig` from the `Seq2SeqTrainingArguments.generation_config` arguments. + + Args: + gen_config_arg (`str` or [`~generation.GenerationConfig`]): + `Seq2SeqTrainingArguments.generation_config` argument. + + Returns: + A `~generation.GenerationConfig`. + """ + + # GenerationConfig provided, nothing to do + if isinstance(gen_config_arg, GenerationConfig): + gen_config = deepcopy(gen_config_arg) + else: + # str or Path + pretrained_model_name = Path(gen_config_arg) if isinstance(gen_config_arg, str) else gen_config_arg + config_file_name = None + + # Figuring if it is path pointing to a file, pointing to a directory or else a model id or URL + # This step is required in order to determine config_file_name + if pretrained_model_name.is_file(): + config_file_name = pretrained_model_name.name + pretrained_model_name = pretrained_model_name.parent + # dir path + elif pretrained_model_name.is_dir(): + pass + # model id or URL + else: + pretrained_model_name = gen_config_arg + + gen_config = GenerationConfig.from_pretrained(pretrained_model_name, config_file_name) + + # Strict validation to fail early. 
`GenerationConfig.save_pretrained()`, run at the end of training, throws + # an exception if there are warnings at validation time. + try: + with warnings.catch_warnings(record=True) as caught_warnings: + gen_config.validate() + if len(caught_warnings) > 0: + raise ValueError(str([w.message for w in caught_warnings])) + except ValueError as exc: + raise ValueError( + "The loaded generation config instance is invalid -- `GenerationConfig.validate()` throws warnings " + "and/or exceptions. Fix these issues to train your model.\n\nThrown during validation:\n" + str(exc) + ) + return gen_config + + def evaluate( + self, + eval_dataset: Optional[Dataset] = None, + ignore_keys: Optional[List[str]] = None, + metric_key_prefix: str = "eval", + **gen_kwargs, + ) -> Dict[str, float]: + """ + Run evaluation and returns metrics. + + The calling script will be responsible for providing a method to compute metrics, as they are task-dependent + (pass it to the init `compute_metrics` argument). + + You can also subclass and override this method to inject custom behavior. + + Args: + eval_dataset (`Dataset`, *optional*): + Pass a dataset if you wish to override `self.eval_dataset`. If it is an [`~datasets.Dataset`], columns + not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__` + method. + ignore_keys (`List[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + metric_key_prefix (`str`, *optional*, defaults to `"eval"`): + An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named + "eval_bleu" if the prefix is `"eval"` (default) + max_length (`int`, *optional*): + The maximum target length to use when predicting with the generate method. + num_beams (`int`, *optional*): + Number of beams for beam search that will be used when predicting with the generate method. 1 means no + beam search. + gen_kwargs: + Additional `generate` specific kwargs. + + Returns: + A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The + dictionary also contains the epoch number which comes from the training state. + """ + + gen_kwargs = gen_kwargs.copy() + + # Use legacy argument setting if a) the option is not explicitly passed; and b) the argument is set in the + # training args + if ( + gen_kwargs.get("max_length") is None + and gen_kwargs.get("max_new_tokens") is None + and self.args.generation_max_length is not None + ): + gen_kwargs["max_length"] = self.args.generation_max_length + if gen_kwargs.get("num_beams") is None and self.args.generation_num_beams is not None: + gen_kwargs["num_beams"] = self.args.generation_num_beams + # We don't want to drop samples in general + self.gather_function = self.accelerator.gather + self._gen_kwargs = gen_kwargs + return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) + + def predict( + self, + test_dataset: Dataset, + ignore_keys: Optional[List[str]] = None, + metric_key_prefix: str = "test", + **gen_kwargs, + ) -> "PredictionOutput": + """ + Run prediction and returns predictions and potential metrics. + + Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method + will also return metrics, like in `evaluate()`. + + Args: + test_dataset (`Dataset`): + Dataset to run the predictions on. 
If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. Has to implement the method `__len__` + ignore_keys (`List[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + metric_key_prefix (`str`, *optional*, defaults to `"eval"`): + An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named + "eval_bleu" if the prefix is `"eval"` (default) + max_length (`int`, *optional*): + The maximum target length to use when predicting with the generate method. + num_beams (`int`, *optional*): + Number of beams for beam search that will be used when predicting with the generate method. 1 means no + beam search. + gen_kwargs: + Additional `generate` specific kwargs. + + + + If your predictions or labels have different sequence lengths (for instance because you're doing dynamic + padding in a token classification task) the predictions will be padded (on the right) to allow for + concatenation into one array. The padding index is -100. + + + + Returns: *NamedTuple* A namedtuple with the following keys: + + - predictions (`np.ndarray`): The predictions on `test_dataset`. + - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some). + - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained + labels). + """ + + gen_kwargs = gen_kwargs.copy() + + # Use legacy argument setting if a) the option is not explicitly passed; and b) the argument is set in the + # training args + if ( + gen_kwargs.get("max_length") is None + and gen_kwargs.get("max_new_tokens") is None + and self.args.generation_max_length is not None + ): + gen_kwargs["max_length"] = self.args.generation_max_length + if gen_kwargs.get("num_beams") is None and self.args.generation_num_beams is not None: + gen_kwargs["num_beams"] = self.args.generation_num_beams + self.gather_function = self.accelerator.gather + self._gen_kwargs = gen_kwargs + + return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) + + def prediction_step( + self, + model: nn.Module, + inputs: Dict[str, Union[torch.Tensor, Any]], + prediction_loss_only: bool, + ignore_keys: Optional[List[str]] = None, + **gen_kwargs, + ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: + """ + Perform an evaluation step on `model` using `inputs`. + + Subclass and override to inject custom behavior. + + Args: + model (`nn.Module`): + The model to evaluate. + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + + The dictionary will be unpacked before being fed to the model. Most models expect the targets under the + argument `labels`. Check your model's documentation for all accepted arguments. + prediction_loss_only (`bool`): + Whether or not to return the loss only. + gen_kwargs: + Additional `generate` specific kwargs. + + Return: + Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and + labels (each being optional). 
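+
+ Example (an illustrative sketch of the "subclass and override" pattern mentioned above; the subclass name and
+ the post-processing hook are hypothetical):
+
+ ```python
+ class MySeq2SeqTrainer(Seq2SeqTrainer):
+     def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None, **gen_kwargs):
+         loss, generated_tokens, labels = super().prediction_step(
+             model, inputs, prediction_loss_only, ignore_keys=ignore_keys, **gen_kwargs
+         )
+         # e.g. inspect or post-process `generated_tokens` here before returning
+         return loss, generated_tokens, labels
+ ```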
+ """ + + if not self.args.predict_with_generate or prediction_loss_only: + return super().prediction_step( + model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys + ) + + has_labels = "labels" in inputs + inputs = self._prepare_inputs(inputs) + + # Priority (handled in generate): + # non-`None` gen_kwargs > model.generation_config > default GenerationConfig() + if len(gen_kwargs) == 0 and hasattr(self, "_gen_kwargs"): + gen_kwargs = self._gen_kwargs.copy() + if "num_beams" in gen_kwargs and gen_kwargs["num_beams"] is None: + gen_kwargs.pop("num_beams") + if "max_length" in gen_kwargs and gen_kwargs["max_length"] is None: + gen_kwargs.pop("max_length") + + default_synced_gpus = True if is_deepspeed_zero3_enabled() else False + gen_kwargs["synced_gpus"] = ( + gen_kwargs["synced_gpus"] if gen_kwargs.get("synced_gpus") is not None else default_synced_gpus + ) + + generation_inputs = inputs.copy() + # If the `decoder_input_ids` was created from `labels`, evict the former, so that the model can freely generate + # (otherwise, it would continue generating from the padded `decoder_input_ids`) + if ( + "labels" in generation_inputs + and "decoder_input_ids" in generation_inputs + and generation_inputs["labels"].shape == generation_inputs["decoder_input_ids"].shape + ): + generation_inputs = { + k: v for k, v in inputs.items() if k not in ("decoder_input_ids", "decoder_attention_mask") + } + generated_tokens = self.model.generate(**generation_inputs, **gen_kwargs) + + # Temporary hack to ensure the generation config is not initialized for each iteration of the evaluation loop + # TODO: remove this hack when the legacy code that initializes generation_config from a model config is + # removed in https://github.com/huggingface/transformers/blob/98d88b23f54e5a23e741833f1e973fdf600cc2c5/src/transformers/generation/utils.py#L1183 + if self.model.generation_config._from_model_config: + self.model.generation_config._from_model_config = False + + # Retrieves GenerationConfig from model.generation_config + gen_config = self.model.generation_config + # in case the batch is shorter than max length, the output should be padded + if generated_tokens.shape[-1] < gen_config.max_length: + generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_length) + elif gen_config.max_new_tokens is not None and generated_tokens.shape[-1] < gen_config.max_new_tokens + 1: + generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_new_tokens + 1) + + with torch.no_grad(): + if has_labels: + with self.compute_loss_context_manager(): + outputs = model(**inputs) + if self.label_smoother is not None: + loss = self.label_smoother(outputs, inputs["labels"]).mean().detach() + else: + loss = (outputs["loss"] if isinstance(outputs, dict) else outputs[0]).mean().detach() + else: + loss = None + + if self.args.prediction_loss_only: + return loss, None, None + + if has_labels: + labels = inputs["labels"] + if labels.shape[-1] < gen_config.max_length: + labels = self._pad_tensors_to_max_len(labels, gen_config.max_length) + elif gen_config.max_new_tokens is not None and labels.shape[-1] < gen_config.max_new_tokens + 1: + labels = self._pad_tensors_to_max_len(labels, gen_config.max_new_tokens + 1) + else: + labels = None + + return loss, generated_tokens, labels + + def _pad_tensors_to_max_len(self, tensor, max_length): + if self.tokenizer is not None and hasattr(self.tokenizer, "pad_token_id"): + # If PAD token is not defined at least EOS token has to be defined + 
pad_token_id = ( + self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id + ) + else: + if self.model.config.pad_token_id is not None: + pad_token_id = self.model.config.pad_token_id + else: + raise ValueError("Pad_token_id must be set in the configuration of the model, in order to pad tensors") + + padded_tensor = pad_token_id * torch.ones( + (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device + ) + padded_tensor[:, : tensor.shape[-1]] = tensor + return padded_tensor diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/trainer_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/trainer_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0faf657387ba99eb5955ad5e6a4d3d8fd5c0c190 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/trainer_utils.py @@ -0,0 +1,826 @@ +# coding=utf-8 +# Copyright 2020-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +PyTorch-independent utilities for the Trainer class. +""" + +import copy +import functools +import gc +import inspect +import os +import random +import re +import threading +import time +from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union + +import numpy as np + +from .utils import ( + ExplicitEnum, + is_psutil_available, + is_tf_available, + is_torch_available, + is_torch_cuda_available, + is_torch_mps_available, + is_torch_npu_available, + is_torch_xla_available, + is_torch_xpu_available, + requires_backends, +) + + +if is_torch_available(): + import torch + + +def seed_worker(_): + """ + Helper function to set worker seed during Dataloader initialization. + """ + worker_seed = torch.initial_seed() % 2**32 + set_seed(worker_seed) + + +def enable_full_determinism(seed: int, warn_only: bool = False): + """ + Helper function for reproducible behavior during distributed training. See + - https://pytorch.org/docs/stable/notes/randomness.html for pytorch + - https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism for tensorflow + """ + # set seed first + set_seed(seed) + + if is_torch_available(): + # Enable PyTorch deterministic mode. This potentially requires either the environment + # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set, + # depending on the CUDA version, so we set them both here + os.environ["CUDA_LAUNCH_BLOCKING"] = "1" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" + torch.use_deterministic_algorithms(True, warn_only=warn_only) + + # Enable CUDNN deterministic mode + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + if is_tf_available(): + import tensorflow as tf + + tf.config.experimental.enable_op_determinism() + + +def set_seed(seed: int): + """ + Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed). + + Args: + seed (`int`): The seed to set. 
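+
+ Example (illustrative):
+
+ ```python
+ from transformers import set_seed
+
+ set_seed(42)  # seeds Python's `random`, NumPy, and the torch/tf generators when those backends are installed
+ ```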
+ """ + random.seed(seed) + np.random.seed(seed) + if is_torch_available(): + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + # ^^ safe to call this function even if cuda is not available + if is_torch_npu_available(): + torch.npu.manual_seed_all(seed) + if is_torch_xpu_available(): + torch.xpu.manual_seed_all(seed) + if is_tf_available(): + import tensorflow as tf + + tf.random.set_seed(seed) + + +def neftune_post_forward_hook(module, input, output): + """ + Implements the NEFTune forward pass for the model using forward hooks. Note this works only for torch.nn.Embedding + layers. This method is slightly adapted from the original source code that can be found here: + https://github.com/neelsjain/NEFTune Simply add it to your model as follows: + ```python + model = ... + model.embed_tokens.neftune_noise_alpha = 0.1 + model.embed_tokens.register_forward_hook(neftune_post_forward_hook) + ``` + Args: + module (`torch.nn.Module`): + The embedding module where the hook is attached. Note that you need to set `module.neftune_noise_alpha` to + the desired noise alpha value. + input (`torch.Tensor`): + The input tensor to the model. + output (`torch.Tensor`): + The output tensor of the model (i.e. the embeddings). + """ + if module.training: + dims = torch.tensor(output.size(1) * output.size(2)) + mag_norm = module.neftune_noise_alpha / torch.sqrt(dims) + output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm) + return output + + +class EvalPrediction: + """ + Evaluation output (always contains labels), to be used to compute metrics. + + Parameters: + predictions (`np.ndarray`): Predictions of the model. + label_ids (`np.ndarray`): Targets to be matched. + inputs (`np.ndarray`, *optional*): + """ + + def __init__( + self, + predictions: Union[np.ndarray, Tuple[np.ndarray]], + label_ids: Union[np.ndarray, Tuple[np.ndarray]], + inputs: Optional[Union[np.ndarray, Tuple[np.ndarray]]] = None, + ): + self.predictions = predictions + self.label_ids = label_ids + self.inputs = inputs + + def __iter__(self): + if self.inputs is not None: + return iter((self.predictions, self.label_ids, self.inputs)) + else: + return iter((self.predictions, self.label_ids)) + + def __getitem__(self, idx): + if idx < 0 or idx > 2: + raise IndexError("tuple index out of range") + if idx == 2 and self.inputs is None: + raise IndexError("tuple index out of range") + if idx == 0: + return self.predictions + elif idx == 1: + return self.label_ids + elif idx == 2: + return self.inputs + + +class EvalLoopOutput(NamedTuple): + predictions: Union[np.ndarray, Tuple[np.ndarray]] + label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]] + metrics: Optional[Dict[str, float]] + num_samples: Optional[int] + + +class PredictionOutput(NamedTuple): + predictions: Union[np.ndarray, Tuple[np.ndarray]] + label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]] + metrics: Optional[Dict[str, float]] + + +class TrainOutput(NamedTuple): + global_step: int + training_loss: float + metrics: Dict[str, float] + + +PREFIX_CHECKPOINT_DIR = "checkpoint" +_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$") + + +def get_last_checkpoint(folder): + content = os.listdir(folder) + checkpoints = [ + path + for path in content + if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path)) + ] + if len(checkpoints) == 0: + return + return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) + + +class IntervalStrategy(ExplicitEnum): + NO = "no" 
+ STEPS = "steps" + EPOCH = "epoch" + + +class EvaluationStrategy(ExplicitEnum): + NO = "no" + STEPS = "steps" + EPOCH = "epoch" + + +class HubStrategy(ExplicitEnum): + END = "end" + EVERY_SAVE = "every_save" + CHECKPOINT = "checkpoint" + ALL_CHECKPOINTS = "all_checkpoints" + + +class BestRun(NamedTuple): + """ + The best run found by a hyperparameter search (see [`~Trainer.hyperparameter_search`]). + + Parameters: + run_id (`str`): + The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending + with run-{run_id}). + objective (`float`): + The objective that was obtained for this run. + hyperparameters (`Dict[str, Any]`): + The hyperparameters picked to get this run. + run_summary (`Optional[Any]`): + A summary of tuning experiments. `ray.tune.ExperimentAnalysis` object for Ray backend. + """ + + run_id: str + objective: Union[float, List[float]] + hyperparameters: Dict[str, Any] + run_summary: Optional[Any] = None + + +def default_compute_objective(metrics: Dict[str, float]) -> float: + """ + The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no + metrics are provided to the [`Trainer`], the sum of all metrics otherwise. + + Args: + metrics (`Dict[str, float]`): The metrics returned by the evaluate method. + + Return: + `float`: The objective to minimize or maximize + """ + metrics = copy.deepcopy(metrics) + loss = metrics.pop("eval_loss", None) + _ = metrics.pop("epoch", None) + # Remove speed metrics + speed_metrics = [ + m + for m in metrics.keys() + if m.endswith("_runtime") or m.endswith("_per_second") or m.endswith("_compilation_time") + ] + for sm in speed_metrics: + _ = metrics.pop(sm, None) + return loss if len(metrics) == 0 else sum(metrics.values()) + + +def default_hp_space_optuna(trial) -> Dict[str, float]: + from .integrations import is_optuna_available + + assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`" + return { + "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True), + "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5), + "seed": trial.suggest_int("seed", 1, 40), + "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]), + } + + +def default_hp_space_ray(trial) -> Dict[str, float]: + from .integrations import is_ray_tune_available + + assert is_ray_tune_available(), "This function needs ray installed: `pip install ray[tune]`" + from ray import tune + + return { + "learning_rate": tune.loguniform(1e-6, 1e-4), + "num_train_epochs": tune.choice(list(range(1, 6))), + "seed": tune.uniform(1, 40), + "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]), + } + + +def default_hp_space_sigopt(trial): + return [ + {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformamtion": "log"}, + {"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"}, + {"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"}, + { + "categorical_values": ["4", "8", "16", "32", "64"], + "name": "per_device_train_batch_size", + "type": "categorical", + }, + ] + + +def default_hp_space_wandb(trial) -> Dict[str, float]: + from .integrations import is_wandb_available + + if not is_wandb_available(): + raise ImportError("This function needs wandb installed: `pip install wandb`") + + return { + "method": "random", + "metric": {"name": "objective", "goal": "minimize"}, + "parameters": { + "learning_rate": 
{"distribution": "uniform", "min": 1e-6, "max": 1e-4}, + "num_train_epochs": {"distribution": "int_uniform", "min": 1, "max": 6}, + "seed": {"distribution": "int_uniform", "min": 1, "max": 40}, + "per_device_train_batch_size": {"values": [4, 8, 16, 32, 64]}, + }, + } + + +class HPSearchBackend(ExplicitEnum): + OPTUNA = "optuna" + RAY = "ray" + SIGOPT = "sigopt" + WANDB = "wandb" + + +def is_main_process(local_rank): + """ + Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on + `local_rank`. + """ + if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + return xm.get_ordinal() == 0 + return local_rank in [-1, 0] + + +def total_processes_number(local_rank): + """ + Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs. + """ + if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + return xm.xrt_world_size() + elif local_rank != -1 and is_torch_available(): + import torch + + return torch.distributed.get_world_size() + return 1 + + +def speed_metrics(split, start_time, num_samples=None, num_steps=None, num_tokens=None): + """ + Measure and return speed performance metrics. + + This function requires a time snapshot `start_time` before the operation to be measured starts and this function + should be run immediately after the operation to be measured has completed. + + Args: + - split: name to prefix metric (like train, eval, test...) + - start_time: operation start time + - num_samples: number of samples processed + - num_steps: number of steps processed + - num_tokens: number of tokens processed + """ + runtime = time.time() - start_time + result = {f"{split}_runtime": round(runtime, 4)} + if runtime == 0: + return result + if num_samples is not None: + samples_per_second = num_samples / runtime + result[f"{split}_samples_per_second"] = round(samples_per_second, 3) + if num_steps is not None: + steps_per_second = num_steps / runtime + result[f"{split}_steps_per_second"] = round(steps_per_second, 3) + if num_tokens is not None: + tokens_per_second = num_tokens / runtime + result[f"{split}_tokens_per_second"] = round(tokens_per_second, 3) + return result + + +class SchedulerType(ExplicitEnum): + LINEAR = "linear" + COSINE = "cosine" + COSINE_WITH_RESTARTS = "cosine_with_restarts" + POLYNOMIAL = "polynomial" + CONSTANT = "constant" + CONSTANT_WITH_WARMUP = "constant_with_warmup" + INVERSE_SQRT = "inverse_sqrt" + REDUCE_ON_PLATEAU = "reduce_lr_on_plateau" + + +class TrainerMemoryTracker: + """ + A helper class that tracks cpu and gpu memory. + + This class will silently skip unless `psutil` is available. Install with `pip install psutil`. + + When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage. + + Example : + + ```python + self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics) + self._memory_tracker.start() + # code ... + metrics = {"train_runtime": 10.5} + self._memory_tracker.stop_and_update_metrics(metrics) + ``` + + At the moment GPU tracking is only for `pytorch`, but can be extended to support `tensorflow`. + + To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`]. 
+ """ + + # map trainer methods to metrics prefix + stages = { + "__init__": "init", + "train": "train", + "_inner_training_loop": "train", + "evaluate": "eval", + "predict": "test", + } + + def __init__(self, skip_memory_metrics=False): + self.skip_memory_metrics = skip_memory_metrics + + if not is_psutil_available(): + # soft dependency on psutil + self.skip_memory_metrics = True + + if self.skip_memory_metrics: + return + + import psutil # noqa + + if is_torch_cuda_available(): + import torch + + self.torch = torch + self.gpu = {} + elif is_torch_mps_available(): + import torch + + self.torch = torch + self.gpu = {} + elif is_torch_xpu_available(): + import torch + + self.torch = torch + self.gpu = {} + elif is_torch_npu_available(): + import torch + + self.torch = torch + self.gpu = {} + else: + self.torch = None + + self.process = psutil.Process() + + self.cur_stage = None + self.cpu = {} + self.init_reported = False + + def derive_stage(self): + """derives the stage/caller name automatically""" + caller = inspect.currentframe().f_back.f_back.f_code.co_name + if caller in self.stages: + return self.stages[caller] + else: + raise ValueError( + f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}" + ) + + def cpu_mem_used(self): + """get resident set size memory for the current process""" + return self.process.memory_info().rss + + def peak_monitor_func(self): + self.cpu_mem_used_peak = -1 + + while True: + self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak) + + # can't sleep or will not catch the peak right (this comment is here on purpose) + # time.sleep(0.001) # 1msec + + if not self.peak_monitoring: + break + + def start(self): + """start tracking for the caller's stage""" + if self.skip_memory_metrics: + return + + stage = self.derive_stage() + # deal with nested calls of eval during train - simply ignore those + if self.cur_stage is not None and self.cur_stage != stage: + return + + self.cur_stage = stage + + gc.collect() + + if self.torch is not None: + if torch.cuda.is_available(): + self.torch.cuda.reset_peak_memory_stats() + self.torch.cuda.empty_cache() + elif is_torch_xpu_available(): + self.torch.xpu.reset_peak_memory_stats() + self.torch.xpu.empty_cache() + elif is_torch_npu_available(): + self.torch.npu.reset_peak_memory_stats() + self.torch.npu.empty_cache() + elif is_torch_mps_available(): + self.torch.mps.empty_cache() + + # gpu + if self.torch is not None: + if torch.cuda.is_available(): + self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated() + elif is_torch_xpu_available(): + self.gpu_mem_used_at_start = self.torch.xpu.memory_allocated() + elif is_torch_npu_available(): + self.gpu_mem_used_at_start = self.torch.npu.memory_allocated() + elif is_torch_mps_available(): + self.gpu_mem_used_at_start = self.torch.mps.current_allocated_memory() + + # cpu + self.cpu_mem_used_at_start = self.cpu_mem_used() + + self.peak_monitoring = True + peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) + peak_monitor_thread.daemon = True + peak_monitor_thread.start() + + def stop(self, stage): + """stop tracking for the passed stage""" + + # deal with nested calls of eval during train - simply ignore those + if self.cur_stage is not None and self.cur_stage != stage: + return + + # this sends a signal to peak_monitor_func to complete its loop + self.peak_monitoring = False + + # first ensure all objects get collected and their memory is freed + gc.collect() + + if self.torch is not None: + if 
torch.cuda.is_available(): + self.torch.cuda.empty_cache() + elif is_torch_xpu_available(): + self.torch.xpu.empty_cache() + elif is_torch_npu_available(): + self.torch.npu.empty_cache() + elif is_torch_mps_available(): + self.torch.mps.empty_cache() + + # concepts: + # - alloc_delta: the difference of allocated memory between the end and the start + # - peaked_delta: the difference between the peak memory and the current memory + # in order to know how much memory the measured code consumed one needs to sum these two + + # gpu + if self.torch is not None: + if torch.cuda.is_available(): + self.gpu_mem_used_now = self.torch.cuda.memory_allocated() + self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated() + elif is_torch_xpu_available(): + self.gpu_mem_used_now = self.torch.xpu.memory_allocated() + self.gpu_mem_used_peak = self.torch.xpu.max_memory_allocated() + elif is_torch_npu_available(): + self.gpu_mem_used_now = self.torch.npu.memory_allocated() + self.gpu_mem_used_peak = self.torch.npu.max_memory_allocated() + elif is_torch_mps_available(): + self.gpu_mem_used_now = self.torch.mps.current_allocated_memory() + # self.torch.mps.max_memory_allocated() does not exist yet + self.gpu_mem_used_peak = None + + else: + raise ValueError("No available GPU device found!") + + self.gpu[self.cur_stage] = { + "begin": self.gpu_mem_used_at_start, + "end": self.gpu_mem_used_now, + "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start), + } + if self.gpu_mem_used_peak is not None: + self.gpu[self.cur_stage]["peaked"] = max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now) + else: + self.gpu[self.cur_stage]["peaked"] = "Not available" + + # cpu + self.cpu_mem_used_now = self.cpu_mem_used() + self.cpu[self.cur_stage] = { + "begin": self.cpu_mem_used_at_start, + "end": self.cpu_mem_used_now, + "alloc": (self.cpu_mem_used_now - self.cpu_mem_used_at_start), + "peaked": max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now), + } + + # reset - cycle finished + self.cur_stage = None + + def update_metrics(self, stage, metrics): + """updates the metrics""" + if self.skip_memory_metrics: + return + + # deal with nested calls of eval during train - simply ignore those + if self.cur_stage is not None and self.cur_stage != stage: + return + + # since we don't have a way to return init metrics, we push them into the first of train/val/predict + stages = [stage] + if not self.init_reported: + stages.insert(0, "init") + self.init_reported = True + + for stage in stages: + for t in ["alloc", "peaked"]: + if stage in self.cpu and t in self.cpu[stage]: + metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t] + if self.torch is not None and stage in self.gpu and t in self.gpu[stage]: + metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t] + # if we need additional debug info, enable the following + # for t in ["begin", "end"]: + # if stage in self.cpu and t in self.cpu[stage]: + # metrics[f"{stage}_mem_cpu_{t}"] = self.cpu[stage][t] + # if self.torch is not None and stage in self.gpu and t in self.gpu[stage]: + # metrics[f"{stage}_mem_gpu_{t}"] = self.gpu[stage][t] + + # since memory can be allocated before init, and it might be difficult to track overall + # memory usage, in particular for GPU, let's report memory usage at the point init was called + if stages[0] == "init": + metrics["before_init_mem_cpu"] = self.cpu["init"]["begin"] + if self.torch is not None: + metrics["before_init_mem_gpu"] = self.gpu["init"]["begin"] + # if we also wanted to report any additional memory allocations in 
between init and + # whatever the next stage was we could also report this: + # if self.cpu["init"]["end"] != self.cpu[stage]["begin"]: + # metrics[f"after_init_mem_cpu_delta"] = self.cpu[stage]["begin"] - self.cpu["init"]["end"] + # if self.torch is not None and self.gpu["init"]["end"] != self.gpu[stage]["begin"]: + # metrics[f"after_init_mem_gpu_delta"] = self.gpu[stage]["begin"] - self.gpu["init"]["end"] + + def stop_and_update_metrics(self, metrics=None): + """combine stop and metrics update in one call for simpler code""" + if self.skip_memory_metrics: + return + + stage = self.derive_stage() + self.stop(stage) + + # init doesn't have metrics to update so we just save that data for later stages to retrieve + if metrics is not None: + self.update_metrics(stage, metrics) + + +def has_length(dataset): + """ + Checks if the dataset implements __len__() and it doesn't raise an error + """ + try: + return len(dataset) is not None + except TypeError: + # TypeError: len() of unsized object + return False + + +def denumpify_detensorize(metrics): + """ + Recursively calls `.item()` on the element of the dictionary passed + """ + if isinstance(metrics, (list, tuple)): + return type(metrics)(denumpify_detensorize(m) for m in metrics) + elif isinstance(metrics, dict): + return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()}) + elif isinstance(metrics, np.generic): + return metrics.item() + elif is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1: + return metrics.item() + return metrics + + +def number_of_arguments(func): + """ + Return the number of arguments of the passed function, even if it's a partial function. + """ + if isinstance(func, functools.partial): + total_args = len(inspect.signature(func.func).parameters) + return total_args - len(func.args) - len(func.keywords) + return len(inspect.signature(func).parameters) + + +def find_executable_batch_size( + function: callable = None, starting_batch_size: int = 128, auto_find_batch_size: bool = False +): + """ + Args: + A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or + CUDNN, the batch size is cut in half and passed to `function`. `function` must take in a `batch_size` parameter as + its first argument. 
+ function (`callable`, *optional*) + A function to wrap + starting_batch_size (`int`, *optional*) + The batch size to try and fit into memory + auto_find_batch_size (`bool`, *optional*) + If False, will just execute `function` + """ + if function is None: + return functools.partial( + find_executable_batch_size, + starting_batch_size=starting_batch_size, + auto_find_batch_size=auto_find_batch_size, + ) + + if auto_find_batch_size: + requires_backends(find_executable_batch_size, "accelerate") + from accelerate.utils import find_executable_batch_size as accelerate_find_executable_batch_size + + return accelerate_find_executable_batch_size(function=function, starting_batch_size=starting_batch_size) + + return functools.partial(function, batch_size=starting_batch_size) + + +class FSDPOption(ExplicitEnum): + FULL_SHARD = "full_shard" + SHARD_GRAD_OP = "shard_grad_op" + NO_SHARD = "no_shard" + HYBRID_SHARD = "hybrid_shard" + HYBRID_SHARD_ZERO2 = "hybrid_shard_zero2" + OFFLOAD = "offload" + AUTO_WRAP = "auto_wrap" + + +class RemoveColumnsCollator: + """Wrap the data collator to remove unused columns before they are passed to the collator.""" + + def __init__( + self, + data_collator, + signature_columns, + logger=None, + model_name: Optional[str] = None, + description: Optional[str] = None, + ): + self.data_collator = data_collator + self.signature_columns = signature_columns + self.logger = logger + self.description = description + self.model_name = model_name + self.message_logged = False + + def _remove_columns(self, feature: dict) -> dict: + if not isinstance(feature, dict): + return feature + if not self.message_logged and self.logger and self.model_name: + ignored_columns = list(set(feature.keys()) - set(self.signature_columns)) + if len(ignored_columns) > 0: + dset_description = "" if self.description is None else f"in the {self.description} set" + self.logger.info( + f"The following columns {dset_description} don't have a corresponding argument in " + f"`{self.model_name}.forward` and have been ignored: {', '.join(ignored_columns)}." + f" If {', '.join(ignored_columns)} are not expected by `{self.model_name}.forward`, " + " you can safely ignore this message." + ) + self.message_logged = True + return {k: v for k, v in feature.items() if k in self.signature_columns} + + def __call__(self, features: List[dict]): + features = [self._remove_columns(feature) for feature in features] + return self.data_collator(features) + + +def check_target_module_exists(optim_target_modules, key: str, return_is_regex: bool = False): + """A helper method to check if the passed module's key name matches any of the target modules in the optim_target_modules. + + Args: + optim_target_modules (`Union[str, List[str]]`): + A list of strings to try to match. Can be also a full string. + key (`str`): + A key to search any matches in optim_target_modules + return_is_regex (`bool`): + If set to `True`, the method will return whether the passed `optim_target_modules` + is a regex or not. + + Returns: + `bool` : True of match object if key matches any target modules from config, False or + None if no match found + `bool` : If the matched target module is a regex to silence out the warnings in Trainer + for extra modules being found (only if `target_module_found=True` for an array of regex). 
+ """ + target_module_found = False + is_regex = False + + if isinstance(optim_target_modules, str): + target_module_found = bool(re.fullmatch(optim_target_modules, key)) + is_regex = True if not optim_target_modules == key else False + elif key in optim_target_modules: # from here, target_module_found must be a list of str + # this module is specified directly in target_modules + target_module_found = True + elif any(target_key in key for target_key in optim_target_modules): + target_module_found = True + elif any(bool(re.fullmatch(optim_target_module, key)) for optim_target_module in optim_target_modules): + target_module_found = True + is_regex = True + + if return_is_regex: + return target_module_found, is_regex + + return target_module_found diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/training_args_seq2seq.py b/env-llmeval/lib/python3.10/site-packages/transformers/training_args_seq2seq.py new file mode 100644 index 0000000000000000000000000000000000000000..88ae662570abefe5c6908d1b51510932264a6067 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/transformers/training_args_seq2seq.py @@ -0,0 +1,96 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from dataclasses import dataclass, field +from pathlib import Path +from typing import Optional, Union + +from .generation.configuration_utils import GenerationConfig +from .training_args import TrainingArguments +from .utils import add_start_docstrings + + +logger = logging.getLogger(__name__) + + +@dataclass +@add_start_docstrings(TrainingArguments.__doc__) +class Seq2SeqTrainingArguments(TrainingArguments): + """ + Args: + sortish_sampler (`bool`, *optional*, defaults to `False`): + Whether to use a *sortish sampler* or not. Only possible if the underlying datasets are *Seq2SeqDataset* + for now but will become generally available in the near future. + + It sorts the inputs according to lengths in order to minimize the padding size, with a bit of randomness + for the training set. + predict_with_generate (`bool`, *optional*, defaults to `False`): + Whether to use generate to calculate generative metrics (ROUGE, BLEU). + generation_max_length (`int`, *optional*): + The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default to the + `max_length` value of the model configuration. + generation_num_beams (`int`, *optional*): + The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default to the + `num_beams` value of the model configuration. + generation_config (`str` or `Path` or [`~generation.GenerationConfig`], *optional*): + Allows to load a [`~generation.GenerationConfig`] from the `from_pretrained` method. This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. 
+            - a path to a *directory* containing a configuration file saved using the
+              [`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
+            - a [`~generation.GenerationConfig`] object.
+    """
+
+    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
+    predict_with_generate: bool = field(
+        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
+    )
+    generation_max_length: Optional[int] = field(
+        default=None,
+        metadata={
+            "help": (
+                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
+                "to the `max_length` value of the model configuration."
+            )
+        },
+    )
+    generation_num_beams: Optional[int] = field(
+        default=None,
+        metadata={
+            "help": (
+                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
+                "to the `num_beams` value of the model configuration."
+            )
+        },
+    )
+    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
+        default=None,
+        metadata={
+            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
+        },
+    )
+
+    def to_dict(self):
+        """
+        Serializes this instance while replacing `Enum` members by their values and `GenerationConfig` instances by
+        dictionaries (for JSON serialization support). It obfuscates the token values by removing their value.
+        """
+        # filter out fields that are defined as field(init=False)
+        d = super().to_dict()
+        for k, v in d.items():
+            if isinstance(v, GenerationConfig):
+                d[k] = v.to_dict()
+        return d
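
To make the interaction between `Seq2SeqTrainingArguments` and the generation-aware `prediction_step` above concrete, here is a minimal usage sketch. It is illustrative only: the checkpoint name, output directory, and generation settings are assumptions chosen for the example, not values taken from the files added in this diff.

```python
# Minimal sketch (hypothetical values): evaluating a seq2seq model with
# predict_with_generate=True, so Seq2SeqTrainer.prediction_step() calls
# model.generate() and pads generated tokens / labels to a common length
# via _pad_tensors_to_max_len() before metrics are computed.
from transformers import (
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
)

model_name = "t5-small"  # assumed checkpoint; any seq2seq model works
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

args = Seq2SeqTrainingArguments(
    output_dir="out",                # placeholder path
    predict_with_generate=True,      # route evaluation through model.generate()
    generation_max_length=64,        # upper bound on generated length during eval
    generation_num_beams=4,          # beam search width during eval
    per_device_eval_batch_size=8,
)

trainer = Seq2SeqTrainer(
    model=model,
    args=args,
    tokenizer=tokenizer,
    # train_dataset=..., eval_dataset=..., compute_metrics=...  (omitted here)
)

# trainer.evaluate() would now produce generated token sequences as predictions
# instead of loss-only outputs.
```

Setting `predict_with_generate=True` is what makes evaluation go through `model.generate()`; the `generation_max_length` and `generation_num_beams` values are passed along as generation kwargs, and `prediction_step` then pads both the generated tokens and the labels up to the configured maximum length so they can be stacked and compared batch-wise.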