diff --git a/llmeval-env/lib/python3.10/site-packages/absl/__init__.py b/llmeval-env/lib/python3.10/site-packages/absl/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a3bd1cd51810385ca0e5e9fed3fb9a804febf27e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/absl/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/llmeval-env/lib/python3.10/site-packages/absl/app.py b/llmeval-env/lib/python3.10/site-packages/absl/app.py new file mode 100644 index 0000000000000000000000000000000000000000..d12397b31a9093dd0f9a8622c72a16d31fcb4fa9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/absl/app.py @@ -0,0 +1,480 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Generic entry point for Abseil Python applications. + +To use this module, define a ``main`` function with a single ``argv`` argument +and call ``app.run(main)``. For example:: + + def main(argv): + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + if __name__ == '__main__': + app.run(main) +""" + +import collections +import errno +import os +import pdb +import sys +import textwrap +import traceback + +from absl import command_name +from absl import flags +from absl import logging + +try: + import faulthandler +except ImportError: + faulthandler = None + +FLAGS = flags.FLAGS + +flags.DEFINE_boolean('run_with_pdb', False, 'Set to true for PDB debug mode') +flags.DEFINE_boolean('pdb_post_mortem', False, + 'Set to true to handle uncaught exceptions with PDB ' + 'post mortem.') +flags.DEFINE_alias('pdb', 'pdb_post_mortem') +flags.DEFINE_boolean('run_with_profiling', False, + 'Set to true for profiling the script. ' + 'Execution will be slower, and the output format might ' + 'change over time.') +flags.DEFINE_string('profile_file', None, + 'Dump profile information to a file (for python -m ' + 'pstats). Implies --run_with_profiling.') +flags.DEFINE_boolean('use_cprofile_for_profiling', True, + 'Use cProfile instead of the profile module for ' + 'profiling. This has no effect unless ' + '--run_with_profiling is set.') +flags.DEFINE_boolean('only_check_args', False, + 'Set to true to validate args and exit.', + allow_hide_cpp=True) + + +# If main() exits via an abnormal exception, call into these +# handlers before exiting. 
+EXCEPTION_HANDLERS = [] + + +class Error(Exception): + pass + + +class UsageError(Error): + """Exception raised when the arguments supplied by the user are invalid. + + Raise this when the arguments supplied are invalid from the point of + view of the application. For example when two mutually exclusive + flags have been supplied or when there are not enough non-flag + arguments. It is distinct from flags.Error which covers the lower + level of parsing and validating individual flags. + """ + + def __init__(self, message, exitcode=1): + super(UsageError, self).__init__(message) + self.exitcode = exitcode + + +class HelpFlag(flags.BooleanFlag): + """Special boolean flag that displays usage and raises SystemExit.""" + NAME = 'help' + SHORT_NAME = '?' + + def __init__(self): + super(HelpFlag, self).__init__( + self.NAME, False, 'show this help', + short_name=self.SHORT_NAME, allow_hide_cpp=True) + + def parse(self, arg): + if self._parse(arg): + usage(shorthelp=True, writeto_stdout=True) + # Advertise --helpfull on stdout, since usage() was on stdout. + print() + print('Try --helpfull to get a list of all flags.') + sys.exit(1) + + +class HelpshortFlag(HelpFlag): + """--helpshort is an alias for --help.""" + NAME = 'helpshort' + SHORT_NAME = None + + +class HelpfullFlag(flags.BooleanFlag): + """Display help for flags in the main module and all dependent modules.""" + + def __init__(self): + super(HelpfullFlag, self).__init__( + 'helpfull', False, 'show full help', allow_hide_cpp=True) + + def parse(self, arg): + if self._parse(arg): + usage(writeto_stdout=True) + sys.exit(1) + + +class HelpXMLFlag(flags.BooleanFlag): + """Similar to HelpfullFlag, but generates output in XML format.""" + + def __init__(self): + super(HelpXMLFlag, self).__init__( + 'helpxml', False, 'like --helpfull, but generates XML output', + allow_hide_cpp=True) + + def parse(self, arg): + if self._parse(arg): + flags.FLAGS.write_help_in_xml_format(sys.stdout) + sys.exit(1) + + +def parse_flags_with_usage(args): + """Tries to parse the flags, print usage, and exit if unparsable. + + Args: + args: [str], a non-empty list of the command line arguments including + program name. + + Returns: + [str], a non-empty list of remaining command line arguments after parsing + flags, including program name. + """ + try: + return FLAGS(args) + except flags.Error as error: + message = str(error) + if '\n' in message: + final_message = 'FATAL Flags parsing error:\n%s\n' % textwrap.indent( + message, ' ') + else: + final_message = 'FATAL Flags parsing error: %s\n' % message + sys.stderr.write(final_message) + sys.stderr.write('Pass --helpshort or --helpfull to see help on flags.\n') + sys.exit(1) + + +_define_help_flags_called = False + + +def define_help_flags(): + """Registers help flags. Idempotent.""" + # Use a global to ensure idempotence. + global _define_help_flags_called + + if not _define_help_flags_called: + flags.DEFINE_flag(HelpFlag()) + flags.DEFINE_flag(HelpshortFlag()) # alias for --help + flags.DEFINE_flag(HelpfullFlag()) + flags.DEFINE_flag(HelpXMLFlag()) + _define_help_flags_called = True + + +def _register_and_parse_flags_with_usage( + argv=None, + flags_parser=parse_flags_with_usage, +): + """Registers help flags, parses arguments and shows usage if appropriate. + + This also calls sys.exit(0) if flag --only_check_args is True. + + Args: + argv: [str], a non-empty list of the command line arguments including + program name, sys.argv is used if None. 
+ flags_parser: Callable[[List[Text]], Any], the function used to parse flags. + The return value of this function is passed to `main` untouched. + It must guarantee FLAGS is parsed after this function is called. + + Returns: + The return value of `flags_parser`. When using the default `flags_parser`, + it returns the following: + [str], a non-empty list of remaining command line arguments after parsing + flags, including program name. + + Raises: + Error: Raised when flags_parser is called, but FLAGS is not parsed. + SystemError: Raised when it's called more than once. + """ + if _register_and_parse_flags_with_usage.done: + raise SystemError('Flag registration can be done only once.') + + define_help_flags() + + original_argv = sys.argv if argv is None else argv + args_to_main = flags_parser(original_argv) + if not FLAGS.is_parsed(): + raise Error('FLAGS must be parsed after flags_parser is called.') + + # Exit when told so. + if FLAGS.only_check_args: + sys.exit(0) + # Immediately after flags are parsed, bump verbosity to INFO if the flag has + # not been set. + if FLAGS['verbosity'].using_default_value: + FLAGS.verbosity = 0 + _register_and_parse_flags_with_usage.done = True + + return args_to_main + +_register_and_parse_flags_with_usage.done = False + + +def _run_main(main, argv): + """Calls main, optionally with pdb or profiler.""" + if FLAGS.run_with_pdb: + sys.exit(pdb.runcall(main, argv)) + elif FLAGS.run_with_profiling or FLAGS.profile_file: + # Avoid import overhead since most apps (including performance-sensitive + # ones) won't be run with profiling. + # pylint: disable=g-import-not-at-top + import atexit + if FLAGS.use_cprofile_for_profiling: + import cProfile as profile + else: + import profile + profiler = profile.Profile() + if FLAGS.profile_file: + atexit.register(profiler.dump_stats, FLAGS.profile_file) + else: + atexit.register(profiler.print_stats) + sys.exit(profiler.runcall(main, argv)) + else: + sys.exit(main(argv)) + + +def _call_exception_handlers(exception): + """Calls any installed exception handlers.""" + for handler in EXCEPTION_HANDLERS: + try: + if handler.wants(exception): + handler.handle(exception) + except: # pylint: disable=bare-except + try: + # We don't want to stop for exceptions in the exception handlers but + # we shouldn't hide them either. + logging.error(traceback.format_exc()) + except: # pylint: disable=bare-except + # In case even the logging statement fails, ignore. + pass + + +def run( + main, + argv=None, + flags_parser=parse_flags_with_usage, +): + """Begins executing the program. + + Args: + main: The main function to execute. It takes an single argument "argv", + which is a list of command line arguments with parsed flags removed. + The return value is passed to `sys.exit`, and so for example + a return value of 0 or None results in a successful termination, whereas + a return value of 1 results in abnormal termination. + For more details, see https://docs.python.org/3/library/sys#sys.exit + argv: A non-empty list of the command line arguments including program name, + sys.argv is used if None. + flags_parser: Callable[[List[Text]], Any], the function used to parse flags. + The return value of this function is passed to `main` untouched. + It must guarantee FLAGS is parsed after this function is called. + Should be passed as a keyword-only arg which will become mandatory in a + future release. + - Parses command line flags with the flag module. + - If there are any errors, prints usage(). + - Calls main() with the remaining arguments. 
+ - If main() raises a UsageError, prints usage and the error message. + """ + try: + args = _run_init( + sys.argv if argv is None else argv, + flags_parser, + ) + while _init_callbacks: + callback = _init_callbacks.popleft() + callback() + try: + _run_main(main, args) + except UsageError as error: + usage(shorthelp=True, detailed_error=error, exitcode=error.exitcode) + except: + exc = sys.exc_info()[1] + # Don't try to post-mortem debug successful SystemExits, since those + # mean there wasn't actually an error. In particular, the test framework + # raises SystemExit(False) even if all tests passed. + if isinstance(exc, SystemExit) and not exc.code: + raise + + # Check the tty so that we don't hang waiting for input in an + # non-interactive scenario. + if FLAGS.pdb_post_mortem and sys.stdout.isatty(): + traceback.print_exc() + print() + print(' *** Entering post-mortem debugging ***') + print() + pdb.post_mortem() + raise + except Exception as e: + _call_exception_handlers(e) + raise + +# Callbacks which have been deferred until after _run_init has been called. +_init_callbacks = collections.deque() + + +def call_after_init(callback): + """Calls the given callback only once ABSL has finished initialization. + + If ABSL has already finished initialization when ``call_after_init`` is + called then the callback is executed immediately, otherwise `callback` is + stored to be executed after ``app.run`` has finished initializing (aka. just + before the main function is called). + + If called after ``app.run``, this is equivalent to calling ``callback()`` in + the caller thread. If called before ``app.run``, callbacks are run + sequentially (in an undefined order) in the same thread as ``app.run``. + + Args: + callback: a callable to be called once ABSL has finished initialization. + This may be immediate if initialization has already finished. It + takes no arguments and returns nothing. + """ + if _run_init.done: + callback() + else: + _init_callbacks.append(callback) + + +def _run_init( + argv, + flags_parser, +): + """Does one-time initialization and re-parses flags on rerun.""" + if _run_init.done: + return flags_parser(argv) + command_name.make_process_name_useful() + # Set up absl logging handler. + logging.use_absl_handler() + args = _register_and_parse_flags_with_usage( + argv=argv, + flags_parser=flags_parser, + ) + if faulthandler: + try: + faulthandler.enable() + except Exception: # pylint: disable=broad-except + # Some tests verify stderr output very closely, so don't print anything. + # Disabled faulthandler is a low-impact error. + pass + _run_init.done = True + return args + + +_run_init.done = False + + +def usage(shorthelp=False, writeto_stdout=False, detailed_error=None, + exitcode=None): + """Writes __main__'s docstring to stderr with some help text. + + Args: + shorthelp: bool, if True, prints only flags from the main module, + rather than all flags. + writeto_stdout: bool, if True, writes help message to stdout, + rather than to stderr. + detailed_error: str, additional detail about why usage info was presented. + exitcode: optional integer, if set, exits with this status code after + writing help. + """ + if writeto_stdout: + stdfile = sys.stdout + else: + stdfile = sys.stderr + + doc = sys.modules['__main__'].__doc__ + if not doc: + doc = '\nUSAGE: %s [flags]\n' % sys.argv[0] + doc = flags.text_wrap(doc, indent=' ', firstline_indent='') + else: + # Replace all '%s' with sys.argv[0], and all '%%' with '%'. 
+ num_specifiers = doc.count('%') - 2 * doc.count('%%') + try: + doc %= (sys.argv[0],) * num_specifiers + except (OverflowError, TypeError, ValueError): + # Just display the docstring as-is. + pass + if shorthelp: + flag_str = FLAGS.main_module_help() + else: + flag_str = FLAGS.get_help() + try: + stdfile.write(doc) + if flag_str: + stdfile.write('\nflags:\n') + stdfile.write(flag_str) + stdfile.write('\n') + if detailed_error is not None: + stdfile.write('\n%s\n' % detailed_error) + except IOError as e: + # We avoid printing a huge backtrace if we get EPIPE, because + # "foo.par --help | less" is a frequent use case. + if e.errno != errno.EPIPE: + raise + if exitcode is not None: + sys.exit(exitcode) + + +class ExceptionHandler(object): + """Base exception handler from which other may inherit.""" + + def wants(self, exc): + """Returns whether this handler wants to handle the exception or not. + + This base class returns True for all exceptions by default. Override in + subclass if it wants to be more selective. + + Args: + exc: Exception, the current exception. + """ + del exc # Unused. + return True + + def handle(self, exc): + """Do something with the current exception. + + Args: + exc: Exception, the current exception + + This method must be overridden. + """ + raise NotImplementedError() + + +def install_exception_handler(handler): + """Installs an exception handler. + + Args: + handler: ExceptionHandler, the exception handler to install. + + Raises: + TypeError: Raised when the handler was not of the correct type. + + All installed exception handlers will be called if main() exits via + an abnormal exception, i.e. not one of SystemExit, KeyboardInterrupt, + FlagsError or UsageError. + """ + if not isinstance(handler, ExceptionHandler): + raise TypeError('handler of type %s does not inherit from ExceptionHandler' + % type(handler)) + EXCEPTION_HANDLERS.append(handler) diff --git a/llmeval-env/lib/python3.10/site-packages/absl/app.pyi b/llmeval-env/lib/python3.10/site-packages/absl/app.pyi new file mode 100644 index 0000000000000000000000000000000000000000..fe5e44809915f3dbd56b23207781a2219d86f842 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/absl/app.pyi @@ -0,0 +1,99 @@ + +from typing import Any, Callable, Collection, Iterable, List, NoReturn, Optional, Text, TypeVar, Union, overload + +from absl.flags import _flag + + +_MainArgs = TypeVar('_MainArgs') +_Exc = TypeVar('_Exc', bound=Exception) + + +class ExceptionHandler(): + + def wants(self, exc: _Exc) -> bool: + ... + + def handle(self, exc: _Exc): + ... + + +EXCEPTION_HANDLERS: List[ExceptionHandler] = ... + + +class HelpFlag(_flag.BooleanFlag): + def __init__(self): + ... + + +class HelpshortFlag(HelpFlag): + ... + + +class HelpfullFlag(_flag.BooleanFlag): + def __init__(self): + ... + + +class HelpXMLFlag(_flag.BooleanFlag): + def __init__(self): + ... + + +def define_help_flags() -> None: + ... + + +@overload +def usage(shorthelp: Union[bool, int] = ..., + writeto_stdout: Union[bool, int] = ..., + detailed_error: Optional[Any] = ..., + exitcode: None = ...) -> None: + ... + + +@overload +def usage(shorthelp: Union[bool, int] = ..., + writeto_stdout: Union[bool, int] = ..., + detailed_error: Optional[Any] = ..., + exitcode: int = ...) -> NoReturn: + ... + + +def install_exception_handler(handler: ExceptionHandler) -> None: + ... + + +class Error(Exception): + ... + + +class UsageError(Error): + exitcode: int + + +def parse_flags_with_usage(args: List[Text]) -> List[Text]: + ... 
+ + +def call_after_init(callback: Callable[[], Any]) -> None: + ... + + +# Without the flag_parser argument, `main` should require a List[Text]. +@overload +def run( + main: Callable[[List[Text]], Any], + argv: Optional[List[Text]] = ..., + *, +) -> NoReturn: + ... + + +@overload +def run( + main: Callable[[_MainArgs], Any], + argv: Optional[List[Text]] = ..., + *, + flags_parser: Callable[[List[Text]], _MainArgs], +) -> NoReturn: + ... diff --git a/llmeval-env/lib/python3.10/site-packages/absl/command_name.py b/llmeval-env/lib/python3.10/site-packages/absl/command_name.py new file mode 100644 index 0000000000000000000000000000000000000000..9260fee9bd853ba33b2139b3d47b73e59c127f36 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/absl/command_name.py @@ -0,0 +1,63 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A tiny stand alone library to change the kernel process name on Linux.""" + +import os +import sys + +# This library must be kept small and stand alone. It is used by small things +# that require no extension modules. + + +def make_process_name_useful(): + """Sets the process name to something better than 'python' if possible.""" + set_kernel_process_name(os.path.basename(sys.argv[0])) + + +def set_kernel_process_name(name): + """Changes the Kernel's /proc/self/status process name on Linux. + + The kernel name is NOT what will be shown by the ps or top command. + It is a 15 character string stored in the kernel's process table that + is included in the kernel log when a process is OOM killed. + The first 15 bytes of name are used. Non-ASCII unicode is replaced with '?'. + + Does nothing if /proc/self/comm cannot be written or prctl() fails. + + Args: + name: bytes|unicode, the Linux kernel's command name to set. + """ + if not isinstance(name, bytes): + name = name.encode('ascii', 'replace') + try: + # This is preferred to using ctypes to try and call prctl() when possible. + with open('/proc/self/comm', 'wb') as proc_comm: + proc_comm.write(name[:15]) + except EnvironmentError: + try: + import ctypes # pylint: disable=g-import-not-at-top + except ImportError: + return # No ctypes. + try: + libc = ctypes.CDLL('libc.so.6') + except EnvironmentError: + return # No libc.so.6. + pr_set_name = ctypes.c_ulong(15) # linux/prctl.h PR_SET_NAME value. + zero = ctypes.c_ulong(0) + try: + libc.prctl(pr_set_name, name, zero, zero, zero) + # Ignore the prctl return value. Nothing we can do if it errored. + except AttributeError: + return # No prctl. diff --git a/llmeval-env/lib/python3.10/site-packages/absl/logging/__init__.py b/llmeval-env/lib/python3.10/site-packages/absl/logging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..42166cd892f99c34f637dd661576897196a357f4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/absl/logging/__init__.py @@ -0,0 +1,1281 @@ +# Copyright 2017 The Abseil Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Abseil Python logging module implemented on top of standard logging. + +Simple usage:: + + from absl import logging + + logging.info('Interesting Stuff') + logging.info('Interesting Stuff with Arguments: %d', 42) + + logging.set_verbosity(logging.INFO) + logging.log(logging.DEBUG, 'This will *not* be printed') + logging.set_verbosity(logging.DEBUG) + logging.log(logging.DEBUG, 'This will be printed') + + logging.warning('Worrying Stuff') + logging.error('Alarming Stuff') + logging.fatal('AAAAHHHHH!!!!') # Process exits. + +Usage note: Do not pre-format the strings in your program code. +Instead, let the logging module perform argument interpolation. +This saves cycles because strings that don't need to be printed +are never formatted. Note that this module does not attempt to +interpolate arguments when no arguments are given. In other words:: + + logging.info('Interesting Stuff: %s') + +does not raise an exception because logging.info() has only one +argument, the message string. + +"Lazy" evaluation for debugging +------------------------------- + +If you do something like this:: + + logging.debug('Thing: %s', thing.ExpensiveOp()) + +then the ExpensiveOp will be evaluated even if nothing +is printed to the log. To avoid this, use the level_debug() function:: + + if logging.level_debug(): + logging.debug('Thing: %s', thing.ExpensiveOp()) + +Per file level logging is supported by logging.vlog() and +logging.vlog_is_on(). For example:: + + if logging.vlog_is_on(2): + logging.vlog(2, very_expensive_debug_message()) + +Notes on Unicode +---------------- + +The log output is encoded as UTF-8. Don't pass data in other encodings in +bytes() instances -- instead pass unicode string instances when you need to +(for both the format string and arguments). + +Note on critical and fatal: +Standard logging module defines fatal as an alias to critical, but it's not +documented, and it does NOT actually terminate the program. +This module only defines fatal but not critical, and it DOES terminate the +program. + +The differences in behavior are historical and unfortunate. +""" + +import collections +from collections import abc +import getpass +import io +import itertools +import logging +import os +import socket +import struct +import sys +import tempfile +import threading +import time +import timeit +import traceback +import types +import warnings + +from absl import flags +from absl.logging import converter + +# pylint: disable=g-import-not-at-top +try: + from typing import NoReturn +except ImportError: + pass + +# pylint: enable=g-import-not-at-top + +FLAGS = flags.FLAGS + + +# Logging levels. +FATAL = converter.ABSL_FATAL +ERROR = converter.ABSL_ERROR +WARNING = converter.ABSL_WARNING +WARN = converter.ABSL_WARNING # Deprecated name. +INFO = converter.ABSL_INFO +DEBUG = converter.ABSL_DEBUG + +# Regex to match/parse log line prefixes. 
+ABSL_LOGGING_PREFIX_REGEX = (
+    r'^(?P<severity>[IWEF])'
+    r'(?P<month>\d\d)(?P<day>\d\d) '
+    r'(?P<time_hour>\d\d):(?P<time_minute>\d\d):(?P<time_second>\d\d)'
+    r'\.(?P<time_usecond>\d\d\d\d\d\d) +'
+    r'(?P<thread_id>-?\d+) '
+    r'(?P<filename>[a-zA-Z<][\w._<>-]+):(?P<line>\d+)')
+
+
+# Mask to convert integer thread ids to unsigned quantities for logging purposes
+_THREAD_ID_MASK = 2 ** (struct.calcsize('L') * 8) - 1
+
+# Extra property set on the LogRecord created by ABSLLogger when its level is
+# CRITICAL/FATAL.
+_ABSL_LOG_FATAL = '_absl_log_fatal'
+# Extra prefix added to the log message when a non-absl logger logs a
+# CRITICAL/FATAL message.
+_CRITICAL_PREFIX = 'CRITICAL - '
+
+# Used by findCaller to skip callers from */logging/__init__.py.
+_LOGGING_FILE_PREFIX = os.path.join('logging', '__init__.')
+
+# The ABSL logger instance, initialized in _initialize().
+_absl_logger = None
+# The ABSL handler instance, initialized in _initialize().
+_absl_handler = None
+
+
+_CPP_NAME_TO_LEVELS = {
+    'debug': '0',  # Abseil C++ has no DEBUG level, mapping it to INFO here.
+    'info': '0',
+    'warning': '1',
+    'warn': '1',
+    'error': '2',
+    'fatal': '3'
+}
+
+_CPP_LEVEL_TO_NAMES = {
+    '0': 'info',
+    '1': 'warning',
+    '2': 'error',
+    '3': 'fatal',
+}
+
+
+class _VerbosityFlag(flags.Flag):
+  """Flag class for -v/--verbosity."""
+
+  def __init__(self, *args, **kwargs):
+    super(_VerbosityFlag, self).__init__(
+        flags.IntegerParser(),
+        flags.ArgumentSerializer(),
+        *args, **kwargs)
+
+  @property
+  def value(self):
+    return self._value
+
+  @value.setter
+  def value(self, v):
+    self._value = v
+    self._update_logging_levels()
+
+  def _update_logging_levels(self):
+    """Updates absl logging levels to the current verbosity.
+
+    Visibility: module-private
+    """
+    if not _absl_logger:
+      return
+
+    if self._value <= converter.ABSL_DEBUG:
+      standard_verbosity = converter.absl_to_standard(self._value)
+    else:
+      # --verbosity is set to higher than 1 for vlog.
+      standard_verbosity = logging.DEBUG - (self._value - 1)
+
+    # Also update root level when absl_handler is used.
+    if _absl_handler in logging.root.handlers:
+      # Make absl logger inherit from the root logger. absl logger might have
+      # a non-NOTSET value if logging.set_verbosity() is called at import time.
+      _absl_logger.setLevel(logging.NOTSET)
+      logging.root.setLevel(standard_verbosity)
+    else:
+      _absl_logger.setLevel(standard_verbosity)
+
+
+class _LoggerLevelsFlag(flags.Flag):
+  """Flag class for --logger_levels."""
+
+  def __init__(self, *args, **kwargs):
+    super(_LoggerLevelsFlag, self).__init__(
+        _LoggerLevelsParser(),
+        _LoggerLevelsSerializer(),
+        *args, **kwargs)
+
+  @property
+  def value(self):
+    # For lack of an immutable type, be defensive and return a copy.
+    # Modifications to the dict aren't supported and won't have any affect.
+    # While Py3 could use MappingProxyType, that isn't deepcopy friendly, so
+    # just return a copy.
+    return self._value.copy()
+
+  @value.setter
+  def value(self, v):
+    self._value = {} if v is None else v
+    self._update_logger_levels()
+
+  def _update_logger_levels(self):
+    # Visibility: module-private.
+    # This is called by absl.app.run() during initialization.
+    for name, level in self._value.items():
+      logging.getLogger(name).setLevel(level)
+
+
+class _LoggerLevelsParser(flags.ArgumentParser):
+  """Parser for --logger_levels flag."""
+
+  def parse(self, value):
+    if isinstance(value, abc.Mapping):
+      return value
+
+    pairs = [pair.strip() for pair in value.split(',') if pair.strip()]
+
+    # Preserve the order so that serialization is deterministic.
+ levels = collections.OrderedDict() + for name_level in pairs: + name, level = name_level.split(':', 1) + name = name.strip() + level = level.strip() + levels[name] = level + return levels + + +class _LoggerLevelsSerializer(object): + """Serializer for --logger_levels flag.""" + + def serialize(self, value): + if isinstance(value, str): + return value + return ','.join( + '{}:{}'.format(name, level) for name, level in value.items()) + + +class _StderrthresholdFlag(flags.Flag): + """Flag class for --stderrthreshold.""" + + def __init__(self, *args, **kwargs): + super(_StderrthresholdFlag, self).__init__( + flags.ArgumentParser(), + flags.ArgumentSerializer(), + *args, **kwargs) + + @property + def value(self): + return self._value + + @value.setter + def value(self, v): + if v in _CPP_LEVEL_TO_NAMES: + # --stderrthreshold also accepts numeric strings whose values are + # Abseil C++ log levels. + cpp_value = int(v) + v = _CPP_LEVEL_TO_NAMES[v] # Normalize to strings. + elif v.lower() in _CPP_NAME_TO_LEVELS: + v = v.lower() + if v == 'warn': + v = 'warning' # Use 'warning' as the canonical name. + cpp_value = int(_CPP_NAME_TO_LEVELS[v]) + else: + raise ValueError( + '--stderrthreshold must be one of (case-insensitive) ' + "'debug', 'info', 'warning', 'error', 'fatal', " + "or '0', '1', '2', '3', not '%s'" % v) + + self._value = v + + +LOGTOSTDERR = flags.DEFINE_boolean( + 'logtostderr', + False, + 'Should only log to stderr?', + allow_override_cpp=True, +) +ALSOLOGTOSTDERR = flags.DEFINE_boolean( + 'alsologtostderr', + False, + 'also log to stderr?', + allow_override_cpp=True, +) +LOG_DIR = flags.DEFINE_string( + 'log_dir', + os.getenv('TEST_TMPDIR', ''), + 'directory to write logfiles into', + allow_override_cpp=True, +) +VERBOSITY = flags.DEFINE_flag( + _VerbosityFlag( + 'verbosity', + -1, + ( + 'Logging verbosity level. Messages logged at this level or lower' + ' will be included. Set to 1 for debug logging. If the flag was not' + ' set or supplied, the value will be changed from the default of -1' + ' (warning) to 0 (info) after flags are parsed.' + ), + short_name='v', + allow_hide_cpp=True, + ) +) +LOGGER_LEVELS = flags.DEFINE_flag( + _LoggerLevelsFlag( + 'logger_levels', + {}, + ( + 'Specify log level of loggers. The format is a CSV list of ' + '`name:level`. Where `name` is the logger name used with ' + '`logging.getLogger()`, and `level` is a level name (INFO, DEBUG, ' + 'etc). e.g. `myapp.foo:INFO,other.logger:DEBUG`' + ), + ) +) +STDERRTHRESHOLD = flags.DEFINE_flag( + _StderrthresholdFlag( + 'stderrthreshold', + 'fatal', + ( + 'log messages at this level, or more severe, to stderr in ' + 'addition to the logfile. Possible values are ' + "'debug', 'info', 'warning', 'error', and 'fatal'. " + 'Obsoletes --alsologtostderr. Using --alsologtostderr ' + 'cancels the effect of this flag. Please also note that ' + 'this flag is subject to --verbosity and requires logfile ' + 'not be stderr.' + ), + allow_hide_cpp=True, + ) +) +SHOWPREFIXFORINFO = flags.DEFINE_boolean( + 'showprefixforinfo', + True, + ( + 'If False, do not prepend prefix to info messages ' + "when it's logged to stderr, " + '--verbosity is set to INFO level, ' + 'and python logging is used.' + ), +) + + +def get_verbosity(): + """Returns the logging verbosity.""" + return FLAGS['verbosity'].value + + +def set_verbosity(v): + """Sets the logging verbosity. + + Causes all messages of level <= v to be logged, + and all messages of level > v to be silently discarded. 
+ + Args: + v: int|str, the verbosity level as an integer or string. Legal string values + are those that can be coerced to an integer as well as case-insensitive + 'debug', 'info', 'warning', 'error', and 'fatal'. + """ + try: + new_level = int(v) + except ValueError: + new_level = converter.ABSL_NAMES[v.upper()] + FLAGS.verbosity = new_level + + +def set_stderrthreshold(s): + """Sets the stderr threshold to the value passed in. + + Args: + s: str|int, valid strings values are case-insensitive 'debug', + 'info', 'warning', 'error', and 'fatal'; valid integer values are + logging.DEBUG|INFO|WARNING|ERROR|FATAL. + + Raises: + ValueError: Raised when s is an invalid value. + """ + if s in converter.ABSL_LEVELS: + FLAGS.stderrthreshold = converter.ABSL_LEVELS[s] + elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES: + FLAGS.stderrthreshold = s + else: + raise ValueError( + 'set_stderrthreshold only accepts integer absl logging level ' + 'from -3 to 1, or case-insensitive string values ' + "'debug', 'info', 'warning', 'error', and 'fatal'. " + 'But found "{}" ({}).'.format(s, type(s))) + + +def fatal(msg, *args, **kwargs): + # type: (Any, Any, Any) -> NoReturn + """Logs a fatal message.""" + log(FATAL, msg, *args, **kwargs) + + +def error(msg, *args, **kwargs): + """Logs an error message.""" + log(ERROR, msg, *args, **kwargs) + + +def warning(msg, *args, **kwargs): + """Logs a warning message.""" + log(WARNING, msg, *args, **kwargs) + + +def warn(msg, *args, **kwargs): + """Deprecated, use 'warning' instead.""" + warnings.warn("The 'warn' function is deprecated, use 'warning' instead", + DeprecationWarning, 2) + log(WARNING, msg, *args, **kwargs) + + +def info(msg, *args, **kwargs): + """Logs an info message.""" + log(INFO, msg, *args, **kwargs) + + +def debug(msg, *args, **kwargs): + """Logs a debug message.""" + log(DEBUG, msg, *args, **kwargs) + + +def exception(msg, *args, exc_info=True, **kwargs): + """Logs an exception, with traceback and message.""" + error(msg, *args, exc_info=exc_info, **kwargs) + + +# Counter to keep track of number of log entries per token. +_log_counter_per_token = {} + + +def _get_next_log_count_per_token(token): + """Wrapper for _log_counter_per_token. Thread-safe. + + Args: + token: The token for which to look up the count. + + Returns: + The number of times this function has been called with + *token* as an argument (starting at 0). + """ + # Can't use a defaultdict because defaultdict isn't atomic, whereas + # setdefault is. + return next(_log_counter_per_token.setdefault(token, itertools.count())) + + +def log_every_n(level, msg, n, *args): + """Logs ``msg % args`` at level 'level' once per 'n' times. + + Logs the 1st call, (N+1)st call, (2N+1)st call, etc. + Not threadsafe. + + Args: + level: int, the absl logging level at which to log. + msg: str, the message to be logged. + n: int, the number of times this should be called before it is logged. + *args: The args to be substituted into the msg. + """ + count = _get_next_log_count_per_token(get_absl_logger().findCaller()) + log_if(level, msg, not (count % n), *args) + + +# Keeps track of the last log time of the given token. +# Note: must be a dict since set/get is atomic in CPython. +# Note: entries are never released as their number is expected to be low. +_log_timer_per_token = {} + + +def _seconds_have_elapsed(token, num_seconds): + """Tests if 'num_seconds' have passed since 'token' was requested. 
+ + Not strictly thread-safe - may log with the wrong frequency if called + concurrently from multiple threads. Accuracy depends on resolution of + 'timeit.default_timer()'. + + Always returns True on the first call for a given 'token'. + + Args: + token: The token for which to look up the count. + num_seconds: The number of seconds to test for. + + Returns: + Whether it has been >= 'num_seconds' since 'token' was last requested. + """ + now = timeit.default_timer() + then = _log_timer_per_token.get(token, None) + if then is None or (now - then) >= num_seconds: + _log_timer_per_token[token] = now + return True + else: + return False + + +def log_every_n_seconds(level, msg, n_seconds, *args): + """Logs ``msg % args`` at level ``level`` iff ``n_seconds`` elapsed since last call. + + Logs the first call, logs subsequent calls if 'n' seconds have elapsed since + the last logging call from the same call site (file + line). Not thread-safe. + + Args: + level: int, the absl logging level at which to log. + msg: str, the message to be logged. + n_seconds: float or int, seconds which should elapse before logging again. + *args: The args to be substituted into the msg. + """ + should_log = _seconds_have_elapsed(get_absl_logger().findCaller(), n_seconds) + log_if(level, msg, should_log, *args) + + +def log_first_n(level, msg, n, *args): + """Logs ``msg % args`` at level ``level`` only first ``n`` times. + + Not threadsafe. + + Args: + level: int, the absl logging level at which to log. + msg: str, the message to be logged. + n: int, the maximal number of times the message is logged. + *args: The args to be substituted into the msg. + """ + count = _get_next_log_count_per_token(get_absl_logger().findCaller()) + log_if(level, msg, count < n, *args) + + +def log_if(level, msg, condition, *args): + """Logs ``msg % args`` at level ``level`` only if condition is fulfilled.""" + if condition: + log(level, msg, *args) + + +def log(level, msg, *args, **kwargs): + """Logs ``msg % args`` at absl logging level ``level``. + + If no args are given just print msg, ignoring any interpolation specifiers. + + Args: + level: int, the absl logging level at which to log the message + (logging.DEBUG|INFO|WARNING|ERROR|FATAL). While some C++ verbose logging + level constants are also supported, callers should prefer explicit + logging.vlog() calls for such purpose. + + msg: str, the message to be logged. + *args: The args to be substituted into the msg. + **kwargs: May contain exc_info to add exception traceback to message. + """ + if level > converter.ABSL_DEBUG: + # Even though this function supports level that is greater than 1, users + # should use logging.vlog instead for such cases. + # Treat this as vlog, 1 is equivalent to DEBUG. + standard_level = converter.STANDARD_DEBUG - (level - 1) + else: + if level < converter.ABSL_FATAL: + level = converter.ABSL_FATAL + standard_level = converter.absl_to_standard(level) + + # Match standard logging's behavior. Before use_absl_handler() and + # logging is configured, there is no handler attached on _absl_logger nor + # logging.root. So logs go no where. + if not logging.root.handlers: + logging.basicConfig() + + _absl_logger.log(standard_level, msg, *args, **kwargs) + + +def vlog(level, msg, *args, **kwargs): + """Log ``msg % args`` at C++ vlog level ``level``. + + Args: + level: int, the C++ verbose logging level at which to log the message, + e.g. 1, 2, 3, 4... While absl level constants are also supported, + callers should prefer logging.log|debug|info|... 
calls for such purpose. + msg: str, the message to be logged. + *args: The args to be substituted into the msg. + **kwargs: May contain exc_info to add exception traceback to message. + """ + log(level, msg, *args, **kwargs) + + +def vlog_is_on(level): + """Checks if vlog is enabled for the given level in caller's source file. + + Args: + level: int, the C++ verbose logging level at which to log the message, + e.g. 1, 2, 3, 4... While absl level constants are also supported, + callers should prefer level_debug|level_info|... calls for + checking those. + + Returns: + True if logging is turned on for that level. + """ + + if level > converter.ABSL_DEBUG: + # Even though this function supports level that is greater than 1, users + # should use logging.vlog instead for such cases. + # Treat this as vlog, 1 is equivalent to DEBUG. + standard_level = converter.STANDARD_DEBUG - (level - 1) + else: + if level < converter.ABSL_FATAL: + level = converter.ABSL_FATAL + standard_level = converter.absl_to_standard(level) + return _absl_logger.isEnabledFor(standard_level) + + +def flush(): + """Flushes all log files.""" + get_absl_handler().flush() + + +def level_debug(): + """Returns True if debug logging is turned on.""" + return get_verbosity() >= DEBUG + + +def level_info(): + """Returns True if info logging is turned on.""" + return get_verbosity() >= INFO + + +def level_warning(): + """Returns True if warning logging is turned on.""" + return get_verbosity() >= WARNING + + +level_warn = level_warning # Deprecated function. + + +def level_error(): + """Returns True if error logging is turned on.""" + return get_verbosity() >= ERROR + + +def get_log_file_name(level=INFO): + """Returns the name of the log file. + + For Python logging, only one file is used and level is ignored. And it returns + empty string if it logs to stderr/stdout or the log stream has no `name` + attribute. + + Args: + level: int, the absl.logging level. + + Raises: + ValueError: Raised when `level` has an invalid value. + """ + if level not in converter.ABSL_LEVELS: + raise ValueError('Invalid absl.logging level {}'.format(level)) + stream = get_absl_handler().python_handler.stream + if (stream == sys.stderr or stream == sys.stdout or + not hasattr(stream, 'name')): + return '' + else: + return stream.name + + +def find_log_dir_and_names(program_name=None, log_dir=None): + """Computes the directory and filename prefix for log file. + + Args: + program_name: str|None, the filename part of the path to the program that + is running without its extension. e.g: if your program is called + ``usr/bin/foobar.py`` this method should probably be called with + ``program_name='foobar`` However, this is just a convention, you can + pass in any string you want, and it will be used as part of the + log filename. If you don't pass in anything, the default behavior + is as described in the example. In python standard logging mode, + the program_name will be prepended with ``py_`` if it is the + ``program_name`` argument is omitted. + log_dir: str|None, the desired log directory. + + Returns: + (log_dir, file_prefix, symlink_prefix) + + Raises: + FileNotFoundError: raised in Python 3 when it cannot find a log directory. + OSError: raised in Python 2 when it cannot find a log directory. + """ + if not program_name: + # Strip the extension (foobar.par becomes foobar, and + # fubar.py becomes fubar). We do this so that the log + # file names are similar to C++ log file names. 
+ program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0] + + # Prepend py_ to files so that python code gets a unique file, and + # so that C++ libraries do not try to write to the same log files as us. + program_name = 'py_%s' % program_name + + actual_log_dir = find_log_dir(log_dir=log_dir) + + try: + username = getpass.getuser() + except KeyError: + # This can happen, e.g. when running under docker w/o passwd file. + if hasattr(os, 'getuid'): + # Windows doesn't have os.getuid + username = str(os.getuid()) + else: + username = 'unknown' + hostname = socket.gethostname() + file_prefix = '%s.%s.%s.log' % (program_name, hostname, username) + + return actual_log_dir, file_prefix, program_name + + +def find_log_dir(log_dir=None): + """Returns the most suitable directory to put log files into. + + Args: + log_dir: str|None, if specified, the logfile(s) will be created in that + directory. Otherwise if the --log_dir command-line flag is provided, + the logfile will be created in that directory. Otherwise the logfile + will be created in a standard location. + + Raises: + FileNotFoundError: raised in Python 3 when it cannot find a log directory. + OSError: raised in Python 2 when it cannot find a log directory. + """ + # Get a list of possible log dirs (will try to use them in order). + # NOTE: Google's internal implementation has a special handling for Google + # machines, which uses a list of directories. Hence the following uses `dirs` + # instead of a single directory. + if log_dir: + # log_dir was explicitly specified as an arg, so use it and it alone. + dirs = [log_dir] + elif FLAGS['log_dir'].value: + # log_dir flag was provided, so use it and it alone (this mimics the + # behavior of the same flag in logging.cc). + dirs = [FLAGS['log_dir'].value] + else: + dirs = [tempfile.gettempdir()] + + # Find the first usable log dir. + for d in dirs: + if os.path.isdir(d) and os.access(d, os.W_OK): + return d + raise FileNotFoundError( + "Can't find a writable directory for logs, tried %s" % dirs) + + +def get_absl_log_prefix(record): + """Returns the absl log prefix for the log record. + + Args: + record: logging.LogRecord, the record to get prefix for. + """ + created_tuple = time.localtime(record.created) + created_microsecond = int(record.created % 1.0 * 1e6) + + critical_prefix = '' + level = record.levelno + if _is_non_absl_fatal_record(record): + # When the level is FATAL, but not logged from absl, lower the level so + # it's treated as ERROR. + level = logging.ERROR + critical_prefix = _CRITICAL_PREFIX + severity = converter.get_initial_for_level(level) + + return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % ( + severity, + created_tuple.tm_mon, + created_tuple.tm_mday, + created_tuple.tm_hour, + created_tuple.tm_min, + created_tuple.tm_sec, + created_microsecond, + _get_thread_id(), + record.filename, + record.lineno, + critical_prefix) + + +def skip_log_prefix(func): + """Skips reporting the prefix of a given function or name by :class:`~absl.logging.ABSLLogger`. + + This is a convenience wrapper function / decorator for + :meth:`~absl.logging.ABSLLogger.register_frame_to_skip`. + + If a callable function is provided, only that function will be skipped. + If a function name is provided, all functions with the same name in the + file that this is called in will be skipped. + + This can be used as a decorator of the intended function to be skipped. + + Args: + func: Callable function or its name as a string. + + Returns: + func (the input, unchanged). 
+ + Raises: + ValueError: The input is callable but does not have a function code object. + TypeError: The input is neither callable nor a string. + """ + if callable(func): + func_code = getattr(func, '__code__', None) + if func_code is None: + raise ValueError('Input callable does not have a function code object.') + file_name = func_code.co_filename + func_name = func_code.co_name + func_lineno = func_code.co_firstlineno + elif isinstance(func, str): + file_name = get_absl_logger().findCaller()[0] + func_name = func + func_lineno = None + else: + raise TypeError('Input is neither callable nor a string.') + ABSLLogger.register_frame_to_skip(file_name, func_name, func_lineno) + return func + + +def _is_non_absl_fatal_record(log_record): + return (log_record.levelno >= logging.FATAL and + not log_record.__dict__.get(_ABSL_LOG_FATAL, False)) + + +def _is_absl_fatal_record(log_record): + return (log_record.levelno >= logging.FATAL and + log_record.__dict__.get(_ABSL_LOG_FATAL, False)) + + +# Indicates if we still need to warn about pre-init logs going to stderr. +_warn_preinit_stderr = True + + +class PythonHandler(logging.StreamHandler): + """The handler class used by Abseil Python logging implementation.""" + + def __init__(self, stream=None, formatter=None): + super(PythonHandler, self).__init__(stream) + self.setFormatter(formatter or PythonFormatter()) + + def start_logging_to_file(self, program_name=None, log_dir=None): + """Starts logging messages to files instead of standard error.""" + FLAGS.logtostderr = False + + actual_log_dir, file_prefix, symlink_prefix = find_log_dir_and_names( + program_name=program_name, log_dir=log_dir) + + basename = '%s.INFO.%s.%d' % ( + file_prefix, + time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time())), + os.getpid()) + filename = os.path.join(actual_log_dir, basename) + + self.stream = open(filename, 'a', encoding='utf-8') + + # os.symlink is not available on Windows Python 2. + if getattr(os, 'symlink', None): + # Create a symlink to the log file with a canonical name. + symlink = os.path.join(actual_log_dir, symlink_prefix + '.INFO') + try: + if os.path.islink(symlink): + os.unlink(symlink) + os.symlink(os.path.basename(filename), symlink) + except EnvironmentError: + # If it fails, we're sad but it's no error. Commonly, this + # fails because the symlink was created by another user and so + # we can't modify it + pass + + def use_absl_log_file(self, program_name=None, log_dir=None): + """Conditionally logs to files, based on --logtostderr.""" + if FLAGS['logtostderr'].value: + self.stream = sys.stderr + else: + self.start_logging_to_file(program_name=program_name, log_dir=log_dir) + + def flush(self): + """Flushes all log files.""" + self.acquire() + try: + if self.stream and hasattr(self.stream, 'flush'): + self.stream.flush() + except (EnvironmentError, ValueError): + # A ValueError is thrown if we try to flush a closed file. + pass + finally: + self.release() + + def _log_to_stderr(self, record): + """Emits the record to stderr. + + This temporarily sets the handler stream to stderr, calls + StreamHandler.emit, then reverts the stream back. + + Args: + record: logging.LogRecord, the record to log. + """ + # emit() is protected by a lock in logging.Handler, so we don't need to + # protect here again. + old_stream = self.stream + self.stream = sys.stderr + try: + super(PythonHandler, self).emit(record) + finally: + self.stream = old_stream + + def emit(self, record): + """Prints a record out to some streams. + + 1. 
If ``FLAGS.logtostderr`` is set, it will print to ``sys.stderr`` ONLY. + 2. If ``FLAGS.alsologtostderr`` is set, it will print to ``sys.stderr``. + 3. If ``FLAGS.logtostderr`` is not set, it will log to the stream + associated with the current thread. + + Args: + record: :class:`logging.LogRecord`, the record to emit. + """ + # People occasionally call logging functions at import time before + # our flags may have even been defined yet, let alone even parsed, as we + # rely on the C++ side to define some flags for us and app init to + # deal with parsing. Match the C++ library behavior of notify and emit + # such messages to stderr. It encourages people to clean-up and does + # not hide the message. + level = record.levelno + if not FLAGS.is_parsed(): # Also implies "before flag has been defined". + global _warn_preinit_stderr + if _warn_preinit_stderr: + sys.stderr.write( + 'WARNING: Logging before flag parsing goes to stderr.\n') + _warn_preinit_stderr = False + self._log_to_stderr(record) + elif FLAGS['logtostderr'].value: + self._log_to_stderr(record) + else: + super(PythonHandler, self).emit(record) + stderr_threshold = converter.string_to_standard( + FLAGS['stderrthreshold'].value) + if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and + self.stream != sys.stderr): + self._log_to_stderr(record) + # Die when the record is created from ABSLLogger and level is FATAL. + if _is_absl_fatal_record(record): + self.flush() # Flush the log before dying. + + # In threaded python, sys.exit() from a non-main thread only + # exits the thread in question. + os.abort() + + def close(self): + """Closes the stream to which we are writing.""" + self.acquire() + try: + self.flush() + try: + # Do not close the stream if it's sys.stderr|stdout. They may be + # redirected or overridden to files, which should be managed by users + # explicitly. + user_managed = sys.stderr, sys.stdout, sys.__stderr__, sys.__stdout__ + if self.stream not in user_managed and ( + not hasattr(self.stream, 'isatty') or not self.stream.isatty()): + self.stream.close() + except ValueError: + # A ValueError is thrown if we try to run isatty() on a closed file. 
+ pass + super(PythonHandler, self).close() + finally: + self.release() + + +class ABSLHandler(logging.Handler): + """Abseil Python logging module's log handler.""" + + def __init__(self, python_logging_formatter): + super(ABSLHandler, self).__init__() + + self._python_handler = PythonHandler(formatter=python_logging_formatter) + self.activate_python_handler() + + def format(self, record): + return self._current_handler.format(record) + + def setFormatter(self, fmt): + self._current_handler.setFormatter(fmt) + + def emit(self, record): + self._current_handler.emit(record) + + def flush(self): + self._current_handler.flush() + + def close(self): + super(ABSLHandler, self).close() + self._current_handler.close() + + def handle(self, record): + rv = self.filter(record) + if rv: + return self._current_handler.handle(record) + return rv + + @property + def python_handler(self): + return self._python_handler + + def activate_python_handler(self): + """Uses the Python logging handler as the current logging handler.""" + self._current_handler = self._python_handler + + def use_absl_log_file(self, program_name=None, log_dir=None): + self._current_handler.use_absl_log_file(program_name, log_dir) + + def start_logging_to_file(self, program_name=None, log_dir=None): + self._current_handler.start_logging_to_file(program_name, log_dir) + + +class PythonFormatter(logging.Formatter): + """Formatter class used by :class:`~absl.logging.PythonHandler`.""" + + def format(self, record): + """Appends the message from the record to the results of the prefix. + + Args: + record: logging.LogRecord, the record to be formatted. + + Returns: + The formatted string representing the record. + """ + if (not FLAGS['showprefixforinfo'].value and + FLAGS['verbosity'].value == converter.ABSL_INFO and + record.levelno == logging.INFO and + _absl_handler.python_handler.stream == sys.stderr): + prefix = '' + else: + prefix = get_absl_log_prefix(record) + return prefix + super(PythonFormatter, self).format(record) + + +class ABSLLogger(logging.getLoggerClass()): + """A logger that will create LogRecords while skipping some stack frames. + + This class maintains an internal list of filenames and method names + for use when determining who called the currently executing stack + frame. Any method names from specific source files are skipped when + walking backwards through the stack. + + Client code should use the register_frame_to_skip method to let the + ABSLLogger know which method from which file should be + excluded from the walk backwards through the stack. + """ + _frames_to_skip = set() + + def findCaller(self, stack_info=False, stacklevel=1): + """Finds the frame of the calling method on the stack. + + This method skips any frames registered with the + ABSLLogger and any methods from this file, and whatever + method is currently being used to generate the prefix for the log + line. Then it returns the file name, line number, and method name + of the calling method. An optional fourth item may be returned, + callers who only need things from the first three are advised to + always slice or index the result rather than using direct unpacking + assignment. + + Args: + stack_info: bool, when True, include the stack trace as a fourth item + returned. On Python 3 there are always four items returned - the + fourth will be None when this is False. On Python 2 the stdlib + base class API only returns three items. We do the same when this + new parameter is unspecified or False for compatibility. 
+ + Returns: + (filename, lineno, methodname[, sinfo]) of the calling method. + """ + f_to_skip = ABSLLogger._frames_to_skip + # Use sys._getframe(2) instead of logging.currentframe(), it's slightly + # faster because there is one less frame to traverse. + frame = sys._getframe(2) # pylint: disable=protected-access + + while frame: + code = frame.f_code + if (_LOGGING_FILE_PREFIX not in code.co_filename and + (code.co_filename, code.co_name, + code.co_firstlineno) not in f_to_skip and + (code.co_filename, code.co_name) not in f_to_skip): + sinfo = None + if stack_info: + out = io.StringIO() + out.write(u'Stack (most recent call last):\n') + traceback.print_stack(frame, file=out) + sinfo = out.getvalue().rstrip(u'\n') + return (code.co_filename, frame.f_lineno, code.co_name, sinfo) + frame = frame.f_back + + def critical(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``CRITICAL``.""" + self.log(logging.CRITICAL, msg, *args, **kwargs) + + def fatal(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``FATAL``.""" + self.log(logging.FATAL, msg, *args, **kwargs) + + def error(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``ERROR``.""" + self.log(logging.ERROR, msg, *args, **kwargs) + + def warn(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``WARN``.""" + warnings.warn("The 'warn' method is deprecated, use 'warning' instead", + DeprecationWarning, 2) + self.log(logging.WARN, msg, *args, **kwargs) + + def warning(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``WARNING``.""" + self.log(logging.WARNING, msg, *args, **kwargs) + + def info(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``INFO``.""" + self.log(logging.INFO, msg, *args, **kwargs) + + def debug(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``DEBUG``.""" + self.log(logging.DEBUG, msg, *args, **kwargs) + + def log(self, level, msg, *args, **kwargs): + """Logs a message at a cetain level substituting in the supplied arguments. + + This method behaves differently in python and c++ modes. + + Args: + level: int, the standard logging level at which to log the message. + msg: str, the text of the message to log. + *args: The arguments to substitute in the message. + **kwargs: The keyword arguments to substitute in the message. + """ + if level >= logging.FATAL: + # Add property to the LogRecord created by this logger. + # This will be used by the ABSLHandler to determine whether it should + # treat CRITICAL/FATAL logs as really FATAL. + extra = kwargs.setdefault('extra', {}) + extra[_ABSL_LOG_FATAL] = True + super(ABSLLogger, self).log(level, msg, *args, **kwargs) + + def handle(self, record): + """Calls handlers without checking ``Logger.disabled``. + + Non-root loggers are set to disabled after setup with :func:`logging.config` + if it's not explicitly specified. Historically, absl logging will not be + disabled by that. To maintaining this behavior, this function skips + checking the ``Logger.disabled`` bit. + + This logger can still be disabled by adding a filter that filters out + everything. + + Args: + record: logging.LogRecord, the record to handle. + """ + if self.filter(record): + self.callHandlers(record) + + @classmethod + def register_frame_to_skip(cls, file_name, function_name, line_number=None): + """Registers a function name to skip when walking the stack. 
+ + The :class:`~absl.logging.ABSLLogger` sometimes skips method calls on the + stack to make the log messages meaningful in their appropriate context. + This method registers a function from a particular file as one + which should be skipped. + + Args: + file_name: str, the name of the file that contains the function. + function_name: str, the name of the function to skip. + line_number: int, if provided, only the function with this starting line + number will be skipped. Otherwise, all functions with the same name + in the file will be skipped. + """ + if line_number is not None: + cls._frames_to_skip.add((file_name, function_name, line_number)) + else: + cls._frames_to_skip.add((file_name, function_name)) + + +def _get_thread_id(): + """Gets id of current thread, suitable for logging as an unsigned quantity. + + If pywrapbase is linked, returns GetTID() for the thread ID to be + consistent with C++ logging. Otherwise, returns the numeric thread id. + The quantities are made unsigned by masking with 2*sys.maxint + 1. + + Returns: + Thread ID unique to this process (unsigned) + """ + thread_id = threading.get_ident() + return thread_id & _THREAD_ID_MASK + + +def get_absl_logger(): + """Returns the absl logger instance.""" + assert _absl_logger is not None + return _absl_logger + + +def get_absl_handler(): + """Returns the absl handler instance.""" + assert _absl_handler is not None + return _absl_handler + + +def use_python_logging(quiet=False): + """Uses the python implementation of the logging code. + + Args: + quiet: No logging message about switching logging type. + """ + get_absl_handler().activate_python_handler() + if not quiet: + info('Restoring pure python logging') + + +_attempted_to_remove_stderr_stream_handlers = False + + +def use_absl_handler(): + """Uses the ABSL logging handler for logging. + + This method is called in :func:`app.run()` so the absl handler + is used in absl apps. + """ + global _attempted_to_remove_stderr_stream_handlers + if not _attempted_to_remove_stderr_stream_handlers: + # The absl handler logs to stderr by default. To prevent double logging to + # stderr, the following code tries its best to remove other handlers that + # emit to stderr. Those handlers are most commonly added when + # logging.info/debug is called before calling use_absl_handler(). 
+ handlers = [ + h for h in logging.root.handlers + if isinstance(h, logging.StreamHandler) and h.stream == sys.stderr] + for h in handlers: + logging.root.removeHandler(h) + _attempted_to_remove_stderr_stream_handlers = True + + absl_handler = get_absl_handler() + if absl_handler not in logging.root.handlers: + logging.root.addHandler(absl_handler) + FLAGS['verbosity']._update_logging_levels() # pylint: disable=protected-access + FLAGS['logger_levels']._update_logger_levels() # pylint: disable=protected-access + + +def _initialize(): + """Initializes loggers and handlers.""" + global _absl_logger, _absl_handler + + if _absl_logger: + return + + original_logger_class = logging.getLoggerClass() + logging.setLoggerClass(ABSLLogger) + _absl_logger = logging.getLogger('absl') + logging.setLoggerClass(original_logger_class) + + python_logging_formatter = PythonFormatter() + _absl_handler = ABSLHandler(python_logging_formatter) + + +_initialize() diff --git a/llmeval-env/lib/python3.10/site-packages/absl/logging/__init__.pyi b/llmeval-env/lib/python3.10/site-packages/absl/logging/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5d5bb69d59d8e0c8fdaafaab7b975361031be898 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/absl/logging/__init__.pyi @@ -0,0 +1,290 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import Any, Callable, Dict, NoReturn, Optional, Tuple, TypeVar, Union + +from absl import flags + +# Logging levels. +FATAL: int +ERROR: int +WARNING: int +WARN: int # Deprecated name. +INFO: int +DEBUG: int + +ABSL_LOGGING_PREFIX_REGEX: str + +LOGTOSTDERR: flags.FlagHolder[bool] +ALSOLOGTOSTDERR: flags.FlagHolder[bool] +LOG_DIR: flags.FlagHolder[str] +VERBOSITY: flags.FlagHolder[int] +LOGGER_LEVELS: flags.FlagHolder[Dict[str, str]] +STDERRTHRESHOLD: flags.FlagHolder[str] +SHOWPREFIXFORINFO: flags.FlagHolder[bool] + + +def get_verbosity() -> int: + ... + + +def set_verbosity(v: Union[int, str]) -> None: + ... + + +def set_stderrthreshold(s: Union[int, str]) -> None: + ... + + +# TODO(b/277607978): Provide actual args+kwargs shadowing stdlib's logging functions. +def fatal(msg: Any, *args: Any, **kwargs: Any) -> NoReturn: + ... + + +def error(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def warning(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def warn(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def info(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def debug(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def exception(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def log_every_n(level: int, msg: Any, n: int, *args: Any) -> None: + ... + + +def log_every_n_seconds( + level: int, msg: Any, n_seconds: float, *args: Any +) -> None: + ... + + +def log_first_n(level: int, msg: Any, n: int, *args: Any) -> None: + ... + + +def log_if(level: int, msg: Any, condition: Any, *args: Any) -> None: + ... 
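The rate-limited and conditional helpers typed above (`log_every_n`, `log_every_n_seconds`, `log_first_n`, `log_if`) are plain functions on the `absl.logging` module. A minimal usage sketch follows, assuming only that `absl-py` is installed and that the script is started through `app.run` so the absl handler from this package is active; the script itself is illustrative and not part of this diff.

```python
# Minimal sketch of the rate-limited / conditional logging helpers stubbed above.
from absl import app, logging


def main(argv):
    del argv  # Unused.
    logging.set_verbosity(logging.INFO)

    for i in range(100):
        # Emitted only on every 10th call at INFO severity.
        logging.log_every_n(logging.INFO, 'processed %d items', 10, i)

    for _ in range(5):
        # Emitted at most twice, no matter how often this line runs.
        logging.log_first_n(logging.WARNING, 'slow path taken', 2)

    # Emitted only when the condition is truthy (here: never).
    logging.log_if(logging.ERROR, 'queue overflowed', False)


if __name__ == '__main__':
    app.run(main)
```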
+ + +def log(level: int, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def vlog(level: int, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def vlog_is_on(level: int) -> bool: + ... + + +def flush() -> None: + ... + + +def level_debug() -> bool: + ... + + +def level_info() -> bool: + ... + + +def level_warning() -> bool: + ... + + +level_warn = level_warning # Deprecated function. + + +def level_error() -> bool: + ... + + +def get_log_file_name(level: int = ...) -> str: + ... + + +def find_log_dir_and_names( + program_name: Optional[str] = ..., log_dir: Optional[str] = ... +) -> Tuple[str, str, str]: + ... + + +def find_log_dir(log_dir: Optional[str] = ...) -> str: + ... + + +def get_absl_log_prefix(record: logging.LogRecord) -> str: + ... + + +_SkipLogT = TypeVar('_SkipLogT', str, Callable[..., Any]) + +def skip_log_prefix(func: _SkipLogT) -> _SkipLogT: + ... + + +_StreamT = TypeVar("_StreamT") + + +class PythonHandler(logging.StreamHandler[_StreamT]): + + def __init__( + self, + stream: Optional[_StreamT] = ..., + formatter: Optional[logging.Formatter] = ..., + ) -> None: + ... + + def start_logging_to_file( + self, program_name: Optional[str] = ..., log_dir: Optional[str] = ... + ) -> None: + ... + + def use_absl_log_file( + self, program_name: Optional[str] = ..., log_dir: Optional[str] = ... + ) -> None: + ... + + def flush(self) -> None: + ... + + def emit(self, record: logging.LogRecord) -> None: + ... + + def close(self) -> None: + ... + + +class ABSLHandler(logging.Handler): + + def __init__(self, python_logging_formatter: PythonFormatter) -> None: + ... + + def format(self, record: logging.LogRecord) -> str: + ... + + def setFormatter(self, fmt) -> None: + ... + + def emit(self, record: logging.LogRecord) -> None: + ... + + def flush(self) -> None: + ... + + def close(self) -> None: + ... + + def handle(self, record: logging.LogRecord) -> bool: + ... + + @property + def python_handler(self) -> PythonHandler: + ... + + def activate_python_handler(self) -> None: + ... + + def use_absl_log_file( + self, program_name: Optional[str] = ..., log_dir: Optional[str] = ... + ) -> None: + ... + + def start_logging_to_file(self, program_name=None, log_dir=None) -> None: + ... + + +class PythonFormatter(logging.Formatter): + + def format(self, record: logging.LogRecord) -> str: + ... + + +class ABSLLogger(logging.Logger): + + def findCaller( + self, stack_info: bool = ..., stacklevel: int = ... + ) -> Tuple[str, int, str, Optional[str]]: + ... + + def critical(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def fatal(self, msg: Any, *args: Any, **kwargs: Any) -> NoReturn: + ... + + def error(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def warn(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def warning(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def info(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def debug(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def log(self, level: int, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def handle(self, record: logging.LogRecord) -> None: + ... + + @classmethod + def register_frame_to_skip( + cls, file_name: str, function_name: str, line_number: Optional[int] = ... + ) -> None: + ... + + +# NOTE: Returns None before _initialize called but shouldn't occur after import. +def get_absl_logger() -> ABSLLogger: + ... + + +# NOTE: Returns None before _initialize called but shouldn't occur after import. 
+def get_absl_handler() -> ABSLHandler: + ... + + +def use_python_logging(quiet: bool = ...) -> None: + ... + + +def use_absl_handler() -> None: + ... diff --git a/llmeval-env/lib/python3.10/site-packages/absl/logging/converter.py b/llmeval-env/lib/python3.10/site-packages/absl/logging/converter.py new file mode 100644 index 0000000000000000000000000000000000000000..0239ab4556458b995f9cbca796281cc44acaf476 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/absl/logging/converter.py @@ -0,0 +1,214 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module to convert log levels between Abseil Python, C++, and Python standard. + +This converter has to convert (best effort) between three different +logging level schemes: + + * **cpp**: The C++ logging level scheme used in Abseil C++. + * **absl**: The absl.logging level scheme used in Abseil Python. + * **standard**: The python standard library logging level scheme. + +Here is a handy ascii chart for easy mental mapping:: + + LEVEL | cpp | absl | standard | + ---------+-----+--------+----------+ + DEBUG | 0 | 1 | 10 | + INFO | 0 | 0 | 20 | + WARNING | 1 | -1 | 30 | + ERROR | 2 | -2 | 40 | + CRITICAL | 3 | -3 | 50 | + FATAL | 3 | -3 | 50 | + +Note: standard logging ``CRITICAL`` is mapped to absl/cpp ``FATAL``. +However, only ``CRITICAL`` logs from the absl logger (or absl.logging.fatal) +will terminate the program. ``CRITICAL`` logs from non-absl loggers are treated +as error logs with a message prefix ``"CRITICAL - "``. + +Converting from standard to absl or cpp is a lossy conversion. +Converting back to standard will lose granularity. For this reason, +users should always try to convert to standard, the richest +representation, before manipulating the levels, and then only to cpp +or absl if those level schemes are absolutely necessary. +""" + +import logging + +STANDARD_CRITICAL = logging.CRITICAL +STANDARD_ERROR = logging.ERROR +STANDARD_WARNING = logging.WARNING +STANDARD_INFO = logging.INFO +STANDARD_DEBUG = logging.DEBUG + +# These levels are also used to define the constants +# FATAL, ERROR, WARNING, INFO, and DEBUG in the +# absl.logging module. +ABSL_FATAL = -3 +ABSL_ERROR = -2 +ABSL_WARNING = -1 +ABSL_WARN = -1 # Deprecated name. +ABSL_INFO = 0 +ABSL_DEBUG = 1 + +ABSL_LEVELS = {ABSL_FATAL: 'FATAL', + ABSL_ERROR: 'ERROR', + ABSL_WARNING: 'WARNING', + ABSL_INFO: 'INFO', + ABSL_DEBUG: 'DEBUG'} + +# Inverts the ABSL_LEVELS dictionary +ABSL_NAMES = {'FATAL': ABSL_FATAL, + 'ERROR': ABSL_ERROR, + 'WARNING': ABSL_WARNING, + 'WARN': ABSL_WARNING, # Deprecated name. 
+ 'INFO': ABSL_INFO, + 'DEBUG': ABSL_DEBUG} + +ABSL_TO_STANDARD = {ABSL_FATAL: STANDARD_CRITICAL, + ABSL_ERROR: STANDARD_ERROR, + ABSL_WARNING: STANDARD_WARNING, + ABSL_INFO: STANDARD_INFO, + ABSL_DEBUG: STANDARD_DEBUG} + +# Inverts the ABSL_TO_STANDARD +STANDARD_TO_ABSL = dict((v, k) for (k, v) in ABSL_TO_STANDARD.items()) + + +def get_initial_for_level(level): + """Gets the initial that should start the log line for the given level. + + It returns: + + * ``'I'`` when: ``level < STANDARD_WARNING``. + * ``'W'`` when: ``STANDARD_WARNING <= level < STANDARD_ERROR``. + * ``'E'`` when: ``STANDARD_ERROR <= level < STANDARD_CRITICAL``. + * ``'F'`` when: ``level >= STANDARD_CRITICAL``. + + Args: + level: int, a Python standard logging level. + + Returns: + The first initial as it would be logged by the C++ logging module. + """ + if level < STANDARD_WARNING: + return 'I' + elif level < STANDARD_ERROR: + return 'W' + elif level < STANDARD_CRITICAL: + return 'E' + else: + return 'F' + + +def absl_to_cpp(level): + """Converts an absl log level to a cpp log level. + + Args: + level: int, an absl.logging level. + + Raises: + TypeError: Raised when level is not an integer. + + Returns: + The corresponding integer level for use in Abseil C++. + """ + if not isinstance(level, int): + raise TypeError('Expect an int level, found {}'.format(type(level))) + if level >= 0: + # C++ log levels must be >= 0 + return 0 + else: + return -level + + +def absl_to_standard(level): + """Converts an integer level from the absl value to the standard value. + + Args: + level: int, an absl.logging level. + + Raises: + TypeError: Raised when level is not an integer. + + Returns: + The corresponding integer level for use in standard logging. + """ + if not isinstance(level, int): + raise TypeError('Expect an int level, found {}'.format(type(level))) + if level < ABSL_FATAL: + level = ABSL_FATAL + if level <= ABSL_DEBUG: + return ABSL_TO_STANDARD[level] + # Maps to vlog levels. + return STANDARD_DEBUG - level + 1 + + +def string_to_standard(level): + """Converts a string level to standard logging level value. + + Args: + level: str, case-insensitive ``'debug'``, ``'info'``, ``'warning'``, + ``'error'``, ``'fatal'``. + + Returns: + The corresponding integer level for use in standard logging. + """ + return absl_to_standard(ABSL_NAMES.get(level.upper())) + + +def standard_to_absl(level): + """Converts an integer level from the standard value to the absl value. + + Args: + level: int, a Python standard logging level. + + Raises: + TypeError: Raised when level is not an integer. + + Returns: + The corresponding integer level for use in absl logging. + """ + if not isinstance(level, int): + raise TypeError('Expect an int level, found {}'.format(type(level))) + if level < 0: + level = 0 + if level < STANDARD_DEBUG: + # Maps to vlog levels. + return STANDARD_DEBUG - level + 1 + elif level < STANDARD_INFO: + return ABSL_DEBUG + elif level < STANDARD_WARNING: + return ABSL_INFO + elif level < STANDARD_ERROR: + return ABSL_WARNING + elif level < STANDARD_CRITICAL: + return ABSL_ERROR + else: + return ABSL_FATAL + + +def standard_to_cpp(level): + """Converts an integer level from the standard value to the cpp value. + + Args: + level: int, a Python standard logging level. + + Raises: + TypeError: Raised when level is not an integer. + + Returns: + The corresponding integer level for use in cpp logging. 
+ """ + return absl_to_cpp(standard_to_absl(level)) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0dc2975aa963e1915c525ec6656d33448fa57c30 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__init__.py @@ -0,0 +1,158 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ..utils import _LazyModule + + +_import_structure = { + "aqlm": ["replace_with_aqlm_linear"], + "awq": [ + "fuse_awq_modules", + "post_init_awq_exllama_modules", + "replace_with_awq_linear", + ], + "bitsandbytes": [ + "get_keys_to_not_convert", + "replace_8bit_linear", + "replace_with_bnb_linear", + "set_module_8bit_tensor_to_device", + "set_module_quantized_tensor_to_device", + ], + "deepspeed": [ + "HfDeepSpeedConfig", + "HfTrainerDeepSpeedConfig", + "deepspeed_config", + "deepspeed_init", + "deepspeed_load_checkpoint", + "deepspeed_optim_sched", + "is_deepspeed_available", + "is_deepspeed_zero3_enabled", + "set_hf_deepspeed_config", + "unset_hf_deepspeed_config", + ], + "integration_utils": [ + "INTEGRATION_TO_CALLBACK", + "AzureMLCallback", + "ClearMLCallback", + "CodeCarbonCallback", + "CometCallback", + "DagsHubCallback", + "DVCLiveCallback", + "FlyteCallback", + "MLflowCallback", + "NeptuneCallback", + "NeptuneMissingConfiguration", + "TensorBoardCallback", + "WandbCallback", + "get_available_reporting_integrations", + "get_reporting_integration_callbacks", + "hp_params", + "is_azureml_available", + "is_clearml_available", + "is_codecarbon_available", + "is_comet_available", + "is_dagshub_available", + "is_dvclive_available", + "is_flyte_deck_standard_available", + "is_flytekit_available", + "is_mlflow_available", + "is_neptune_available", + "is_optuna_available", + "is_ray_available", + "is_ray_tune_available", + "is_sigopt_available", + "is_tensorboard_available", + "is_wandb_available", + "rewrite_logs", + "run_hp_search_optuna", + "run_hp_search_ray", + "run_hp_search_sigopt", + "run_hp_search_wandb", + ], + "peft": ["PeftAdapterMixin"], + "quanto": ["replace_with_quanto_layers"], +} + +if TYPE_CHECKING: + from .aqlm import replace_with_aqlm_linear + from .awq import ( + fuse_awq_modules, + post_init_awq_exllama_modules, + replace_with_awq_linear, + ) + from .bitsandbytes import ( + get_keys_to_not_convert, + replace_8bit_linear, + replace_with_bnb_linear, + set_module_8bit_tensor_to_device, + set_module_quantized_tensor_to_device, + ) + from .deepspeed import ( + HfDeepSpeedConfig, + HfTrainerDeepSpeedConfig, + deepspeed_config, + deepspeed_init, + deepspeed_load_checkpoint, + deepspeed_optim_sched, + is_deepspeed_available, + is_deepspeed_zero3_enabled, + set_hf_deepspeed_config, + unset_hf_deepspeed_config, + ) + from .integration_utils import ( + INTEGRATION_TO_CALLBACK, + 
AzureMLCallback, + ClearMLCallback, + CodeCarbonCallback, + CometCallback, + DagsHubCallback, + DVCLiveCallback, + FlyteCallback, + MLflowCallback, + NeptuneCallback, + NeptuneMissingConfiguration, + TensorBoardCallback, + WandbCallback, + get_available_reporting_integrations, + get_reporting_integration_callbacks, + hp_params, + is_azureml_available, + is_clearml_available, + is_codecarbon_available, + is_comet_available, + is_dagshub_available, + is_dvclive_available, + is_flyte_deck_standard_available, + is_flytekit_available, + is_mlflow_available, + is_neptune_available, + is_optuna_available, + is_ray_available, + is_ray_tune_available, + is_sigopt_available, + is_tensorboard_available, + is_wandb_available, + rewrite_logs, + run_hp_search_optuna, + run_hp_search_ray, + run_hp_search_sigopt, + run_hp_search_wandb, + ) + from .peft import PeftAdapterMixin + from .quanto import replace_with_quanto_layers +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6cba637e2476d061ad1017f455aa4c67cf925e4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/aqlm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/aqlm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e19b164451e1b62528ae837499a12e58220a810 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/aqlm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/awq.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/awq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..070a30a82119ab62f72cfde825db1cc121d0c54e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/awq.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/bitsandbytes.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/bitsandbytes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e246c09ba129bc56219fcb518cb9d03f3f5df6aa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/bitsandbytes.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/deepspeed.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/deepspeed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..617ff4484eeb18c5a5db4edfc05c297d80b16525 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/deepspeed.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/integration_utils.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/integration_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd84b9e33e7a549a1bf38b04bc6bc2e6b07e035f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/integration_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/peft.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/peft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5dbc59ea01e2919cb4b02212a466cc1b870916c7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/peft.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/quanto.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/quanto.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eef703034b9267e507f59b6199ad88af50525ad7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/quanto.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/tpu.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/tpu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bc35be3d977c901f6d7977eab42cb349f948b18 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/__pycache__/tpu.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/aqlm.py b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/aqlm.py new file mode 100644 index 0000000000000000000000000000000000000000..903d0ecdaebc05a712f78719db6a2066ee6a788a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/aqlm.py @@ -0,0 +1,99 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"AQLM (Additive Quantization of Language Model) integration file" + + +from ..utils import is_accelerate_available, is_aqlm_available, is_torch_available + + +if is_torch_available(): + import torch.nn as nn + + +def replace_with_aqlm_linear( + model, + quantization_config=None, + linear_weights_not_to_quantize=None, + current_key_name=None, + has_been_replaced=False, +): + """ + Public method that recursively replaces the Linear layers of the given model with AQLM quantized layers. + `accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the + conversion has been successfull or not. + + Args: + model (`torch.nn.Module`): + The model to convert, can be any `torch.nn.Module` instance. 
+ quantization_config (`AqlmConfig`): + The quantization config object that contains the quantization parameters. + linear_weights_not_to_quantize (`list[str]`, *optional*): + A list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be + converted. + current_key_name (`list`, *optional*): + A list that contains the current key name. This is used for recursion and should not be passed by the user. + has_been_replaced (`bool`, *optional*): + A boolean that indicates if the conversion has been successful or not. This is used for recursion and + should not be passed by the user. + """ + if not is_aqlm_available(): + raise ValueError("AQLM is not available. Please install it with `pip install aqlm[cpu,gpu]`") + + if not is_accelerate_available(): + raise ValueError("AQLM requires Accelerate to be installed: `pip install accelerate`") + + if linear_weights_not_to_quantize is None: + linear_weights_not_to_quantize = [] + + from accelerate import init_empty_weights + from aqlm import QuantizedLinear + + for name, module in model.named_children(): + if current_key_name is None: + current_key_name = [] + current_key_name.append(name) + + if isinstance(module, nn.Linear): + # Check if the current key is not in the `linear_weights_not_to_quantize` + if ".".join(current_key_name) + ".weight" not in linear_weights_not_to_quantize: + with init_empty_weights(): + in_features = module.in_features + out_features = module.out_features + + model._modules[name] = QuantizedLinear( + in_features, + out_features, + bias=module.bias is not None, + in_group_size=quantization_config.in_group_size, + out_group_size=quantization_config.out_group_size, + num_codebooks=quantization_config.num_codebooks, + nbits_per_codebook=quantization_config.nbits_per_codebook, + ) + has_been_replaced = True + + # Store the module class in case we need to transpose the weight later + model._modules[name].source_cls = type(module) + # Force requires grad to False to avoid unexpected errors + model._modules[name].requires_grad_(False) + if len(list(module.children())) > 0: + _, has_been_replaced = replace_with_aqlm_linear( + module, + quantization_config=quantization_config, + linear_weights_not_to_quantize=linear_weights_not_to_quantize, + current_key_name=current_key_name, + has_been_replaced=has_been_replaced, + ) + # Remove the last key for recursion + current_key_name.pop(-1) + return model, has_been_replaced diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/awq.py b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/awq.py new file mode 100644 index 0000000000000000000000000000000000000000..a543860f100396cbf710356013420c24da93ff9c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/awq.py @@ -0,0 +1,444 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
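Both the AQLM integration above and the AWQ integration that follows rely on the same recursive module-replacement idiom: walk `named_children()`, swap eligible `nn.Linear` layers for a quantized equivalent while honouring an exclusion list, and recurse with an accumulated `has_been_replaced` flag. A minimal, self-contained sketch of that idiom is shown below; `FakeQuantLinear` is a hypothetical placeholder for the real quantized classes (`QuantizedLinear`, `WQLinear_GEMM`, ...), not part of either library.

```python
# Simplified sketch of the recursive nn.Linear replacement pattern shared by the
# AQLM/AWQ/bitsandbytes integrations. `FakeQuantLinear` is illustrative only.
import torch.nn as nn


class FakeQuantLinear(nn.Linear):
    """Placeholder standing in for a real quantized linear layer."""


def replace_linears(model, modules_to_not_convert=(), prefix=""):
    """Recursively swaps nn.Linear children, skipping excluded module paths."""
    replaced = False
    for name, module in model.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        if isinstance(module, nn.Linear) and not any(
            skip in full_name for skip in modules_to_not_convert
        ):
            new_module = FakeQuantLinear(
                module.in_features, module.out_features, bias=module.bias is not None
            )
            # Mirror the integrations: converted layers are frozen.
            new_module.requires_grad_(False)
            model._modules[name] = new_module
            replaced = True
        # Recurse into submodules, accumulating the "has been replaced" flag.
        if len(list(module.children())) > 0:
            _, child_replaced = replace_linears(module, modules_to_not_convert, full_name)
            replaced = replaced or child_replaced
    return model, replaced


if __name__ == "__main__":
    toy = nn.Sequential(nn.Linear(8, 8), nn.Sequential(nn.Linear(8, 4)))
    toy, changed = replace_linears(toy, modules_to_not_convert=("lm_head",))
    print(changed)
```

The real integrations add library-specific details on top of this skeleton (empty-weight initialization via `accelerate`, per-backend target classes, and quantization-config plumbing), but the traversal and exclusion logic is the part they all share.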
+"AWQ (Activation aware Weight Quantization) integration file" +from ..activations import ACT2FN +from ..modeling_utils import PreTrainedModel +from ..utils import is_auto_awq_available, is_torch_available +from ..utils.quantization_config import ( + AwqBackendPackingMethod, + AwqConfig, + AWQLinearVersion, + ExllamaVersion, +) + + +if is_torch_available(): + import torch + import torch.nn as nn + + +AWQ_FUSED_MAPPINGS = { + "mistral": { + "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], + "mlp": ["gate_proj", "up_proj", "down_proj"], + "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"], + "use_alibi": False, + }, + "mixtral": { + "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], + "mlp": ["w1", "w3", "w2"], + "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"], + "use_alibi": False, + "rope_theta": 1000000.0, + }, + "llama": { + "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], + "mlp": ["gate_proj", "up_proj", "down_proj"], + "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"], + "use_alibi": False, + }, + "llava": { + "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], + "mlp": ["gate_proj", "up_proj", "down_proj"], + "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"], + "use_alibi": False, + }, +} + + +def replace_with_awq_linear( + model, + modules_to_not_convert=None, + quantization_config=None, + current_key_name=None, + has_been_replaced=False, +) -> bool: + """ + Public method that recursively replaces the Linear layers of the given model with AWQ quantized layers. + `accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the + conversion has been successfull or not. + + During the module replacement, we also infer the backend to use through the `quantization_config` object. + + Args: + model (`torch.nn.Module`): + The model to convert, can be any `torch.nn.Module` instance. + quantization_config (`AwqConfig`): + The quantization config object that contains the quantization parameters. + modules_to_not_convert (`list`, *optional*): + A list of modules to not convert. If a module name is in the list (e.g. `lm_head`), it will not be + converted. + current_key_name (`list`, *optional*): + A list that contains the current key name. This is used for recursion and should not be passed by the user. + has_been_replaced (`bool`, *optional*): + A boolean that indicates if the conversion has been successful or not. This is used for recursion and + should not be passed by the user. + """ + if modules_to_not_convert is None: + modules_to_not_convert = [] + + backend = quantization_config.backend + + if not is_auto_awq_available(): + raise ValueError( + "AWQ (either `autoawq` or `llmawq`) is not available. 
Please install it with `pip install autoawq` or check out the installation guide in https://github.com/mit-han-lab/llm-awq" + ) + + if backend == AwqBackendPackingMethod.AUTOAWQ: + if quantization_config.version == AWQLinearVersion.GEMM: + from awq.modules.linear.gemm import WQLinear_GEMM + + target_cls = WQLinear_GEMM + elif quantization_config.version == AWQLinearVersion.GEMV: + from awq.modules.linear.gemv import WQLinear_GEMV + + target_cls = WQLinear_GEMV + elif quantization_config.version == AWQLinearVersion.EXLLAMA: + if quantization_config.exllama_config["version"] == ExllamaVersion.ONE: + from awq.modules.linear.exllama import WQLinear_Exllama + + target_cls = WQLinear_Exllama + elif quantization_config.exllama_config["version"] == ExllamaVersion.TWO: + from awq.modules.linear.exllamav2 import WQLinear_ExllamaV2 + + target_cls = WQLinear_ExllamaV2 + else: + raise ValueError(f"Unrecognized Exllama version: {quantization_config.exllama_config['version']}") + else: + raise ValueError(f"Unrecognized AWQ version: {quantization_config.version}") + else: + from awq.quantize.qmodule import WQLinear + + target_cls = WQLinear + + for name, module in model.named_children(): + if current_key_name is None: + current_key_name = [] + current_key_name.append(name) + + if isinstance(module, nn.Linear) and name not in modules_to_not_convert: + # Check if the current key is not in the `modules_to_not_convert` + if not any(key in ".".join(current_key_name) for key in modules_to_not_convert): + in_features = module.in_features + out_features = module.out_features + + model._modules[name] = target_cls( + w_bit=quantization_config.bits, + group_size=quantization_config.group_size, + in_features=in_features, + out_features=out_features, + bias=module.bias is not None, + dev=module.weight.device, + ) + has_been_replaced = True + + # Force requires grad to False to avoid unexpected errors + model._modules[name].requires_grad_(False) + if len(list(module.children())) > 0: + _, has_been_replaced = replace_with_awq_linear( + module, + modules_to_not_convert=modules_to_not_convert, + current_key_name=current_key_name, + quantization_config=quantization_config, + has_been_replaced=has_been_replaced, + ) + # Remove the last key for recursion + current_key_name.pop(-1) + return model, has_been_replaced + + +def get_modules_to_fuse(model, quantization_config): + """ + Returns the fusing mapping given the quantization config and the model + + Args: + model (`~PreTrainedModel`): + The model to fuse - note this model should have been converted into AWQ format beforehand. + quantization_config (`~transformers.quantization_config.AWQConfig`): + The quantization configuration to use. + """ + if not isinstance(model, PreTrainedModel): + raise ValueError(f"The model should be an instance of `PreTrainedModel`, got {model.__class__.__name__}") + + # Always default to `quantization_config.modules_to_fuse` + if quantization_config.modules_to_fuse is not None: + current_fused_mapping = quantization_config.modules_to_fuse + current_fused_mapping["max_seq_len"] = quantization_config.fuse_max_seq_len + elif model.config.model_type in AWQ_FUSED_MAPPINGS: + current_fused_mapping = AWQ_FUSED_MAPPINGS[model.config.model_type] + + # Properly deal with the case where we have a multi-modal model as well (e.g. Llava) + if not hasattr(model.config, "text_config"): + config = model.config + else: + config = model.config.text_config + + # Handle hidden_size, num_attention_heads, num_key_value_heads on our own. 
+ hidden_size = config.hidden_size + num_attention_heads = config.num_attention_heads + num_key_value_heads = getattr(config, "num_key_value_heads", num_attention_heads) + + # Fill `current_fused_mapping` with the expected values + current_fused_mapping["hidden_size"] = hidden_size + current_fused_mapping["num_attention_heads"] = num_attention_heads + current_fused_mapping["num_key_value_heads"] = num_key_value_heads + current_fused_mapping["max_seq_len"] = quantization_config.fuse_max_seq_len + else: + raise ValueError( + "Fusing mapping not found either on the quantization config or the supported `AWQ_FUSED_MAPPINGS`. Please pass a `fused_mapping` argument" + " in the `quantization_config` or raise an issue on transformers https://github.com/huggingface/transformers to add its support." + ) + return current_fused_mapping + + +def fuse_awq_modules(model, quantization_config): + """ + Optionally fuse some modules in the model to speedup inference. + + Args: + model (`~PreTrainedModel`): + The model to fuse - note this model should have been converted into AWQ format beforehand. + quantization_config (`Union[AwqConfig, dict]`): + The quantization configuration to use. + """ + # We need to convert it from dict in order to get an AwqConfig object + # otherwise the fields `backend` etc. will not be available + # https://github.com/huggingface/transformers/pull/27411#discussion_r1414044495 + if isinstance(quantization_config, dict): + quantization_config = AwqConfig.from_dict(quantization_config) + backend = quantization_config.backend + + modules_to_fuse = get_modules_to_fuse(model, quantization_config) + modules_to_not_convert = getattr(quantization_config, "modules_to_not_convert", None) + + if backend == AwqBackendPackingMethod.AUTOAWQ: + from awq.modules.fused.attn import QuantAttentionFused + from awq.modules.fused.mlp import QuantFusedMLP + from awq.modules.fused.norm import FasterTransformerRMSNorm + else: + raise ValueError("Fusing is only supported for the AutoAWQ backend") + + fused_attention_modules = [] + + for name, module in model.named_modules(): + if modules_to_not_convert is not None: + if any(module_name_to_not_convert in name for module_name_to_not_convert in modules_to_not_convert): + continue + + # Replace layer norms + _fuse_awq_layernorm(modules_to_fuse["layernorm"], module, FasterTransformerRMSNorm) + + # Replace MLP layers + _fuse_awq_mlp(model, name, modules_to_fuse["mlp"], module, QuantFusedMLP) + + # Replace attention layers + attention_has_been_fused = _fuse_awq_attention_layers( + model, module, modules_to_fuse, name, QuantAttentionFused + ) + + if attention_has_been_fused: + fused_attention_modules.append(name.split(".")[0]) + + # For AWQ fused + Llama we need to set `config._attn_implementation` = "custom" to avoid unexpected behavior and pass + # `None` attention mask to the fused attention modules as now the attention mask is dropped by our models and dealt + # by the `AttentionMaskConverter` module. 
+ if len(fused_attention_modules) > 0: + for module_name, module in model.named_modules(): + if any( + module_name in fused_attention_modules for fused_attention_parent_module in fused_attention_modules + ): + if hasattr(module, "config") and hasattr(module.config, "_attn_implementation"): + module.config._attn_implementation = "custom" + return model + + +def _fuse_awq_layernorm(fuse_module_names, module, target_cls): + """ + Fuse the LayerNorm layers into a target class using autoawq + + Args: + fuse_module_names (`List[str]`): + The list of module names to fuse + module (`nn.Module`): + The pytorch parent module that has layernorm modules to fuse + target_cls (`~autoawq.FasterTransformerRMSNorm`): + The `FasterTransformerRMSNorm` class as it only supports that class + for now. + """ + for module_name in fuse_module_names: + if hasattr(module, module_name): + old_module = getattr(module, module_name) + module._modules[module_name] = target_cls( + old_module.weight, + old_module.variance_epsilon, + ).to(old_module.weight.device) + del old_module + + +def _fuse_awq_mlp(model, current_module_name, fuse_module_names, module, target_cls): + """ + Fuse the MLP layers into a target class using autoawq + + Args: + model (`~PreTrainedModel`): + The input pretrained model + current_module_name (`str`): + The current submodule name + fuse_module_names (`List[str]`): + The list of module names to fuse. For the MLP layers it has to be an array + of length 3 that consists of the 3 MLP layers in the order (gate (dense layer post-attention) / up / down layers) + module (`nn.Module`): + The pytorch parent module that has layernorm modules to fuse + target_cls (`~autoawq.QuantFusedMLP`): + The `QuantFusedMLP` class as it only supports that class + for now. + """ + if len(fuse_module_names) == 0: + return + + if hasattr(module, fuse_module_names[0]): + gate_proj = getattr(module, fuse_module_names[0]) + up_proj = getattr(module, fuse_module_names[1]) + down_proj = getattr(module, fuse_module_names[2]) + + previous_device = gate_proj.qweight.device + + # Deal also with the case model has `text_config` attribute + hidden_act = ( + model.config.hidden_act + if not hasattr(model.config, "text_config") + else model.config.text_config.hidden_act + ) + activation_fn = ACT2FN[hidden_act] + new_module = target_cls(gate_proj, down_proj, up_proj, activation_fn) + + parent_name, child_name = current_module_name.rsplit(".", 1) + parent = model.get_submodule(parent_name) + setattr(parent, child_name, new_module.to(previous_device)) + + del gate_proj, up_proj, down_proj + + +def _fuse_awq_attention_layers(model, module, modules_to_fuse, current_module_name, target_cls): + """ + Fuse the Attention layers into a target class using autoawq + + Args: + model (`~PreTrainedModel`): + The input pretrained model + module (`nn.Module`): + The pytorch parent module that has layernorm modules to fuse + modules_to_fuse (`List[str]`): + The module fusing mapping. The dictionary has to contain a field `attention` with attention module names + in the correct order: q, k, v, o layer + current_module_name (`str`): + The current submodule name + target_cls (`~autoawq.QuantAttentionFused`): + The `QuantAttentionFused` class as it only supports that class + for now. 
+ """ + from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV + + module_has_been_fused = False + + if len(modules_to_fuse["attention"]) == 0: + return module_has_been_fused + + if hasattr(module, modules_to_fuse["attention"][0]): + # First, we pack the QKV layers together + q_proj = getattr(module, modules_to_fuse["attention"][0]) + + if isinstance(q_proj, WQLinear_GEMV): + linear_target_cls = WQLinear_GEMV + cat_dim = 0 + elif isinstance(q_proj, WQLinear_GEMM): + linear_target_cls = WQLinear_GEMM + cat_dim = 1 + else: + raise ValueError("Unsupported q_proj type: {type(q_proj)}") + + previous_device = q_proj.qweight.device + + k_proj = getattr(module, modules_to_fuse["attention"][1]) + v_proj = getattr(module, modules_to_fuse["attention"][2]) + o_proj = getattr(module, modules_to_fuse["attention"][3]) + + bias = torch.cat([q_proj.bias, k_proj.bias, v_proj.bias], dim=0) if q_proj.bias is not None else None + + qkv_layer = linear_target_cls( + q_proj.w_bit, + q_proj.group_size, + q_proj.in_features, + q_proj.out_features + k_proj.out_features + v_proj.out_features, + q_proj.bias is not None, + next(iter(module.state_dict().values())).device, + ) + + qkv_layer.qweight = torch.cat([q_proj.qweight, k_proj.qweight, v_proj.qweight], dim=cat_dim) + qkv_layer.qzeros = torch.cat([q_proj.qzeros, k_proj.qzeros, v_proj.qzeros], dim=cat_dim) + qkv_layer.scales = torch.cat([q_proj.scales, k_proj.scales, v_proj.scales], dim=cat_dim) + + if isinstance(qkv_layer, WQLinear_GEMV): + qkv_layer.split_k_iters = q_proj.split_k_iters + + qkv_layer.bias = bias + + fused_attention_layer = target_cls( + modules_to_fuse["hidden_size"], + modules_to_fuse["num_attention_heads"], + modules_to_fuse["num_key_value_heads"], + qkv_layer, + o_proj, + previous_device, + modules_to_fuse["max_seq_len"], + use_alibi=modules_to_fuse["use_alibi"], + # The default value in autoawq is set to 10000.0 + rope_theta=modules_to_fuse.get("rope_theta", 10000.0), + ) + + fused_attention_layer.is_hf_transformers = True + + parent_name, child_name = current_module_name.rsplit(".", 1) + parent = model.get_submodule(parent_name) + setattr(parent, child_name, fused_attention_layer.to(previous_device)) + + del q_proj, k_proj, v_proj, o_proj + module_has_been_fused = True + + return module_has_been_fused + + +def post_init_awq_exllama_modules(model, exllama_config): + """ + Runs post init for Exllama layers which performs: + - Weights unpacking, reordering and repacking + - Devices scratch space allocation + """ + + if exllama_config["version"] == ExllamaVersion.ONE: + from awq.modules.linear.exllama import exllama_post_init + + model = exllama_post_init(model) + elif exllama_config["version"] == ExllamaVersion.TWO: + from awq.modules.linear.exllamav2 import exllamav2_post_init + + model = exllamav2_post_init( + model, + max_input_len=exllama_config["max_input_len"], + max_batch_size=exllama_config["max_batch_size"], + ) + else: + raise ValueError(f"Unrecognized Exllama version: {exllama_config['version']}") + + return model diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/bitsandbytes.py b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/bitsandbytes.py new file mode 100644 index 0000000000000000000000000000000000000000..f340c1db823731bb8b44763cb2852436c4d43e0c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/bitsandbytes.py @@ -0,0 +1,324 @@ +import importlib.metadata +import warnings +from copy import deepcopy +from inspect import signature + +from 
packaging import version + +from ..utils import is_accelerate_available, is_bitsandbytes_available, logging + + +if is_bitsandbytes_available(): + import bitsandbytes as bnb + import torch + import torch.nn as nn + + from ..pytorch_utils import Conv1D + +if is_accelerate_available(): + from accelerate import init_empty_weights + from accelerate.utils import find_tied_parameters + +logger = logging.get_logger(__name__) + + +def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, quantized_stats=None): + """ + A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing + `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function). The + function is adapted from `set_module_tensor_to_device` function from accelerate that is adapted to support the + class `Int8Params` from `bitsandbytes`. + + Args: + module (`torch.nn.Module`): + The module in which the tensor we want to move lives. + tensor_name (`str`): + The full name of the parameter/buffer. + device (`int`, `str` or `torch.device`): + The device on which to set the tensor. + value (`torch.Tensor`, *optional*): + The value of the tensor (useful when going from the meta device to any other device). + quantized_stats (`dict[str, Any]`, *optional*): + Dict with items for either 4-bit or 8-bit serialization + """ + # Recurse if needed + if "." in tensor_name: + splits = tensor_name.split(".") + for split in splits[:-1]: + new_module = getattr(module, split) + if new_module is None: + raise ValueError(f"{module} has no attribute {split}.") + module = new_module + tensor_name = splits[-1] + + if tensor_name not in module._parameters and tensor_name not in module._buffers: + raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") + is_buffer = tensor_name in module._buffers + old_value = getattr(module, tensor_name) + + if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None: + raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.") + + prequantized_loading = quantized_stats is not None + if is_buffer or not is_bitsandbytes_available(): + is_8bit = False + is_4bit = False + else: + is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit) + is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params) + + if is_8bit or is_4bit: + param = module._parameters[tensor_name] + if param.device.type != "cuda": + if value is None: + new_value = old_value.to(device) + elif isinstance(value, torch.Tensor): + new_value = value.to("cpu") + else: + new_value = torch.tensor(value, device="cpu") + + # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization. + # Since weights are saved in the correct "orientation", we skip transposing when loading. + if issubclass(module.source_cls, Conv1D) and not prequantized_loading: + new_value = new_value.T + + kwargs = old_value.__dict__ + + if prequantized_loading != (new_value.dtype in (torch.int8, torch.uint8)): + raise ValueError( + f"Value dtype `{new_value.dtype}` is not compatible with parameter quantization status." 
+ ) + + if is_8bit: + is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse( + "0.37.2" + ) + if new_value.dtype in (torch.int8, torch.uint8) and not is_8bit_serializable: + raise ValueError( + "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. " + "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." + ) + new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device) + if prequantized_loading: + setattr(new_value, "SCB", quantized_stats["SCB"].to(device)) + elif is_4bit: + if prequantized_loading: + is_4bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse( + "0.41.3" + ) + if new_value.dtype in (torch.int8, torch.uint8) and not is_4bit_serializable: + raise ValueError( + "Detected 4-bit weights but the version of bitsandbytes is not compatible with 4-bit serialization. " + "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." + ) + new_value = bnb.nn.Params4bit.from_prequantized( + data=new_value, + quantized_stats=quantized_stats, + requires_grad=False, + device=device, + **kwargs, + ) + else: + new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device) + module._parameters[tensor_name] = new_value + + else: + if value is None: + new_value = old_value.to(device) + elif isinstance(value, torch.Tensor): + new_value = value.to(device) + else: + new_value = torch.tensor(value, device=device) + + if is_buffer: + module._buffers[tensor_name] = new_value + else: + new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad) + module._parameters[tensor_name] = new_value + + +def _replace_with_bnb_linear( + model, + modules_to_not_convert=None, + current_key_name=None, + quantization_config=None, + has_been_replaced=False, +): + """ + Private method that wraps the recursion for module replacement. + + Returns the converted model and a boolean that indicates if the conversion has been successfull or not. + """ + for name, module in model.named_children(): + if current_key_name is None: + current_key_name = [] + current_key_name.append(name) + + if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert: + # Check if the current key is not in the `modules_to_not_convert` + current_key_name_str = ".".join(current_key_name) + if not any( + (key + "." 
in current_key_name_str) or (key == current_key_name_str) for key in modules_to_not_convert + ): + with init_empty_weights(): + if isinstance(module, Conv1D): + in_features, out_features = module.weight.shape + else: + in_features = module.in_features + out_features = module.out_features + + if quantization_config.quantization_method() == "llm_int8": + model._modules[name] = bnb.nn.Linear8bitLt( + in_features, + out_features, + module.bias is not None, + has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, + threshold=quantization_config.llm_int8_threshold, + ) + has_been_replaced = True + else: + if ( + quantization_config.llm_int8_skip_modules is not None + and name in quantization_config.llm_int8_skip_modules + ): + pass + else: + extra_kwargs = ( + {"quant_storage": quantization_config.bnb_4bit_quant_storage} + if "quant_storage" in list(signature(bnb.nn.Linear4bit).parameters) + else {} + ) + model._modules[name] = bnb.nn.Linear4bit( + in_features, + out_features, + module.bias is not None, + quantization_config.bnb_4bit_compute_dtype, + compress_statistics=quantization_config.bnb_4bit_use_double_quant, + quant_type=quantization_config.bnb_4bit_quant_type, + **extra_kwargs, + ) + has_been_replaced = True + # Store the module class in case we need to transpose the weight later + model._modules[name].source_cls = type(module) + # Force requires grad to False to avoid unexpected errors + model._modules[name].requires_grad_(False) + if len(list(module.children())) > 0: + _, has_been_replaced = _replace_with_bnb_linear( + module, + modules_to_not_convert, + current_key_name, + quantization_config, + has_been_replaced=has_been_replaced, + ) + # Remove the last key for recursion + current_key_name.pop(-1) + return model, has_been_replaced + + +def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None): + """ + A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bit` modules from the `bitsandbytes` + library. This will enable running your models using mixed int8 precision as described by the paper `LLM.int8(): + 8-bit Matrix Multiplication for Transformers at Scale`. Make sure `bitsandbytes` compiled with the correct CUDA + version of your hardware is installed before running this function. `pip install -i https://test.pypi.org/simple/ + bitsandbytes` + + The function will be run recursively and replace all `torch.nn.Linear` modules except for the `lm_head` that should + be kept as a `torch.nn.Linear` module. The replacement is done under `init_empty_weights` context manager so no + CPU/GPU memory is required to run this function. Int8 mixed-precision matrix decomposition works by separating a + matrix multiplication into two streams: (1) and systematic feature outlier stream matrix multiplied in fp16 + (0.01%), (2) a regular stream of int8 matrix multiplication (99.9%). With this method, int8 inference with no + predictive degradation is possible for very large models (>=176B parameters). + + Parameters: + model (`torch.nn.Module`): + Input model or `torch.nn.Module` as the function is run recursively. + modules_to_not_convert (`List[`str`]`, *optional*, defaults to `["lm_head"]`): + Names of the modules to not convert in `Linear8bitLt`. In practice we keep the `lm_head` in full precision + for numerical stability reasons. + current_key_name (`List[`str`]`, *optional*): + An array to track the current key of the recursion. 
This is used to check whether the current key (part of + it) is not in the list of modules to not convert (for instances modules that are offloaded to `cpu` or + `disk`). + """ + modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert + model, has_been_replaced = _replace_with_bnb_linear( + model, modules_to_not_convert, current_key_name, quantization_config + ) + + if not has_been_replaced: + logger.warning( + "You are loading your model in 8bit or 4bit but no linear modules were found in your model." + " Please double check your model architecture, or submit an issue on github if you think this is" + " a bug." + ) + + return model + + +# For backward compatibility +def replace_8bit_linear(*args, **kwargs): + warnings.warn( + "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead", + FutureWarning, + ) + return replace_with_bnb_linear(*args, **kwargs) + + +# For backward compatiblity +def set_module_8bit_tensor_to_device(*args, **kwargs): + warnings.warn( + "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead", + FutureWarning, + ) + return set_module_quantized_tensor_to_device(*args, **kwargs) + + +def get_keys_to_not_convert(model): + r""" + An utility function to get the key of the module to keep in full precision if any For example for CausalLM modules + we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want + to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in + int8. + + Parameters: + model (`torch.nn.Module`): + Input model + """ + # Create a copy of the model and tie the weights, then + # check if it contains tied weights + tied_model = deepcopy(model) # this has 0 cost since it is done inside `init_empty_weights` context manager` + tied_model.tie_weights() + + tied_params = find_tied_parameters(tied_model) + # For compatibility with Accelerate < 0.18 + if isinstance(tied_params, dict): + tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys()) + else: + tied_keys = sum(tied_params, []) + has_tied_params = len(tied_keys) > 0 + + # If there is not tied weights, we want to keep the lm_head(output_embedding) in full precision + if not has_tied_params: + output_emb = model.get_output_embeddings() + if output_emb is not None: + list_last_module = [name for name, module in model.named_modules() if id(module) == id(output_emb)] + return list_last_module + + # otherwise, no tied weights, no output embedding defined, simply keep the last module in full precision + list_modules = list(model.named_parameters()) + list_last_module = [list_modules[-1][0]] + # add last module together with tied weights + intersection = set(list_last_module) - set(tied_keys) + list_untouched = list(set(tied_keys)) + list(intersection) + + # remove ".weight" from the keys + names_to_remove = [".weight", ".bias"] + filtered_module_names = [] + for name in list_untouched: + for name_to_remove in names_to_remove: + if name_to_remove in name: + name = name.replace(name_to_remove, "") + filtered_module_names.append(name) + + return filtered_module_names diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/deepspeed.py b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/deepspeed.py new file mode 100644 index 
0000000000000000000000000000000000000000..4754c37a1eb38cd58ed158b6c172bc236ea6b459 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/deepspeed.py @@ -0,0 +1,441 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Integration with Deepspeed +""" +import copy +import importlib.metadata as importlib_metadata +import importlib.util +import weakref +from functools import partialmethod + +from ..dependency_versions_check import dep_version_check +from ..utils import is_accelerate_available, is_torch_available, is_torch_mlu_available, logging + + +if is_torch_available(): + import torch + + +logger = logging.get_logger(__name__) + + +def is_deepspeed_available(): + package_exists = importlib.util.find_spec("deepspeed") is not None + + # Check we're not importing a "deepspeed" directory somewhere but the actual library by trying to grab the version + # AND checking it has an author field in the metadata that is HuggingFace. + if package_exists: + try: + if is_torch_mlu_available(): + _ = importlib_metadata.metadata("deepspeed-mlu") + return True + _ = importlib_metadata.metadata("deepspeed") + return True + except importlib_metadata.PackageNotFoundError: + return False + + +if is_accelerate_available() and is_deepspeed_available(): + from accelerate.utils.deepspeed import HfDeepSpeedConfig as DeepSpeedConfig +else: + # Inherits from a dummy `object` if accelerate is not available, so that python succeeds to import this file. + # Deepspeed glue code will never inherit this dummy object as it checks if accelerate is available. + from builtins import object as DeepSpeedConfig + + +class HfDeepSpeedConfig(DeepSpeedConfig): + """ + This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage. + + A `weakref` of this object is stored in the module's globals to be able to access the config from areas where + things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore + it's important that this object remains alive while the program is still running. + + [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration + with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic + the DeepSpeed configuration is not modified in any way. + + Args: + config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict. + + """ + + def __init__(self, config_file_or_dict): + # set global weakref object + set_hf_deepspeed_config(self) + dep_version_check("accelerate") + dep_version_check("deepspeed") + super().__init__(config_file_or_dict) + + +class HfTrainerDeepSpeedConfig(HfDeepSpeedConfig): + """ + The `HfTrainerDeepSpeedConfig` object is meant to be created during `TrainingArguments` object creation and has the + same lifespan as the latter. 
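+
+    A minimal sketch of how this object typically comes into existence: indirectly, through the
+    `deepspeed` argument of [`TrainingArguments`] (the config path below is illustrative, and the
+    snippet assumes `deepspeed` and `accelerate` are installed)::
+
+        from transformers import TrainingArguments
+
+        args = TrainingArguments(output_dir="out", deepspeed="ds_config_zero3.json")
+        # TrainingArguments builds an HfTrainerDeepSpeedConfig from the file and keeps it alive
+        # for its own lifetime; the parent __init__ also registers the weakref via
+        # set_hf_deepspeed_config() so helpers such as is_deepspeed_zero3_enabled() can find it.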
+ """ + + def __init__(self, config_file_or_dict): + super().__init__(config_file_or_dict) + self._dtype = None + self.mismatches = [] + + def dtype(self): + if self._dtype is None: + raise ValueError("trainer_config_process() wasn't called yet to tell dtype") + return self._dtype + + def is_auto(self, ds_key_long): + val = self.get_value(ds_key_long) + if val is None: + return False + else: + return val == "auto" + + def fill_match(self, ds_key_long, hf_val, hf_key=None, must_match=True): + """ + A utility method that massages the config file and can optionally verify that the values match. + + 1. Replace "auto" values with `TrainingArguments` value. + + 2. If it wasn't "auto" and `must_match` is true, then check that DS config matches Trainer + config values and if mismatched add the entry to `self.mismatched` - will assert during + `trainer_config_finalize` for one or more mismatches. + + """ + config, ds_key = self.find_config_node(ds_key_long) + if config is None: + return + + if config.get(ds_key) == "auto": + config[ds_key] = hf_val + return + + if not must_match: + return + + ds_val = config.get(ds_key) + if ds_val is not None and ds_val != hf_val: + self.mismatches.append(f"- ds {ds_key_long}={ds_val} vs hf {hf_key}={hf_val}") + + fill_only = partialmethod(fill_match, must_match=False) + + def trainer_config_process(self, args, auto_find_batch_size=False): + """ + Adjust the config with `TrainingArguments` values. This stage is run during `TrainingArguments` object + creation. + """ + # DeepSpeed does: + # train_batch_size = world_size * train_micro_batch_size_per_gpu * gradient_accumulation_steps + train_batch_size = args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps + self.fill_match( + "train_micro_batch_size_per_gpu", + args.per_device_train_batch_size, + "per_device_train_batch_size", + not auto_find_batch_size, + ) + self.fill_match( + "gradient_accumulation_steps", + args.gradient_accumulation_steps, + "gradient_accumulation_steps", + ) + self.fill_match( + "train_batch_size", + train_batch_size, + "train_batch_size (calculated)", + not auto_find_batch_size, + ) + self.fill_match("gradient_clipping", args.max_grad_norm, "max_grad_norm") + + self.fill_match("optimizer.params.lr", args.learning_rate, "learning_rate") + self.fill_match( + "optimizer.params.betas", + [args.adam_beta1, args.adam_beta2], + "adam_beta1+adam_beta2", + ) + self.fill_match("optimizer.params.eps", args.adam_epsilon, "adam_epsilon") + self.fill_match("optimizer.params.weight_decay", args.weight_decay, "weight_decay") + + self.fill_only("scheduler.params.warmup_min_lr", 0) # not a trainer arg + self.fill_match("scheduler.params.warmup_max_lr", args.learning_rate, "learning_rate") + # total_num_steps - will get set in trainer_config_finalize + + # fp16 + if args.fp16 or args.fp16_full_eval: + fp16_backend = "apex" if args.fp16_backend == "apex" else "amp" + else: + fp16_backend = None + + if args.save_on_each_node: + # deepspeed uses shared storage by default. 
Let's override this setting if save_on_each_node == True + self.config["checkpoint"] = self.config.get("checkpoint", {}) + self.config["checkpoint"]["use_node_local_storage"] = args.save_on_each_node + + # amp: similar to the pytorch native amp - it has a bunch of optional params but we won't set + # any here unless the user did the work + self.fill_match( + "fp16.enabled", + ((args.fp16 or args.fp16_full_eval) and fp16_backend == "amp"), + "fp16|fp16_full_eval+fp16_backend(amp)", + ) + + # apex: delegates amp work to apex (which needs to be available), but it cannot be used with any + # ZeRO features + self.fill_match("amp.enabled", fp16_backend == "apex", "fp16+fp16_backend(apex)") + self.fill_match("amp.opt_level", args.fp16_opt_level, "fp16_opt_level") + + self.fill_match("bf16.enabled", (args.bf16 or args.bf16_full_eval), "bf16|bf16_full_eval") + + # deepspeed's default mode is fp16 unless there is a config that says differently + if self.is_true("bf16.enabled"): + self._dtype = torch.bfloat16 + elif self.is_false("fp16.enabled"): + self._dtype = torch.float32 + else: + self._dtype = torch.float16 + + def trainer_config_finalize(self, args, model, num_training_steps): + """ + This stage is run after we have the model and know num_training_steps. + + Now we can complete the configuration process. + """ + # zero + + # deal with config keys that use `auto` value and rely on model's hidden_size + hidden_size_based_keys = [ + "zero_optimization.reduce_bucket_size", + "zero_optimization.stage3_prefetch_bucket_size", + "zero_optimization.stage3_param_persistence_threshold", + ] + hidden_size_auto_keys = [x for x in hidden_size_based_keys if self.is_auto(x)] + + if len(hidden_size_auto_keys) > 0: + if hasattr(model.config, "hidden_size"): + hidden_size = model.config.hidden_size + elif hasattr(model.config, "hidden_sizes"): + # if there are many hidden sizes pick the largest one + hidden_size = max(model.config.hidden_sizes) + else: + raise ValueError( + "The model's config file has neither `hidden_size` nor `hidden_sizes` entry, " + "therefore it's not possible to automatically fill out the following `auto` entries " + f"in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing " + "`auto` values for these keys with an integer value of your choice." + ) + + self.fill_only("zero_optimization.reduce_bucket_size", hidden_size * hidden_size) + if self.is_zero3(): + # automatically assign the optimal config values based on model config + self.fill_only( + "zero_optimization.stage3_prefetch_bucket_size", + 0.9 * hidden_size * hidden_size, + ) + self.fill_only( + "zero_optimization.stage3_param_persistence_threshold", + 10 * hidden_size, + ) + + # scheduler + self.fill_match( + "scheduler.params.total_num_steps", + num_training_steps, + "num_training_steps (calculated)", + ) + self.fill_match( + "scheduler.params.warmup_num_steps", + args.get_warmup_steps(num_training_steps), + "warmup_steps", + ) + + if len(self.mismatches) > 0: + mismatches = "\n".join(self.mismatches) + raise ValueError( + "Please correct the following DeepSpeed config values that mismatch TrainingArguments" + f" values:\n{mismatches}\nThe easiest method is to set these DeepSpeed config values to 'auto'." 
+ ) + + +# keep the config object global to be able to access it anywhere during TrainingArguments life-cycle +_hf_deepspeed_config_weak_ref = None + + +def set_hf_deepspeed_config(hf_deepspeed_config_obj): + # this is a special weakref global object to allow us to get to Deepspeed config from APIs + # that don't have an easy way to get to the Deepspeed config outside of the Trainer domain. + global _hf_deepspeed_config_weak_ref + # will go away automatically when HfDeepSpeedConfig is destroyed (when TrainingArguments is destroyed) + _hf_deepspeed_config_weak_ref = weakref.ref(hf_deepspeed_config_obj) + + +def unset_hf_deepspeed_config(): + # useful for unit tests to ensure the global state doesn't leak - call from `tearDown` method + global _hf_deepspeed_config_weak_ref + _hf_deepspeed_config_weak_ref = None + + +def is_deepspeed_zero3_enabled(): + if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None: + return _hf_deepspeed_config_weak_ref().is_zero3() + else: + return False + + +def deepspeed_config(): + if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None: + return _hf_deepspeed_config_weak_ref().config + else: + return None + + +def deepspeed_optim_sched(trainer, hf_deepspeed_config, args, num_training_steps, model_parameters): + """ + A convenience wrapper that deals with optimizer and lr scheduler configuration. + """ + from accelerate.utils import DummyOptim, DummyScheduler + + config = hf_deepspeed_config.config + + # Mixing and matching DS schedulers and optimizers is supported unless Offload is enabled in which case it's: + # 1. DS scheduler + DS optimizer: Yes + # 2. HF scheduler + HF optimizer: Mostly* + # 3. DS scheduler + HF optimizer: Mostly* + # 4. HF scheduler + DS optimizer: Yes + # + # Mostly*: All non-native DeepSpeed optimizers that have both CPU and GPU implementation should work (except LAMB) + + optimizer = None + if "optimizer" in config: + if args.adafactor: + raise ValueError( + "--adafactor was passed, but also found `optimizer` configured in the DeepSpeed config. " + "Only one optimizer can be configured." + ) + optimizer = DummyOptim(params=model_parameters) + else: + if hf_deepspeed_config.is_offload(): + logger.info( + "Detected ZeRO Offload and non-DeepSpeed optimizers: This combination should work as long as the" + " custom optimizer has both CPU and GPU implementation (except LAMB)" + ) + + # ds supports Adam, OneBitAdam, and Lamb optimizers and can import other optimizers from torch. + # But trainer uses AdamW by default. 
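+        # create_optimizer() returns the HF-side optimizer (AdamW unless the user supplied their
+        # own), which DeepSpeed will then drive instead of an optimizer defined in its config.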
+ optimizer = trainer.create_optimizer() + # To use other optimizers requires voiding warranty with: `zero_allow_untested_optimizer` + config["zero_allow_untested_optimizer"] = True + + lr_scheduler = None + if "scheduler" in config: + lr_scheduler = DummyScheduler(optimizer) + else: + if isinstance(optimizer, DummyOptim): + + def _lr_scheduler_callable(optimizer): + # create a shallow copy first, so later modifications do not affect original trainer + trainer_copy = copy.copy(trainer) + # at the time _lr_scheduler_callable is called, trainer.lr_scheduler has been set + # update it to None so that we can re-create a new scheduler + trainer_copy.lr_scheduler = None + lr_scheduler = trainer_copy.create_scheduler( + num_training_steps=num_training_steps, optimizer=optimizer + ) + return lr_scheduler + + lr_scheduler = DummyScheduler(optimizer, lr_scheduler_callable=_lr_scheduler_callable) + else: + lr_scheduler = trainer.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer) + + return optimizer, lr_scheduler + + +def deepspeed_init(trainer, num_training_steps, inference=False): + """ + Init DeepSpeed, after updating the DeepSpeed configuration with any relevant Trainer's args. + + If `resume_from_checkpoint` was passed then an attempt to resume from a previously saved checkpoint will be made. + + Args: + trainer: Trainer object + num_training_steps: per single gpu + resume_from_checkpoint: path to a checkpoint if to resume from after normal DeepSpeedEngine load + inference: launch in inference mode (no optimizer and no lr scheduler) + auto_find_batch_size: whether to ignore the `train_micro_batch_size_per_gpu` argument as it's being + set automatically by the auto batch size finder + + Returns: optimizer, lr_scheduler + + We may use `deepspeed_init` more than once during the life of Trainer, when we do - it's a temp hack based on: + https://github.com/microsoft/DeepSpeed/issues/1394#issuecomment-937405374 until Deepspeed fixes a bug where it + can't resume from a checkpoint after it did some stepping https://github.com/microsoft/DeepSpeed/issues/1612 + + """ + from deepspeed.utils import logger as ds_logger + + model = trainer.model + args = trainer.args + + hf_deepspeed_config = trainer.accelerator.state.deepspeed_plugin.hf_ds_config + + # resume config update - some bits like `model` and `num_training_steps` only become available during train + hf_deepspeed_config.trainer_config_finalize(args, model, num_training_steps) + + # set the Deepspeed log level consistent with the Trainer + ds_logger.setLevel(args.get_process_log_level()) + + if inference: + # only Z3 makes sense for the inference + if not hf_deepspeed_config.is_zero3(): + raise ValueError("ZeRO inference only makes sense with ZeRO Stage 3 - please adjust your config") + + # in case the training config is re-used for inference + hf_deepspeed_config.del_config_sub_tree("optimizer") + hf_deepspeed_config.del_config_sub_tree("lr_scheduler") + optimizer, lr_scheduler = None, None + model_parameters = None + else: + trainer.optimizer = None # important for when deepspeed_init is used as re-init + model_parameters = list(filter(lambda p: p.requires_grad, model.parameters())) + optimizer, lr_scheduler = deepspeed_optim_sched( + trainer, hf_deepspeed_config, args, num_training_steps, model_parameters + ) + + # keep for quick debug: + # from pprint import pprint; pprint(config) + + return optimizer, lr_scheduler + + +def deepspeed_load_checkpoint(deepspeed_engine, checkpoint_path, load_module_strict=True): + # it's 
possible that the user is trying to resume from model_path, which doesn't necessarily + # contain a deepspeed checkpoint. e.g. examples just check if the dir exists and assume it's + # a resume from a checkpoint and not just a local pretrained weight. So we check here if the + # path contains what looks like a deepspeed checkpoint + import glob + + deepspeed_checkpoint_dirs = sorted(glob.glob(f"{checkpoint_path}/global_step*")) + + if len(deepspeed_checkpoint_dirs) > 0: + logger.info(f"Attempting to resume from {checkpoint_path}") + # this magically updates self.optimizer and self.lr_scheduler + load_path, _ = deepspeed_engine.load_checkpoint( + checkpoint_path, + load_module_strict=load_module_strict, + load_optimizer_states=True, + load_lr_scheduler_states=True, + ) + if load_path is None: + raise ValueError(f"[deepspeed] failed to resume from checkpoint {checkpoint_path}") + else: + raise ValueError(f"Can't find a valid checkpoint at {checkpoint_path}") diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/integration_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/integration_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..00074a9574b548577f60c7cb6541ca4b74422b81 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/integration_utils.py @@ -0,0 +1,1914 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Integrations with other Python libraries. +""" +import functools +import importlib.metadata +import importlib.util +import json +import numbers +import os +import pickle +import shutil +import sys +import tempfile +from dataclasses import asdict, fields +from pathlib import Path +from typing import TYPE_CHECKING, Any, Dict, Literal, Optional, Union + +import numpy as np +import packaging.version + +from .. 
import __version__ as version +from ..utils import flatten_dict, is_datasets_available, is_pandas_available, is_torch_available, logging + + +logger = logging.get_logger(__name__) + +if is_torch_available(): + import torch + +# comet_ml requires to be imported before any ML frameworks +_has_comet = importlib.util.find_spec("comet_ml") is not None and os.getenv("COMET_MODE", "").upper() != "DISABLED" +if _has_comet: + try: + import comet_ml # noqa: F401 + + if hasattr(comet_ml, "config") and comet_ml.config.get_config("comet.api_key"): + _has_comet = True + else: + if os.getenv("COMET_MODE", "").upper() != "DISABLED": + logger.warning("comet_ml is installed but `COMET_API_KEY` is not set.") + _has_comet = False + except (ImportError, ValueError): + _has_comet = False + +_has_neptune = ( + importlib.util.find_spec("neptune") is not None or importlib.util.find_spec("neptune-client") is not None +) +if TYPE_CHECKING and _has_neptune: + try: + _neptune_version = importlib.metadata.version("neptune") + logger.info(f"Neptune version {_neptune_version} available.") + except importlib.metadata.PackageNotFoundError: + try: + _neptune_version = importlib.metadata.version("neptune-client") + logger.info(f"Neptune-client version {_neptune_version} available.") + except importlib.metadata.PackageNotFoundError: + _has_neptune = False + +from ..trainer_callback import ProgressCallback, TrainerCallback # noqa: E402 +from ..trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy # noqa: E402 +from ..training_args import ParallelMode # noqa: E402 +from ..utils import ENV_VARS_TRUE_VALUES, is_torch_xla_available # noqa: E402 + + +# Integration functions: +def is_wandb_available(): + # any value of WANDB_DISABLED disables wandb + if os.getenv("WANDB_DISABLED", "").upper() in ENV_VARS_TRUE_VALUES: + logger.warning( + "Using the `WANDB_DISABLED` environment variable is deprecated and will be removed in v5. Use the " + "--report_to flag to control the integrations used for logging result (for instance --report_to none)." 
+ ) + return False + return importlib.util.find_spec("wandb") is not None + + +def is_clearml_available(): + return importlib.util.find_spec("clearml") is not None + + +def is_comet_available(): + return _has_comet + + +def is_tensorboard_available(): + return importlib.util.find_spec("tensorboard") is not None or importlib.util.find_spec("tensorboardX") is not None + + +def is_optuna_available(): + return importlib.util.find_spec("optuna") is not None + + +def is_ray_available(): + return importlib.util.find_spec("ray") is not None + + +def is_ray_tune_available(): + if not is_ray_available(): + return False + return importlib.util.find_spec("ray.tune") is not None + + +def is_sigopt_available(): + return importlib.util.find_spec("sigopt") is not None + + +def is_azureml_available(): + if importlib.util.find_spec("azureml") is None: + return False + if importlib.util.find_spec("azureml.core") is None: + return False + return importlib.util.find_spec("azureml.core.run") is not None + + +def is_mlflow_available(): + if os.getenv("DISABLE_MLFLOW_INTEGRATION", "FALSE").upper() == "TRUE": + return False + return importlib.util.find_spec("mlflow") is not None + + +def is_dagshub_available(): + return None not in [importlib.util.find_spec("dagshub"), importlib.util.find_spec("mlflow")] + + +def is_neptune_available(): + return _has_neptune + + +def is_codecarbon_available(): + return importlib.util.find_spec("codecarbon") is not None + + +def is_flytekit_available(): + return importlib.util.find_spec("flytekit") is not None + + +def is_flyte_deck_standard_available(): + if not is_flytekit_available(): + return False + return importlib.util.find_spec("flytekitplugins.deck") is not None + + +def is_dvclive_available(): + return importlib.util.find_spec("dvclive") is not None + + +def hp_params(trial): + if is_optuna_available(): + import optuna + + if isinstance(trial, optuna.Trial): + return trial.params + if is_ray_tune_available(): + if isinstance(trial, dict): + return trial + + if is_sigopt_available(): + if isinstance(trial, dict): + return trial + + if is_wandb_available(): + if isinstance(trial, dict): + return trial + + raise RuntimeError(f"Unknown type for trial {trial.__class__}") + + +def run_hp_search_optuna(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: + import optuna + + if trainer.args.process_index == 0: + + def _objective(trial, checkpoint_dir=None): + checkpoint = None + if checkpoint_dir: + for subdir in os.listdir(checkpoint_dir): + if subdir.startswith(PREFIX_CHECKPOINT_DIR): + checkpoint = os.path.join(checkpoint_dir, subdir) + trainer.objective = None + if trainer.args.world_size > 1: + if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: + raise RuntimeError("only support DDP optuna HPO for ParallelMode.DISTRIBUTED currently.") + trainer._hp_search_setup(trial) + torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) + trainer.train(resume_from_checkpoint=checkpoint) + else: + trainer.train(resume_from_checkpoint=checkpoint, trial=trial) + # If there hasn't been any evaluation during the training loop. 
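+            # (fall back to one last evaluate() so the trial still reports an objective value)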
+ if getattr(trainer, "objective", None) is None: + metrics = trainer.evaluate() + trainer.objective = trainer.compute_objective(metrics) + return trainer.objective + + timeout = kwargs.pop("timeout", None) + n_jobs = kwargs.pop("n_jobs", 1) + directions = direction if isinstance(direction, list) else None + direction = None if directions is not None else direction + study = optuna.create_study(direction=direction, directions=directions, **kwargs) + study.optimize(_objective, n_trials=n_trials, timeout=timeout, n_jobs=n_jobs) + if not study._is_multi_objective(): + best_trial = study.best_trial + return BestRun(str(best_trial.number), best_trial.value, best_trial.params) + else: + best_trials = study.best_trials + return [BestRun(str(best.number), best.values, best.params) for best in best_trials] + else: + for i in range(n_trials): + trainer.objective = None + args_main_rank = list(pickle.dumps(trainer.args)) + if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: + raise RuntimeError("only support DDP optuna HPO for ParallelMode.DISTRIBUTED currently.") + torch.distributed.broadcast_object_list(args_main_rank, src=0) + args = pickle.loads(bytes(args_main_rank)) + for key, value in asdict(args).items(): + if key != "local_rank": + setattr(trainer.args, key, value) + trainer.train(resume_from_checkpoint=None) + # If there hasn't been any evaluation during the training loop. + if getattr(trainer, "objective", None) is None: + metrics = trainer.evaluate() + trainer.objective = trainer.compute_objective(metrics) + return None + + +def run_hp_search_ray(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: + import ray + import ray.train + + def _objective(trial: dict, local_trainer): + try: + from transformers.utils.notebook import NotebookProgressCallback + + if local_trainer.pop_callback(NotebookProgressCallback): + local_trainer.add_callback(ProgressCallback) + except ModuleNotFoundError: + pass + + local_trainer.objective = None + + checkpoint = ray.train.get_checkpoint() + if checkpoint: + # Upon trial resume, the local_trainer's objective gets reset to None. + # If `local_trainer.train` is a noop (training has already reached + # the target number of epochs/steps), then this would + # trigger an unnecessary extra checkpoint at the end of training. + # -> Set the objective to a dummy value upon resume as a workaround. + local_trainer.objective = "objective" + + with checkpoint.as_directory() as checkpoint_dir: + checkpoint_path = next(Path(checkpoint_dir).glob(f"{PREFIX_CHECKPOINT_DIR}*")).as_posix() + local_trainer.train(resume_from_checkpoint=checkpoint_path, trial=trial) + else: + local_trainer.train(trial=trial) + + # If there hasn't been any evaluation during the training loop. + if getattr(local_trainer, "objective", None) is None: + metrics = local_trainer.evaluate() + local_trainer.objective = local_trainer.compute_objective(metrics) + + metrics.update({"objective": local_trainer.objective, "done": True}) + + with tempfile.TemporaryDirectory() as temp_checkpoint_dir: + local_trainer._tune_save_checkpoint(checkpoint_dir=temp_checkpoint_dir) + checkpoint = ray.train.Checkpoint.from_directory(temp_checkpoint_dir) + ray.train.report(metrics, checkpoint=checkpoint) + + if not trainer._memory_tracker.skip_memory_metrics: + from ..trainer_utils import TrainerMemoryTracker + + logger.warning( + "Memory tracking for your Trainer is currently " + "enabled. Automatically disabling the memory tracker " + "since the memory tracker is not serializable." 
+ ) + trainer._memory_tracker = TrainerMemoryTracker(skip_memory_metrics=True) + + # The model and TensorBoard writer do not pickle so we have to remove them (if they exists) + # while doing the ray hp search. + _tb_writer = trainer.pop_callback(TensorBoardCallback) + trainer.model = None + + # Setup default `resources_per_trial`. + if "resources_per_trial" not in kwargs: + # Default to 1 CPU and 1 GPU (if applicable) per trial. + kwargs["resources_per_trial"] = {"cpu": 1} + if trainer.args.n_gpu > 0: + kwargs["resources_per_trial"]["gpu"] = 1 + resource_msg = "1 CPU" + (" and 1 GPU" if trainer.args.n_gpu > 0 else "") + logger.info( + "No `resources_per_trial` arg was passed into " + "`hyperparameter_search`. Setting it to a default value " + f"of {resource_msg} for each trial." + ) + # Make sure each trainer only uses GPUs that were allocated per trial. + gpus_per_trial = kwargs["resources_per_trial"].get("gpu", 0) + trainer.args._n_gpu = gpus_per_trial + + # Setup default `progress_reporter`. + if "progress_reporter" not in kwargs: + from ray.tune import CLIReporter + + kwargs["progress_reporter"] = CLIReporter(metric_columns=["objective"]) + + if "scheduler" in kwargs: + from ray.tune.schedulers import ASHAScheduler, HyperBandForBOHB, MedianStoppingRule, PopulationBasedTraining + + # Check for `do_eval` and `eval_during_training` for schedulers that require intermediate reporting. + if isinstance( + kwargs["scheduler"], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining) + ) and (not trainer.args.do_eval or trainer.args.evaluation_strategy == IntervalStrategy.NO): + raise RuntimeError( + "You are using {cls} as a scheduler but you haven't enabled evaluation during training. " + "This means your trials will not report intermediate results to Ray Tune, and " + "can thus not be stopped early or used to exploit other trials parameters. " + "If this is what you want, do not use {cls}. If you would like to use {cls}, " + "make sure you pass `do_eval=True` and `evaluation_strategy='steps'` in the " + "Trainer `args`.".format(cls=type(kwargs["scheduler"]).__name__) + ) + + trainable = ray.tune.with_parameters(_objective, local_trainer=trainer) + + @functools.wraps(trainable) + def dynamic_modules_import_trainable(*args, **kwargs): + """ + Wrapper around `tune.with_parameters` to ensure datasets_modules are loaded on each Actor. + + Without this, an ImportError will be thrown. See https://github.com/huggingface/transformers/issues/11565. + + Assumes that `_objective`, defined above, is a function. 
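+        (Concretely, the wrapper re-runs the `datasets` dynamic-modules initialization and registers
+        the resulting module in `sys.modules` inside each Ray actor before delegating to `trainable`.)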
+ """ + if is_datasets_available(): + import datasets.load + + dynamic_modules_path = os.path.join(datasets.load.init_dynamic_modules(), "__init__.py") + # load dynamic_modules from path + spec = importlib.util.spec_from_file_location("datasets_modules", dynamic_modules_path) + datasets_modules = importlib.util.module_from_spec(spec) + sys.modules[spec.name] = datasets_modules + spec.loader.exec_module(datasets_modules) + return trainable(*args, **kwargs) + + # special attr set by tune.with_parameters + if hasattr(trainable, "__mixins__"): + dynamic_modules_import_trainable.__mixins__ = trainable.__mixins__ + + analysis = ray.tune.run( + dynamic_modules_import_trainable, + config=trainer.hp_space(None), + num_samples=n_trials, + **kwargs, + ) + best_trial = analysis.get_best_trial(metric="objective", mode=direction[:3], scope=trainer.args.ray_scope) + best_run = BestRun(best_trial.trial_id, best_trial.last_result["objective"], best_trial.config, analysis) + if _tb_writer is not None: + trainer.add_callback(_tb_writer) + return best_run + + +def run_hp_search_sigopt(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: + import sigopt + + if trainer.args.process_index == 0: + if importlib.metadata.version("sigopt") >= "8.0.0": + sigopt.set_project("huggingface") + + experiment = sigopt.create_experiment( + name="huggingface-tune", + type="offline", + parameters=trainer.hp_space(None), + metrics=[{"name": "objective", "objective": direction, "strategy": "optimize"}], + parallel_bandwidth=1, + budget=n_trials, + ) + + logger.info(f"created experiment: https://app.sigopt.com/experiment/{experiment.id}") + + for run in experiment.loop(): + with run: + trainer.objective = None + if trainer.args.world_size > 1: + if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: + raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") + trainer._hp_search_setup(run.run) + torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) + trainer.train(resume_from_checkpoint=None) + else: + trainer.train(resume_from_checkpoint=None, trial=run.run) + # If there hasn't been any evaluation during the training loop. 
+ if getattr(trainer, "objective", None) is None: + metrics = trainer.evaluate() + trainer.objective = trainer.compute_objective(metrics) + run.log_metric("objective", trainer.objective) + + best = list(experiment.get_best_runs())[0] + best_run = BestRun(best.id, best.values["objective"].value, best.assignments) + else: + from sigopt import Connection + + conn = Connection() + proxies = kwargs.pop("proxies", None) + if proxies is not None: + conn.set_proxies(proxies) + + experiment = conn.experiments().create( + name="huggingface-tune", + parameters=trainer.hp_space(None), + metrics=[{"name": "objective", "objective": direction, "strategy": "optimize"}], + parallel_bandwidth=1, + observation_budget=n_trials, + project="huggingface", + ) + logger.info(f"created experiment: https://app.sigopt.com/experiment/{experiment.id}") + + while experiment.progress.observation_count < experiment.observation_budget: + suggestion = conn.experiments(experiment.id).suggestions().create() + trainer.objective = None + if trainer.args.world_size > 1: + if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: + raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") + trainer._hp_search_setup(suggestion) + torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) + trainer.train(resume_from_checkpoint=None) + else: + trainer.train(resume_from_checkpoint=None, trial=suggestion) + # If there hasn't been any evaluation during the training loop. + if getattr(trainer, "objective", None) is None: + metrics = trainer.evaluate() + trainer.objective = trainer.compute_objective(metrics) + + values = [{"name": "objective", "value": trainer.objective}] + obs = conn.experiments(experiment.id).observations().create(suggestion=suggestion.id, values=values) + logger.info(f"[suggestion_id, observation_id]: [{suggestion.id}, {obs.id}]") + experiment = conn.experiments(experiment.id).fetch() + + best = list(conn.experiments(experiment.id).best_assignments().fetch().iterate_pages())[0] + best_run = BestRun(best.id, best.value, best.assignments) + return best_run + else: + for i in range(n_trials): + trainer.objective = None + args_main_rank = list(pickle.dumps(trainer.args)) + if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: + raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") + torch.distributed.broadcast_object_list(args_main_rank, src=0) + args = pickle.loads(bytes(args_main_rank)) + for key, value in asdict(args).items(): + if key != "local_rank": + setattr(trainer.args, key, value) + trainer.train(resume_from_checkpoint=None) + # If there hasn't been any evaluation during the training loop. 
+ if getattr(trainer, "objective", None) is None: + metrics = trainer.evaluate() + trainer.objective = trainer.compute_objective(metrics) + return None + + +def run_hp_search_wandb(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: + from ..integrations import is_wandb_available + + if not is_wandb_available(): + raise ImportError("This function needs wandb installed: `pip install wandb`") + import wandb + + # add WandbCallback if not already added in trainer callbacks + reporting_to_wandb = False + for callback in trainer.callback_handler.callbacks: + if isinstance(callback, WandbCallback): + reporting_to_wandb = True + break + if not reporting_to_wandb: + trainer.add_callback(WandbCallback()) + trainer.args.report_to = ["wandb"] + best_trial = {"run_id": None, "objective": None, "hyperparameters": None} + sweep_id = kwargs.pop("sweep_id", None) + project = kwargs.pop("project", None) + name = kwargs.pop("name", None) + entity = kwargs.pop("entity", None) + metric = kwargs.pop("metric", "eval/loss") + + sweep_config = trainer.hp_space(None) + sweep_config["metric"]["goal"] = direction + sweep_config["metric"]["name"] = metric + if name: + sweep_config["name"] = name + + def _objective(): + run = wandb.run if wandb.run else wandb.init() + trainer.state.trial_name = run.name + run.config.update({"assignments": {}, "metric": metric}) + config = wandb.config + + trainer.objective = None + + trainer.train(resume_from_checkpoint=None, trial=vars(config)["_items"]) + # If there hasn't been any evaluation during the training loop. + if getattr(trainer, "objective", None) is None: + metrics = trainer.evaluate() + trainer.objective = trainer.compute_objective(metrics) + format_metrics = rewrite_logs(metrics) + if metric not in format_metrics: + logger.warning( + f"Provided metric {metric} not found. This might result in unexpected sweeps charts. 
The available" + f" metrics are {format_metrics.keys()}" + ) + best_score = False + if best_trial["run_id"] is not None: + if direction == "minimize": + best_score = trainer.objective < best_trial["objective"] + elif direction == "maximize": + best_score = trainer.objective > best_trial["objective"] + + if best_score or best_trial["run_id"] is None: + best_trial["run_id"] = run.id + best_trial["objective"] = trainer.objective + best_trial["hyperparameters"] = dict(config) + + return trainer.objective + + sweep_id = wandb.sweep(sweep_config, project=project, entity=entity) if not sweep_id else sweep_id + logger.info(f"wandb sweep id - {sweep_id}") + wandb.agent(sweep_id, function=_objective, count=n_trials) + + return BestRun(best_trial["run_id"], best_trial["objective"], best_trial["hyperparameters"]) + + +def get_available_reporting_integrations(): + integrations = [] + if is_azureml_available() and not is_mlflow_available(): + integrations.append("azure_ml") + if is_comet_available(): + integrations.append("comet_ml") + if is_dagshub_available(): + integrations.append("dagshub") + if is_dvclive_available(): + integrations.append("dvclive") + if is_mlflow_available(): + integrations.append("mlflow") + if is_neptune_available(): + integrations.append("neptune") + if is_tensorboard_available(): + integrations.append("tensorboard") + if is_wandb_available(): + integrations.append("wandb") + if is_codecarbon_available(): + integrations.append("codecarbon") + if is_clearml_available(): + integrations.append("clearml") + return integrations + + +def rewrite_logs(d): + new_d = {} + eval_prefix = "eval_" + eval_prefix_len = len(eval_prefix) + test_prefix = "test_" + test_prefix_len = len(test_prefix) + for k, v in d.items(): + if k.startswith(eval_prefix): + new_d["eval/" + k[eval_prefix_len:]] = v + elif k.startswith(test_prefix): + new_d["test/" + k[test_prefix_len:]] = v + else: + new_d["train/" + k] = v + return new_d + + +class TensorBoardCallback(TrainerCallback): + """ + A [`TrainerCallback`] that sends the logs to [TensorBoard](https://www.tensorflow.org/tensorboard). + + Args: + tb_writer (`SummaryWriter`, *optional*): + The writer to use. Will instantiate one if not set. + """ + + def __init__(self, tb_writer=None): + has_tensorboard = is_tensorboard_available() + if not has_tensorboard: + raise RuntimeError( + "TensorBoardCallback requires tensorboard to be installed. Either update your PyTorch version or" + " install tensorboardX." 
+ ) + if has_tensorboard: + try: + from torch.utils.tensorboard import SummaryWriter # noqa: F401 + + self._SummaryWriter = SummaryWriter + except ImportError: + try: + from tensorboardX import SummaryWriter + + self._SummaryWriter = SummaryWriter + except ImportError: + self._SummaryWriter = None + else: + self._SummaryWriter = None + self.tb_writer = tb_writer + + def _init_summary_writer(self, args, log_dir=None): + log_dir = log_dir or args.logging_dir + if self._SummaryWriter is not None: + self.tb_writer = self._SummaryWriter(log_dir=log_dir) + + def on_train_begin(self, args, state, control, **kwargs): + if not state.is_world_process_zero: + return + + log_dir = None + + if state.is_hyper_param_search: + trial_name = state.trial_name + if trial_name is not None: + log_dir = os.path.join(args.logging_dir, trial_name) + + if self.tb_writer is None: + self._init_summary_writer(args, log_dir) + + if self.tb_writer is not None: + self.tb_writer.add_text("args", args.to_json_string()) + if "model" in kwargs: + model = kwargs["model"] + if hasattr(model, "config") and model.config is not None: + model_config_json = model.config.to_json_string() + self.tb_writer.add_text("model_config", model_config_json) + + def on_log(self, args, state, control, logs=None, **kwargs): + if not state.is_world_process_zero: + return + + if self.tb_writer is None: + self._init_summary_writer(args) + + if self.tb_writer is not None: + logs = rewrite_logs(logs) + for k, v in logs.items(): + if isinstance(v, (int, float)): + self.tb_writer.add_scalar(k, v, state.global_step) + else: + logger.warning( + "Trainer is attempting to log a value of " + f'"{v}" of type {type(v)} for key "{k}" as a scalar. ' + "This invocation of Tensorboard's writer.add_scalar() " + "is incorrect so we dropped this attribute." + ) + self.tb_writer.flush() + + def on_train_end(self, args, state, control, **kwargs): + if self.tb_writer: + self.tb_writer.close() + self.tb_writer = None + + +class WandbCallback(TrainerCallback): + """ + A [`TrainerCallback`] that logs metrics, media, model checkpoints to [Weight and Biases](https://www.wandb.com/). + """ + + def __init__(self): + has_wandb = is_wandb_available() + if not has_wandb: + raise RuntimeError("WandbCallback requires wandb to be installed. Run `pip install wandb`.") + if has_wandb: + import wandb + + self._wandb = wandb + self._initialized = False + # log model + if os.getenv("WANDB_LOG_MODEL", "FALSE").upper() in ENV_VARS_TRUE_VALUES.union({"TRUE"}): + DeprecationWarning( + f"Setting `WANDB_LOG_MODEL` as {os.getenv('WANDB_LOG_MODEL')} is deprecated and will be removed in " + "version 5 of transformers. Use one of `'end'` or `'checkpoint'` instead." + ) + logger.info(f"Setting `WANDB_LOG_MODEL` from {os.getenv('WANDB_LOG_MODEL')} to `end` instead") + self._log_model = "end" + else: + self._log_model = os.getenv("WANDB_LOG_MODEL", "false").lower() + + def setup(self, args, state, model, **kwargs): + """ + Setup the optional Weights & Biases (*wandb*) integration. + + One can subclass and override this method to customize the setup if needed. Find more information + [here](https://docs.wandb.ai/guides/integrations/huggingface). You can also override the following environment + variables: + + Environment: + - **WANDB_LOG_MODEL** (`str`, *optional*, defaults to `"false"`): + Whether to log model and checkpoints during training. Can be `"end"`, `"checkpoint"` or `"false"`. If set + to `"end"`, the model will be uploaded at the end of training. 
If set to `"checkpoint"`, the checkpoint + will be uploaded every `args.save_steps` . If set to `"false"`, the model will not be uploaded. Use along + with [`~transformers.TrainingArguments.load_best_model_at_end`] to upload best model. + + + + Setting `WANDB_LOG_MODEL` as `bool` will be deprecated in version 5 of 🤗 Transformers. + + + - **WANDB_WATCH** (`str`, *optional* defaults to `"false"`): + Can be `"gradients"`, `"all"`, `"parameters"`, or `"false"`. Set to `"all"` to log gradients and + parameters. + - **WANDB_PROJECT** (`str`, *optional*, defaults to `"huggingface"`): + Set this to a custom string to store results in a different project. + - **WANDB_DISABLED** (`bool`, *optional*, defaults to `False`): + Whether to disable wandb entirely. Set `WANDB_DISABLED=true` to disable. + """ + if self._wandb is None: + return + self._initialized = True + if state.is_world_process_zero: + logger.info( + 'Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"' + ) + combined_dict = {**args.to_dict()} + + if hasattr(model, "config") and model.config is not None: + model_config = model.config.to_dict() + combined_dict = {**model_config, **combined_dict} + trial_name = state.trial_name + init_args = {} + if trial_name is not None: + init_args["name"] = trial_name + init_args["group"] = args.run_name + else: + if not (args.run_name is None or args.run_name == args.output_dir): + init_args["name"] = args.run_name + + if self._wandb.run is None: + self._wandb.init( + project=os.getenv("WANDB_PROJECT", "huggingface"), + **init_args, + ) + # add config parameters (run may have been created manually) + self._wandb.config.update(combined_dict, allow_val_change=True) + + # define default x-axis (for latest wandb versions) + if getattr(self._wandb, "define_metric", None): + self._wandb.define_metric("train/global_step") + self._wandb.define_metric("*", step_metric="train/global_step", step_sync=True) + + # keep track of model topology and gradients, unsupported on TPU + _watch_model = os.getenv("WANDB_WATCH", "false") + if not is_torch_xla_available() and _watch_model in ("all", "parameters", "gradients"): + self._wandb.watch(model, log=_watch_model, log_freq=max(100, state.logging_steps)) + self._wandb.run._label(code="transformers_trainer") + + def on_train_begin(self, args, state, control, model=None, **kwargs): + if self._wandb is None: + return + hp_search = state.is_hyper_param_search + if hp_search: + self._wandb.finish() + self._initialized = False + args.run_name = None + if not self._initialized: + self.setup(args, state, model, **kwargs) + + def on_train_end(self, args, state, control, model=None, tokenizer=None, **kwargs): + if self._wandb is None: + return + if self._log_model in ("end", "checkpoint") and self._initialized and state.is_world_process_zero: + from ..trainer import Trainer + + fake_trainer = Trainer(args=args, model=model, tokenizer=tokenizer) + with tempfile.TemporaryDirectory() as temp_dir: + fake_trainer.save_model(temp_dir) + metadata = ( + { + k: v + for k, v in dict(self._wandb.summary).items() + if isinstance(v, numbers.Number) and not k.startswith("_") + } + if not args.load_best_model_at_end + else { + f"eval/{args.metric_for_best_model}": state.best_metric, + "train/total_floss": state.total_flos, + } + ) + logger.info("Logging model artifacts. 
...") + model_name = ( + f"model-{self._wandb.run.id}" + if (args.run_name is None or args.run_name == args.output_dir) + else f"model-{self._wandb.run.name}" + ) + artifact = self._wandb.Artifact(name=model_name, type="model", metadata=metadata) + for f in Path(temp_dir).glob("*"): + if f.is_file(): + with artifact.new_file(f.name, mode="wb") as fa: + fa.write(f.read_bytes()) + self._wandb.run.log_artifact(artifact) + + def on_log(self, args, state, control, model=None, logs=None, **kwargs): + single_value_scalars = [ + "train_runtime", + "train_samples_per_second", + "train_steps_per_second", + "train_loss", + "total_flos", + ] + + if self._wandb is None: + return + if not self._initialized: + self.setup(args, state, model) + if state.is_world_process_zero: + for k, v in logs.items(): + if k in single_value_scalars: + self._wandb.run.summary[k] = v + non_scalar_logs = {k: v for k, v in logs.items() if k not in single_value_scalars} + non_scalar_logs = rewrite_logs(non_scalar_logs) + self._wandb.log({**non_scalar_logs, "train/global_step": state.global_step}) + + def on_save(self, args, state, control, **kwargs): + if self._log_model == "checkpoint" and self._initialized and state.is_world_process_zero: + checkpoint_metadata = { + k: v + for k, v in dict(self._wandb.summary).items() + if isinstance(v, numbers.Number) and not k.startswith("_") + } + + ckpt_dir = f"checkpoint-{state.global_step}" + artifact_path = os.path.join(args.output_dir, ckpt_dir) + logger.info(f"Logging checkpoint artifacts in {ckpt_dir}. ...") + checkpoint_name = ( + f"checkpoint-{self._wandb.run.id}" + if (args.run_name is None or args.run_name == args.output_dir) + else f"checkpoint-{self._wandb.run.name}" + ) + artifact = self._wandb.Artifact(name=checkpoint_name, type="model", metadata=checkpoint_metadata) + artifact.add_dir(artifact_path) + self._wandb.log_artifact(artifact, aliases=[f"checkpoint-{state.global_step}"]) + + +class CometCallback(TrainerCallback): + """ + A [`TrainerCallback`] that sends the logs to [Comet ML](https://www.comet.ml/site/). + """ + + def __init__(self): + if not _has_comet: + raise RuntimeError("CometCallback requires comet-ml to be installed. Run `pip install comet-ml`.") + self._initialized = False + self._log_assets = False + + def setup(self, args, state, model): + """ + Setup the optional Comet.ml integration. + + Environment: + - **COMET_MODE** (`str`, *optional*, defaults to `ONLINE`): + Whether to create an online, offline experiment or disable Comet logging. Can be `OFFLINE`, `ONLINE`, or + `DISABLED`. + - **COMET_PROJECT_NAME** (`str`, *optional*): + Comet project name for experiments. + - **COMET_OFFLINE_DIRECTORY** (`str`, *optional*): + Folder to use for saving offline experiments when `COMET_MODE` is `OFFLINE`. + - **COMET_LOG_ASSETS** (`str`, *optional*, defaults to `TRUE`): + Whether or not to log training assets (tf event logs, checkpoints, etc), to Comet. Can be `TRUE`, or + `FALSE`. + + For a number of configurable items in the environment, see + [here](https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables). 
+ """ + self._initialized = True + log_assets = os.getenv("COMET_LOG_ASSETS", "FALSE").upper() + if log_assets in {"TRUE", "1"}: + self._log_assets = True + if state.is_world_process_zero: + comet_mode = os.getenv("COMET_MODE", "ONLINE").upper() + experiment = None + experiment_kwargs = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")} + if comet_mode == "ONLINE": + experiment = comet_ml.Experiment(**experiment_kwargs) + experiment.log_other("Created from", "transformers") + logger.info("Automatic Comet.ml online logging enabled") + elif comet_mode == "OFFLINE": + experiment_kwargs["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./") + experiment = comet_ml.OfflineExperiment(**experiment_kwargs) + experiment.log_other("Created from", "transformers") + logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished") + if experiment is not None: + experiment._set_model_graph(model, framework="transformers") + experiment._log_parameters(args, prefix="args/", framework="transformers") + if hasattr(model, "config"): + experiment._log_parameters(model.config, prefix="config/", framework="transformers") + + def on_train_begin(self, args, state, control, model=None, **kwargs): + if not self._initialized: + self.setup(args, state, model) + + def on_log(self, args, state, control, model=None, logs=None, **kwargs): + if not self._initialized: + self.setup(args, state, model) + if state.is_world_process_zero: + experiment = comet_ml.config.get_global_experiment() + if experiment is not None: + experiment._log_metrics(logs, step=state.global_step, epoch=state.epoch, framework="transformers") + + def on_train_end(self, args, state, control, **kwargs): + if self._initialized and state.is_world_process_zero: + experiment = comet_ml.config.get_global_experiment() + if experiment is not None: + if self._log_assets is True: + logger.info("Logging checkpoints. This may take time.") + experiment.log_asset_folder( + args.output_dir, recursive=True, log_file_name=True, step=state.global_step + ) + experiment.end() + + +class AzureMLCallback(TrainerCallback): + """ + A [`TrainerCallback`] that sends the logs to [AzureML](https://pypi.org/project/azureml-sdk/). + """ + + def __init__(self, azureml_run=None): + if not is_azureml_available(): + raise RuntimeError("AzureMLCallback requires azureml to be installed. Run `pip install azureml-sdk`.") + self.azureml_run = azureml_run + + def on_init_end(self, args, state, control, **kwargs): + from azureml.core.run import Run + + if self.azureml_run is None and state.is_world_process_zero: + self.azureml_run = Run.get_context() + + def on_log(self, args, state, control, logs=None, **kwargs): + if self.azureml_run and state.is_world_process_zero: + for k, v in logs.items(): + if isinstance(v, (int, float)): + self.azureml_run.log(k, v, description=k) + + +class MLflowCallback(TrainerCallback): + """ + A [`TrainerCallback`] that sends the logs to [MLflow](https://www.mlflow.org/). Can be disabled by setting + environment variable `DISABLE_MLFLOW_INTEGRATION = TRUE`. + """ + + def __init__(self): + if not is_mlflow_available(): + raise RuntimeError("MLflowCallback requires mlflow to be installed. 
Run `pip install mlflow`.") + import mlflow + + self._MAX_PARAM_VAL_LENGTH = mlflow.utils.validation.MAX_PARAM_VAL_LENGTH + self._MAX_PARAMS_TAGS_PER_BATCH = mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH + + self._initialized = False + self._auto_end_run = False + self._log_artifacts = False + self._ml_flow = mlflow + + def setup(self, args, state, model): + """ + Setup the optional MLflow integration. + + Environment: + - **HF_MLFLOW_LOG_ARTIFACTS** (`str`, *optional*): + Whether to use MLflow `.log_artifact()` facility to log artifacts. This only makes sense if logging to a + remote server, e.g. s3 or GCS. If set to `True` or *1*, will copy each saved checkpoint on each save in + [`TrainingArguments`]'s `output_dir` to the local or remote artifact storage. Using it without a remote + storage will just copy the files to your artifact location. + - **MLFLOW_TRACKING_URI** (`str`, *optional*): + Whether to store runs at a specific path or remote server. Unset by default, which skips setting the + tracking URI entirely. + - **MLFLOW_EXPERIMENT_NAME** (`str`, *optional*, defaults to `None`): + Whether to use an MLflow experiment_name under which to launch the run. Default to `None` which will point + to the `Default` experiment in MLflow. Otherwise, it is a case sensitive name of the experiment to be + activated. If an experiment with this name does not exist, a new experiment with this name is created. + - **MLFLOW_TAGS** (`str`, *optional*): + A string dump of a dictionary of key/value pair to be added to the MLflow run as tags. Example: + `os.environ['MLFLOW_TAGS']='{"release.candidate": "RC1", "release.version": "2.2.0"}'`. + - **MLFLOW_NESTED_RUN** (`str`, *optional*): + Whether to use MLflow nested runs. If set to `True` or *1*, will create a nested run inside the current + run. + - **MLFLOW_RUN_ID** (`str`, *optional*): + Allow to reattach to an existing run which can be usefull when resuming training from a checkpoint. When + `MLFLOW_RUN_ID` environment variable is set, `start_run` attempts to resume a run with the specified run ID + and other parameters are ignored. + - **MLFLOW_FLATTEN_PARAMS** (`str`, *optional*, defaults to `False`): + Whether to flatten the parameters dictionary before logging. 
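+
+        A minimal configuration sketch (the experiment name is illustrative; the tags value reuses
+        the example from the description above; set these before training starts so that `setup()`
+        picks them up)::
+
+            import os
+
+            os.environ["MLFLOW_EXPERIMENT_NAME"] = "my-experiment"
+            os.environ["MLFLOW_FLATTEN_PARAMS"] = "1"
+            os.environ["MLFLOW_TAGS"] = '{"release.candidate": "RC1", "release.version": "2.2.0"}'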
+ """ + self._log_artifacts = os.getenv("HF_MLFLOW_LOG_ARTIFACTS", "FALSE").upper() in ENV_VARS_TRUE_VALUES + self._nested_run = os.getenv("MLFLOW_NESTED_RUN", "FALSE").upper() in ENV_VARS_TRUE_VALUES + self._tracking_uri = os.getenv("MLFLOW_TRACKING_URI", None) + self._experiment_name = os.getenv("MLFLOW_EXPERIMENT_NAME", None) + self._flatten_params = os.getenv("MLFLOW_FLATTEN_PARAMS", "FALSE").upper() in ENV_VARS_TRUE_VALUES + self._run_id = os.getenv("MLFLOW_RUN_ID", None) + + # "synchronous" flag is only available with mlflow version >= 2.8.0 + # https://github.com/mlflow/mlflow/pull/9705 + # https://github.com/mlflow/mlflow/releases/tag/v2.8.0 + self._async_log = packaging.version.parse(self._ml_flow.__version__) >= packaging.version.parse("2.8.0") + + logger.debug( + f"MLflow experiment_name={self._experiment_name}, run_name={args.run_name}, nested={self._nested_run}," + f" tags={self._nested_run}, tracking_uri={self._tracking_uri}" + ) + if state.is_world_process_zero: + if not self._ml_flow.is_tracking_uri_set(): + if self._tracking_uri: + self._ml_flow.set_tracking_uri(self._tracking_uri) + logger.debug(f"MLflow tracking URI is set to {self._tracking_uri}") + else: + logger.debug( + "Environment variable `MLFLOW_TRACKING_URI` is not provided and therefore will not be" + " explicitly set." + ) + else: + logger.debug(f"MLflow tracking URI is set to {self._ml_flow.get_tracking_uri()}") + + if self._ml_flow.active_run() is None or self._nested_run or self._run_id: + if self._experiment_name: + # Use of set_experiment() ensure that Experiment is created if not exists + self._ml_flow.set_experiment(self._experiment_name) + self._ml_flow.start_run(run_name=args.run_name, nested=self._nested_run) + logger.debug(f"MLflow run started with run_id={self._ml_flow.active_run().info.run_id}") + self._auto_end_run = True + combined_dict = args.to_dict() + if hasattr(model, "config") and model.config is not None: + model_config = model.config.to_dict() + combined_dict = {**model_config, **combined_dict} + combined_dict = flatten_dict(combined_dict) if self._flatten_params else combined_dict + # remove params that are too long for MLflow + for name, value in list(combined_dict.items()): + # internally, all values are converted to str in MLflow + if len(str(value)) > self._MAX_PARAM_VAL_LENGTH: + logger.warning( + f'Trainer is attempting to log a value of "{value}" for key "{name}" as a parameter. MLflow\'s' + " log_param() only accepts values no longer than 250 characters so we dropped this attribute." + " You can use `MLFLOW_FLATTEN_PARAMS` environment variable to flatten the parameters and" + " avoid this message." 
+ ) + del combined_dict[name] + # MLflow cannot log more than 100 values in one go, so we have to split it + combined_dict_items = list(combined_dict.items()) + for i in range(0, len(combined_dict_items), self._MAX_PARAMS_TAGS_PER_BATCH): + if self._async_log: + self._ml_flow.log_params( + dict(combined_dict_items[i : i + self._MAX_PARAMS_TAGS_PER_BATCH]), synchronous=False + ) + else: + self._ml_flow.log_params(dict(combined_dict_items[i : i + self._MAX_PARAMS_TAGS_PER_BATCH])) + mlflow_tags = os.getenv("MLFLOW_TAGS", None) + if mlflow_tags: + mlflow_tags = json.loads(mlflow_tags) + self._ml_flow.set_tags(mlflow_tags) + self._initialized = True + + def on_train_begin(self, args, state, control, model=None, **kwargs): + if not self._initialized: + self.setup(args, state, model) + + def on_log(self, args, state, control, logs, model=None, **kwargs): + if not self._initialized: + self.setup(args, state, model) + if state.is_world_process_zero: + metrics = {} + for k, v in logs.items(): + if isinstance(v, (int, float)): + metrics[k] = v + elif isinstance(v, torch.Tensor) and v.numel() == 1: + metrics[k] = v.item() + else: + logger.warning( + f'Trainer is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. ' + "MLflow's log_metric() only accepts float and int types so we dropped this attribute." + ) + + if self._async_log: + self._ml_flow.log_metrics(metrics=metrics, step=state.global_step, synchronous=False) + else: + self._ml_flow.log_metrics(metrics=metrics, step=state.global_step) + + def on_train_end(self, args, state, control, **kwargs): + if self._initialized and state.is_world_process_zero: + if self._auto_end_run and self._ml_flow.active_run(): + self._ml_flow.end_run() + + def on_save(self, args, state, control, **kwargs): + if self._initialized and state.is_world_process_zero and self._log_artifacts: + ckpt_dir = f"checkpoint-{state.global_step}" + artifact_path = os.path.join(args.output_dir, ckpt_dir) + logger.info(f"Logging checkpoint artifacts in {ckpt_dir}. This may take time.") + self._ml_flow.pyfunc.log_model( + ckpt_dir, + artifacts={"model_path": artifact_path}, + python_model=self._ml_flow.pyfunc.PythonModel(), + ) + + def __del__(self): + # if the previous run is not terminated correctly, the fluent API will + # not let you start a new run before the previous one is killed + if ( + self._auto_end_run + and callable(getattr(self._ml_flow, "active_run", None)) + and self._ml_flow.active_run() is not None + ): + self._ml_flow.end_run() + + +class DagsHubCallback(MLflowCallback): + """ + A [`TrainerCallback`] that logs to [DagsHub](https://dagshub.com/). Extends [`MLflowCallback`] + """ + + def __init__(self): + super().__init__() + if not is_dagshub_available(): + raise ImportError("DagsHubCallback requires dagshub to be installed. Run `pip install dagshub`.") + + from dagshub.upload import Repo + + self.Repo = Repo + + def setup(self, *args, **kwargs): + """ + Setup the DagsHub's Logging integration. + + Environment: + - **HF_DAGSHUB_LOG_ARTIFACTS** (`str`, *optional*): + Whether to save the data and model artifacts for the experiment. Default to `False`. 
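+
+        Example (an illustrative sketch; the repository URL below is a placeholder, and the variables are read by
+        `DagsHubCallback.setup()`):
+
+        ```python
+        import os
+
+        # The MLflow tracking URI is also used to derive the DagsHub repository owner and name.
+        os.environ["MLFLOW_TRACKING_URI"] = "https://dagshub.com/<owner>/<repo>.mlflow"
+        os.environ["HF_DAGSHUB_LOG_ARTIFACTS"] = "1"  # also upload data and model artifacts
+        os.environ["BRANCH"] = "main"  # branch used when uploading artifacts to the repository
+        ```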
+        """
+
+        self.log_artifacts = os.getenv("HF_DAGSHUB_LOG_ARTIFACTS", "FALSE").upper() in ENV_VARS_TRUE_VALUES
+        self.name = os.getenv("HF_DAGSHUB_MODEL_NAME") or "main"
+        self.remote = os.getenv("MLFLOW_TRACKING_URI")
+        if self.remote is None:
+            raise RuntimeError(
+                "DagsHubCallback requires the `MLFLOW_TRACKING_URI` environment variable to be set. Did you run"
+                " `dagshub.init()`?"
+            )
+
+        self.repo = self.Repo(
+            owner=self.remote.split(os.sep)[-2],
+            name=self.remote.split(os.sep)[-1].split(".")[0],
+            branch=os.getenv("BRANCH") or "main",
+        )
+        self.path = Path("artifacts")
+
+        super().setup(*args, **kwargs)
+
+    def on_train_end(self, args, state, control, **kwargs):
+        if self.log_artifacts:
+            if getattr(self, "train_dataloader", None):
+                torch.save(self.train_dataloader.dataset, os.path.join(args.output_dir, "dataset.pt"))
+
+            self.repo.directory(str(self.path)).add_dir(args.output_dir)
+
+
+class NeptuneMissingConfiguration(Exception):
+    def __init__(self):
+        super().__init__(
+            """
+            ------ Unsupported ---- We were not able to create new runs. You provided a custom Neptune run to
+            `NeptuneCallback` with the `run` argument. For the integration to work fully, provide your `api_token` and
+            `project` by saving them as environment variables or passing them to the callback.
+            """
+        )
+
+
+class NeptuneCallback(TrainerCallback):
+    """TrainerCallback that sends the logs to [Neptune](https://app.neptune.ai).
+
+    Args:
+        api_token (`str`, *optional*): Neptune API token obtained upon registration.
+            You can leave this argument out if you have saved your token to the `NEPTUNE_API_TOKEN` environment
+            variable (strongly recommended). See full setup instructions in the
+            [docs](https://docs.neptune.ai/setup/installation).
+        project (`str`, *optional*): Name of an existing Neptune project, in the form "workspace-name/project-name".
+            You can find and copy the name in Neptune from the project settings -> Properties. If None (default), the
+            value of the `NEPTUNE_PROJECT` environment variable is used.
+        name (`str`, *optional*): Custom name for the run.
+        base_namespace (`str`, *optional*, defaults to "finetuning"): In the Neptune run, the root namespace
+            that will contain all of the metadata logged by the callback.
+        log_parameters (`bool`, *optional*, defaults to `True`):
+            If True, logs all Trainer arguments and model parameters provided by the Trainer.
+        log_checkpoints (`str`, *optional*): If "same", uploads checkpoints whenever they are saved by the Trainer.
+            If "last", uploads only the most recently saved checkpoint. If "best", uploads the best checkpoint (among
+            the ones saved by the Trainer). If `None`, does not upload checkpoints.
+        run (`Run`, *optional*): Pass a Neptune run object if you want to continue logging to an existing run.
+            Read more about resuming runs in the [docs](https://docs.neptune.ai/logging/to_existing_object).
+        **neptune_run_kwargs (*optional*):
+            Additional keyword arguments to be passed directly to the
+            [`neptune.init_run()`](https://docs.neptune.ai/api/neptune#init_run) function when a new run is created.
+
+    For instructions and examples, see the [Transformers integration
+    guide](https://docs.neptune.ai/integrations/transformers) in the Neptune documentation.
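+
+    Example (a minimal sketch; it assumes `neptune` is installed, that `NEPTUNE_API_TOKEN` and `NEPTUNE_PROJECT`
+    are set in the environment, and the model checkpoint name is only a placeholder):
+
+    ```python
+    from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments
+    from transformers.integrations import NeptuneCallback
+
+    model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")
+    neptune_callback = NeptuneCallback(base_namespace="finetuning", log_checkpoints="last")
+    trainer = Trainer(
+        model=model,
+        # `report_to="none"` avoids registering a second Neptune callback automatically.
+        args=TrainingArguments(output_dir="out", report_to="none"),
+        callbacks=[neptune_callback],
+    )
+    # trainer.train() would then stream logs, parameters, and the last checkpoint to Neptune.
+    ```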
+ """ + + integration_version_key = "source_code/integrations/transformers" + model_parameters_key = "model_parameters" + trial_name_key = "trial" + trial_params_key = "trial_params" + trainer_parameters_key = "trainer_parameters" + flat_metrics = {"train/epoch"} + + def __init__( + self, + *, + api_token: Optional[str] = None, + project: Optional[str] = None, + name: Optional[str] = None, + base_namespace: str = "finetuning", + run=None, + log_parameters: bool = True, + log_checkpoints: Optional[str] = None, + **neptune_run_kwargs, + ): + if not is_neptune_available(): + raise ValueError( + "NeptuneCallback requires the Neptune client library to be installed. " + "To install the library, run `pip install neptune`." + ) + + try: + from neptune import Run + from neptune.internal.utils import verify_type + except ImportError: + from neptune.new.internal.utils import verify_type + from neptune.new.metadata_containers.run import Run + + verify_type("api_token", api_token, (str, type(None))) + verify_type("project", project, (str, type(None))) + verify_type("name", name, (str, type(None))) + verify_type("base_namespace", base_namespace, str) + verify_type("run", run, (Run, type(None))) + verify_type("log_parameters", log_parameters, bool) + verify_type("log_checkpoints", log_checkpoints, (str, type(None))) + + self._base_namespace_path = base_namespace + self._log_parameters = log_parameters + self._log_checkpoints = log_checkpoints + self._initial_run: Optional[Run] = run + + self._run = None + self._is_monitoring_run = False + self._run_id = None + self._force_reset_monitoring_run = False + self._init_run_kwargs = {"api_token": api_token, "project": project, "name": name, **neptune_run_kwargs} + + self._volatile_checkpoints_dir = None + self._should_upload_checkpoint = self._log_checkpoints is not None + self._recent_checkpoint_path = None + + if self._log_checkpoints in {"last", "best"}: + self._target_checkpoints_namespace = f"checkpoints/{self._log_checkpoints}" + self._should_clean_recently_uploaded_checkpoint = True + else: + self._target_checkpoints_namespace = "checkpoints" + self._should_clean_recently_uploaded_checkpoint = False + + def _stop_run_if_exists(self): + if self._run: + self._run.stop() + del self._run + self._run = None + + def _initialize_run(self, **additional_neptune_kwargs): + try: + from neptune import init_run + from neptune.exceptions import NeptuneMissingApiTokenException, NeptuneMissingProjectNameException + except ImportError: + from neptune.new import init_run + from neptune.new.exceptions import NeptuneMissingApiTokenException, NeptuneMissingProjectNameException + + self._stop_run_if_exists() + + try: + run_params = additional_neptune_kwargs.copy() + run_params.update(self._init_run_kwargs) + self._run = init_run(**run_params) + self._run_id = self._run["sys/id"].fetch() + except (NeptuneMissingProjectNameException, NeptuneMissingApiTokenException) as e: + raise NeptuneMissingConfiguration() from e + + def _use_initial_run(self): + self._run = self._initial_run + self._is_monitoring_run = True + self._run_id = self._run["sys/id"].fetch() + self._initial_run = None + + def _ensure_run_with_monitoring(self): + if self._initial_run is not None: + self._use_initial_run() + else: + if not self._force_reset_monitoring_run and self._is_monitoring_run: + return + + if self._run and not self._is_monitoring_run and not self._force_reset_monitoring_run: + self._initialize_run(with_id=self._run_id) + self._is_monitoring_run = True + else: + self._initialize_run() + 
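+            # A monitoring-enabled run now exists, either re-opened from its stored id or freshly
+            # created above; the forced-reset flag is cleared below so later calls can keep reusing it.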
self._force_reset_monitoring_run = False + + def _ensure_at_least_run_without_monitoring(self): + if self._initial_run is not None: + self._use_initial_run() + else: + if not self._run: + self._initialize_run( + with_id=self._run_id, + capture_stdout=False, + capture_stderr=False, + capture_hardware_metrics=False, + capture_traceback=False, + ) + self._is_monitoring_run = False + + @property + def run(self): + if self._run is None: + self._ensure_at_least_run_without_monitoring() + return self._run + + @property + def _metadata_namespace(self): + return self.run[self._base_namespace_path] + + def _log_integration_version(self): + self.run[NeptuneCallback.integration_version_key] = version + + def _log_trainer_parameters(self, args): + self._metadata_namespace[NeptuneCallback.trainer_parameters_key] = args.to_sanitized_dict() + + def _log_model_parameters(self, model): + from neptune.utils import stringify_unsupported + + if model and hasattr(model, "config") and model.config is not None: + self._metadata_namespace[NeptuneCallback.model_parameters_key] = stringify_unsupported( + model.config.to_dict() + ) + + def _log_hyper_param_search_parameters(self, state): + if state and hasattr(state, "trial_name"): + self._metadata_namespace[NeptuneCallback.trial_name_key] = state.trial_name + + if state and hasattr(state, "trial_params") and state.trial_params is not None: + self._metadata_namespace[NeptuneCallback.trial_params_key] = state.trial_params + + def _log_model_checkpoint(self, source_directory: str, checkpoint: str): + target_path = relative_path = os.path.join(source_directory, checkpoint) + + if self._volatile_checkpoints_dir is not None: + consistent_checkpoint_path = os.path.join(self._volatile_checkpoints_dir, checkpoint) + try: + # Remove leading ../ from a relative path. + cpkt_path = relative_path.replace("..", "").lstrip(os.path.sep) + copy_path = os.path.join(consistent_checkpoint_path, cpkt_path) + shutil.copytree(relative_path, copy_path) + target_path = consistent_checkpoint_path + except IOError as e: + logger.warning( + "NeptuneCallback was unable to made a copy of checkpoint due to I/O exception: '{}'. 
" + "Could fail trying to upload.".format(e) + ) + + self._metadata_namespace[self._target_checkpoints_namespace].upload_files(target_path) + + if self._should_clean_recently_uploaded_checkpoint and self._recent_checkpoint_path is not None: + self._metadata_namespace[self._target_checkpoints_namespace].delete_files(self._recent_checkpoint_path) + + self._recent_checkpoint_path = relative_path + + def on_init_end(self, args, state, control, **kwargs): + self._volatile_checkpoints_dir = None + if self._log_checkpoints and (args.overwrite_output_dir or args.save_total_limit is not None): + self._volatile_checkpoints_dir = tempfile.TemporaryDirectory().name + + if self._log_checkpoints == "best" and not args.load_best_model_at_end: + raise ValueError("To save the best model checkpoint, the load_best_model_at_end argument must be enabled.") + + def on_train_begin(self, args, state, control, model=None, **kwargs): + if not state.is_world_process_zero: + return + + self._ensure_run_with_monitoring() + self._force_reset_monitoring_run = True + + self._log_integration_version() + if self._log_parameters: + self._log_trainer_parameters(args) + self._log_model_parameters(model) + + if state.is_hyper_param_search: + self._log_hyper_param_search_parameters(state) + + def on_train_end(self, args, state, control, **kwargs): + self._stop_run_if_exists() + + def __del__(self): + if self._volatile_checkpoints_dir is not None: + shutil.rmtree(self._volatile_checkpoints_dir, ignore_errors=True) + + self._stop_run_if_exists() + + def on_save(self, args, state, control, **kwargs): + if self._should_upload_checkpoint: + self._log_model_checkpoint(args.output_dir, f"checkpoint-{state.global_step}") + + def on_evaluate(self, args, state, control, metrics=None, **kwargs): + if self._log_checkpoints == "best": + best_metric_name = args.metric_for_best_model + if not best_metric_name.startswith("eval_"): + best_metric_name = f"eval_{best_metric_name}" + + metric_value = metrics.get(best_metric_name) + + operator = np.greater if args.greater_is_better else np.less + + self._should_upload_checkpoint = state.best_metric is None or operator(metric_value, state.best_metric) + + @classmethod + def get_run(cls, trainer): + for callback in trainer.callback_handler.callbacks: + if isinstance(callback, cls): + return callback.run + + raise Exception("The trainer doesn't have a NeptuneCallback configured.") + + def on_log(self, args, state, control, logs: Optional[Dict[str, float]] = None, **kwargs): + if not state.is_world_process_zero: + return + + if logs is not None: + for name, value in rewrite_logs(logs).items(): + if isinstance(value, (int, float)): + if name in NeptuneCallback.flat_metrics: + self._metadata_namespace[name] = value + else: + self._metadata_namespace[name].log(value, step=state.global_step) + + +class CodeCarbonCallback(TrainerCallback): + """ + A [`TrainerCallback`] that tracks the CO2 emission of training. + """ + + def __init__(self): + if not is_codecarbon_available(): + raise RuntimeError( + "CodeCarbonCallback requires `codecarbon` to be installed. Run `pip install codecarbon`." 
+ ) + import codecarbon + + self._codecarbon = codecarbon + self.tracker = None + + def on_init_end(self, args, state, control, **kwargs): + if self.tracker is None and state.is_local_process_zero: + # CodeCarbon will automatically handle environment variables for configuration + self.tracker = self._codecarbon.EmissionsTracker(output_dir=args.output_dir) + + def on_train_begin(self, args, state, control, model=None, **kwargs): + if self.tracker and state.is_local_process_zero: + self.tracker.start() + + def on_train_end(self, args, state, control, **kwargs): + if self.tracker and state.is_local_process_zero: + self.tracker.stop() + + +class ClearMLCallback(TrainerCallback): + """ + A [`TrainerCallback`] that sends the logs to [ClearML](https://clear.ml/). + + Environment: + - **CLEARML_PROJECT** (`str`, *optional*, defaults to `HuggingFace Transformers`): + ClearML project name. + - **CLEARML_TASK** (`str`, *optional*, defaults to `Trainer`): + ClearML task name. + - **CLEARML_LOG_MODEL** (`bool`, *optional*, defaults to `False`): + Whether to log models as artifacts during training. + """ + + log_suffix = "" + + _hparams_section = "Transformers" + _model_config_section = "Model Configuration" + _ignore_hparams_overrides = "_ignore_hparams_ui_overrides_" + _ignoge_model_config_overrides = "_ignore_model_config_ui_overrides_" + _model_config_description = "The configuration of model number {}." + _model_config_description_note = ( + "Note that, when cloning this task and running it remotely," + " the configuration might be applied to another model instead of this one." + " To avoid this, initialize the task externally by calling `Task.init`" + " before the `ClearMLCallback` is instantiated." + ) + _train_run_counter = 0 + _model_connect_counter = 0 + _task_created_in_callback = False + _should_close_on_train_end = None + + def __init__(self): + if is_clearml_available(): + import clearml + + self._clearml = clearml + else: + raise RuntimeError("ClearMLCallback requires 'clearml' to be installed. 
Run `pip install clearml`.") + + self._initialized = False + self._clearml_task = None + + self._log_model = False + self._checkpoints_saved = [] + + def setup(self, args, state, model, tokenizer, **kwargs): + if self._clearml is None: + return + if self._initialized: + return + ClearMLCallback._train_run_counter += 1 + ClearMLCallback._model_connect_counter += 1 + ClearMLCallback.log_suffix = ( + "" if ClearMLCallback._train_run_counter == 1 else "_" + str(ClearMLCallback._train_run_counter) + ) + if state.is_world_process_zero: + logger.info("Automatic ClearML logging enabled.") + if self._clearml_task is None: + if ClearMLCallback._should_close_on_train_end is None: + if not self._clearml.Task.running_locally() or self._clearml.Task.current_task(): + ClearMLCallback._should_close_on_train_end = False + else: + ClearMLCallback._should_close_on_train_end = True + + # This might happen when running inside of a pipeline, where the task is already initialized + # from outside of Hugging Face + if self._clearml.Task.running_locally() and self._clearml.Task.current_task(): + self._clearml_task = self._clearml.Task.current_task() + self._log_model = os.getenv( + "CLEARML_LOG_MODEL", + "FALSE" if not ClearMLCallback._task_created_in_callback else "TRUE", + ).upper() in ENV_VARS_TRUE_VALUES.union({"TRUE"}) + logger.info("External ClearML Task has been connected.") + else: + self._clearml_task = self._clearml.Task.init( + project_name=os.getenv("CLEARML_PROJECT", "HuggingFace Transformers"), + task_name=os.getenv("CLEARML_TASK", "Trainer"), + auto_connect_frameworks={"tensorboard": False, "pytorch": False}, + output_uri=True, + ) + self._log_model = os.getenv("CLEARML_LOG_MODEL", "TRUE").upper() in ENV_VARS_TRUE_VALUES.union( + {"TRUE"} + ) + ClearMLCallback._task_created_in_callback = True + logger.info("ClearML Task has been initialized.") + self._initialized = True + + suffixed_hparams_section = ClearMLCallback._hparams_section + ClearMLCallback.log_suffix + ignore_hparams_config_section = suffixed_hparams_section + "/" + ClearMLCallback._ignore_hparams_overrides + if self._clearml.Task.running_locally(): + self._copy_training_args_as_hparams(args, suffixed_hparams_section) + self._clearml_task.set_parameter( + name=ignore_hparams_config_section, + value=True, + value_type=bool, + description=( + "If True, ignore Transformers hyperparameters overrides done in the UI/backend " + + "when running remotely. Otherwise, the overrides will be applied when running remotely" + ), + ) + elif not self._clearml_task.get_parameter(ignore_hparams_config_section, default=True, cast=True): + self._clearml_task.connect(args, suffixed_hparams_section) + else: + self._copy_training_args_as_hparams( + args, ClearMLCallback._hparams_section + ClearMLCallback.log_suffix + ) + + if getattr(model, "config", None) is not None: + ignore_model_config_section = ( + suffixed_hparams_section + "/" + ClearMLCallback._ignoge_model_config_overrides + ) + configuration_object_description = ClearMLCallback._model_config_description.format( + ClearMLCallback._model_connect_counter + ) + if ClearMLCallback._model_connect_counter != ClearMLCallback._train_run_counter: + configuration_object_description += " " + ClearMLCallback._model_config_description_note + if self._clearml.Task.running_locally(): + self._clearml_task.set_parameter( + name=ignore_model_config_section, + value=True, + value_type=bool, + description=( + "If True, ignore Transformers model configuration overrides done in the UI/backend " + + "when running remotely. 
Otherwise, the overrides will be applied when running remotely" + ), + ) + self._clearml_task.set_configuration_object( + name=ClearMLCallback._model_config_section + ClearMLCallback.log_suffix, + config_dict=model.config.to_dict(), + description=configuration_object_description, + ) + elif not self._clearml_task.get_parameter(ignore_model_config_section, default=True, cast=True): + model.config = model.config.from_dict( + self._clearml_task.get_configuration_object_as_dict( + ClearMLCallback._model_config_section + ClearMLCallback.log_suffix + ) + ) + else: + self._clearml_task.set_configuration_object( + name=ClearMLCallback._model_config_section + ClearMLCallback.log_suffix, + config_dict=model.config.to_dict(), + description=configuration_object_description, + ) + + def on_train_begin(self, args, state, control, model=None, tokenizer=None, **kwargs): + if self._clearml is None: + return + self._checkpoints_saved = [] + if state.is_hyper_param_search: + self._initialized = False + if not self._initialized: + self.setup(args, state, model, tokenizer, **kwargs) + + def on_train_end(self, args, state, control, **kwargs): + if ClearMLCallback._should_close_on_train_end: + self._clearml_task.close() + ClearMLCallback._train_run_counter = 0 + + def on_log(self, args, state, control, model=None, tokenizer=None, logs=None, **kwargs): + if self._clearml is None: + return + if not self._initialized: + self.setup(args, state, model, tokenizer, **kwargs) + if state.is_world_process_zero: + eval_prefix = "eval_" + eval_prefix_len = len(eval_prefix) + test_prefix = "test_" + test_prefix_len = len(test_prefix) + single_value_scalars = [ + "train_runtime", + "train_samples_per_second", + "train_steps_per_second", + "train_loss", + "total_flos", + "epoch", + ] + for k, v in logs.items(): + if isinstance(v, (int, float)): + if k in single_value_scalars: + self._clearml_task.get_logger().report_single_value( + name=k + ClearMLCallback.log_suffix, value=v + ) + elif k.startswith(eval_prefix): + self._clearml_task.get_logger().report_scalar( + title="eval" + ClearMLCallback.log_suffix, + series=k[eval_prefix_len:], + value=v, + iteration=state.global_step, + ) + elif k.startswith(test_prefix): + self._clearml_task.get_logger().report_scalar( + title="test" + ClearMLCallback.log_suffix, + series=k[test_prefix_len:], + value=v, + iteration=state.global_step, + ) + else: + self._clearml_task.get_logger().report_scalar( + title="train" + ClearMLCallback.log_suffix, + series=k, + value=v, + iteration=state.global_step, + ) + else: + logger.warning( + "Trainer is attempting to log a value of " + f'"{v}" of type {type(v)} for key "{k}" as a scalar. ' + "This invocation of ClearML logger's report_scalar() " + "is incorrect so we dropped this attribute." + ) + + def on_save(self, args, state, control, **kwargs): + if self._log_model and self._clearml_task and state.is_world_process_zero: + ckpt_dir = f"checkpoint-{state.global_step}" + artifact_path = os.path.join(args.output_dir, ckpt_dir) + name = ckpt_dir + ClearMLCallback.log_suffix + logger.info(f"Logging checkpoint artifact `{name}`. 
This may take some time.") + output_model = self._clearml.OutputModel(task=self._clearml_task, name=name) + output_model.connect(task=self._clearml_task, name=name) + output_model.update_weights_package( + weights_path=artifact_path, + target_filename=ckpt_dir, + iteration=state.global_step, + auto_delete_file=False, + ) + self._checkpoints_saved.append(output_model) + while args.save_total_limit and args.save_total_limit < len(self._checkpoints_saved): + try: + self._clearml.model.Model.remove( + self._checkpoints_saved[0], + delete_weights_file=True, + force=True, + raise_on_errors=True, + ) + except Exception as e: + logger.warning( + "Could not remove checkpoint `{}` after going over the `save_total_limit`. Error is: {}".format( + self._checkpoints_saved[0].name, e + ) + ) + break + self._checkpoints_saved = self._checkpoints_saved[1:] + + def _copy_training_args_as_hparams(self, training_args, prefix): + as_dict = { + field.name: getattr(training_args, field.name) + for field in fields(training_args) + if field.init and not field.name.endswith("_token") + } + flat_dict = {str(k): v for k, v in self._clearml.utilities.proxy_object.flatten_dictionary(as_dict).items()} + self._clearml_task._arguments.copy_from_dict(flat_dict, prefix=prefix) + + +class FlyteCallback(TrainerCallback): + """A [`TrainerCallback`] that sends the logs to [Flyte](https://flyte.org/). + NOTE: This callback only works within a Flyte task. + + Args: + save_log_history (`bool`, *optional*, defaults to `True`): + When set to True, the training logs are saved as a Flyte Deck. + + sync_checkpoints (`bool`, *optional*, defaults to `True`): + When set to True, checkpoints are synced with Flyte and can be used to resume training in the case of an + interruption. + + Example: + + ```python + # Note: This example skips over some setup steps for brevity. + from flytekit import current_context, task + + + @task + def train_hf_transformer(): + cp = current_context().checkpoint + trainer = Trainer(..., callbacks=[FlyteCallback()]) + output = trainer.train(resume_from_checkpoint=cp.restore()) + ``` + """ + + def __init__(self, save_log_history: bool = True, sync_checkpoints: bool = True): + super().__init__() + if not is_flytekit_available(): + raise ImportError("FlyteCallback requires flytekit to be installed. Run `pip install flytekit`.") + + if not is_flyte_deck_standard_available() or not is_pandas_available(): + logger.warning( + "Syncing log history requires both flytekitplugins-deck-standard and pandas to be installed. " + "Run `pip install flytekitplugins-deck-standard pandas` to enable this feature." + ) + save_log_history = False + + from flytekit import current_context + + self.cp = current_context().checkpoint + self.save_log_history = save_log_history + self.sync_checkpoints = sync_checkpoints + + def on_save(self, args, state, control, **kwargs): + if self.sync_checkpoints and state.is_world_process_zero: + ckpt_dir = f"checkpoint-{state.global_step}" + artifact_path = os.path.join(args.output_dir, ckpt_dir) + + logger.info(f"Syncing checkpoint in {ckpt_dir} to Flyte. 
This may take time.") + self.cp.save(artifact_path) + + def on_train_end(self, args, state, control, **kwargs): + if self.save_log_history: + import pandas as pd + from flytekit import Deck + from flytekitplugins.deck.renderer import TableRenderer + + log_history_df = pd.DataFrame(state.log_history) + Deck("Log History", TableRenderer().to_html(log_history_df)) + + +class DVCLiveCallback(TrainerCallback): + """ + A [`TrainerCallback`] that sends the logs to [DVCLive](https://www.dvc.org/doc/dvclive). + + Use the environment variables below in `setup` to configure the integration. To customize this callback beyond + those environment variables, see [here](https://dvc.org/doc/dvclive/ml-frameworks/huggingface). + + Args: + live (`dvclive.Live`, *optional*, defaults to `None`): + Optional Live instance. If None, a new instance will be created using **kwargs. + log_model (Union[Literal["all"], bool], *optional*, defaults to `None`): + Whether to use `dvclive.Live.log_artifact()` to log checkpoints created by [`Trainer`]. If set to `True`, + the final checkpoint is logged at the end of training. If set to `"all"`, the entire + [`TrainingArguments`]'s `output_dir` is logged at each checkpoint. + """ + + def __init__( + self, + live: Optional[Any] = None, + log_model: Optional[Union[Literal["all"], bool]] = None, + **kwargs, + ): + if not is_dvclive_available(): + raise RuntimeError("DVCLiveCallback requires dvclive to be installed. Run `pip install dvclive`.") + from dvclive import Live + + self._initialized = False + self.live = None + if isinstance(live, Live): + self.live = live + elif live is not None: + raise RuntimeError(f"Found class {live.__class__} for live, expected dvclive.Live") + + self._log_model = log_model + if self._log_model is None: + log_model_env = os.getenv("HF_DVCLIVE_LOG_MODEL", "FALSE") + if log_model_env.upper() in ENV_VARS_TRUE_VALUES: + self._log_model = True + elif log_model_env.lower() == "all": + self._log_model = "all" + + def setup(self, args, state, model): + """ + Setup the optional DVCLive integration. To customize this callback beyond the environment variables below, see + [here](https://dvc.org/doc/dvclive/ml-frameworks/huggingface). + + Environment: + - **HF_DVCLIVE_LOG_MODEL** (`str`, *optional*): + Whether to use `dvclive.Live.log_artifact()` to log checkpoints created by [`Trainer`]. If set to `True` or + *1*, the final checkpoint is logged at the end of training. If set to `all`, the entire + [`TrainingArguments`]'s `output_dir` is logged at each checkpoint. + """ + from dvclive import Live + + self._initialized = True + if state.is_world_process_zero: + if not self.live: + self.live = Live() + self.live.log_params(args.to_dict()) + + def on_train_begin(self, args, state, control, model=None, **kwargs): + if not self._initialized: + self.setup(args, state, model) + + def on_log(self, args, state, control, model=None, logs=None, **kwargs): + if not self._initialized: + self.setup(args, state, model) + if state.is_world_process_zero: + from dvclive.plots import Metric + from dvclive.utils import standardize_metric_name + + for key, value in logs.items(): + if Metric.could_log(value): + self.live.log_metric(standardize_metric_name(key, "dvclive.huggingface"), value) + else: + logger.warning( + "Trainer is attempting to log a value of " + f'"{value}" of type {type(value)} for key "{key}" as a scalar. ' + "This invocation of DVCLive's Live.log_metric() " + "is incorrect so we dropped this attribute." 
+ ) + self.live.next_step() + + def on_save(self, args, state, control, **kwargs): + if self._log_model == "all" and self._initialized and state.is_world_process_zero: + self.live.log_artifact(args.output_dir) + + def on_train_end(self, args, state, control, **kwargs): + if self._initialized and state.is_world_process_zero: + from transformers.trainer import Trainer + + if self._log_model is True: + fake_trainer = Trainer(args=args, model=kwargs.get("model"), tokenizer=kwargs.get("tokenizer")) + name = "best" if args.load_best_model_at_end else "last" + output_dir = os.path.join(args.output_dir, name) + fake_trainer.save_model(output_dir) + self.live.log_artifact(output_dir, name=name, type="model", copy=True) + self.live.end() + + +INTEGRATION_TO_CALLBACK = { + "azure_ml": AzureMLCallback, + "comet_ml": CometCallback, + "mlflow": MLflowCallback, + "neptune": NeptuneCallback, + "tensorboard": TensorBoardCallback, + "wandb": WandbCallback, + "codecarbon": CodeCarbonCallback, + "clearml": ClearMLCallback, + "dagshub": DagsHubCallback, + "flyte": FlyteCallback, + "dvclive": DVCLiveCallback, +} + + +def get_reporting_integration_callbacks(report_to): + for integration in report_to: + if integration not in INTEGRATION_TO_CALLBACK: + raise ValueError( + f"{integration} is not supported, only {', '.join(INTEGRATION_TO_CALLBACK.keys())} are supported." + ) + + return [INTEGRATION_TO_CALLBACK[integration] for integration in report_to] diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/peft.py b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/peft.py new file mode 100644 index 0000000000000000000000000000000000000000..e04d2399527c1b4a0ad9556751aff6da4ab13ec1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/peft.py @@ -0,0 +1,476 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import inspect +import warnings +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union + +from ..utils import ( + check_peft_version, + find_adapter_config_file, + is_accelerate_available, + is_peft_available, + is_torch_available, + logging, +) + + +if is_accelerate_available(): + from accelerate import dispatch_model + from accelerate.utils import get_balanced_memory, infer_auto_device_map + +# Minimum PEFT version supported for the integration +MIN_PEFT_VERSION = "0.5.0" + +if TYPE_CHECKING: + if is_torch_available(): + import torch + + +logger = logging.get_logger(__name__) + + +class PeftAdapterMixin: + """ + A class containing all functions for loading and using adapters weights that are supported in PEFT library. For + more details about adapters and injecting them on a transformer-based model, check out the documentation of PEFT + library: https://huggingface.co/docs/peft/index + + Currently supported PEFT methods are all non-prefix tuning methods. 
Below is the list of supported PEFT methods
+    that anyone can load, train and run with this mixin class:
+    - Low Rank Adapters (LoRA): https://huggingface.co/docs/peft/conceptual_guides/lora
+    - IA3: https://huggingface.co/docs/peft/conceptual_guides/ia3
+    - AdaLora: https://arxiv.org/abs/2303.10512
+
+    Other PEFT methods, such as prompt tuning or prompt learning, are out of scope, as these adapters are not
+    "injectable" into a torch module. To use those methods, please refer to the usage guide of the PEFT library.
+
+    With this mixin, if the correct PEFT version is installed, it is possible to:
+
+    - Load an adapter stored on a local path or in a remote Hub repository, and inject it into the model
+    - Attach new adapters to the model and train them with the Trainer or on your own.
+    - Attach multiple adapters and iteratively activate / deactivate them
+    - Activate / deactivate all adapters from the model.
+    - Get the `state_dict` of the active adapter.
+    """
+
+    _hf_peft_config_loaded = False
+
+    def load_adapter(
+        self,
+        peft_model_id: Optional[str] = None,
+        adapter_name: Optional[str] = None,
+        revision: Optional[str] = None,
+        token: Optional[str] = None,
+        device_map: Optional[str] = "auto",
+        max_memory: Optional[str] = None,
+        offload_folder: Optional[str] = None,
+        offload_index: Optional[int] = None,
+        peft_config: Dict[str, Any] = None,
+        adapter_state_dict: Optional[Dict[str, "torch.Tensor"]] = None,
+        adapter_kwargs: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        """
+        Load adapter weights from file or remote Hub folder. If you are not familiar with adapters and PEFT methods,
+        we invite you to read more about them on PEFT official documentation: https://huggingface.co/docs/peft
+
+        Requires peft as a backend to load the adapter weights.
+
+        Args:
+            peft_model_id (`str`, *optional*):
+                The identifier of the model to look for on the Hub, or a local path to the saved adapter config file
+                and adapter weights.
+            adapter_name (`str`, *optional*):
+                The adapter name to use. If not set, will use the default adapter.
+            revision (`str`, *optional*, defaults to `"main"`):
+                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use
+                a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+                identifier allowed by git.
+
+                To test a pull request you made on the Hub, you can pass `revision="refs/pr/"`.
+
+            token (`str`, *optional*):
+                The token to use for authentication when loading from a remote folder. Useful for loading private
+                repositories hosted on the Hugging Face Hub. You might need to call `huggingface-cli login` and paste
+                your token to cache it.
+            device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*):
+                A map that specifies where each submodule should go. It doesn't need to be refined to each
+                parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
+                same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank
+                like `1`) on which the model will be allocated, the device map will map the entire model to this
+                device. Passing `device_map = 0` means put the whole model on GPU 0.
+
+                To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
+                more information about each option see [designing a device
+                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
+ max_memory (`Dict`, *optional*): + A dictionary device identifier to maximum memory. Will default to the maximum memory available for each + GPU and the available CPU RAM if unset. + offload_folder (`str` or `os.PathLike`, `optional`): + If the `device_map` contains any value `"disk"`, the folder where we will offload weights. + offload_index (`int`, `optional`): + `offload_index` argument to be passed to `accelerate.dispatch_model` method. + peft_config (`Dict[str, Any]`, *optional*): + The configuration of the adapter to add, supported adapters are non-prefix tuning and adaption prompts + methods. This argument is used in case users directly pass PEFT state dicts + adapter_state_dict (`Dict[str, torch.Tensor]`, *optional*): + The state dict of the adapter to load. This argument is used in case users directly pass PEFT state + dicts + adapter_kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the `from_pretrained` method of the adapter config and + `find_adapter_config_file` method. + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + adapter_name = adapter_name if adapter_name is not None else "default" + if adapter_kwargs is None: + adapter_kwargs = {} + + from peft import PeftConfig, inject_adapter_in_model, load_peft_weights + from peft.utils import set_peft_model_state_dict + + if self._hf_peft_config_loaded and adapter_name in self.peft_config: + raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.") + + if peft_model_id is None and (adapter_state_dict is None and peft_config is None): + raise ValueError( + "You should either pass a `peft_model_id` or a `peft_config` and `adapter_state_dict` to load an adapter." + ) + + # We keep `revision` in the signature for backward compatibility + if revision is not None and "revision" not in adapter_kwargs: + adapter_kwargs["revision"] = revision + elif revision is not None and "revision" in adapter_kwargs and revision != adapter_kwargs["revision"]: + logger.error( + "You passed a `revision` argument both in `adapter_kwargs` and as a standalone argument. " + "The one in `adapter_kwargs` will be used." + ) + + # Override token with adapter_kwargs' token + if "token" in adapter_kwargs: + token = adapter_kwargs.pop("token") + + if peft_config is None: + adapter_config_file = find_adapter_config_file( + peft_model_id, + token=token, + **adapter_kwargs, + ) + + if adapter_config_file is None: + raise ValueError( + f"adapter model file not found in {peft_model_id}. Make sure you are passing the correct path to the " + "adapter model." + ) + + peft_config = PeftConfig.from_pretrained( + peft_model_id, + token=token, + **adapter_kwargs, + ) + + # Create and add fresh new adapters into the model. + inject_adapter_in_model(peft_config, self, adapter_name) + + if not self._hf_peft_config_loaded: + self._hf_peft_config_loaded = True + + if peft_model_id is not None: + adapter_state_dict = load_peft_weights(peft_model_id, token=token, **adapter_kwargs) + + # We need to pre-process the state dict to remove unneeded prefixes - for backward compatibility + processed_adapter_state_dict = {} + prefix = "base_model.model." 
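+        # Adapter checkpoints saved through a PEFT `PeftModel` wrapper store their keys under the
+        # "base_model.model." prefix, whereas here the adapter layers are injected directly into this
+        # model, so the prefix is stripped for the keys to line up with the module names.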
+ for key, value in adapter_state_dict.items(): + if key.startswith(prefix): + new_key = key[len(prefix) :] + else: + new_key = key + processed_adapter_state_dict[new_key] = value + + # Load state dict + incompatible_keys = set_peft_model_state_dict(self, processed_adapter_state_dict, adapter_name) + + if incompatible_keys is not None: + # check only for unexpected keys + if hasattr(incompatible_keys, "unexpected_keys") and len(incompatible_keys.unexpected_keys) > 0: + logger.warning( + f"Loading adapter weights from {peft_model_id} led to unexpected keys not found in the model: " + f" {incompatible_keys.unexpected_keys}. " + ) + + # Re-dispatch model and hooks in case the model is offloaded to CPU / Disk. + if ( + (getattr(self, "hf_device_map", None) is not None) + and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0) + and len(self.peft_config) == 1 + ): + self._dispatch_accelerate_model( + device_map=device_map, + max_memory=max_memory, + offload_folder=offload_folder, + offload_index=offload_index, + ) + + def add_adapter(self, adapter_config, adapter_name: Optional[str] = None) -> None: + r""" + If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT + official documentation: https://huggingface.co/docs/peft + + Adds a fresh new adapter to the current model for training purpose. If no adapter name is passed, a default + name is assigned to the adapter to follow the convention of PEFT library (in PEFT we use "default" as the + default adapter name). + + Args: + adapter_config (`~peft.PeftConfig`): + The configuration of the adapter to add, supported adapters are non-prefix tuning and adaption prompts + methods + adapter_name (`str`, *optional*, defaults to `"default"`): + The name of the adapter to add. If no name is passed, a default name is assigned to the adapter. + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + from peft import PeftConfig, inject_adapter_in_model + + adapter_name = adapter_name or "default" + + if not self._hf_peft_config_loaded: + self._hf_peft_config_loaded = True + elif adapter_name in self.peft_config: + raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.") + + if not isinstance(adapter_config, PeftConfig): + raise ValueError( + f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead." + ) + + # Retrieve the name or path of the model, one could also use self.config._name_or_path + # but to be consistent with what we do in PEFT: https://github.com/huggingface/peft/blob/6e783780ca9df3a623992cc4d1d665001232eae0/src/peft/mapping.py#L100 + adapter_config.base_model_name_or_path = self.__dict__.get("name_or_path", None) + inject_adapter_in_model(adapter_config, self, adapter_name) + + self.set_adapter(adapter_name) + + def set_adapter(self, adapter_name: Union[List[str], str]) -> None: + """ + If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT + official documentation: https://huggingface.co/docs/peft + + Sets a specific adapter by forcing the model to use a that adapter and disable the other adapters. + + Args: + adapter_name (`Union[List[str], str]`): + The name of the adapter to set. Can be also a list of strings to set multiple adapters. + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + if not self._hf_peft_config_loaded: + raise ValueError("No adapter loaded. 
Please load an adapter first.") + elif isinstance(adapter_name, list): + missing = set(adapter_name) - set(self.peft_config) + if len(missing) > 0: + raise ValueError( + f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s)." + f" current loaded adapters are: {list(self.peft_config.keys())}" + ) + elif adapter_name not in self.peft_config: + raise ValueError( + f"Adapter with name {adapter_name} not found. Please pass the correct adapter name among {list(self.peft_config.keys())}" + ) + + from peft.tuners.tuners_utils import BaseTunerLayer + from peft.utils import ModulesToSaveWrapper + + _adapters_has_been_set = False + + for _, module in self.named_modules(): + if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): + # For backward compatbility with previous PEFT versions + if hasattr(module, "set_adapter"): + module.set_adapter(adapter_name) + else: + module.active_adapter = adapter_name + _adapters_has_been_set = True + + if not _adapters_has_been_set: + raise ValueError( + "Did not succeeded in setting the adapter. Please make sure you are using a model that supports adapters." + ) + + def disable_adapters(self) -> None: + r""" + If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT + official documentation: https://huggingface.co/docs/peft + + Disable all adapters that are attached to the model. This leads to inferring with the base model only. + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not self._hf_peft_config_loaded: + raise ValueError("No adapter loaded. Please load an adapter first.") + + from peft.tuners.tuners_utils import BaseTunerLayer + from peft.utils import ModulesToSaveWrapper + + for _, module in self.named_modules(): + if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): + # The recent version of PEFT need to call `enable_adapters` instead + if hasattr(module, "enable_adapters"): + module.enable_adapters(enabled=False) + else: + module.disable_adapters = True + + def enable_adapters(self) -> None: + """ + If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT + official documentation: https://huggingface.co/docs/peft + + Enable adapters that are attached to the model. The model will use `self.active_adapter()` + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not self._hf_peft_config_loaded: + raise ValueError("No adapter loaded. Please load an adapter first.") + + from peft.tuners.tuners_utils import BaseTunerLayer + + for _, module in self.named_modules(): + if isinstance(module, BaseTunerLayer): + # The recent version of PEFT need to call `enable_adapters` instead + if hasattr(module, "enable_adapters"): + module.enable_adapters(enabled=True) + else: + module.disable_adapters = False + + def active_adapters(self) -> List[str]: + """ + If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT + official documentation: https://huggingface.co/docs/peft + + Gets the current active adapters of the model. In case of multi-adapter inference (combining multiple adapters + for inference) returns the list of all active adapters so that users can deal with them accordingly. + + For previous PEFT versions (that does not support multi-adapter inference), `module.active_adapter` will return + a single string. 
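+
+        Example (illustrative; assumes an adapter has already been attached with `load_adapter` or `add_adapter`):
+
+        ```python
+        names = model.active_adapters()
+        print(names)  # e.g. ["default"]
+        ```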
+ """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not is_peft_available(): + raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.") + + if not self._hf_peft_config_loaded: + raise ValueError("No adapter loaded. Please load an adapter first.") + + from peft.tuners.tuners_utils import BaseTunerLayer + + for _, module in self.named_modules(): + if isinstance(module, BaseTunerLayer): + active_adapters = module.active_adapter + break + + # For previous PEFT versions + if isinstance(active_adapters, str): + active_adapters = [active_adapters] + + return active_adapters + + def active_adapter(self) -> str: + warnings.warn( + "The `active_adapter` method is deprecated and will be removed in a future version.", FutureWarning + ) + + return self.active_adapters()[0] + + def get_adapter_state_dict(self, adapter_name: Optional[str] = None) -> dict: + """ + If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT + official documentation: https://huggingface.co/docs/peft + + Gets the adapter state dict that should only contain the weights tensors of the specified adapter_name adapter. + If no adapter_name is passed, the active adapter is used. + + Args: + adapter_name (`str`, *optional*): + The name of the adapter to get the state dict from. If no name is passed, the active adapter is used. + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not self._hf_peft_config_loaded: + raise ValueError("No adapter loaded. Please load an adapter first.") + + from peft import get_peft_model_state_dict + + if adapter_name is None: + adapter_name = self.active_adapter() + + adapter_state_dict = get_peft_model_state_dict(self, adapter_name=adapter_name) + return adapter_state_dict + + def _dispatch_accelerate_model( + self, + device_map: str, + max_memory: Optional[int] = None, + offload_folder: Optional[str] = None, + offload_index: Optional[int] = None, + ) -> None: + """ + Optional re-dispatch the model and attach new hooks to the model in case the model has been loaded with + accelerate (i.e. with `device_map=xxx`) + + Args: + device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*): + A map that specifies where each submodule should go. It doesn't need to be refined to each + parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the + same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank + like `1`) on which the model will be allocated, the device map will map the entire model to this + device. Passing `device_map = 0` means put the whole model on GPU 0. + + To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier to maximum memory. Will default to the maximum memory available for each + GPU and the available CPU RAM if unset. + offload_folder (`str` or `os.PathLike`, *optional*): + If the `device_map` contains any value `"disk"`, the folder where we will offload weights. + offload_index (`int`, *optional*): + The offload_index argument to be passed to `accelerate.dispatch_model` method. 
+ """ + dispatch_model_kwargs = {} + # Safety checker for previous `accelerate` versions + # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/ + if "offload_index" in inspect.signature(dispatch_model).parameters: + dispatch_model_kwargs["offload_index"] = offload_index + + no_split_module_classes = self._no_split_modules + + if device_map != "sequential": + max_memory = get_balanced_memory( + self, + max_memory=max_memory, + no_split_module_classes=no_split_module_classes, + low_zero=(device_map == "balanced_low_0"), + ) + if isinstance(device_map, str): + device_map = infer_auto_device_map( + self, max_memory=max_memory, no_split_module_classes=no_split_module_classes + ) + dispatch_model( + self, + device_map=device_map, + offload_dir=offload_folder, + **dispatch_model_kwargs, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/quanto.py b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/quanto.py new file mode 100644 index 0000000000000000000000000000000000000000..67fe9166d334e5732f1476801ec8fd62ce6b95b1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/quanto.py @@ -0,0 +1,94 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ..utils import is_torch_available + + +if is_torch_available(): + import torch + + +def replace_with_quanto_layers( + model, + quantization_config=None, + modules_to_not_convert=None, + current_key_name=None, + has_been_replaced=False, +): + """ + Public method that recursively replaces the Linear layers of the given model with Quanto quantized layers. + Returns the converted model and a boolean that indicates if the conversion has been successfull or not. + + Args: + model (`torch.nn.Module`): + The model to convert, can be any `torch.nn.Module` instance. + quantization_config (`AqlmConfig`, defaults to `None`): + The quantization config object that contains the quantization parameters. + modules_to_not_convert (`list`, *optional*, defaults to `None`): + A list of modules to not convert. If a module name is in the list (e.g. `lm_head`), it will not be + converted. + current_key_name (`list`, *optional*, defaults to `None`): + A list that contains the current key name. This is used for recursion and should not be passed by the user. + has_been_replaced (`bool`, *optional*, defaults to `None`): + A boolean that indicates if the conversion has been successful or not. This is used for recursion and + should not be passed by the user. 
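+
+    Example (an illustrative sketch; in practice this function is invoked for you when a model is loaded with a
+    quanto `quantization_config`, so the snippet below relies on that entry point rather than calling it directly,
+    and the checkpoint name is only a placeholder):
+
+    ```python
+    from transformers import AutoModelForCausalLM, QuantoConfig
+
+    # Loading with a QuantoConfig triggers the layer replacement performed by this function.
+    model = AutoModelForCausalLM.from_pretrained("gpt2", quantization_config=QuantoConfig(weights="int8"))
+    ```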
+ """ + from accelerate import init_empty_weights + from quanto import QLayerNorm, QLinear, qfloat8, qint2, qint4, qint8 + + w_mapping = {"float8": qfloat8, "int8": qint8, "int4": qint4, "int2": qint2} + a_mapping = {None: None, "float8": qfloat8, "int8": qint8} + + if modules_to_not_convert is None: + modules_to_not_convert = [] + + for name, module in model.named_children(): + if current_key_name is None: + current_key_name = [] + current_key_name.append(name) + + if not any(key in ".".join(current_key_name) for key in modules_to_not_convert): + with init_empty_weights(): + if isinstance(module, torch.nn.Linear): + model._modules[name] = QLinear( + in_features=module.in_features, + out_features=module.out_features, + bias=module.bias is not None, + dtype=module.weight.dtype, + weights=w_mapping[quantization_config.weights], + activations=a_mapping[quantization_config.activations], + ) + model._modules[name].requires_grad_(False) + has_been_replaced = True + elif isinstance(module, torch.nn.LayerNorm): + if quantization_config.activations is not None: + model._modules[name] = QLayerNorm( + module.normalized_shape, + module.eps, + module.elementwise_affine, + module.bias is not None, + activations=a_mapping[quantization_config.activations], + ) + has_been_replaced = True + if len(list(module.children())) > 0: + _, has_been_replaced = replace_with_quanto_layers( + module, + quantization_config=quantization_config, + modules_to_not_convert=modules_to_not_convert, + current_key_name=current_key_name, + has_been_replaced=has_been_replaced, + ) + # Remove the last key for recursion + current_key_name.pop(-1) + return model, has_been_replaced diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/integrations/tpu.py b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/tpu.py new file mode 100644 index 0000000000000000000000000000000000000000..29262789dc98558ecc872b9e84cda4468e4b85ea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/integrations/tpu.py @@ -0,0 +1,36 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from torch.utils.data import DataLoader + +from ..utils import is_torch_xla_available + + +def tpu_spmd_dataloader(dataloader: DataLoader): + if is_torch_xla_available(): + import torch_xla.distributed.parallel_loader as pl + + assert isinstance( + dataloader, pl.MpDeviceLoader + ), "The dataloader must be a `torch_xla.distributed.parallel_loader.MpDeviceLoader`." + + # This is to support PyTorch/XLA FSDP via SPMD. + # Here we shard the input data's 0th dim across the fsdp axis. 
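+        # The ShardingSpec built below maps dim 0 (the batch dimension) onto the "fsdp" axis of the
+        # global SPMD mesh and leaves the other dimensions replicated; it is passed to the
+        # MpDeviceLoader via its `input_sharding` keyword so batches are sharded as they are loaded.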
+ import torch_xla.distributed.spmd as xs + + sharding_spec = xs.ShardingSpec(xs.get_global_mesh(), ("fsdp", None)) + dataloader._parallel_loader_kwargs["input_sharding"] = sharding_spec + return dataloader + else: + return dataloader diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..94befaa851d9f73b9503811483583f39ed1f239c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__init__.py @@ -0,0 +1,1108 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import os +import warnings +from pathlib import Path +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union + +from huggingface_hub import model_info + +from ..configuration_utils import PretrainedConfig +from ..dynamic_module_utils import get_class_from_dynamic_module +from ..feature_extraction_utils import PreTrainedFeatureExtractor +from ..image_processing_utils import BaseImageProcessor +from ..models.auto.configuration_auto import AutoConfig +from ..models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor +from ..models.auto.image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor +from ..models.auto.modeling_auto import AutoModelForDepthEstimation, AutoModelForImageToImage +from ..models.auto.tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer +from ..tokenization_utils import PreTrainedTokenizer +from ..utils import ( + CONFIG_NAME, + HUGGINGFACE_CO_RESOLVE_ENDPOINT, + cached_file, + extract_commit_hash, + find_adapter_config_file, + is_kenlm_available, + is_offline_mode, + is_peft_available, + is_pyctcdecode_available, + is_tf_available, + is_torch_available, + logging, +) +from .audio_classification import AudioClassificationPipeline +from .automatic_speech_recognition import AutomaticSpeechRecognitionPipeline +from .base import ( + ArgumentHandler, + CsvPipelineDataFormat, + JsonPipelineDataFormat, + PipedPipelineDataFormat, + Pipeline, + PipelineDataFormat, + PipelineException, + PipelineRegistry, + get_default_model_and_revision, + infer_framework_load_model, +) +from .conversational import Conversation, ConversationalPipeline +from .depth_estimation import DepthEstimationPipeline +from .document_question_answering import DocumentQuestionAnsweringPipeline +from .feature_extraction import FeatureExtractionPipeline +from .fill_mask import FillMaskPipeline +from .image_classification import ImageClassificationPipeline +from .image_feature_extraction import ImageFeatureExtractionPipeline +from .image_segmentation import ImageSegmentationPipeline +from .image_to_image import ImageToImagePipeline +from .image_to_text import ImageToTextPipeline +from .mask_generation import MaskGenerationPipeline +from .object_detection import ObjectDetectionPipeline +from .question_answering 
import QuestionAnsweringArgumentHandler, QuestionAnsweringPipeline +from .table_question_answering import TableQuestionAnsweringArgumentHandler, TableQuestionAnsweringPipeline +from .text2text_generation import SummarizationPipeline, Text2TextGenerationPipeline, TranslationPipeline +from .text_classification import TextClassificationPipeline +from .text_generation import TextGenerationPipeline +from .text_to_audio import TextToAudioPipeline +from .token_classification import ( + AggregationStrategy, + NerPipeline, + TokenClassificationArgumentHandler, + TokenClassificationPipeline, +) +from .video_classification import VideoClassificationPipeline +from .visual_question_answering import VisualQuestionAnsweringPipeline +from .zero_shot_audio_classification import ZeroShotAudioClassificationPipeline +from .zero_shot_classification import ZeroShotClassificationArgumentHandler, ZeroShotClassificationPipeline +from .zero_shot_image_classification import ZeroShotImageClassificationPipeline +from .zero_shot_object_detection import ZeroShotObjectDetectionPipeline + + +if is_tf_available(): + import tensorflow as tf + + from ..models.auto.modeling_tf_auto import ( + TFAutoModel, + TFAutoModelForCausalLM, + TFAutoModelForImageClassification, + TFAutoModelForMaskedLM, + TFAutoModelForQuestionAnswering, + TFAutoModelForSeq2SeqLM, + TFAutoModelForSequenceClassification, + TFAutoModelForTableQuestionAnswering, + TFAutoModelForTokenClassification, + TFAutoModelForVision2Seq, + TFAutoModelForZeroShotImageClassification, + ) + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import ( + AutoModel, + AutoModelForAudioClassification, + AutoModelForCausalLM, + AutoModelForCTC, + AutoModelForDocumentQuestionAnswering, + AutoModelForImageClassification, + AutoModelForImageSegmentation, + AutoModelForMaskedLM, + AutoModelForMaskGeneration, + AutoModelForObjectDetection, + AutoModelForQuestionAnswering, + AutoModelForSemanticSegmentation, + AutoModelForSeq2SeqLM, + AutoModelForSequenceClassification, + AutoModelForSpeechSeq2Seq, + AutoModelForTableQuestionAnswering, + AutoModelForTextToSpectrogram, + AutoModelForTextToWaveform, + AutoModelForTokenClassification, + AutoModelForVideoClassification, + AutoModelForVision2Seq, + AutoModelForVisualQuestionAnswering, + AutoModelForZeroShotImageClassification, + AutoModelForZeroShotObjectDetection, + ) + + +if TYPE_CHECKING: + from ..modeling_tf_utils import TFPreTrainedModel + from ..modeling_utils import PreTrainedModel + from ..tokenization_utils_fast import PreTrainedTokenizerFast + + +logger = logging.get_logger(__name__) + + +# Register all the supported tasks here +TASK_ALIASES = { + "sentiment-analysis": "text-classification", + "ner": "token-classification", + "vqa": "visual-question-answering", + "text-to-speech": "text-to-audio", +} +SUPPORTED_TASKS = { + "audio-classification": { + "impl": AudioClassificationPipeline, + "tf": (), + "pt": (AutoModelForAudioClassification,) if is_torch_available() else (), + "default": {"model": {"pt": ("superb/wav2vec2-base-superb-ks", "372e048")}}, + "type": "audio", + }, + "automatic-speech-recognition": { + "impl": AutomaticSpeechRecognitionPipeline, + "tf": (), + "pt": (AutoModelForCTC, AutoModelForSpeechSeq2Seq) if is_torch_available() else (), + "default": {"model": {"pt": ("facebook/wav2vec2-base-960h", "55bb623")}}, + "type": "multimodal", + }, + "text-to-audio": { + "impl": TextToAudioPipeline, + "tf": (), + "pt": (AutoModelForTextToWaveform, AutoModelForTextToSpectrogram) if 
is_torch_available() else (), + "default": {"model": {"pt": ("suno/bark-small", "645cfba")}}, + "type": "text", + }, + "feature-extraction": { + "impl": FeatureExtractionPipeline, + "tf": (TFAutoModel,) if is_tf_available() else (), + "pt": (AutoModel,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("distilbert/distilbert-base-cased", "935ac13"), + "tf": ("distilbert/distilbert-base-cased", "935ac13"), + } + }, + "type": "multimodal", + }, + "text-classification": { + "impl": TextClassificationPipeline, + "tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (), + "pt": (AutoModelForSequenceClassification,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("distilbert/distilbert-base-uncased-finetuned-sst-2-english", "af0f99b"), + "tf": ("distilbert/distilbert-base-uncased-finetuned-sst-2-english", "af0f99b"), + }, + }, + "type": "text", + }, + "token-classification": { + "impl": TokenClassificationPipeline, + "tf": (TFAutoModelForTokenClassification,) if is_tf_available() else (), + "pt": (AutoModelForTokenClassification,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("dbmdz/bert-large-cased-finetuned-conll03-english", "f2482bf"), + "tf": ("dbmdz/bert-large-cased-finetuned-conll03-english", "f2482bf"), + }, + }, + "type": "text", + }, + "question-answering": { + "impl": QuestionAnsweringPipeline, + "tf": (TFAutoModelForQuestionAnswering,) if is_tf_available() else (), + "pt": (AutoModelForQuestionAnswering,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("distilbert/distilbert-base-cased-distilled-squad", "626af31"), + "tf": ("distilbert/distilbert-base-cased-distilled-squad", "626af31"), + }, + }, + "type": "text", + }, + "table-question-answering": { + "impl": TableQuestionAnsweringPipeline, + "pt": (AutoModelForTableQuestionAnswering,) if is_torch_available() else (), + "tf": (TFAutoModelForTableQuestionAnswering,) if is_tf_available() else (), + "default": { + "model": { + "pt": ("google/tapas-base-finetuned-wtq", "69ceee2"), + "tf": ("google/tapas-base-finetuned-wtq", "69ceee2"), + }, + }, + "type": "text", + }, + "visual-question-answering": { + "impl": VisualQuestionAnsweringPipeline, + "pt": (AutoModelForVisualQuestionAnswering,) if is_torch_available() else (), + "tf": (), + "default": { + "model": {"pt": ("dandelin/vilt-b32-finetuned-vqa", "4355f59")}, + }, + "type": "multimodal", + }, + "document-question-answering": { + "impl": DocumentQuestionAnsweringPipeline, + "pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (), + "tf": (), + "default": { + "model": {"pt": ("impira/layoutlm-document-qa", "52e01b3")}, + }, + "type": "multimodal", + }, + "fill-mask": { + "impl": FillMaskPipeline, + "tf": (TFAutoModelForMaskedLM,) if is_tf_available() else (), + "pt": (AutoModelForMaskedLM,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("distilbert/distilroberta-base", "ec58a5b"), + "tf": ("distilbert/distilroberta-base", "ec58a5b"), + } + }, + "type": "text", + }, + "summarization": { + "impl": SummarizationPipeline, + "tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (), + "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (), + "default": { + "model": {"pt": ("sshleifer/distilbart-cnn-12-6", "a4f8f3e"), "tf": ("google-t5/t5-small", "d769bba")} + }, + "type": "text", + }, + # This task is a special case as it's parametrized by SRC, TGT languages. 
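+    # A concrete task such as "translation_en_to_fr" is resolved by the registry to the ("en", "fr")
+    # defaults entry below; the model classes themselves are shared across all language pairs.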
+ "translation": { + "impl": TranslationPipeline, + "tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (), + "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (), + "default": { + ("en", "fr"): {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}}, + ("en", "de"): {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}}, + ("en", "ro"): {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}}, + }, + "type": "text", + }, + "text2text-generation": { + "impl": Text2TextGenerationPipeline, + "tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (), + "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (), + "default": {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}}, + "type": "text", + }, + "text-generation": { + "impl": TextGenerationPipeline, + "tf": (TFAutoModelForCausalLM,) if is_tf_available() else (), + "pt": (AutoModelForCausalLM,) if is_torch_available() else (), + "default": {"model": {"pt": ("openai-community/gpt2", "6c0e608"), "tf": ("openai-community/gpt2", "6c0e608")}}, + "type": "text", + }, + "zero-shot-classification": { + "impl": ZeroShotClassificationPipeline, + "tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (), + "pt": (AutoModelForSequenceClassification,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("facebook/bart-large-mnli", "c626438"), + "tf": ("FacebookAI/roberta-large-mnli", "130fb28"), + }, + "config": { + "pt": ("facebook/bart-large-mnli", "c626438"), + "tf": ("FacebookAI/roberta-large-mnli", "130fb28"), + }, + }, + "type": "text", + }, + "zero-shot-image-classification": { + "impl": ZeroShotImageClassificationPipeline, + "tf": (TFAutoModelForZeroShotImageClassification,) if is_tf_available() else (), + "pt": (AutoModelForZeroShotImageClassification,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("openai/clip-vit-base-patch32", "f4881ba"), + "tf": ("openai/clip-vit-base-patch32", "f4881ba"), + } + }, + "type": "multimodal", + }, + "zero-shot-audio-classification": { + "impl": ZeroShotAudioClassificationPipeline, + "tf": (), + "pt": (AutoModel,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("laion/clap-htsat-fused", "973b6e5"), + } + }, + "type": "multimodal", + }, + "conversational": { + "impl": ConversationalPipeline, + "tf": (TFAutoModelForSeq2SeqLM, TFAutoModelForCausalLM) if is_tf_available() else (), + "pt": (AutoModelForSeq2SeqLM, AutoModelForCausalLM) if is_torch_available() else (), + "default": { + "model": {"pt": ("microsoft/DialoGPT-medium", "8bada3b"), "tf": ("microsoft/DialoGPT-medium", "8bada3b")} + }, + "type": "text", + }, + "image-classification": { + "impl": ImageClassificationPipeline, + "tf": (TFAutoModelForImageClassification,) if is_tf_available() else (), + "pt": (AutoModelForImageClassification,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("google/vit-base-patch16-224", "5dca96d"), + "tf": ("google/vit-base-patch16-224", "5dca96d"), + } + }, + "type": "image", + }, + "image-feature-extraction": { + "impl": ImageFeatureExtractionPipeline, + "tf": (TFAutoModel,) if is_tf_available() else (), + "pt": (AutoModel,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("google/vit-base-patch16-224", "3f49326"), + "tf": ("google/vit-base-patch16-224", "3f49326"), + } + }, + "type": "image", + }, + "image-segmentation": { + "impl": 
ImageSegmentationPipeline,
+        "tf": (),
+        "pt": (AutoModelForImageSegmentation, AutoModelForSemanticSegmentation) if is_torch_available() else (),
+        "default": {"model": {"pt": ("facebook/detr-resnet-50-panoptic", "fc15262")}},
+        "type": "multimodal",
+    },
+    "image-to-text": {
+        "impl": ImageToTextPipeline,
+        "tf": (TFAutoModelForVision2Seq,) if is_tf_available() else (),
+        "pt": (AutoModelForVision2Seq,) if is_torch_available() else (),
+        "default": {
+            "model": {
+                "pt": ("ydshieh/vit-gpt2-coco-en", "65636df"),
+                "tf": ("ydshieh/vit-gpt2-coco-en", "65636df"),
+            }
+        },
+        "type": "multimodal",
+    },
+    "object-detection": {
+        "impl": ObjectDetectionPipeline,
+        "tf": (),
+        "pt": (AutoModelForObjectDetection,) if is_torch_available() else (),
+        "default": {"model": {"pt": ("facebook/detr-resnet-50", "2729413")}},
+        "type": "multimodal",
+    },
+    "zero-shot-object-detection": {
+        "impl": ZeroShotObjectDetectionPipeline,
+        "tf": (),
+        "pt": (AutoModelForZeroShotObjectDetection,) if is_torch_available() else (),
+        "default": {"model": {"pt": ("google/owlvit-base-patch32", "17740e1")}},
+        "type": "multimodal",
+    },
+    "depth-estimation": {
+        "impl": DepthEstimationPipeline,
+        "tf": (),
+        "pt": (AutoModelForDepthEstimation,) if is_torch_available() else (),
+        "default": {"model": {"pt": ("Intel/dpt-large", "e93beec")}},
+        "type": "image",
+    },
+    "video-classification": {
+        "impl": VideoClassificationPipeline,
+        "tf": (),
+        "pt": (AutoModelForVideoClassification,) if is_torch_available() else (),
+        "default": {"model": {"pt": ("MCG-NJU/videomae-base-finetuned-kinetics", "4800870")}},
+        "type": "video",
+    },
+    "mask-generation": {
+        "impl": MaskGenerationPipeline,
+        "tf": (),
+        "pt": (AutoModelForMaskGeneration,) if is_torch_available() else (),
+        "default": {"model": {"pt": ("facebook/sam-vit-huge", "997b15")}},
+        "type": "multimodal",
+    },
+    "image-to-image": {
+        "impl": ImageToImagePipeline,
+        "tf": (),
+        "pt": (AutoModelForImageToImage,) if is_torch_available() else (),
+        "default": {"model": {"pt": ("caidas/swin2SR-classical-sr-x2-64", "4aaedcb")}},
+        "type": "image",
+    },
+}
+
+NO_FEATURE_EXTRACTOR_TASKS = set()
+NO_IMAGE_PROCESSOR_TASKS = set()
+NO_TOKENIZER_TASKS = set()
+
+# Those model configs are special, they are generic over their task, meaning
+# any tokenizer/feature_extractor might be used for a given model, so we cannot
+# use the statically defined TOKENIZER_MAPPING and FEATURE_EXTRACTOR_MAPPING to
+# see if the model defines such objects or not.
+MULTI_MODEL_AUDIO_CONFIGS = {"SpeechEncoderDecoderConfig"}
+MULTI_MODEL_VISION_CONFIGS = {"VisionEncoderDecoderConfig", "VisionTextDualEncoderConfig"}
+for task, values in SUPPORTED_TASKS.items():
+    if values["type"] == "text":
+        NO_FEATURE_EXTRACTOR_TASKS.add(task)
+        NO_IMAGE_PROCESSOR_TASKS.add(task)
+    elif values["type"] in {"image", "video"}:
+        NO_TOKENIZER_TASKS.add(task)
+    elif values["type"] in {"audio"}:
+        NO_TOKENIZER_TASKS.add(task)
+        NO_IMAGE_PROCESSOR_TASKS.add(task)
+    elif values["type"] != "multimodal":
+        raise ValueError(f"SUPPORTED_TASK {task} contains invalid type {values['type']}")
+
+PIPELINE_REGISTRY = PipelineRegistry(supported_tasks=SUPPORTED_TASKS, task_aliases=TASK_ALIASES)
+
+
+def get_supported_tasks() -> List[str]:
+    """
+    Returns a list of supported task strings.
+ """ + return PIPELINE_REGISTRY.get_supported_tasks() + + +def get_task(model: str, token: Optional[str] = None, **deprecated_kwargs) -> str: + use_auth_token = deprecated_kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") + token = use_auth_token + + if is_offline_mode(): + raise RuntimeError("You cannot infer task automatically within `pipeline` when using offline mode") + try: + info = model_info(model, token=token) + except Exception as e: + raise RuntimeError(f"Instantiating a pipeline without a task set raised an error: {e}") + if not info.pipeline_tag: + raise RuntimeError( + f"The model {model} does not seem to have a correct `pipeline_tag` set to infer the task automatically" + ) + if getattr(info, "library_name", "transformers") != "transformers": + raise RuntimeError(f"This model is meant to be used with {info.library_name} not with transformers") + task = info.pipeline_tag + return task + + +def check_task(task: str) -> Tuple[str, Dict, Any]: + """ + Checks an incoming task string, to validate it's correct and return the default Pipeline and Model classes, and + default models if they exist. + + Args: + task (`str`): + The task defining which pipeline will be returned. Currently accepted tasks are: + + - `"audio-classification"` + - `"automatic-speech-recognition"` + - `"conversational"` + - `"depth-estimation"` + - `"document-question-answering"` + - `"feature-extraction"` + - `"fill-mask"` + - `"image-classification"` + - `"image-feature-extraction"` + - `"image-segmentation"` + - `"image-to-text"` + - `"image-to-image"` + - `"object-detection"` + - `"question-answering"` + - `"summarization"` + - `"table-question-answering"` + - `"text2text-generation"` + - `"text-classification"` (alias `"sentiment-analysis"` available) + - `"text-generation"` + - `"text-to-audio"` (alias `"text-to-speech"` available) + - `"token-classification"` (alias `"ner"` available) + - `"translation"` + - `"translation_xx_to_yy"` + - `"video-classification"` + - `"visual-question-answering"` (alias `"vqa"` available) + - `"zero-shot-classification"` + - `"zero-shot-image-classification"` + - `"zero-shot-object-detection"` + + Returns: + (normalized_task: `str`, task_defaults: `dict`, task_options: (`tuple`, None)) The normalized task name + (removed alias and options). 
The actual dictionary required to initialize the pipeline and some extra task + options for parametrized tasks like "translation_XX_to_YY" + + + """ + return PIPELINE_REGISTRY.check_task(task) + + +def clean_custom_task(task_info): + import transformers + + if "impl" not in task_info: + raise RuntimeError("This model introduces a custom pipeline without specifying its implementation.") + pt_class_names = task_info.get("pt", ()) + if isinstance(pt_class_names, str): + pt_class_names = [pt_class_names] + task_info["pt"] = tuple(getattr(transformers, c) for c in pt_class_names) + tf_class_names = task_info.get("tf", ()) + if isinstance(tf_class_names, str): + tf_class_names = [tf_class_names] + task_info["tf"] = tuple(getattr(transformers, c) for c in tf_class_names) + return task_info, None + + +def pipeline( + task: str = None, + model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, + config: Optional[Union[str, PretrainedConfig]] = None, + tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, + feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, + image_processor: Optional[Union[str, BaseImageProcessor]] = None, + framework: Optional[str] = None, + revision: Optional[str] = None, + use_fast: bool = True, + token: Optional[Union[str, bool]] = None, + device: Optional[Union[int, str, "torch.device"]] = None, + device_map=None, + torch_dtype=None, + trust_remote_code: Optional[bool] = None, + model_kwargs: Dict[str, Any] = None, + pipeline_class: Optional[Any] = None, + **kwargs, +) -> Pipeline: + """ + Utility factory method to build a [`Pipeline`]. + + Pipelines are made of: + + - A [tokenizer](tokenizer) in charge of mapping raw textual input to token. + - A [model](model) to make predictions from the inputs. + - Some (optional) post processing for enhancing model's output. + + Args: + task (`str`): + The task defining which pipeline will be returned. Currently accepted tasks are: + + - `"audio-classification"`: will return a [`AudioClassificationPipeline`]. + - `"automatic-speech-recognition"`: will return a [`AutomaticSpeechRecognitionPipeline`]. + - `"conversational"`: will return a [`ConversationalPipeline`]. + - `"depth-estimation"`: will return a [`DepthEstimationPipeline`]. + - `"document-question-answering"`: will return a [`DocumentQuestionAnsweringPipeline`]. + - `"feature-extraction"`: will return a [`FeatureExtractionPipeline`]. + - `"fill-mask"`: will return a [`FillMaskPipeline`]:. + - `"image-classification"`: will return a [`ImageClassificationPipeline`]. + - `"image-feature-extraction"`: will return an [`ImageFeatureExtractionPipeline`]. + - `"image-segmentation"`: will return a [`ImageSegmentationPipeline`]. + - `"image-to-image"`: will return a [`ImageToImagePipeline`]. + - `"image-to-text"`: will return a [`ImageToTextPipeline`]. + - `"mask-generation"`: will return a [`MaskGenerationPipeline`]. + - `"object-detection"`: will return a [`ObjectDetectionPipeline`]. + - `"question-answering"`: will return a [`QuestionAnsweringPipeline`]. + - `"summarization"`: will return a [`SummarizationPipeline`]. + - `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`]. + - `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`]. + - `"text-classification"` (alias `"sentiment-analysis"` available): will return a + [`TextClassificationPipeline`]. + - `"text-generation"`: will return a [`TextGenerationPipeline`]:. 
+ - `"text-to-audio"` (alias `"text-to-speech"` available): will return a [`TextToAudioPipeline`]:. + - `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`]. + - `"translation"`: will return a [`TranslationPipeline`]. + - `"translation_xx_to_yy"`: will return a [`TranslationPipeline`]. + - `"video-classification"`: will return a [`VideoClassificationPipeline`]. + - `"visual-question-answering"`: will return a [`VisualQuestionAnsweringPipeline`]. + - `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`]. + - `"zero-shot-image-classification"`: will return a [`ZeroShotImageClassificationPipeline`]. + - `"zero-shot-audio-classification"`: will return a [`ZeroShotAudioClassificationPipeline`]. + - `"zero-shot-object-detection"`: will return a [`ZeroShotObjectDetectionPipeline`]. + + model (`str` or [`PreTrainedModel`] or [`TFPreTrainedModel`], *optional*): + The model that will be used by the pipeline to make predictions. This can be a model identifier or an + actual instance of a pretrained model inheriting from [`PreTrainedModel`] (for PyTorch) or + [`TFPreTrainedModel`] (for TensorFlow). + + If not provided, the default for the `task` will be loaded. + config (`str` or [`PretrainedConfig`], *optional*): + The configuration that will be used by the pipeline to instantiate the model. This can be a model + identifier or an actual pretrained model configuration inheriting from [`PretrainedConfig`]. + + If not provided, the default configuration file for the requested model will be used. That means that if + `model` is given, its default configuration will be used. However, if `model` is not supplied, this + `task`'s default model's config is used instead. + tokenizer (`str` or [`PreTrainedTokenizer`], *optional*): + The tokenizer that will be used by the pipeline to encode data for the model. This can be a model + identifier or an actual pretrained tokenizer inheriting from [`PreTrainedTokenizer`]. + + If not provided, the default tokenizer for the given `model` will be loaded (if it is a string). If `model` + is not specified or not a string, then the default tokenizer for `config` is loaded (if it is a string). + However, if `config` is also not given or not a string, then the default tokenizer for the given `task` + will be loaded. + feature_extractor (`str` or [`PreTrainedFeatureExtractor`], *optional*): + The feature extractor that will be used by the pipeline to encode data for the model. This can be a model + identifier or an actual pretrained feature extractor inheriting from [`PreTrainedFeatureExtractor`]. + + Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal + models. Multi-modal models will also require a tokenizer to be passed. + + If not provided, the default feature extractor for the given `model` will be loaded (if it is a string). If + `model` is not specified or not a string, then the default feature extractor for `config` is loaded (if it + is a string). However, if `config` is also not given or not a string, then the default feature extractor + for the given `task` will be loaded. + framework (`str`, *optional*): + The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be + installed. + + If no framework is specified, will default to the one currently installed. 
If no framework is specified and + both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is + provided. + revision (`str`, *optional*, defaults to `"main"`): + When passing a task name or a string model identifier: The specific model version to use. It can be a + branch name, a tag name, or a commit id, since we use a git-based system for storing models and other + artifacts on huggingface.co, so `revision` can be any identifier allowed by git. + use_fast (`bool`, *optional*, defaults to `True`): + Whether or not to use a Fast tokenizer if possible (a [`PreTrainedTokenizerFast`]). + use_auth_token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + device (`int` or `str` or `torch.device`): + Defines the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which this + pipeline will be allocated. + device_map (`str` or `Dict[str, Union[int, str, torch.device]`, *optional*): + Sent directly as `model_kwargs` (just a simpler shortcut). When `accelerate` library is present, set + `device_map="auto"` to compute the most optimized `device_map` automatically (see + [here](https://huggingface.co/docs/accelerate/main/en/package_reference/big_modeling#accelerate.cpu_offload) + for more information). + + + + Do not use `device_map` AND `device` at the same time as they will conflict + + + + torch_dtype (`str` or `torch.dtype`, *optional*): + Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model + (`torch.float16`, `torch.bfloat16`, ... or `"auto"`). + trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom code defined on the Hub in their own modeling, configuration, + tokenization or even pipeline files. This option should only be set to `True` for repositories you trust + and in which you have read the code, as it will execute code present on the Hub on your local machine. + model_kwargs (`Dict[str, Any]`, *optional*): + Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., + **model_kwargs)` function. + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the specific pipeline init (see the documentation for the + corresponding pipeline class for possible values). + + Returns: + [`Pipeline`]: A suitable pipeline for the task. + + Examples: + + ```python + >>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer + + >>> # Sentiment analysis pipeline + >>> analyzer = pipeline("sentiment-analysis") + + >>> # Question answering pipeline, specifying the checkpoint identifier + >>> oracle = pipeline( + ... "question-answering", model="distilbert/distilbert-base-cased-distilled-squad", tokenizer="google-bert/bert-base-cased" + ... ) + + >>> # Named entity recognition pipeline, passing in a specific model and tokenizer + >>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english") + >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased") + >>> recognizer = pipeline("ner", model=model, tokenizer=tokenizer) + ```""" + if model_kwargs is None: + model_kwargs = {} + # Make sure we only pass use_auth_token once as a kwarg (it used to be possible to pass it in model_kwargs, + # this is to keep BC). 
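+    # Any `use_auth_token` found in `model_kwargs` is popped here, mapped onto `token`, and a
+    # `FutureWarning` is emitted, mirroring the deprecation handling in `get_task` above.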
+ use_auth_token = model_kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") + token = use_auth_token + + code_revision = kwargs.pop("code_revision", None) + commit_hash = kwargs.pop("_commit_hash", None) + + hub_kwargs = { + "revision": revision, + "token": token, + "trust_remote_code": trust_remote_code, + "_commit_hash": commit_hash, + } + + if task is None and model is None: + raise RuntimeError( + "Impossible to instantiate a pipeline without either a task or a model " + "being specified. " + "Please provide a task class or a model" + ) + + if model is None and tokenizer is not None: + raise RuntimeError( + "Impossible to instantiate a pipeline with tokenizer specified but not the model as the provided tokenizer" + " may not be compatible with the default model. Please provide a PreTrainedModel class or a" + " path/identifier to a pretrained model when providing tokenizer." + ) + if model is None and feature_extractor is not None: + raise RuntimeError( + "Impossible to instantiate a pipeline with feature_extractor specified but not the model as the provided" + " feature_extractor may not be compatible with the default model. Please provide a PreTrainedModel class" + " or a path/identifier to a pretrained model when providing feature_extractor." + ) + if isinstance(model, Path): + model = str(model) + + if commit_hash is None: + pretrained_model_name_or_path = None + if isinstance(config, str): + pretrained_model_name_or_path = config + elif config is None and isinstance(model, str): + pretrained_model_name_or_path = model + + if not isinstance(config, PretrainedConfig) and pretrained_model_name_or_path is not None: + # We make a call to the config file first (which may be absent) to get the commit hash as soon as possible + resolved_config_file = cached_file( + pretrained_model_name_or_path, + CONFIG_NAME, + _raise_exceptions_for_gated_repo=False, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + cache_dir=model_kwargs.get("cache_dir"), + **hub_kwargs, + ) + hub_kwargs["_commit_hash"] = extract_commit_hash(resolved_config_file, commit_hash) + else: + hub_kwargs["_commit_hash"] = getattr(config, "_commit_hash", None) + + # Config is the primordial information item. 
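+    # It is resolved first because everything downstream depends on it: custom pipelines declared in
+    # `config.custom_pipelines`, automatic task inference, and whether a tokenizer, image processor or
+    # feature extractor has to be loaded for the architecture.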
+ # Instantiate config if needed + if isinstance(config, str): + config = AutoConfig.from_pretrained( + config, _from_pipeline=task, code_revision=code_revision, **hub_kwargs, **model_kwargs + ) + hub_kwargs["_commit_hash"] = config._commit_hash + elif config is None and isinstance(model, str): + # Check for an adapter file in the model path if PEFT is available + if is_peft_available(): + # `find_adapter_config_file` doesn't accept `trust_remote_code` + _hub_kwargs = {k: v for k, v in hub_kwargs.items() if k != "trust_remote_code"} + maybe_adapter_path = find_adapter_config_file( + model, + token=hub_kwargs["token"], + revision=hub_kwargs["revision"], + _commit_hash=hub_kwargs["_commit_hash"], + ) + + if maybe_adapter_path is not None: + with open(maybe_adapter_path, "r", encoding="utf-8") as f: + adapter_config = json.load(f) + model = adapter_config["base_model_name_or_path"] + + config = AutoConfig.from_pretrained( + model, _from_pipeline=task, code_revision=code_revision, **hub_kwargs, **model_kwargs + ) + hub_kwargs["_commit_hash"] = config._commit_hash + + custom_tasks = {} + if config is not None and len(getattr(config, "custom_pipelines", {})) > 0: + custom_tasks = config.custom_pipelines + if task is None and trust_remote_code is not False: + if len(custom_tasks) == 1: + task = list(custom_tasks.keys())[0] + else: + raise RuntimeError( + "We can't infer the task automatically for this model as there are multiple tasks available. Pick " + f"one in {', '.join(custom_tasks.keys())}" + ) + + if task is None and model is not None: + if not isinstance(model, str): + raise RuntimeError( + "Inferring the task automatically requires to check the hub with a model_id defined as a `str`. " + f"{model} is not a valid model_id." + ) + task = get_task(model, token) + + # Retrieve the task + if task in custom_tasks: + normalized_task = task + targeted_task, task_options = clean_custom_task(custom_tasks[task]) + if pipeline_class is None: + if not trust_remote_code: + raise ValueError( + "Loading this pipeline requires you to execute the code in the pipeline file in that" + " repo on your local machine. Make sure you have read the code there to avoid malicious use, then" + " set the option `trust_remote_code=True` to remove this error." + ) + class_ref = targeted_task["impl"] + pipeline_class = get_class_from_dynamic_module( + class_ref, + model, + code_revision=code_revision, + **hub_kwargs, + ) + else: + normalized_task, targeted_task, task_options = check_task(task) + if pipeline_class is None: + pipeline_class = targeted_task["impl"] + + # Use default model/config/tokenizer for the task if no model is provided + if model is None: + # At that point framework might still be undetermined + model, default_revision = get_default_model_and_revision(targeted_task, framework, task_options) + revision = revision if revision is not None else default_revision + logger.warning( + f"No model was supplied, defaulted to {model} and revision" + f" {revision} ({HUGGINGFACE_CO_RESOLVE_ENDPOINT}/{model}).\n" + "Using a pipeline without specifying a model name and revision in production is not recommended." + ) + if config is None and isinstance(model, str): + config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs) + hub_kwargs["_commit_hash"] = config._commit_hash + + if device_map is not None: + if "device_map" in model_kwargs: + raise ValueError( + 'You cannot use both `pipeline(... 
device_map=..., model_kwargs={"device_map":...})` as those' + " arguments might conflict, use only one.)" + ) + if device is not None: + logger.warning( + "Both `device` and `device_map` are specified. `device` will override `device_map`. You" + " will most likely encounter unexpected behavior. Please remove `device` and keep `device_map`." + ) + model_kwargs["device_map"] = device_map + if torch_dtype is not None: + if "torch_dtype" in model_kwargs: + raise ValueError( + 'You cannot use both `pipeline(... torch_dtype=..., model_kwargs={"torch_dtype":...})` as those' + " arguments might conflict, use only one.)" + ) + if isinstance(torch_dtype, str) and hasattr(torch, torch_dtype): + torch_dtype = getattr(torch, torch_dtype) + model_kwargs["torch_dtype"] = torch_dtype + + model_name = model if isinstance(model, str) else None + + # Load the correct model if possible + # Infer the framework from the model if not already defined + if isinstance(model, str) or framework is None: + model_classes = {"tf": targeted_task["tf"], "pt": targeted_task["pt"]} + framework, model = infer_framework_load_model( + model, + model_classes=model_classes, + config=config, + framework=framework, + task=task, + **hub_kwargs, + **model_kwargs, + ) + + model_config = model.config + hub_kwargs["_commit_hash"] = model.config._commit_hash + load_tokenizer = type(model_config) in TOKENIZER_MAPPING or model_config.tokenizer_class is not None + load_feature_extractor = type(model_config) in FEATURE_EXTRACTOR_MAPPING or feature_extractor is not None + load_image_processor = type(model_config) in IMAGE_PROCESSOR_MAPPING or image_processor is not None + + # If `model` (instance of `PretrainedModel` instead of `str`) is passed (and/or same for config), while + # `image_processor` or `feature_extractor` is `None`, the loading will fail. This happens particularly for some + # vision tasks when calling `pipeline()` with `model` and only one of the `image_processor` and `feature_extractor`. + # TODO: we need to make `NO_IMAGE_PROCESSOR_TASKS` and `NO_FEATURE_EXTRACTOR_TASKS` more robust to avoid such issue. + # This block is only temporarily to make CI green. + if load_image_processor and load_feature_extractor: + load_feature_extractor = False + + if ( + tokenizer is None + and not load_tokenizer + and normalized_task not in NO_TOKENIZER_TASKS + # Using class name to avoid importing the real class. + and ( + model_config.__class__.__name__ in MULTI_MODEL_AUDIO_CONFIGS + or model_config.__class__.__name__ in MULTI_MODEL_VISION_CONFIGS + ) + ): + # This is a special category of models, that are fusions of multiple models + # so the model_config might not define a tokenizer, but it seems to be + # necessary for the task, so we're force-trying to load it. + load_tokenizer = True + if ( + image_processor is None + and not load_image_processor + and normalized_task not in NO_IMAGE_PROCESSOR_TASKS + # Using class name to avoid importing the real class. + and model_config.__class__.__name__ in MULTI_MODEL_VISION_CONFIGS + ): + # This is a special category of models, that are fusions of multiple models + # so the model_config might not define a tokenizer, but it seems to be + # necessary for the task, so we're force-trying to load it. + load_image_processor = True + if ( + feature_extractor is None + and not load_feature_extractor + and normalized_task not in NO_FEATURE_EXTRACTOR_TASKS + # Using class name to avoid importing the real class. 
+ and model_config.__class__.__name__ in MULTI_MODEL_AUDIO_CONFIGS + ): + # This is a special category of models, that are fusions of multiple models + # so the model_config might not define a tokenizer, but it seems to be + # necessary for the task, so we're force-trying to load it. + load_feature_extractor = True + + if task in NO_TOKENIZER_TASKS: + # These will never require a tokenizer. + # the model on the other hand might have a tokenizer, but + # the files could be missing from the hub, instead of failing + # on such repos, we just force to not load it. + load_tokenizer = False + + if task in NO_FEATURE_EXTRACTOR_TASKS: + load_feature_extractor = False + if task in NO_IMAGE_PROCESSOR_TASKS: + load_image_processor = False + + if load_tokenizer: + # Try to infer tokenizer from model or config name (if provided as str) + if tokenizer is None: + if isinstance(model_name, str): + tokenizer = model_name + elif isinstance(config, str): + tokenizer = config + else: + # Impossible to guess what is the right tokenizer here + raise Exception( + "Impossible to guess which tokenizer to use. " + "Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer." + ) + + # Instantiate tokenizer if needed + if isinstance(tokenizer, (str, tuple)): + if isinstance(tokenizer, tuple): + # For tuple we have (tokenizer name, {kwargs}) + use_fast = tokenizer[1].pop("use_fast", use_fast) + tokenizer_identifier = tokenizer[0] + tokenizer_kwargs = tokenizer[1] + else: + tokenizer_identifier = tokenizer + tokenizer_kwargs = model_kwargs.copy() + tokenizer_kwargs.pop("torch_dtype", None) + + tokenizer = AutoTokenizer.from_pretrained( + tokenizer_identifier, use_fast=use_fast, _from_pipeline=task, **hub_kwargs, **tokenizer_kwargs + ) + + if load_image_processor: + # Try to infer image processor from model or config name (if provided as str) + if image_processor is None: + if isinstance(model_name, str): + image_processor = model_name + elif isinstance(config, str): + image_processor = config + # Backward compatibility, as `feature_extractor` used to be the name + # for `ImageProcessor`. + elif feature_extractor is not None and isinstance(feature_extractor, BaseImageProcessor): + image_processor = feature_extractor + else: + # Impossible to guess what is the right image_processor here + raise Exception( + "Impossible to guess which image processor to use. " + "Please provide a PreTrainedImageProcessor class or a path/identifier " + "to a pretrained image processor." + ) + + # Instantiate image_processor if needed + if isinstance(image_processor, (str, tuple)): + image_processor = AutoImageProcessor.from_pretrained( + image_processor, _from_pipeline=task, **hub_kwargs, **model_kwargs + ) + + if load_feature_extractor: + # Try to infer feature extractor from model or config name (if provided as str) + if feature_extractor is None: + if isinstance(model_name, str): + feature_extractor = model_name + elif isinstance(config, str): + feature_extractor = config + else: + # Impossible to guess what is the right feature_extractor here + raise Exception( + "Impossible to guess which feature extractor to use. " + "Please provide a PreTrainedFeatureExtractor class or a path/identifier " + "to a pretrained feature extractor." 
+ ) + + # Instantiate feature_extractor if needed + if isinstance(feature_extractor, (str, tuple)): + feature_extractor = AutoFeatureExtractor.from_pretrained( + feature_extractor, _from_pipeline=task, **hub_kwargs, **model_kwargs + ) + + if ( + feature_extractor._processor_class + and feature_extractor._processor_class.endswith("WithLM") + and isinstance(model_name, str) + ): + try: + import kenlm # to trigger `ImportError` if not installed + from pyctcdecode import BeamSearchDecoderCTC + + if os.path.isdir(model_name) or os.path.isfile(model_name): + decoder = BeamSearchDecoderCTC.load_from_dir(model_name) + else: + language_model_glob = os.path.join( + BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*" + ) + alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME + allow_patterns = [language_model_glob, alphabet_filename] + decoder = BeamSearchDecoderCTC.load_from_hf_hub(model_name, allow_patterns=allow_patterns) + + kwargs["decoder"] = decoder + except ImportError as e: + logger.warning(f"Could not load the `decoder` for {model_name}. Defaulting to raw CTC. Error: {e}") + if not is_kenlm_available(): + logger.warning("Try to install `kenlm`: `pip install kenlm") + + if not is_pyctcdecode_available(): + logger.warning("Try to install `pyctcdecode`: `pip install pyctcdecode") + + if task == "translation" and model.config.task_specific_params: + for key in model.config.task_specific_params: + if key.startswith("translation"): + task = key + warnings.warn( + f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"', + UserWarning, + ) + break + + if tokenizer is not None: + kwargs["tokenizer"] = tokenizer + + if feature_extractor is not None: + kwargs["feature_extractor"] = feature_extractor + + if torch_dtype is not None: + kwargs["torch_dtype"] = torch_dtype + + if image_processor is not None: + kwargs["image_processor"] = image_processor + + if device is not None: + kwargs["device"] = device + + return pipeline_class(model=model, framework=framework, task=task, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_classification.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b37db358d13c5fb3ae39d29ea2b62c2c75870210 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_classification.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22a8285a1026a55c0975980e86ddbc275b857ab6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3073281ee4a85b51d17cb2e19d4d22adb7534e98 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/base.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/conversational.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/conversational.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eba785265dc9d6e25bc5f2f0074edaf8747585a7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/conversational.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/depth_estimation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/depth_estimation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb56040bcb387392d39ce50350ea7f0471e8a2f0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/depth_estimation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/document_question_answering.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/document_question_answering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12caf3a8080463baee549f5b77d77eee50b9127c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/document_question_answering.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_classification.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a9d931e8d75f364db93f398be839ed260c2e00b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_classification.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_feature_extraction.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_feature_extraction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33741dc973fdedc23193a65599a560401e6fd997 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_feature_extraction.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_segmentation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_segmentation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d9992fcfd37faddfd0e82e9afb67e3acc14be93 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_segmentation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_to_image.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_to_image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37bc04ddce7071074f616d007ef9daa58305855a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_to_image.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_to_text.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_to_text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f674aa2de6d78c747f3c5774c94d3428825ee2d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_to_text.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/mask_generation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/mask_generation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6154c37350e7f24678e4f0c43c3fdab7b547bdd8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/mask_generation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/object_detection.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/object_detection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec1af5081772fe953dbee332698f5a7cf096e4bb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/object_detection.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/pt_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/pt_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e221159f772a3d4920220f12dbcc3a2f3f4c050 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/pt_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/question_answering.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/question_answering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea1a50cf327cbcc7a3f8666cab64cabb46b6f603 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/question_answering.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/table_question_answering.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/table_question_answering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e5b66ad66e4030546b0ed1a1d2e0defc6614752 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/table_question_answering.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text2text_generation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text2text_generation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a5621a8a2de31171d05d672139747865380819e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text2text_generation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_classification.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e418b34e40f33ecabead457fa6ebdccfff2c04d9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_classification.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_to_audio.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_to_audio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f6026391b8b70ff02c476ba355350f41449fd16 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_to_audio.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/token_classification.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/token_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb8f0ac6ea2da0f247fcfc1977744aa880f69c3d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/token_classification.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/visual_question_answering.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/visual_question_answering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..328fcd5517191e002be94997d61c83d302fda94a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/visual_question_answering.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_audio_classification.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_audio_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b9ad8ef2a2d05919f48ddb006838057c341302a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_audio_classification.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_classification.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0831d22000841cc60a673d4db51730dd4dabc6ff Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_classification.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_image_classification.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_image_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9354de7d30b38673d186648b3a4f693ad9c68bd1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_image_classification.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/audio_classification.py 
b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/audio_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..a0e8f626db644eeae3baabf5c35e4c97ab712bb7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/audio_classification.py @@ -0,0 +1,215 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import subprocess +from typing import Union + +import numpy as np +import requests + +from ..utils import add_end_docstrings, is_torch_available, is_torchaudio_available, logging +from .base import Pipeline, build_pipeline_init_args + + +if is_torch_available(): + from ..models.auto.modeling_auto import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES + +logger = logging.get_logger(__name__) + + +def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array: + """ + Helper function to read an audio file through ffmpeg. + """ + ar = f"{sampling_rate}" + ac = "1" + format_for_conversion = "f32le" + ffmpeg_command = [ + "ffmpeg", + "-i", + "pipe:0", + "-ac", + ac, + "-ar", + ar, + "-f", + format_for_conversion, + "-hide_banner", + "-loglevel", + "quiet", + "pipe:1", + ] + + try: + ffmpeg_process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) + except FileNotFoundError: + raise ValueError("ffmpeg was not found but is required to load audio files from filename") + output_stream = ffmpeg_process.communicate(bpayload) + out_bytes = output_stream[0] + + audio = np.frombuffer(out_bytes, np.float32) + if audio.shape[0] == 0: + raise ValueError("Malformed soundfile") + return audio + + +@add_end_docstrings(build_pipeline_init_args(has_feature_extractor=True)) +class AudioClassificationPipeline(Pipeline): + """ + Audio classification pipeline using any `AutoModelForAudioClassification`. This pipeline predicts the class of a + raw waveform or an audio file. In case of an audio file, ffmpeg should be installed to support multiple audio + formats. + + Example: + + ```python + >>> from transformers import pipeline + + >>> classifier = pipeline(model="superb/wav2vec2-base-superb-ks") + >>> classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac") + [{'score': 0.997, 'label': '_unknown_'}, {'score': 0.002, 'label': 'left'}, {'score': 0.0, 'label': 'yes'}, {'score': 0.0, 'label': 'down'}, {'score': 0.0, 'label': 'stop'}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + + This pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"audio-classification"`. + + See the list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=audio-classification). + """ + + def __init__(self, *args, **kwargs): + # Default, might be overriden by the model.config. 
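A minimal sketch of exercising the `ffmpeg_read` helper defined above on its own, assuming ffmpeg is installed on the system and that `sample.flac` stands in for a real local audio file:

```python
# Sketch only: decode a local file the same way the pipeline does for
# str/bytes inputs. "sample.flac" is a placeholder path.
from transformers.pipelines.audio_classification import ffmpeg_read

with open("sample.flac", "rb") as f:
    waveform = ffmpeg_read(f.read(), sampling_rate=16_000)

print(waveform.dtype, waveform.shape)  # float32, one-dimensional mono signal
```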
+ kwargs["top_k"] = 5 + super().__init__(*args, **kwargs) + + if self.framework != "pt": + raise ValueError(f"The {self.__class__} is only available in PyTorch.") + + self.check_model_type(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES) + + def __call__( + self, + inputs: Union[np.ndarray, bytes, str], + **kwargs, + ): + """ + Classify the sequence(s) given as inputs. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more + information. + + Args: + inputs (`np.ndarray` or `bytes` or `str` or `dict`): + The inputs is either : + - `str` that is the filename of the audio file, the file will be read at the correct sampling rate + to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system. + - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the + same way. + - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`) + Raw audio at the correct sampling rate (no further check will be done) + - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this + pipeline do the resampling. The dict must be either be in the format `{"sampling_rate": int, + "raw": np.array}`, or `{"sampling_rate": int, "array": np.array}`, where the key `"raw"` or + `"array"` is used to denote the raw audio waveform. + top_k (`int`, *optional*, defaults to None): + The number of top labels that will be returned by the pipeline. If the provided number is `None` or + higher than the number of labels available in the model configuration, it will default to the number of + labels. + + Return: + A list of `dict` with the following keys: + + - **label** (`str`) -- The label predicted. + - **score** (`float`) -- The corresponding probability. + """ + return super().__call__(inputs, **kwargs) + + def _sanitize_parameters(self, top_k=None, **kwargs): + # No parameters on this pipeline right now + postprocess_params = {} + if top_k is not None: + if top_k > self.model.config.num_labels: + top_k = self.model.config.num_labels + postprocess_params["top_k"] = top_k + return {}, {}, postprocess_params + + def preprocess(self, inputs): + if isinstance(inputs, str): + if inputs.startswith("http://") or inputs.startswith("https://"): + # We need to actually check for a real protocol, otherwise it's impossible to use a local file + # like http_huggingface_co.png + inputs = requests.get(inputs).content + else: + with open(inputs, "rb") as f: + inputs = f.read() + + if isinstance(inputs, bytes): + inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate) + + if isinstance(inputs, dict): + # Accepting `"array"` which is the key defined in `datasets` for + # better integration + if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)): + raise ValueError( + "When passing a dictionary to AudioClassificationPipeline, the dict needs to contain a " + '"raw" key containing the numpy array representing the audio and a "sampling_rate" key, ' + "containing the sampling_rate associated with that array" + ) + + _inputs = inputs.pop("raw", None) + if _inputs is None: + # Remove path which will not be used from `datasets`. 
+ inputs.pop("path", None) + _inputs = inputs.pop("array", None) + in_sampling_rate = inputs.pop("sampling_rate") + inputs = _inputs + if in_sampling_rate != self.feature_extractor.sampling_rate: + import torch + + if is_torchaudio_available(): + from torchaudio import functional as F + else: + raise ImportError( + "torchaudio is required to resample audio samples in AudioClassificationPipeline. " + "The torchaudio package can be installed through: `pip install torchaudio`." + ) + + inputs = F.resample( + torch.from_numpy(inputs), in_sampling_rate, self.feature_extractor.sampling_rate + ).numpy() + + if not isinstance(inputs, np.ndarray): + raise ValueError("We expect a numpy ndarray as input") + if len(inputs.shape) != 1: + raise ValueError("We expect a single channel audio input for AudioClassificationPipeline") + + processed = self.feature_extractor( + inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt" + ) + return processed + + def _forward(self, model_inputs): + model_outputs = self.model(**model_inputs) + return model_outputs + + def postprocess(self, model_outputs, top_k=5): + probs = model_outputs.logits[0].softmax(-1) + scores, ids = probs.topk(top_k) + + scores = scores.tolist() + ids = ids.tolist() + + labels = [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)] + + return labels diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/automatic_speech_recognition.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/automatic_speech_recognition.py new file mode 100644 index 0000000000000000000000000000000000000000..f2d0f13679092249fa892acd4ebe25e7245d756a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/automatic_speech_recognition.py @@ -0,0 +1,737 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections import defaultdict +from typing import TYPE_CHECKING, Dict, Optional, Union + +import numpy as np +import requests + +from ..tokenization_utils import PreTrainedTokenizer +from ..utils import is_torch_available, is_torchaudio_available, logging +from .audio_utils import ffmpeg_read +from .base import ChunkPipeline + + +if TYPE_CHECKING: + from pyctcdecode import BeamSearchDecoderCTC + + from ..feature_extraction_sequence_utils import SequenceFeatureExtractor + from ..modeling_utils import PreTrainedModel + +logger = logging.get_logger(__name__) + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES + + +def rescale_stride(stride, ratio): + """ + Rescales the stride values from audio space to tokens/logits space. + + (160_000, 16_000, 16_000) -> (2000, 200, 200) for instance. 
+ """ + # Shape is [B, SEQ] for tokens + # [B, SEQ, V] for logits + + new_strides = [] + for input_n, left, right in stride: + token_n = int(round(input_n * ratio)) + left = int(round(left / input_n * token_n)) + right = int(round(right / input_n * token_n)) + new_stride = (token_n, left, right) + new_strides.append(new_stride) + + return new_strides + + +def chunk_iter(inputs, feature_extractor, chunk_len, stride_left, stride_right, dtype=None): + inputs_len = inputs.shape[0] + step = chunk_len - stride_left - stride_right + for chunk_start_idx in range(0, inputs_len, step): + chunk_end_idx = chunk_start_idx + chunk_len + chunk = inputs[chunk_start_idx:chunk_end_idx] + processed = feature_extractor(chunk, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt") + if dtype is not None: + processed = processed.to(dtype=dtype) + _stride_left = 0 if chunk_start_idx == 0 else stride_left + # all right strides must be full, otherwise it is the last item + is_last = chunk_end_idx > inputs_len if stride_right > 0 else chunk_end_idx >= inputs_len + _stride_right = 0 if is_last else stride_right + + chunk_len = chunk.shape[0] + stride = (chunk_len, _stride_left, _stride_right) + if chunk.shape[0] > _stride_left: + yield {"is_last": is_last, "stride": stride, **processed} + if is_last: + break + + +def _fast_find_longest_common_sequence(sequence_left, sequence_right): + seq_len_left = len(sequence_left) + seq_len_right = len(sequence_right) + counter = [[0] * (seq_len_right + 1) for _ in range(seq_len_left + 1)] + longest = 0 + for i in range(seq_len_left): + for j in range(seq_len_right): + if sequence_left[i] == sequence_right[j]: + previous_counter = counter[i][j] + 1 + counter[i + 1][j + 1] = previous_counter + if previous_counter > longest: + longest = previous_counter + + counter = np.array(counter) + # we return the idx of the first element of the longest common sequence in the left sequence + index_left = np.argwhere(counter == longest)[-1][0] - longest if longest != 0 else -1 + index_right = np.argwhere(counter == longest)[-1][1] - longest if longest != 0 else -1 + return index_left, index_right, longest + + +def _find_longest_common_sequence(sequences, tokenizer): + # TODO Use a faster algorithm this can probably be done in O(n) + # using suffix array. + # It might be tedious to do because of fault tolerance. + # We actually have a really good property which is that the total sequence + # MUST be those subsequences in order. + # Also the algorithm should be more tolerant to errors. + sequence = [tok_id for tok_id in sequences[0][0].tolist() if tok_id not in tokenizer.all_special_ids] + for new_seq in sequences[1:]: + new_sequence = [tok_id for tok_id in new_seq[0].tolist() if tok_id not in tokenizer.all_special_ids] + + index = 0 + max_ = 0.0 + for i in range(1, len(new_sequence) + 1): + # epsilon to favor long perfect matches + eps = i / 10000.0 + matches = np.sum(np.array(sequence[-i:]) == np.array(new_sequence[:i])) + matching = matches / i + eps + if matches > 1 and matching > max_: + index = i + max_ = matching + sequence.extend(new_sequence[index:]) + return np.array(sequence) + + +class AutomaticSpeechRecognitionPipeline(ChunkPipeline): + """ + Pipeline that aims at extracting spoken text contained within some audio. + + The input can be either a raw waveform or a audio file. 
In case of the audio file, ffmpeg should be installed for + to support multiple audio formats + + Example: + + ```python + >>> from transformers import pipeline + + >>> transcriber = pipeline(model="openai/whisper-base") + >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac") + {'text': ' He hoped there would be stew for dinner, turnips and carrots and bruised potatoes and fat mutton pieces to be ladled out in thick, peppered flour-fatten sauce.'} + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + Arguments: + model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): + The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from + [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow. + feature_extractor ([`SequenceFeatureExtractor`]): + The feature extractor that will be used by the pipeline to encode waveform for the model. + tokenizer ([`PreTrainedTokenizer`]): + The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from + [`PreTrainedTokenizer`]. + decoder (`pyctcdecode.BeamSearchDecoderCTC`, *optional*): + [PyCTCDecode's + BeamSearchDecoderCTC](https://github.com/kensho-technologies/pyctcdecode/blob/2fd33dc37c4111417e08d89ccd23d28e9b308d19/pyctcdecode/decoder.py#L180) + can be passed for language model boosted decoding. See [`Wav2Vec2ProcessorWithLM`] for more information. + chunk_length_s (`float`, *optional*, defaults to 0): + The input length for in each chunk. If `chunk_length_s = 0` then chunking is disabled (default). + + + + For more information on how to effectively use `chunk_length_s`, please have a look at the [ASR chunking + blog post](https://huggingface.co/blog/asr-chunking). + + + + stride_length_s (`float`, *optional*, defaults to `chunk_length_s / 6`): + The length of stride on the left and right of each chunk. Used only with `chunk_length_s > 0`. This enables + the model to *see* more context and infer letters better than without this context but the pipeline + discards the stride bits at the end to make the final reconstitution as perfect as possible. + + + + For more information on how to effectively use `stride_length_s`, please have a look at the [ASR chunking + blog post](https://huggingface.co/blog/asr-chunking). + + + + framework (`str`, *optional*): + The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be + installed. If no framework is specified, will default to the one currently installed. If no framework is + specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if + no model is provided. + device (Union[`int`, `torch.device`], *optional*): + Device ordinal for CPU/GPU supports. Setting this to `None` will leverage CPU, a positive will run the + model on the associated CUDA device id. + torch_dtype (Union[`int`, `torch.dtype`], *optional*): + The data-type (dtype) of the computation. Setting this to `None` will use float32 precision. Set to + `torch.float16` or `torch.bfloat16` to use half-precision in the respective dtypes. 
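A hedged usage sketch of the chunking arguments described above; the model id and `long_recording.wav` are placeholders, and ffmpeg is needed to decode the file:

```python
# Sketch only: long-form transcription with a CTC model, 30 s chunks and a
# 5 s stride on each side, per chunk_length_s / stride_length_s above.
from transformers import pipeline

transcriber = pipeline(
    "automatic-speech-recognition",
    model="facebook/wav2vec2-base-960h",
    chunk_length_s=30,
    stride_length_s=(5, 5),
)
print(transcriber("long_recording.wav")["text"])
```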
+ + """ + + def __init__( + self, + model: "PreTrainedModel", + feature_extractor: Union["SequenceFeatureExtractor", str] = None, + tokenizer: Optional[PreTrainedTokenizer] = None, + decoder: Optional[Union["BeamSearchDecoderCTC", str]] = None, + device: Union[int, "torch.device"] = None, + torch_dtype: Optional[Union[str, "torch.dtype"]] = None, + **kwargs, + ): + # set the model type so we can check we have the right pre- and post-processing parameters + if model.config.model_type == "whisper": + self.type = "seq2seq_whisper" + elif model.__class__.__name__ in MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES.values(): + self.type = "seq2seq" + elif ( + feature_extractor._processor_class + and feature_extractor._processor_class.endswith("WithLM") + and decoder is not None + ): + self.decoder = decoder + self.type = "ctc_with_lm" + else: + self.type = "ctc" + + super().__init__(model, tokenizer, feature_extractor, device=device, torch_dtype=torch_dtype, **kwargs) + + def __call__( + self, + inputs: Union[np.ndarray, bytes, str], + **kwargs, + ): + """ + Transcribe the audio sequence(s) given as inputs to text. See the [`AutomaticSpeechRecognitionPipeline`] + documentation for more information. + + Args: + inputs (`np.ndarray` or `bytes` or `str` or `dict`): + The inputs is either : + - `str` that is either the filename of a local audio file, or a public URL address to download the + audio file. The file will be read at the correct sampling rate to get the waveform using + *ffmpeg*. This requires *ffmpeg* to be installed on the system. + - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the + same way. + - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`) + Raw audio at the correct sampling rate (no further check will be done) + - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this + pipeline do the resampling. The dict must be in the format `{"sampling_rate": int, "raw": + np.array}` with optionally a `"stride": (left: int, right: int)` than can ask the pipeline to + treat the first `left` samples and last `right` samples to be ignored in decoding (but used at + inference to provide more context to the model). Only use `stride` with CTC models. + return_timestamps (*optional*, `str` or `bool`): + Only available for pure CTC models (Wav2Vec2, HuBERT, etc) and the Whisper model. Not available for + other sequence-to-sequence models. + + For CTC models, timestamps can take one of two formats: + - `"char"`: the pipeline will return timestamps along the text for every character in the text. For + instance, if you get `[{"text": "h", "timestamp": (0.5, 0.6)}, {"text": "i", "timestamp": (0.7, + 0.9)}]`, then it means the model predicts that the letter "h" was spoken after `0.5` and before + `0.6` seconds. + - `"word"`: the pipeline will return timestamps along the text for every word in the text. For + instance, if you get `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": "there", "timestamp": + (1.0, 1.5)}]`, then it means the model predicts that the word "hi" was spoken after `0.5` and + before `0.9` seconds. + + For the Whisper model, timestamps can take one of two formats: + - `"word"`: same as above for word-level CTC timestamps. Word-level timestamps are predicted + through the *dynamic-time warping (DTW)* algorithm, an approximation to word-level timestamps + by inspecting the cross-attention weights. 
+ - `True`: the pipeline will return timestamps along the text for *segments* of words in the text. + For instance, if you get `[{"text": " Hi there!", "timestamp": (0.5, 1.5)}]`, then it means the + model predicts that the segment "Hi there!" was spoken after `0.5` and before `1.5` seconds. + Note that a segment of text refers to a sequence of one or more words, rather than individual + words as with word-level timestamps. + generate_kwargs (`dict`, *optional*): + The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a + complete overview of generate, check the [following + guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation). + max_new_tokens (`int`, *optional*): + The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt. + + Return: + `Dict`: A dictionary with the following keys: + - **text** (`str`): The recognized text. + - **chunks** (*optional(, `List[Dict]`) + When using `return_timestamps`, the `chunks` will become a list containing all the various text + chunks identified by the model, *e.g.* `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": + "there", "timestamp": (1.0, 1.5)}]`. The original full text can roughly be recovered by doing + `"".join(chunk["text"] for chunk in output["chunks"])`. + """ + return super().__call__(inputs, **kwargs) + + def _sanitize_parameters( + self, + chunk_length_s=None, + stride_length_s=None, + ignore_warning=None, + decoder_kwargs=None, + return_timestamps=None, + return_language=None, + generate_kwargs=None, + max_new_tokens=None, + ): + # No parameters on this pipeline right now + preprocess_params = {} + if chunk_length_s is not None: + if self.type == "seq2seq" and not ignore_warning: + logger.warning( + "Using `chunk_length_s` is very experimental with seq2seq models. The results will not necessarily" + " be entirely accurate and will have caveats. More information:" + " https://github.com/huggingface/transformers/pull/20104. Ignore this warning with pipeline(...," + " ignore_warning=True)" + ) + preprocess_params["chunk_length_s"] = chunk_length_s + if stride_length_s is not None: + preprocess_params["stride_length_s"] = stride_length_s + + forward_params = defaultdict(dict) + if max_new_tokens is not None: + forward_params["max_new_tokens"] = max_new_tokens + if generate_kwargs is not None: + if max_new_tokens is not None and "max_new_tokens" in generate_kwargs: + raise ValueError( + "`max_new_tokens` is defined both as an argument and inside `generate_kwargs` argument, please use" + " only 1 version" + ) + forward_params.update(generate_kwargs) + + postprocess_params = {} + if decoder_kwargs is not None: + postprocess_params["decoder_kwargs"] = decoder_kwargs + if return_timestamps is not None: + # Check whether we have a valid setting for return_timestamps and throw an error before we perform a forward pass + if self.type == "seq2seq" and return_timestamps: + raise ValueError("We cannot return_timestamps yet on non-CTC models apart from Whisper!") + if self.type == "ctc_with_lm" and return_timestamps != "word": + raise ValueError("CTC with LM can only predict word level timestamps, set `return_timestamps='word'`") + if self.type == "ctc" and return_timestamps not in ["char", "word"]: + raise ValueError( + "CTC can either predict character level timestamps, or word level timestamps. " + "Set `return_timestamps='char'` or `return_timestamps='word'` as required." 
+ ) + if self.type == "seq2seq_whisper" and return_timestamps == "char": + raise ValueError( + "Whisper cannot return `char` timestamps, only word level or segment level timestamps. " + "Use `return_timestamps='word'` or `return_timestamps=True` respectively." + ) + forward_params["return_timestamps"] = return_timestamps + postprocess_params["return_timestamps"] = return_timestamps + if return_language is not None: + if self.type != "seq2seq_whisper": + raise ValueError("Only Whisper can return language for now.") + postprocess_params["return_language"] = return_language + + return preprocess_params, forward_params, postprocess_params + + def preprocess(self, inputs, chunk_length_s=0, stride_length_s=None): + if isinstance(inputs, str): + if inputs.startswith("http://") or inputs.startswith("https://"): + # We need to actually check for a real protocol, otherwise it's impossible to use a local file + # like http_huggingface_co.png + inputs = requests.get(inputs).content + else: + with open(inputs, "rb") as f: + inputs = f.read() + + if isinstance(inputs, bytes): + inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate) + + stride = None + extra = {} + if isinstance(inputs, dict): + stride = inputs.pop("stride", None) + # Accepting `"array"` which is the key defined in `datasets` for + # better integration + if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)): + raise ValueError( + "When passing a dictionary to AutomaticSpeechRecognitionPipeline, the dict needs to contain a " + '"raw" key containing the numpy array representing the audio and a "sampling_rate" key, ' + "containing the sampling_rate associated with that array" + ) + + _inputs = inputs.pop("raw", None) + if _inputs is None: + # Remove path which will not be used from `datasets`. + inputs.pop("path", None) + _inputs = inputs.pop("array", None) + in_sampling_rate = inputs.pop("sampling_rate") + extra = inputs + inputs = _inputs + if in_sampling_rate != self.feature_extractor.sampling_rate: + if is_torchaudio_available(): + from torchaudio import functional as F + else: + raise ImportError( + "torchaudio is required to resample audio samples in AutomaticSpeechRecognitionPipeline. " + "The torchaudio package can be installed through: `pip install torchaudio`." + ) + + inputs = F.resample( + torch.from_numpy(inputs), in_sampling_rate, self.feature_extractor.sampling_rate + ).numpy() + ratio = self.feature_extractor.sampling_rate / in_sampling_rate + else: + ratio = 1 + if stride is not None: + if stride[0] + stride[1] > inputs.shape[0]: + raise ValueError("Stride is too large for input") + + # Stride needs to get the chunk length here, it's going to get + # swallowed by the `feature_extractor` later, and then batching + # can add extra data in the inputs, so we need to keep track + # of the original length in the stride so we can cut properly. + stride = (inputs.shape[0], int(round(stride[0] * ratio)), int(round(stride[1] * ratio))) + if not isinstance(inputs, np.ndarray): + raise ValueError(f"We expect a numpy ndarray as input, got `{type(inputs)}`") + if len(inputs.shape) != 1: + raise ValueError("We expect a single channel audio input for AutomaticSpeechRecognitionPipeline") + + if chunk_length_s: + if stride_length_s is None: + stride_length_s = chunk_length_s / 6 + + if isinstance(stride_length_s, (int, float)): + stride_length_s = [stride_length_s, stride_length_s] + + # XXX: Carefuly, this variable will not exist in `seq2seq` setting. 
+ # Currently chunking is not possible at this level for `seq2seq` so + # it's ok. + align_to = getattr(self.model.config, "inputs_to_logits_ratio", 1) + chunk_len = int(round(chunk_length_s * self.feature_extractor.sampling_rate / align_to) * align_to) + stride_left = int(round(stride_length_s[0] * self.feature_extractor.sampling_rate / align_to) * align_to) + stride_right = int(round(stride_length_s[1] * self.feature_extractor.sampling_rate / align_to) * align_to) + + if chunk_len < stride_left + stride_right: + raise ValueError("Chunk length must be superior to stride length") + + for item in chunk_iter( + inputs, self.feature_extractor, chunk_len, stride_left, stride_right, self.torch_dtype + ): + yield item + else: + if self.type == "seq2seq_whisper" and inputs.shape[0] > self.feature_extractor.n_samples: + processed = self.feature_extractor( + inputs, + sampling_rate=self.feature_extractor.sampling_rate, + truncation=False, + padding="longest", + return_tensors="pt", + ) + else: + processed = self.feature_extractor( + inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt" + ) + + if self.torch_dtype is not None: + processed = processed.to(dtype=self.torch_dtype) + if stride is not None: + if self.type == "seq2seq": + raise ValueError("Stride is only usable with CTC models, try removing it !") + + processed["stride"] = stride + yield {"is_last": True, **processed, **extra} + + def _forward(self, model_inputs, return_timestamps=False, **generate_kwargs): + attention_mask = model_inputs.pop("attention_mask", None) + stride = model_inputs.pop("stride", None) + is_last = model_inputs.pop("is_last") + + if self.type in {"seq2seq", "seq2seq_whisper"}: + encoder = self.model.get_encoder() + # Consume values so we can let extra information flow freely through + # the pipeline (important for `partial` in microphone) + if "input_features" in model_inputs: + inputs = model_inputs.pop("input_features") + elif "input_values" in model_inputs: + inputs = model_inputs.pop("input_values") + else: + raise ValueError( + "Seq2Seq speech recognition model requires either a " + f"`input_features` or `input_values` key, but only has {model_inputs.keys()}" + ) + + # custom processing for Whisper timestamps and word-level timestamps + if return_timestamps and self.type == "seq2seq_whisper": + generate_kwargs["return_timestamps"] = return_timestamps + if return_timestamps == "word": + generate_kwargs["return_token_timestamps"] = True + generate_kwargs["return_segments"] = True + + if stride is not None: + if isinstance(stride, tuple): + generate_kwargs["num_frames"] = stride[0] // self.feature_extractor.hop_length + else: + generate_kwargs["num_frames"] = [s[0] // self.feature_extractor.hop_length for s in stride] + + if self.type == "seq2seq_whisper" and inputs.shape[-1] > self.feature_extractor.nb_max_frames: + generate_kwargs["input_features"] = inputs + else: + generate_kwargs["encoder_outputs"] = encoder(inputs, attention_mask=attention_mask) + + tokens = self.model.generate( + attention_mask=attention_mask, + **generate_kwargs, + ) + # whisper longform generation stores timestamps in "segments" + if return_timestamps == "word" and self.type == "seq2seq_whisper": + if "segments" not in tokens: + out = {"tokens": tokens["sequences"], "token_timestamps": tokens["token_timestamps"]} + else: + token_timestamps = [ + torch.cat([segment["token_timestamps"] for segment in segment_list]) + for segment_list in tokens["segments"] + ] + out = {"tokens": tokens["sequences"], 
"token_timestamps": token_timestamps} + else: + out = {"tokens": tokens} + if self.type == "seq2seq_whisper": + if stride is not None: + out["stride"] = stride + + else: + inputs = { + self.model.main_input_name: model_inputs.pop(self.model.main_input_name), + "attention_mask": attention_mask, + } + outputs = self.model(**inputs) + logits = outputs.logits + + if self.type == "ctc_with_lm": + out = {"logits": logits} + else: + out = {"tokens": logits.argmax(dim=-1)} + if stride is not None: + # Send stride to `postprocess`. + # it needs to be handled there where + # the pieces are to be concatenated. + ratio = 1 / self.model.config.inputs_to_logits_ratio + if isinstance(stride, tuple): + out["stride"] = rescale_stride([stride], ratio)[0] + else: + out["stride"] = rescale_stride(stride, ratio) + # Leftover + extra = model_inputs + return {"is_last": is_last, **out, **extra} + + def postprocess( + self, model_outputs, decoder_kwargs: Optional[Dict] = None, return_timestamps=None, return_language=None + ): + # Optional return types + optional = {} + + final_items = [] + key = "logits" if self.type == "ctc_with_lm" else "tokens" + stride = None + for outputs in model_outputs: + items = outputs[key].numpy() + stride = outputs.get("stride", None) + if stride is not None and self.type in {"ctc", "ctc_with_lm"}: + total_n, left, right = stride + # Total_n might be < logits.shape[1] + # because of padding, that's why + # we need to reconstruct this information + # This won't work with left padding (which doesn't exist right now) + right_n = total_n - right + items = items[:, left:right_n] + final_items.append(items) + + if stride and self.type == "seq2seq": + items = _find_longest_common_sequence(final_items, self.tokenizer) + elif self.type == "seq2seq_whisper": + time_precision = self.feature_extractor.chunk_length / self.model.config.max_source_positions + # Send the chunking back to seconds, it's easier to handle in whisper + sampling_rate = self.feature_extractor.sampling_rate + for output in model_outputs: + if "stride" in output: + chunk_len, stride_left, stride_right = output["stride"] + # Go back in seconds + chunk_len /= sampling_rate + stride_left /= sampling_rate + stride_right /= sampling_rate + output["stride"] = chunk_len, stride_left, stride_right + + text, optional = self.tokenizer._decode_asr( + model_outputs, + return_timestamps=return_timestamps, + return_language=return_language, + time_precision=time_precision, + ) + else: + items = np.concatenate(final_items, axis=1) + items = items.squeeze(0) + + if self.type == "ctc_with_lm": + if decoder_kwargs is None: + decoder_kwargs = {} + beams = self.decoder.decode_beams(items, **decoder_kwargs) + text = beams[0][0] + if return_timestamps: + # Simply cast from pyctcdecode format to wav2vec2 format to leverage + # pre-existing code later + chunk_offset = beams[0][2] + offsets = [] + for word, (start_offset, end_offset) in chunk_offset: + offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset}) + elif self.type != "seq2seq_whisper": + skip_special_tokens = self.type != "ctc" + text = self.tokenizer.decode(items, skip_special_tokens=skip_special_tokens) + if return_timestamps: + offsets = self.tokenizer.decode( + items, skip_special_tokens=skip_special_tokens, output_char_offsets=True + )["char_offsets"] + if return_timestamps == "word": + offsets = self.tokenizer._get_word_offsets(offsets, self.tokenizer.replace_word_delimiter_char) + + if return_timestamps and self.type not in {"seq2seq", 
"seq2seq_whisper"}: + chunks = [] + for item in offsets: + start = item["start_offset"] * self.model.config.inputs_to_logits_ratio + start /= self.feature_extractor.sampling_rate + + stop = item["end_offset"] * self.model.config.inputs_to_logits_ratio + stop /= self.feature_extractor.sampling_rate + + chunks.append({"text": item[return_timestamps], "timestamp": (start, stop)}) + optional["chunks"] = chunks + + extra = defaultdict(list) + for output in model_outputs: + output.pop("tokens", None) + output.pop("logits", None) + output.pop("is_last", None) + output.pop("stride", None) + output.pop("token_timestamps", None) + for k, v in output.items(): + extra[k].append(v) + return {"text": text, **optional, **extra} + + +def _find_timestamp_sequence(sequences, tokenizer, feature_extractor, max_source_positions): + """ + Computes the final sequences by merging the end of the nth sequence with the beginning of the n+1th sequence. Since + `WhisperForConditionalGeneration` produces the timestamps pairwise, we filter the consecutive timestamps and only + iterate over them. We keep track of the `time` which indicates the actual starting time of the chunk that is + processed. We need to make sure to offset the timestamps tokens by the `time` in order for the tokenizer to + properly compute the final `offset`. + """ + # index of the first timestamp token + timestamp_begin = tokenizer.convert_tokens_to_ids("<|notimestamps|>") + 1 + items = [] + # approximation of the token to time ratio : ~0.2seconds + time_precision = feature_extractor.chunk_length / max_source_positions + time = 0 + for seq_idx, item in enumerate(sequences): + sequence, stride = item + if isinstance(sequence, list): + sequence = np.array(sequence) + chunk_len, stride_left, stride_right = stride + sequence = sequence.squeeze(0) + # get rid of the `forced_decoder_idx` that are use to parametrize the generation + begin_idx = np.where(sequence == timestamp_begin)[0][0] if timestamp_begin in sequence else 0 + sequence = sequence[begin_idx:] + + timestamp_tokens = sequence >= timestamp_begin + if seq_idx != 0 and sum(timestamp_tokens) > 0: + consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1 + last_timestamp = np.where(timestamp_tokens)[0][-1] + consecutive = np.append(consecutive, last_timestamp) if last_timestamp not in consecutive else consecutive + time -= stride_left + stride_right + offset = int((time / feature_extractor.sampling_rate) / time_precision) + overlap_time = int((stride_left / feature_extractor.sampling_rate) / time_precision) + # relevant timestamps are in the overlapping part + relevant_timestamp = np.where(sequence[consecutive] >= timestamp_begin + overlap_time)[0] + if relevant_timestamp.shape[0] > 0: + relevant_timestamp = ( + consecutive[relevant_timestamp[0] - 1] if relevant_timestamp[0] > 0 else consecutive[0] + ) + # if a big stride is used, we need to check some of the previous items for the best overlap + best_match = 0 + sliced_sequence = [] + for idx, previous_sequence in enumerate(reversed(items)): + previous_tokens = previous_sequence[1:-1] + if previous_sequence[0] < (timestamp_begin + offset - overlap_time) and idx != 0: + break # the previous sequence is too far in the past + if len(previous_tokens) > 0: + # find the longest common sequence between the overlapping parts + index_left, index_right, match_length = _fast_find_longest_common_sequence( + sequence[1:relevant_timestamp], previous_tokens + ) + # don't do anything if only 1 token was matched + if match_length > 1 and 
match_length > best_match: + best_match = match_length + best_idx = idx + end_of_curr_sequence_idx = ( + np.where(sequence[index_left + 1 :] >= timestamp_begin)[0][0] + 1 + ) + end_of_curr_sequence_idx = end_of_curr_sequence_idx + 1 + index_left + # if all the tokens are matched, suffix + if index_left == 0 and match_length == len(previous_tokens): + sliced_sequence = np.insert( + sequence[index_left + 1 : end_of_curr_sequence_idx], 0, previous_sequence[0] + ) + sliced_sequence[-1] = previous_sequence[-1] + # if part of the previous sequence is not taken + elif index_left >= 0: + sliced_sequence = sequence[index_left + 1 : end_of_curr_sequence_idx] + # let's insert the missing part of the previous sequence + previous_slice = ( + previous_sequence[: index_right + 1] if index_right > 0 else [previous_sequence[0]] + ) + sliced_sequence = np.insert(sliced_sequence, 0, previous_slice) + sliced_sequence[-1] += offset + + if len(sliced_sequence) > 0: + items[len(items) - best_idx - 1] = sliced_sequence + items = items[: len(items) - best_idx] + sequence = sequence[end_of_curr_sequence_idx:] + + # sequence might have changed + timestamp_tokens = sequence >= timestamp_begin + consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1 + if sum(timestamp_tokens) > 0: + last_timestamp = np.where(timestamp_tokens)[0][-1] + consecutive = ( + np.append(consecutive, last_timestamp + 1) if last_timestamp not in consecutive else consecutive + ) + + if len(consecutive) > 0: + last_slice = 0 + for current_slice in consecutive: + actual_offset = items[-1][-1] if seq_idx != 0 or last_slice != 0 else sequence[0] + sliced_tokens = sequence[last_slice:current_slice] + duration = sliced_tokens[-1] - sliced_tokens[0] + sliced_tokens[0] = actual_offset + sliced_tokens[-1] = actual_offset + duration + items.append(sliced_tokens) + last_slice = current_slice + + time += chunk_len + result = [] + for i in range(len(items)): + result += items[i].tolist() + return result diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/base.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/base.py new file mode 100644 index 0000000000000000000000000000000000000000..25645fbaae061eb2ebc0ab7be52aa4cf2bc3c327 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/base.py @@ -0,0 +1,1364 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
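The CTC branch of `postprocess` above turns character/word offsets into seconds via the model's `inputs_to_logits_ratio`. A small numeric sketch of that conversion, assuming the ratio of 320 used by common Wav2Vec2 checkpoints and a 16 kHz feature extractor:

```python
# Sketch only: the same arithmetic postprocess() applies to each offset entry.
# 320 and 16_000 are assumptions matching typical Wav2Vec2 configurations.
inputs_to_logits_ratio = 320  # input samples per logit frame
sampling_rate = 16_000        # Hz

offset = {"word": "hello", "start_offset": 25, "end_offset": 40}
start = offset["start_offset"] * inputs_to_logits_ratio / sampling_rate
stop = offset["end_offset"] * inputs_to_logits_ratio / sampling_rate
print({"text": offset["word"], "timestamp": (start, stop)})  # (0.5, 0.8) seconds
```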
+import collections +import csv +import importlib +import json +import os +import pickle +import sys +import traceback +import types +import warnings +from abc import ABC, abstractmethod +from collections import UserDict +from contextlib import contextmanager +from os.path import abspath, exists +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union + +from ..dynamic_module_utils import custom_object_save +from ..feature_extraction_utils import PreTrainedFeatureExtractor +from ..image_processing_utils import BaseImageProcessor +from ..modelcard import ModelCard +from ..models.auto.configuration_auto import AutoConfig +from ..tokenization_utils import PreTrainedTokenizer +from ..utils import ( + ModelOutput, + PushToHubMixin, + add_end_docstrings, + copy_func, + infer_framework, + is_tf_available, + is_torch_available, + is_torch_cuda_available, + is_torch_mlu_available, + is_torch_mps_available, + is_torch_npu_available, + is_torch_xpu_available, + logging, +) + + +GenericTensor = Union[List["GenericTensor"], "torch.Tensor", "tf.Tensor"] + +if is_tf_available(): + import tensorflow as tf + + from ..models.auto.modeling_tf_auto import TFAutoModel + +if is_torch_available(): + import torch + from torch.utils.data import DataLoader, Dataset + + from ..models.auto.modeling_auto import AutoModel + + # Re-export for backward compatibility + from .pt_utils import KeyDataset +else: + Dataset = None + KeyDataset = None + +if TYPE_CHECKING: + from ..modeling_tf_utils import TFPreTrainedModel + from ..modeling_utils import PreTrainedModel + + +logger = logging.get_logger(__name__) + + +def no_collate_fn(items): + if len(items) != 1: + raise ValueError("This collate_fn is meant to be used with batch_size=1") + return items[0] + + +def _pad(items, key, padding_value, padding_side): + batch_size = len(items) + if isinstance(items[0][key], torch.Tensor): + # Others include `attention_mask` etc... 
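The collate helpers below exist to support `batch_size > 1`; in practice, batched inference is usually driven by streaming a dataset column through the pipeline with the `KeyDataset` re-exported above. A sketch, where the dataset and model ids are arbitrary examples:

```python
# Sketch only: KeyDataset exposes one column of a datasets.Dataset so the
# pipeline can batch and stream it without materializing the whole column.
from datasets import load_dataset
from transformers import pipeline
from transformers.pipelines.pt_utils import KeyDataset

ds = load_dataset("imdb", split="test[:8]")
classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
for prediction in classifier(KeyDataset(ds, "text"), batch_size=4):
    print(prediction)  # {"label": ..., "score": ...}
```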
+ shape = items[0][key].shape + dim = len(shape) + if key in ["pixel_values", "image"]: + # This is probable image so padding shouldn't be necessary + # B, C, H, W + return torch.cat([item[key] for item in items], dim=0) + elif dim == 4 and key == "input_features": + # this is probably a mel spectrogram batched + return torch.cat([item[key] for item in items], dim=0) + max_length = max(item[key].shape[1] for item in items) + min_length = min(item[key].shape[1] for item in items) + dtype = items[0][key].dtype + + if dim == 2: + if max_length == min_length: + # Bypass for `ImageGPT` which doesn't provide a padding value, yet + # we can consistently pad since the size should be matching + return torch.cat([item[key] for item in items], dim=0) + tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value + elif dim == 3: + tensor = torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype) + padding_value + elif dim == 4: + tensor = torch.zeros((batch_size, max_length, shape[-2], shape[-1]), dtype=dtype) + padding_value + + for i, item in enumerate(items): + if dim == 2: + if padding_side == "left": + tensor[i, -len(item[key][0]) :] = item[key][0].clone() + else: + tensor[i, : len(item[key][0])] = item[key][0].clone() + elif dim == 3: + if padding_side == "left": + tensor[i, -len(item[key][0]) :, :] = item[key][0].clone() + else: + tensor[i, : len(item[key][0]), :] = item[key][0].clone() + elif dim == 4: + if padding_side == "left": + tensor[i, -len(item[key][0]) :, :, :] = item[key][0].clone() + else: + tensor[i, : len(item[key][0]), :, :] = item[key][0].clone() + + return tensor + else: + return [item[key] for item in items] + + +def pad_collate_fn(tokenizer, feature_extractor): + # Tokenizer + t_padding_side = None + # Feature extractor + f_padding_side = None + if tokenizer is None and feature_extractor is None: + raise ValueError("Pipeline without tokenizer or feature_extractor cannot do batching") + if tokenizer is not None: + if tokenizer.pad_token_id is None: + raise ValueError( + "Pipeline with tokenizer without pad_token cannot do batching. You can try to set it with " + "`pipe.tokenizer.pad_token_id = model.config.eos_token_id`." + ) + else: + t_padding_value = tokenizer.pad_token_id + t_padding_side = tokenizer.padding_side + if feature_extractor is not None: + # Feature extractor can be images, where no padding is expected + f_padding_value = getattr(feature_extractor, "padding_value", None) + f_padding_side = getattr(feature_extractor, "padding_side", None) + + if t_padding_side is not None and f_padding_side is not None and t_padding_side != f_padding_side: + raise ValueError( + f"The feature extractor, and tokenizer don't agree on padding side {t_padding_side} != {f_padding_side}" + ) + padding_side = "right" + if t_padding_side is not None: + padding_side = t_padding_side + if f_padding_side is not None: + padding_side = f_padding_side + + def inner(items): + keys = set(items[0].keys()) + for item in items: + if set(item.keys()) != keys: + raise ValueError( + f"The elements of the batch contain different keys. Cannot batch them ({set(item.keys())} !=" + f" {keys})" + ) + # input_values, input_pixels, input_ids, ... 
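A minimal sketch of what `pad_collate_fn` produces for ragged text inputs; the GPT-2 tokenizer is an arbitrary choice and needs a pad token assigned first, as the error message above suggests:

```python
# Sketch only: two items of different lengths are padded into one [2, 3] batch;
# "input_ids" gets the tokenizer's pad token, "attention_mask" is padded with 0.
import torch
from transformers import AutoTokenizer
from transformers.pipelines.base import pad_collate_fn

tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token  # GPT-2 has no pad token by default

items = [
    {"input_ids": torch.tensor([[1, 2, 3]]), "attention_mask": torch.tensor([[1, 1, 1]])},
    {"input_ids": torch.tensor([[4, 5]]), "attention_mask": torch.tensor([[1, 1]])},
]
batch = pad_collate_fn(tokenizer, None)(items)
print(batch["input_ids"])       # second row ends with the pad token id
print(batch["attention_mask"])  # [[1, 1, 1], [1, 1, 0]]
```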
+ padded = {} + for key in keys: + if key in {"input_ids"}: + # ImageGPT uses a feature extractor + if tokenizer is None and feature_extractor is not None: + _padding_value = f_padding_value + else: + _padding_value = t_padding_value + elif key in {"input_values", "pixel_values", "input_features"}: + _padding_value = f_padding_value + elif key in {"p_mask", "special_tokens_mask"}: + _padding_value = 1 + elif key in {"attention_mask", "token_type_ids"}: + _padding_value = 0 + else: + # This is likely another random key maybe even user provided + _padding_value = 0 + padded[key] = _pad(items, key, _padding_value, padding_side) + return padded + + return inner + + +def infer_framework_load_model( + model, + config: AutoConfig, + model_classes: Optional[Dict[str, Tuple[type]]] = None, + task: Optional[str] = None, + framework: Optional[str] = None, + **model_kwargs, +): + """ + Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). + + If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is + actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to + instantiate the model twice, this model is returned for use by the pipeline. + + If both frameworks are installed and available for `model`, PyTorch is selected. + + Args: + model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): + The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from. + config ([`AutoConfig`]): + The config associated with the model to help using the correct class + model_classes (dictionary `str` to `type`, *optional*): + A mapping framework to class. + task (`str`): + The task defining which pipeline will be returned. + model_kwargs: + Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., + **model_kwargs)` function. + + Returns: + `Tuple`: A tuple framework, model. + """ + if not is_tf_available() and not is_torch_available(): + raise RuntimeError( + "At least one of TensorFlow 2.0 or PyTorch should be installed. " + "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ " + "To install PyTorch, read the instructions at https://pytorch.org/." 
+ ) + if isinstance(model, str): + model_kwargs["_from_pipeline"] = task + class_tuple = () + look_pt = is_torch_available() and framework in {"pt", None} + look_tf = is_tf_available() and framework in {"tf", None} + if model_classes: + if look_pt: + class_tuple = class_tuple + model_classes.get("pt", (AutoModel,)) + if look_tf: + class_tuple = class_tuple + model_classes.get("tf", (TFAutoModel,)) + if config.architectures: + classes = [] + for architecture in config.architectures: + transformers_module = importlib.import_module("transformers") + if look_pt: + _class = getattr(transformers_module, architecture, None) + if _class is not None: + classes.append(_class) + if look_tf: + _class = getattr(transformers_module, f"TF{architecture}", None) + if _class is not None: + classes.append(_class) + class_tuple = class_tuple + tuple(classes) + + if len(class_tuple) == 0: + raise ValueError(f"Pipeline cannot infer suitable model classes from {model}") + + all_traceback = {} + for model_class in class_tuple: + kwargs = model_kwargs.copy() + if framework == "pt" and model.endswith(".h5"): + kwargs["from_tf"] = True + logger.warning( + "Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. " + "Trying to load the model with PyTorch." + ) + elif framework == "tf" and model.endswith(".bin"): + kwargs["from_pt"] = True + logger.warning( + "Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. " + "Trying to load the model with Tensorflow." + ) + + try: + model = model_class.from_pretrained(model, **kwargs) + if hasattr(model, "eval"): + model = model.eval() + # Stop loading on the first successful load. + break + except (OSError, ValueError): + all_traceback[model_class.__name__] = traceback.format_exc() + continue + + if isinstance(model, str): + error = "" + for class_name, trace in all_traceback.items(): + error += f"while loading with {class_name}, an error is thrown:\n{trace}\n" + raise ValueError( + f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n" + ) + + if framework is None: + framework = infer_framework(model.__class__) + return framework, model + + +def infer_framework_from_model( + model, + model_classes: Optional[Dict[str, Tuple[type]]] = None, + task: Optional[str] = None, + framework: Optional[str] = None, + **model_kwargs, +): + """ + Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). + + If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is + actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to + instantiate the model twice, this model is returned for use by the pipeline. + + If both frameworks are installed and available for `model`, PyTorch is selected. + + Args: + model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): + The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from. + model_classes (dictionary `str` to `type`, *optional*): + A mapping framework to class. + task (`str`): + The task defining which pipeline will be returned. + model_kwargs: + Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., + **model_kwargs)` function. + + Returns: + `Tuple`: A tuple framework, model. 
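A hedged sketch of calling `infer_framework_from_model` directly with a checkpoint name; the checkpoint id is a placeholder and must be resolvable locally or from the Hub:

```python
# Sketch only: resolve the framework and instantiate the model the same way
# pipeline() does when it is given a checkpoint string.
from transformers.pipelines.base import infer_framework_from_model

framework, model = infer_framework_from_model("distilbert-base-uncased")
print(framework)                  # "pt" when PyTorch is installed
print(model.__class__.__name__)   # class picked from config.architectures
```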
+ """ + if isinstance(model, str): + config = AutoConfig.from_pretrained(model, _from_pipeline=task, **model_kwargs) + else: + config = model.config + return infer_framework_load_model( + model, config, model_classes=model_classes, _from_pipeline=task, task=task, framework=framework, **model_kwargs + ) + + +def get_framework(model, revision: Optional[str] = None): + """ + Select framework (TensorFlow or PyTorch) to use. + + Args: + model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): + If both frameworks are installed, picks the one corresponding to the model passed (either a model class or + the model name). If no specific model is provided, defaults to using PyTorch. + """ + warnings.warn( + "`get_framework` is deprecated and will be removed in v5, use `infer_framework_from_model` instead.", + FutureWarning, + ) + if not is_tf_available() and not is_torch_available(): + raise RuntimeError( + "At least one of TensorFlow 2.0 or PyTorch should be installed. " + "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ " + "To install PyTorch, read the instructions at https://pytorch.org/." + ) + if isinstance(model, str): + if is_torch_available() and not is_tf_available(): + model = AutoModel.from_pretrained(model, revision=revision) + elif is_tf_available() and not is_torch_available(): + model = TFAutoModel.from_pretrained(model, revision=revision) + else: + try: + model = AutoModel.from_pretrained(model, revision=revision) + except OSError: + model = TFAutoModel.from_pretrained(model, revision=revision) + + framework = infer_framework(model.__class__) + return framework + + +def get_default_model_and_revision( + targeted_task: Dict, framework: Optional[str], task_options: Optional[Any] +) -> Union[str, Tuple[str, str]]: + """ + Select a default model to use for a given task. Defaults to pytorch if ambiguous. + + Args: + targeted_task (`Dict` ): + Dictionary representing the given task, that should contain default models + + framework (`str`, None) + "pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet. + + task_options (`Any`, None) + Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for + translation task. + + Returns + + `str` The model string representing the default model for this pipeline + """ + if is_torch_available() and not is_tf_available(): + framework = "pt" + elif is_tf_available() and not is_torch_available(): + framework = "tf" + + defaults = targeted_task["default"] + if task_options: + if task_options not in defaults: + raise ValueError(f"The task does not provide any default models for options {task_options}") + default_models = defaults[task_options]["model"] + elif "model" in defaults: + default_models = targeted_task["default"]["model"] + else: + # XXX This error message needs to be updated to be more generic if more tasks are going to become + # parametrized + raise ValueError('The task defaults can\'t be correctly selected. You probably meant "translation_XX_to_YY"') + + if framework is None: + framework = "pt" + + return default_models[framework] + + +class PipelineException(Exception): + """ + Raised by a [`Pipeline`] when handling __call__. + + Args: + task (`str`): The task of the pipeline. + model (`str`): The model used by the pipeline. + reason (`str`): The error message to display. 
+ """ + + def __init__(self, task: str, model: str, reason: str): + super().__init__(reason) + + self.task = task + self.model = model + + +class ArgumentHandler(ABC): + """ + Base interface for handling arguments for each [`~pipelines.Pipeline`]. + """ + + @abstractmethod + def __call__(self, *args, **kwargs): + raise NotImplementedError() + + +class PipelineDataFormat: + """ + Base class for all the pipeline supported data format both for reading and writing. Supported data formats + currently includes: + + - JSON + - CSV + - stdin/stdout (pipe) + + `PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets columns to + pipelines keyword arguments through the `dataset_kwarg_1=dataset_column_1` format. + + Args: + output_path (`str`): Where to save the outgoing data. + input_path (`str`): Where to look for the input data. + column (`str`): The column to read. + overwrite (`bool`, *optional*, defaults to `False`): + Whether or not to overwrite the `output_path`. + """ + + SUPPORTED_FORMATS = ["json", "csv", "pipe"] + + def __init__( + self, + output_path: Optional[str], + input_path: Optional[str], + column: Optional[str], + overwrite: bool = False, + ): + self.output_path = output_path + self.input_path = input_path + self.column = column.split(",") if column is not None else [""] + self.is_multi_columns = len(self.column) > 1 + + if self.is_multi_columns: + self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column] + + if output_path is not None and not overwrite: + if exists(abspath(self.output_path)): + raise OSError(f"{self.output_path} already exists on disk") + + if input_path is not None: + if not exists(abspath(self.input_path)): + raise OSError(f"{self.input_path} doesnt exist on disk") + + @abstractmethod + def __iter__(self): + raise NotImplementedError() + + @abstractmethod + def save(self, data: Union[dict, List[dict]]): + """ + Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`]. + + Args: + data (`dict` or list of `dict`): The data to store. + """ + raise NotImplementedError() + + def save_binary(self, data: Union[dict, List[dict]]) -> str: + """ + Save the provided data object as a pickle-formatted binary data on the disk. + + Args: + data (`dict` or list of `dict`): The data to store. + + Returns: + `str`: Path where the data has been saved. + """ + path, _ = os.path.splitext(self.output_path) + binary_path = os.path.extsep.join((path, "pickle")) + + with open(binary_path, "wb+") as f_output: + pickle.dump(data, f_output) + + return binary_path + + @staticmethod + def from_str( + format: str, + output_path: Optional[str], + input_path: Optional[str], + column: Optional[str], + overwrite=False, + ) -> "PipelineDataFormat": + """ + Creates an instance of the right subclass of [`~pipelines.PipelineDataFormat`] depending on `format`. + + Args: + format (`str`): + The format of the desired pipeline. Acceptable values are `"json"`, `"csv"` or `"pipe"`. + output_path (`str`, *optional*): + Where to save the outgoing data. + input_path (`str`, *optional*): + Where to look for the input data. + column (`str`, *optional*): + The column to read. + overwrite (`bool`, *optional*, defaults to `False`): + Whether or not to overwrite the `output_path`. + + Returns: + [`~pipelines.PipelineDataFormat`]: The proper data format. 
+ """ + if format == "json": + return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) + elif format == "csv": + return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) + elif format == "pipe": + return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) + else: + raise KeyError(f"Unknown reader {format} (Available reader are json/csv/pipe)") + + +class CsvPipelineDataFormat(PipelineDataFormat): + """ + Support for pipelines using CSV data format. + + Args: + output_path (`str`): Where to save the outgoing data. + input_path (`str`): Where to look for the input data. + column (`str`): The column to read. + overwrite (`bool`, *optional*, defaults to `False`): + Whether or not to overwrite the `output_path`. + """ + + def __init__( + self, + output_path: Optional[str], + input_path: Optional[str], + column: Optional[str], + overwrite=False, + ): + super().__init__(output_path, input_path, column, overwrite=overwrite) + + def __iter__(self): + with open(self.input_path, "r") as f: + reader = csv.DictReader(f) + for row in reader: + if self.is_multi_columns: + yield {k: row[c] for k, c in self.column} + else: + yield row[self.column[0]] + + def save(self, data: List[dict]): + """ + Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`]. + + Args: + data (`List[dict]`): The data to store. + """ + with open(self.output_path, "w") as f: + if len(data) > 0: + writer = csv.DictWriter(f, list(data[0].keys())) + writer.writeheader() + writer.writerows(data) + + +class JsonPipelineDataFormat(PipelineDataFormat): + """ + Support for pipelines using JSON file format. + + Args: + output_path (`str`): Where to save the outgoing data. + input_path (`str`): Where to look for the input data. + column (`str`): The column to read. + overwrite (`bool`, *optional*, defaults to `False`): + Whether or not to overwrite the `output_path`. + """ + + def __init__( + self, + output_path: Optional[str], + input_path: Optional[str], + column: Optional[str], + overwrite=False, + ): + super().__init__(output_path, input_path, column, overwrite=overwrite) + + with open(input_path, "r") as f: + self._entries = json.load(f) + + def __iter__(self): + for entry in self._entries: + if self.is_multi_columns: + yield {k: entry[c] for k, c in self.column} + else: + yield entry[self.column[0]] + + def save(self, data: dict): + """ + Save the provided data object in a json file. + + Args: + data (`dict`): The data to store. + """ + with open(self.output_path, "w") as f: + json.dump(data, f) + + +class PipedPipelineDataFormat(PipelineDataFormat): + """ + Read data from piped input to the python process. For multi columns data, columns should separated by \t + + If columns are provided, then the output will be a dictionary with {column_x: value_x} + + Args: + output_path (`str`): Where to save the outgoing data. + input_path (`str`): Where to look for the input data. + column (`str`): The column to read. + overwrite (`bool`, *optional*, defaults to `False`): + Whether or not to overwrite the `output_path`. + """ + + def __iter__(self): + for line in sys.stdin: + # Split for multi-columns + if "\t" in line: + line = line.split("\t") + if self.column: + # Dictionary to map arguments + yield {kwargs: l for (kwargs, _), l in zip(self.column, line)} + else: + yield tuple(line) + + # No dictionary to map arguments + else: + yield line + + def save(self, data: dict): + """ + Print the data. 
+ + Args: + data (`dict`): The data to store. + """ + print(data) + + def save_binary(self, data: Union[dict, List[dict]]) -> str: + if self.output_path is None: + raise KeyError( + "When using piped input on pipeline outputting large object requires an output file path. " + "Please provide such output path through --output argument." + ) + + return super().save_binary(data) + + +class _ScikitCompat(ABC): + """ + Interface layer for the Scikit and Keras compatibility. + """ + + @abstractmethod + def transform(self, X): + raise NotImplementedError() + + @abstractmethod + def predict(self, X): + raise NotImplementedError() + + +def build_pipeline_init_args( + has_tokenizer: bool = False, + has_feature_extractor: bool = False, + has_image_processor: bool = False, + supports_binary_output: bool = True, +) -> str: + docstring = r""" + Arguments: + model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): + The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from + [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow.""" + if has_tokenizer: + docstring += r""" + tokenizer ([`PreTrainedTokenizer`]): + The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from + [`PreTrainedTokenizer`].""" + if has_feature_extractor: + docstring += r""" + feature_extractor ([`SequenceFeatureExtractor`]): + The feature extractor that will be used by the pipeline to encode data for the model. This object inherits from + [`SequenceFeatureExtractor`].""" + if has_image_processor: + docstring += r""" + image_processor ([`BaseImageProcessor`]): + The image processor that will be used by the pipeline to encode data for the model. This object inherits from + [`BaseImageProcessor`].""" + docstring += r""" + modelcard (`str` or [`ModelCard`], *optional*): + Model card attributed to the model for this pipeline. + framework (`str`, *optional*): + The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be + installed. + + If no framework is specified, will default to the one currently installed. If no framework is specified and + both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is + provided. + task (`str`, defaults to `""`): + A task-identifier for the pipeline. + num_workers (`int`, *optional*, defaults to 8): + When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a Pytorch model), the number of + workers to be used. + batch_size (`int`, *optional*, defaults to 1): + When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a Pytorch model), the size of + the batch to use, for inference this is not always beneficial, please read [Batching with + pipelines](https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching) . + args_parser ([`~pipelines.ArgumentHandler`], *optional*): + Reference to the object in charge of parsing supplied pipeline parameters. + device (`int`, *optional*, defaults to -1): + Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on + the associated CUDA device id. You can pass native `torch.device` or a `str` too + torch_dtype (`str` or `torch.dtype`, *optional*): + Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model + (`torch.float16`, `torch.bfloat16`, ... 
or `"auto"`)""" + if supports_binary_output: + docstring += r""" + binary_output (`bool`, *optional*, defaults to `False`): + Flag indicating if the output the pipeline should happen in a serialized format (i.e., pickle) or as + the raw output data e.g. text.""" + return docstring + + +PIPELINE_INIT_ARGS = build_pipeline_init_args( + has_tokenizer=True, has_feature_extractor=True, has_image_processor=True, supports_binary_output=True +) + + +if is_torch_available(): + from transformers.pipelines.pt_utils import ( + PipelineChunkIterator, + PipelineDataset, + PipelineIterator, + PipelinePackIterator, + ) + + +@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True, has_feature_extractor=True, has_image_processor=True)) +class Pipeline(_ScikitCompat, PushToHubMixin): + """ + The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across + different pipelines. + + Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following + operations: + + Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output + + Pipeline supports running on CPU or GPU through the device argument (see below). + + Some pipeline, like for instance [`FeatureExtractionPipeline`] (`'feature-extraction'`) output large tensor object + as nested-lists. In order to avoid dumping such large structure as textual data we provide the `binary_output` + constructor argument. If set to `True`, the output will be stored in the pickle format. + """ + + default_input_names = None + + def __init__( + self, + model: Union["PreTrainedModel", "TFPreTrainedModel"], + tokenizer: Optional[PreTrainedTokenizer] = None, + feature_extractor: Optional[PreTrainedFeatureExtractor] = None, + image_processor: Optional[BaseImageProcessor] = None, + modelcard: Optional[ModelCard] = None, + framework: Optional[str] = None, + task: str = "", + args_parser: ArgumentHandler = None, + device: Union[int, "torch.device"] = None, + torch_dtype: Optional[Union[str, "torch.dtype"]] = None, + binary_output: bool = False, + **kwargs, + ): + if framework is None: + framework, model = infer_framework_load_model(model, config=model.config) + + self.task = task + self.model = model + self.tokenizer = tokenizer + self.feature_extractor = feature_extractor + self.image_processor = image_processor + self.modelcard = modelcard + self.framework = framework + + # `accelerate` device map + hf_device_map = getattr(self.model, "hf_device_map", None) + + if hf_device_map is not None and device is not None: + raise ValueError( + "The model has been loaded with `accelerate` and therefore cannot be moved to a specific device. Please " + "discard the `device` argument when creating your pipeline object." + ) + + if device is None: + if hf_device_map is not None: + # Take the first device used by `accelerate`. 
+ device = next(iter(hf_device_map.values())) + else: + device = -1 + + if is_torch_available() and self.framework == "pt": + if isinstance(device, torch.device): + if device.type == "xpu" and not is_torch_xpu_available(check_device=True): + raise ValueError(f'{device} is not available, you should use device="cpu" instead') + self.device = device + elif isinstance(device, str): + if "xpu" in device and not is_torch_xpu_available(check_device=True): + raise ValueError(f'{device} is not available, you should use device="cpu" instead') + self.device = torch.device(device) + elif device < 0: + self.device = torch.device("cpu") + elif is_torch_mlu_available(): + self.device = torch.device(f"mlu:{device}") + elif is_torch_cuda_available(): + self.device = torch.device(f"cuda:{device}") + elif is_torch_npu_available(): + self.device = torch.device(f"npu:{device}") + elif is_torch_xpu_available(check_device=True): + self.device = torch.device(f"xpu:{device}") + elif is_torch_mps_available(): + self.device = torch.device(f"mps:{device}") + else: + raise ValueError(f"{device} unrecognized or not available.") + else: + self.device = device if device is not None else -1 + + self.binary_output = binary_output + + # We shouldn't call `model.to()` for models loaded with accelerate + if ( + self.framework == "pt" + and self.device is not None + and not (isinstance(self.device, int) and self.device < 0) + and hf_device_map is None + ): + self.model.to(self.device) + + # Update config and generation_config with task specific parameters + task_specific_params = self.model.config.task_specific_params + if task_specific_params is not None and task in task_specific_params: + self.model.config.update(task_specific_params.get(task)) + if self.model.can_generate(): + self.model.generation_config.update(**task_specific_params.get(task)) + + self.call_count = 0 + self._batch_size = kwargs.pop("batch_size", None) + self._num_workers = kwargs.pop("num_workers", None) + self._preprocess_params, self._forward_params, self._postprocess_params = self._sanitize_parameters(**kwargs) + + # Pipelines calling `generate`: if the tokenizer has a pad token but the model doesn't, set it in the + # forward params so that `generate` is aware of the pad token. + if ( + self.tokenizer is not None + and self.model.can_generate() + and self.tokenizer.pad_token_id is not None + and self.model.generation_config.pad_token_id is None + ): + self._forward_params["pad_token_id"] = self.tokenizer.pad_token_id + + if self.image_processor is None and self.feature_extractor is not None: + if isinstance(self.feature_extractor, BaseImageProcessor): + # Backward compatible change, if users called + # ImageSegmentationPipeline(.., feature_extractor=MyFeatureExtractor()) + # then we should keep working + self.image_processor = self.feature_extractor + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + safe_serialization: bool = True, + **kwargs, + ): + """ + Save the pipeline's model and tokenizer. + + Args: + save_directory (`str` or `os.PathLike`): + A path to the directory where to saved. It will be created if it doesn't exist. + safe_serialization (`str`): + Whether to save the model using `safetensors` or the traditional way for PyTorch or Tensorflow. + kwargs (`Dict[str, Any]`, *optional*): + Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
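+
+        Example (a minimal sketch; the checkpoint and the target directory are placeholders chosen for
+        illustration):
+
+        ```python
+        >>> from transformers import pipeline
+
+        >>> pipe = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
+        >>> pipe.save_pretrained("./my-text-classifier")  # writes the model, tokenizer and config files
+        >>> reloaded = pipeline("text-classification", model="./my-text-classifier")
+        ```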
+ """ + use_auth_token = kwargs.pop("use_auth_token", None) + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + kwargs["token"] = use_auth_token + + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + os.makedirs(save_directory, exist_ok=True) + + if hasattr(self, "_registered_impl"): + # Add info to the config + pipeline_info = self._registered_impl.copy() + custom_pipelines = {} + for task, info in pipeline_info.items(): + if info["impl"] != self.__class__: + continue + + info = info.copy() + module_name = info["impl"].__module__ + last_module = module_name.split(".")[-1] + # Change classes into their names/full names + info["impl"] = f"{last_module}.{info['impl'].__name__}" + info["pt"] = tuple(c.__name__ for c in info["pt"]) + info["tf"] = tuple(c.__name__ for c in info["tf"]) + + custom_pipelines[task] = info + self.model.config.custom_pipelines = custom_pipelines + # Save the pipeline custom code + custom_object_save(self, save_directory) + + kwargs["safe_serialization"] = safe_serialization + self.model.save_pretrained(save_directory, **kwargs) + + if self.tokenizer is not None: + self.tokenizer.save_pretrained(save_directory, **kwargs) + + if self.feature_extractor is not None: + self.feature_extractor.save_pretrained(save_directory, **kwargs) + + if self.image_processor is not None: + self.image_processor.save_pretrained(save_directory, **kwargs) + + if self.modelcard is not None: + self.modelcard.save_pretrained(save_directory) + + def transform(self, X): + """ + Scikit / Keras interface to transformers' pipelines. This method will forward to __call__(). + """ + return self(X) + + def predict(self, X): + """ + Scikit / Keras interface to transformers' pipelines. This method will forward to __call__(). + """ + return self(X) + + @property + def torch_dtype(self) -> Optional["torch.dtype"]: + """ + Torch dtype of the model (if it's Pytorch model), `None` otherwise. + """ + return getattr(self.model, "dtype", None) + + @contextmanager + def device_placement(self): + """ + Context Manager allowing tensor allocation on the user-specified device in framework agnostic way. + + Returns: + Context manager + + Examples: + + ```python + # Explicitly ask for tensor allocation on CUDA device :0 + pipe = pipeline(..., device=0) + with pipe.device_placement(): + # Every framework specific tensor allocation will be done on the request device + output = pipe(...) + ```""" + if self.framework == "tf": + with tf.device("/CPU:0" if self.device == -1 else f"/device:GPU:{self.device}"): + yield + else: + if self.device.type == "cuda": + with torch.cuda.device(self.device): + yield + elif self.device.type == "mlu": + with torch.mlu.device(self.device): + yield + else: + yield + + def ensure_tensor_on_device(self, **inputs): + """ + Ensure PyTorch tensors are on the specified device. + + Args: + inputs (keyword arguments that should be `torch.Tensor`, the rest is ignored): + The tensors to place on `self.device`. + Recursive on lists **only**. + + Return: + `Dict[str, torch.Tensor]`: The same as `inputs` but on the proper device. 
+ """ + return self._ensure_tensor_on_device(inputs, self.device) + + def _ensure_tensor_on_device(self, inputs, device): + if isinstance(inputs, ModelOutput): + return ModelOutput( + {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()} + ) + elif isinstance(inputs, dict): + return {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()} + elif isinstance(inputs, UserDict): + return UserDict({name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()}) + elif isinstance(inputs, list): + return [self._ensure_tensor_on_device(item, device) for item in inputs] + elif isinstance(inputs, tuple): + return tuple([self._ensure_tensor_on_device(item, device) for item in inputs]) + elif isinstance(inputs, torch.Tensor): + return inputs.to(device) + else: + return inputs + + def check_model_type(self, supported_models: Union[List[str], dict]): + """ + Check if the model class is in supported by the pipeline. + + Args: + supported_models (`List[str]` or `dict`): + The list of models supported by the pipeline, or a dictionary with model class values. + """ + if not isinstance(supported_models, list): # Create from a model mapping + supported_models_names = [] + for _, model_name in supported_models.items(): + # Mapping can now contain tuples of models for the same configuration. + if isinstance(model_name, tuple): + supported_models_names.extend(list(model_name)) + else: + supported_models_names.append(model_name) + if hasattr(supported_models, "_model_mapping"): + for _, model in supported_models._model_mapping._extra_content.items(): + if isinstance(model_name, tuple): + supported_models_names.extend([m.__name__ for m in model]) + else: + supported_models_names.append(model.__name__) + supported_models = supported_models_names + if self.model.__class__.__name__ not in supported_models: + logger.error( + f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are" + f" {supported_models}." + ) + + @abstractmethod + def _sanitize_parameters(self, **pipeline_parameters): + """ + _sanitize_parameters will be called with any excessive named arguments from either `__init__` or `__call__` + methods. It should return 3 dictionaries of the resolved parameters used by the various `preprocess`, + `forward` and `postprocess` methods. Do not fill dictionaries if the caller didn't specify a kwargs. This + lets you keep defaults in function signatures, which is more "natural". + + It is not meant to be called directly, it will be automatically called and the final parameters resolved by + `__init__` and `__call__` + """ + raise NotImplementedError("_sanitize_parameters not implemented") + + @abstractmethod + def preprocess(self, input_: Any, **preprocess_parameters: Dict) -> Dict[str, GenericTensor]: + """ + Preprocess will take the `input_` of a specific pipeline and return a dictionary of everything necessary for + `_forward` to run properly. It should contain at least one tensor, but might have arbitrary other items. + """ + raise NotImplementedError("preprocess not implemented") + + @abstractmethod + def _forward(self, input_tensors: Dict[str, GenericTensor], **forward_parameters: Dict) -> ModelOutput: + """ + _forward will receive the prepared dictionary from `preprocess` and run it on the model. This method might + involve the GPU or the CPU and should be agnostic to it. 
Isolating this function is the reason for `preprocess` + and `postprocess` to exist, so that the hot path, this method generally can run as fast as possible. + + It is not meant to be called directly, `forward` is preferred. It is basically the same but contains additional + code surrounding `_forward` making sure tensors and models are on the same device, disabling the training part + of the code (leading to faster inference). + """ + raise NotImplementedError("_forward not implemented") + + @abstractmethod + def postprocess(self, model_outputs: ModelOutput, **postprocess_parameters: Dict) -> Any: + """ + Postprocess will receive the raw outputs of the `_forward` method, generally tensors, and reformat them into + something more friendly. Generally it will output a list or a dict or results (containing just strings and + numbers). + """ + raise NotImplementedError("postprocess not implemented") + + def get_inference_context(self): + return torch.no_grad + + def forward(self, model_inputs, **forward_params): + with self.device_placement(): + if self.framework == "tf": + model_inputs["training"] = False + model_outputs = self._forward(model_inputs, **forward_params) + elif self.framework == "pt": + inference_context = self.get_inference_context() + with inference_context(): + model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device) + model_outputs = self._forward(model_inputs, **forward_params) + model_outputs = self._ensure_tensor_on_device(model_outputs, device=torch.device("cpu")) + else: + raise ValueError(f"Framework {self.framework} is not supported") + return model_outputs + + def get_iterator( + self, inputs, num_workers: int, batch_size: int, preprocess_params, forward_params, postprocess_params + ): + if isinstance(inputs, collections.abc.Sized): + dataset = PipelineDataset(inputs, self.preprocess, preprocess_params) + else: + if num_workers > 1: + logger.warning( + "For iterable dataset using num_workers>1 is likely to result" + " in errors since everything is iterable, setting `num_workers=1`" + " to guarantee correctness." + ) + num_workers = 1 + dataset = PipelineIterator(inputs, self.preprocess, preprocess_params) + if "TOKENIZERS_PARALLELISM" not in os.environ: + logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already") + os.environ["TOKENIZERS_PARALLELISM"] = "false" + # TODO hack by collating feature_extractor and image_processor + feature_extractor = self.feature_extractor if self.feature_extractor is not None else self.image_processor + collate_fn = no_collate_fn if batch_size == 1 else pad_collate_fn(self.tokenizer, feature_extractor) + dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=collate_fn) + model_iterator = PipelineIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size) + final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params) + return final_iterator + + def __call__(self, inputs, *args, num_workers=None, batch_size=None, **kwargs): + if args: + logger.warning(f"Ignoring args : {args}") + + if num_workers is None: + if self._num_workers is None: + num_workers = 0 + else: + num_workers = self._num_workers + if batch_size is None: + if self._batch_size is None: + batch_size = 1 + else: + batch_size = self._batch_size + + preprocess_params, forward_params, postprocess_params = self._sanitize_parameters(**kwargs) + + # Fuse __init__ params and __call__ params without modifying the __init__ ones. 
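+        # Call-time values take precedence over the values stored at __init__ when the same key is given twice.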
+ preprocess_params = {**self._preprocess_params, **preprocess_params} + forward_params = {**self._forward_params, **forward_params} + postprocess_params = {**self._postprocess_params, **postprocess_params} + + self.call_count += 1 + if self.call_count > 10 and self.framework == "pt" and self.device.type == "cuda": + logger.warning_once( + "You seem to be using the pipelines sequentially on GPU. In order to maximize efficiency please use a" + " dataset", + ) + + is_dataset = Dataset is not None and isinstance(inputs, Dataset) + is_generator = isinstance(inputs, types.GeneratorType) + is_list = isinstance(inputs, list) + + is_iterable = is_dataset or is_generator or is_list + + # TODO make the get_iterator work also for `tf` (and `flax`). + can_use_iterator = self.framework == "pt" and (is_dataset or is_generator or is_list) + + if is_list: + if can_use_iterator: + final_iterator = self.get_iterator( + inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params + ) + outputs = list(final_iterator) + return outputs + else: + return self.run_multi(inputs, preprocess_params, forward_params, postprocess_params) + elif can_use_iterator: + return self.get_iterator( + inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params + ) + elif is_iterable: + return self.iterate(inputs, preprocess_params, forward_params, postprocess_params) + elif self.framework == "pt" and isinstance(self, ChunkPipeline): + return next( + iter( + self.get_iterator( + [inputs], num_workers, batch_size, preprocess_params, forward_params, postprocess_params + ) + ) + ) + else: + return self.run_single(inputs, preprocess_params, forward_params, postprocess_params) + + def run_multi(self, inputs, preprocess_params, forward_params, postprocess_params): + return [self.run_single(item, preprocess_params, forward_params, postprocess_params) for item in inputs] + + def run_single(self, inputs, preprocess_params, forward_params, postprocess_params): + model_inputs = self.preprocess(inputs, **preprocess_params) + model_outputs = self.forward(model_inputs, **forward_params) + outputs = self.postprocess(model_outputs, **postprocess_params) + return outputs + + def iterate(self, inputs, preprocess_params, forward_params, postprocess_params): + # This function should become `get_iterator` again, this is a temporary + # easy solution. 
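+        # Inputs are consumed lazily, one at a time, without the DataLoader batching that `get_iterator` provides.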
+ for input_ in inputs: + yield self.run_single(input_, preprocess_params, forward_params, postprocess_params) + + +Pipeline.push_to_hub = copy_func(Pipeline.push_to_hub) +if Pipeline.push_to_hub.__doc__ is not None: + Pipeline.push_to_hub.__doc__ = Pipeline.push_to_hub.__doc__.format( + object="pipe", object_class="pipeline", object_files="pipeline file" + ).replace(".from_pretrained", "") + + +class ChunkPipeline(Pipeline): + def run_single(self, inputs, preprocess_params, forward_params, postprocess_params): + all_outputs = [] + for model_inputs in self.preprocess(inputs, **preprocess_params): + model_outputs = self.forward(model_inputs, **forward_params) + all_outputs.append(model_outputs) + outputs = self.postprocess(all_outputs, **postprocess_params) + return outputs + + def get_iterator( + self, inputs, num_workers: int, batch_size: int, preprocess_params, forward_params, postprocess_params + ): + if "TOKENIZERS_PARALLELISM" not in os.environ: + logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already") + os.environ["TOKENIZERS_PARALLELISM"] = "false" + if num_workers > 1: + logger.warning( + "For ChunkPipeline using num_workers>0 is likely to result in errors since everything is iterable," + " setting `num_workers=1` to guarantee correctness." + ) + num_workers = 1 + dataset = PipelineChunkIterator(inputs, self.preprocess, preprocess_params) + + # TODO hack by collating feature_extractor and image_processor + feature_extractor = self.feature_extractor if self.feature_extractor is not None else self.image_processor + collate_fn = no_collate_fn if batch_size == 1 else pad_collate_fn(self.tokenizer, feature_extractor) + dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=collate_fn) + model_iterator = PipelinePackIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size) + final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params) + return final_iterator + + +class PipelineRegistry: + def __init__(self, supported_tasks: Dict[str, Any], task_aliases: Dict[str, str]) -> None: + self.supported_tasks = supported_tasks + self.task_aliases = task_aliases + + def get_supported_tasks(self) -> List[str]: + supported_task = list(self.supported_tasks.keys()) + list(self.task_aliases.keys()) + supported_task.sort() + return supported_task + + def check_task(self, task: str) -> Tuple[str, Dict, Any]: + if task in self.task_aliases: + task = self.task_aliases[task] + if task in self.supported_tasks: + targeted_task = self.supported_tasks[task] + return task, targeted_task, None + + if task.startswith("translation"): + tokens = task.split("_") + if len(tokens) == 4 and tokens[0] == "translation" and tokens[2] == "to": + targeted_task = self.supported_tasks["translation"] + task = "translation" + return task, targeted_task, (tokens[1], tokens[3]) + raise KeyError(f"Invalid translation task {task}, use 'translation_XX_to_YY' format") + + raise KeyError( + f"Unknown task {task}, available tasks are {self.get_supported_tasks() + ['translation_XX_to_YY']}" + ) + + def register_pipeline( + self, + task: str, + pipeline_class: type, + pt_model: Optional[Union[type, Tuple[type]]] = None, + tf_model: Optional[Union[type, Tuple[type]]] = None, + default: Optional[Dict] = None, + type: Optional[str] = None, + ) -> None: + if task in self.supported_tasks: + logger.warning(f"{task} is already registered. 
Overwriting pipeline for task {task}...") + + if pt_model is None: + pt_model = () + elif not isinstance(pt_model, tuple): + pt_model = (pt_model,) + + if tf_model is None: + tf_model = () + elif not isinstance(tf_model, tuple): + tf_model = (tf_model,) + + task_impl = {"impl": pipeline_class, "pt": pt_model, "tf": tf_model} + + if default is not None: + if "model" not in default and ("pt" in default or "tf" in default): + default = {"model": default} + task_impl["default"] = default + + if type is not None: + task_impl["type"] = type + + self.supported_tasks[task] = task_impl + pipeline_class._registered_impl = {task: task_impl} + + def to_dict(self): + return self.supported_tasks diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/conversational.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/conversational.py new file mode 100644 index 0000000000000000000000000000000000000000..257f693c9d2ea34159048ec204f3f615c591fb74 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/conversational.py @@ -0,0 +1,322 @@ +import uuid +import warnings +from typing import Any, Dict, List, Union + +from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging +from .base import Pipeline, build_pipeline_init_args + + +if is_tf_available(): + import tensorflow as tf + +if is_torch_available(): + import torch + + +logger = logging.get_logger(__name__) + + +class Conversation: + """ + Utility class containing a conversation and its history. This class is meant to be used as an input to the + [`ConversationalPipeline`]. The conversation contains several utility functions to manage the addition of new user + inputs and generated model responses. + + Arguments: + messages (Union[str, List[Dict[str, str]]], *optional*): + The initial messages to start the conversation, either a string, or a list of dicts containing "role" and + "content" keys. If a string is passed, it is interpreted as a single message with the "user" role. + conversation_id (`uuid.UUID`, *optional*): + Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the + conversation. 
+ + Usage: + + ```python + conversation = Conversation("Going to the movies tonight - any suggestions?") + conversation.add_message({"role": "assistant", "content": "The Big lebowski."}) + conversation.add_message({"role": "user", "content": "Is it good?"}) + ```""" + + def __init__( + self, messages: Union[str, List[Dict[str, str]]] = None, conversation_id: uuid.UUID = None, **deprecated_kwargs + ): + if not conversation_id: + conversation_id = uuid.uuid4() + + if messages is None: + text = deprecated_kwargs.pop("text", None) + if text is not None: + messages = [{"role": "user", "content": text}] + else: + messages = [] + elif isinstance(messages, str): + messages = [{"role": "user", "content": messages}] + + # This block deals with the legacy args - new code should just totally + # avoid past_user_inputs and generated_responses + self._num_processed_user_inputs = 0 + generated_responses = deprecated_kwargs.pop("generated_responses", None) + past_user_inputs = deprecated_kwargs.pop("past_user_inputs", None) + if generated_responses is not None and past_user_inputs is None: + raise ValueError("generated_responses cannot be passed without past_user_inputs!") + if past_user_inputs is not None: + legacy_messages = [] + if generated_responses is None: + generated_responses = [] + # We structure it this way instead of using zip() because the lengths may differ by 1 + for i in range(max([len(past_user_inputs), len(generated_responses)])): + if i < len(past_user_inputs): + legacy_messages.append({"role": "user", "content": past_user_inputs[i]}) + if i < len(generated_responses): + legacy_messages.append({"role": "assistant", "content": generated_responses[i]}) + messages = legacy_messages + messages + + self.uuid = conversation_id + self.messages = messages + + def __eq__(self, other): + if not isinstance(other, Conversation): + return False + return self.uuid == other.uuid or self.messages == other.messages + + def add_message(self, message: Dict[str, str]): + if not set(message.keys()) == {"role", "content"}: + raise ValueError("Message should contain only 'role' and 'content' keys!") + if message["role"] not in ("user", "assistant", "system"): + raise ValueError("Only 'user', 'assistant' and 'system' roles are supported for now!") + self.messages.append(message) + + def add_user_input(self, text: str, overwrite: bool = False): + """ + Add a user input to the conversation for the next round. This is a legacy method that assumes that inputs must + alternate user/assistant/user/assistant, and so will not add multiple user messages in succession. We recommend + just using `add_message` with role "user" instead. + """ + if len(self) > 0 and self[-1]["role"] == "user": + if overwrite: + logger.warning( + f'User input added while unprocessed input was existing: "{self[-1]["content"]}" was overwritten ' + f'with: "{text}".' + ) + self[-1]["content"] = text + else: + logger.warning( + f'User input added while unprocessed input was existing: "{self[-1]["content"]}" new input ' + f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' + ) + else: + self.messages.append({"role": "user", "content": text}) + + def append_response(self, response: str): + """ + This is a legacy method. We recommend just using `add_message` with an appropriate role instead. + """ + self.messages.append({"role": "assistant", "content": response}) + + def mark_processed(self): + """ + This is a legacy method, as the Conversation no longer distinguishes between processed and unprocessed user + input. 
We set a counter here to keep behaviour mostly backward-compatible, but in general you should just read + the messages directly when writing new code. + """ + self._num_processed_user_inputs = len(self._user_messages) + + def __iter__(self): + for message in self.messages: + yield message + + def __getitem__(self, item): + return self.messages[item] + + def __setitem__(self, key, value): + self.messages[key] = value + + def __len__(self): + return len(self.messages) + + def __repr__(self): + """ + Generates a string representation of the conversation. + + Returns: + `str`: + + Example: + Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114 user: Going to the movies tonight - any suggestions? + bot: The Big Lebowski + """ + output = f"Conversation id: {self.uuid}\n" + for message in self.messages: + output += f"{message['role']}: {message['content']}\n" + return output + + def iter_texts(self): + # This is a legacy method for backwards compatibility. It is recommended to just directly access + # conversation.messages instead. + for message in self.messages: + yield message["role"] == "user", message["content"] + + @property + def _user_messages(self): + # This is a legacy property for backwards compatibility. It is recommended to just directly access + # conversation.messages instead. + return [message["content"] for message in self.messages if message["role"] == "user"] + + @property + def past_user_inputs(self): + # This is a legacy property for backwards compatibility. It is recommended to just directly access + # conversation.messages instead. The modern class does not care about which messages are "processed" + # or not. + if not self._user_messages: + return [] + # In the past, the most recent user message had to be mark_processed() before being included + # in past_user_messages. The class essentially had a single-message buffer, representing messages that + # had not yet been replied to. This is no longer the case, but we mimic the behaviour in this property + # for backward compatibility. + if self.messages[-1]["role"] != "user" or self._num_processed_user_inputs == len(self._user_messages): + return self._user_messages + + return self._user_messages[:-1] + + @property + def generated_responses(self): + # This is a legacy property for backwards compatibility. It is recommended to just directly access + # conversation.messages instead. + return [message["content"] for message in self.messages if message["role"] == "assistant"] + + @property + def new_user_input(self): + # This is a legacy property for backwards compatibility. It is recommended to just directly access + # conversation.messages instead. + return self._user_messages[-1] + + +@add_end_docstrings( + build_pipeline_init_args(has_tokenizer=True), + r""" + min_length_for_response (`int`, *optional*, defaults to 32): + The minimum length (in number of tokens) for a response.""", +) +class ConversationalPipeline(Pipeline): + """ + Multi-turn conversational pipeline. + + Example: + + ```python + >>> from transformers import pipeline, Conversation + # Any model with a chat template can be used in a ConversationalPipeline. + + >>> chatbot = pipeline(model="facebook/blenderbot-400M-distill") + >>> # Conversation objects initialized with a string will treat it as a user message + >>> conversation = Conversation("I'm looking for a movie - what's your favourite one?") + >>> conversation = chatbot(conversation) + >>> conversation.messages[-1]["content"] + "I don't really have a favorite movie, but I do like action movies. 
What about you?" + + >>> conversation.add_message({"role": "user", "content": "That's interesting, why do you like action movies?"}) + >>> conversation = chatbot(conversation) + >>> conversation.messages[-1]["content"] + " I think it's just because they're so fast-paced and action-fantastic." + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This conversational pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"conversational"`. + + This pipeline can be used with any model that has a [chat + template](https://huggingface.co/docs/transformers/chat_templating) set. + """ + + def __init__(self, *args, **kwargs): + warnings.warn( + "`ConversationalPipeline` is now deprecated, and the functionality has been moved to the standard `text-generation` pipeline, which now accepts lists of message dicts as well as strings. This class will be removed in v4.42.", + DeprecationWarning, + ) + super().__init__(*args, **kwargs) + if self.tokenizer.pad_token_id is None: + self.tokenizer.pad_token = self.tokenizer.eos_token + + def _sanitize_parameters(self, min_length_for_response=None, clean_up_tokenization_spaces=None, **generate_kwargs): + preprocess_params = {} + forward_params = {} + postprocess_params = {} + + if min_length_for_response is not None: + preprocess_params["min_length_for_response"] = min_length_for_response + + if "max_length" in generate_kwargs: + forward_params["max_length"] = generate_kwargs["max_length"] + # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) + if clean_up_tokenization_spaces is not None: + postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces + + if generate_kwargs: + forward_params.update(generate_kwargs) + return preprocess_params, forward_params, postprocess_params + + def __call__(self, conversations: Union[List[Dict], Conversation, List[Conversation]], num_workers=0, **kwargs): + r""" + Generate responses for the conversation(s) given as inputs. + + Args: + conversations (a [`Conversation`] or a list of [`Conversation`]): + Conversation to generate responses for. Inputs can also be passed as a list of dictionaries with `role` + and `content` keys - in this case, they will be converted to `Conversation` objects automatically. + Multiple conversations in either format may be passed as a list. + clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): + Whether or not to clean up the potential extra spaces in the text output. + generate_kwargs: + Additional keyword arguments to pass along to the generate method of the model (see the generate method + corresponding to your framework [here](./model#generative-models)). + + Returns: + [`Conversation`] or a list of [`Conversation`]: Conversation(s) with updated generated responses for those + containing a new user input. + """ + # XXX: num_workers==0 is required to be backward compatible + # Otherwise the threads will require a Conversation copy. + # This will definitely hinder performance on GPU, but has to be opted + # in because of this BC change. 
+ if isinstance(conversations, list) and isinstance(conversations[0], dict): + conversations = Conversation(conversations) + elif isinstance(conversations, list) and isinstance(conversations[0], list): + conversations = [Conversation(conv) for conv in conversations] + outputs = super().__call__(conversations, num_workers=num_workers, **kwargs) + if isinstance(outputs, list) and len(outputs) == 1: + return outputs[0] + return outputs + + def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]: + input_ids = self.tokenizer.apply_chat_template(conversation, add_generation_prompt=True) + + if self.framework == "pt": + input_ids = torch.LongTensor([input_ids]) + elif self.framework == "tf": + input_ids = tf.constant([input_ids]) + return {"input_ids": input_ids, "conversation": conversation} + + def _forward(self, model_inputs, **generate_kwargs): + n = model_inputs["input_ids"].shape[1] + conversation = model_inputs.pop("conversation") + if "max_length" not in generate_kwargs and "max_new_tokens" not in generate_kwargs: + generate_kwargs["max_new_tokens"] = 256 + output_ids = self.model.generate(**model_inputs, **generate_kwargs) + if self.model.config.is_encoder_decoder: + start_position = 1 + else: + start_position = n + return {"output_ids": output_ids[:, start_position:], "conversation": conversation} + + def postprocess(self, model_outputs, clean_up_tokenization_spaces=True): + output_ids = model_outputs["output_ids"] + answer = self.tokenizer.decode( + output_ids[0], + skip_special_tokens=True, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + ) + conversation = model_outputs["conversation"] + conversation.add_message({"role": "assistant", "content": answer}) + return conversation diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/depth_estimation.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/depth_estimation.py new file mode 100644 index 0000000000000000000000000000000000000000..c6431a499717a43f5e4974bfd1104e31ef9498ed --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/depth_estimation.py @@ -0,0 +1,111 @@ +from typing import List, Union + +import numpy as np + +from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends +from .base import Pipeline, build_pipeline_init_args + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES + +logger = logging.get_logger(__name__) + + +@add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) +class DepthEstimationPipeline(Pipeline): + """ + Depth estimation pipeline using any `AutoModelForDepthEstimation`. This pipeline predicts the depth of an image. 
+ + Example: + + ```python + >>> from transformers import pipeline + + >>> depth_estimator = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf") + >>> output = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg") + >>> # This is a tensor with the values being the depth expressed in meters for each pixel + >>> output["predicted_depth"].shape + torch.Size([1, 384, 384]) + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + + This depth estimation pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"depth-estimation"`. + + See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=depth-estimation). + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + requires_backends(self, "vision") + self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES) + + def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs): + """ + Predict the depth(s) of the image(s) passed as inputs. + + Args: + images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): + The pipeline handles three types of images: + + - A string containing a http link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + The pipeline accepts either a single image or a batch of images, which must then be passed as a string. + Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL + images. + timeout (`float`, *optional*, defaults to None): + The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and + the call may block forever. + + Return: + A dictionary or a list of dictionaries containing result. If the input is a single image, will return a + dictionary, if the input is a list of several images, will return a list of dictionaries corresponding to + the images. + + The dictionaries contain the following keys: + + - **predicted_depth** (`torch.Tensor`) -- The predicted depth by the model as a `torch.Tensor`. + - **depth** (`PIL.Image`) -- The predicted depth by the model as a `PIL.Image`. 
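+
+        Example (a minimal sketch reusing the checkpoint and image URL from the class docstring above):
+
+        ```python
+        >>> from transformers import pipeline
+
+        >>> depth_estimator = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf")
+        >>> result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
+        >>> result["depth"].save("depth.png")  # the depth map rendered as a PIL image
+        ```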
+ """ + return super().__call__(images, **kwargs) + + def _sanitize_parameters(self, timeout=None, **kwargs): + preprocess_params = {} + if timeout is not None: + preprocess_params["timeout"] = timeout + return preprocess_params, {}, {} + + def preprocess(self, image, timeout=None): + image = load_image(image, timeout) + self.image_size = image.size + model_inputs = self.image_processor(images=image, return_tensors=self.framework) + return model_inputs + + def _forward(self, model_inputs): + model_outputs = self.model(**model_inputs) + return model_outputs + + def postprocess(self, model_outputs): + predicted_depth = model_outputs.predicted_depth + prediction = torch.nn.functional.interpolate( + predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False + ) + output = prediction.squeeze().cpu().numpy() + formatted = (output * 255 / np.max(output)).astype("uint8") + depth = Image.fromarray(formatted) + output_dict = {} + output_dict["predicted_depth"] = predicted_depth + output_dict["depth"] = depth + return output_dict diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/document_question_answering.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/document_question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..64714390b04f1d7cbc94af9aa7f339a6fa19b373 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/document_question_answering.py @@ -0,0 +1,502 @@ +# Copyright 2022 The Impira Team and the HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +from typing import List, Optional, Tuple, Union + +import numpy as np + +from ..utils import ( + ExplicitEnum, + add_end_docstrings, + is_pytesseract_available, + is_torch_available, + is_vision_available, + logging, +) +from .base import ChunkPipeline, build_pipeline_init_args +from .question_answering import select_starts_ends + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES + +TESSERACT_LOADED = False +if is_pytesseract_available(): + TESSERACT_LOADED = True + import pytesseract + +logger = logging.get_logger(__name__) + + +# normalize_bbox() and apply_tesseract() are derived from apply_tesseract in models/layoutlmv3/feature_extraction_layoutlmv3.py. +# However, because the pipeline may evolve from what layoutlmv3 currently does, it's copied (vs. imported) to avoid creating an +# unnecessary dependency. 
+def normalize_box(box, width, height): + return [ + int(1000 * (box[0] / width)), + int(1000 * (box[1] / height)), + int(1000 * (box[2] / width)), + int(1000 * (box[3] / height)), + ] + + +def apply_tesseract(image: "Image.Image", lang: Optional[str], tesseract_config: Optional[str]): + """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes.""" + # apply OCR + data = pytesseract.image_to_data(image, lang=lang, output_type="dict", config=tesseract_config) + words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"] + + # filter empty words and corresponding coordinates + irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()] + words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices] + left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices] + top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices] + width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices] + height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices] + + # turn coordinates into (left, top, left+width, top+height) format + actual_boxes = [] + for x, y, w, h in zip(left, top, width, height): + actual_box = [x, y, x + w, y + h] + actual_boxes.append(actual_box) + + image_width, image_height = image.size + + # finally, normalize the bounding boxes + normalized_boxes = [] + for box in actual_boxes: + normalized_boxes.append(normalize_box(box, image_width, image_height)) + + if len(words) != len(normalized_boxes): + raise ValueError("Not as many words as there are bounding boxes") + + return words, normalized_boxes + + +class ModelType(ExplicitEnum): + LayoutLM = "layoutlm" + LayoutLMv2andv3 = "layoutlmv2andv3" + VisionEncoderDecoder = "vision_encoder_decoder" + + +@add_end_docstrings(build_pipeline_init_args(has_image_processor=True, has_tokenizer=True)) +class DocumentQuestionAnsweringPipeline(ChunkPipeline): + # TODO: Update task_summary docs to include an example with document QA and then update the first sentence + """ + Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`. The inputs/outputs are + similar to the (extractive) question answering pipeline; however, the pipeline takes an image (and optional OCR'd + words/boxes) as input instead of text context. + + Example: + + ```python + >>> from transformers import pipeline + + >>> document_qa = pipeline(model="impira/layoutlm-document-qa") + >>> document_qa( + ... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", + ... question="What is the invoice number?", + ... ) + [{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This document question answering pipeline can currently be loaded from [`pipeline`] using the following task + identifier: `"document-question-answering"`. + + The models that this pipeline can use are models that have been fine-tuned on a document question answering task. + See the up-to-date list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=document-question-answering). 
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.tokenizer is not None and not self.tokenizer.__class__.__name__.endswith("Fast"): + raise ValueError( + "`DocumentQuestionAnsweringPipeline` requires a fast tokenizer, but a slow tokenizer " + f"(`{self.tokenizer.__class__.__name__}`) is provided." + ) + + if self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig": + self.model_type = ModelType.VisionEncoderDecoder + if self.model.config.encoder.model_type != "donut-swin": + raise ValueError("Currently, the only supported VisionEncoderDecoder model is Donut") + else: + self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES) + if self.model.config.__class__.__name__ == "LayoutLMConfig": + self.model_type = ModelType.LayoutLM + else: + self.model_type = ModelType.LayoutLMv2andv3 + + def _sanitize_parameters( + self, + padding=None, + doc_stride=None, + max_question_len=None, + lang: Optional[str] = None, + tesseract_config: Optional[str] = None, + max_answer_len=None, + max_seq_len=None, + top_k=None, + handle_impossible_answer=None, + timeout=None, + **kwargs, + ): + preprocess_params, postprocess_params = {}, {} + if padding is not None: + preprocess_params["padding"] = padding + if doc_stride is not None: + preprocess_params["doc_stride"] = doc_stride + if max_question_len is not None: + preprocess_params["max_question_len"] = max_question_len + if max_seq_len is not None: + preprocess_params["max_seq_len"] = max_seq_len + if lang is not None: + preprocess_params["lang"] = lang + if tesseract_config is not None: + preprocess_params["tesseract_config"] = tesseract_config + if timeout is not None: + preprocess_params["timeout"] = timeout + + if top_k is not None: + if top_k < 1: + raise ValueError(f"top_k parameter should be >= 1 (got {top_k})") + postprocess_params["top_k"] = top_k + if max_answer_len is not None: + if max_answer_len < 1: + raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len}") + postprocess_params["max_answer_len"] = max_answer_len + if handle_impossible_answer is not None: + postprocess_params["handle_impossible_answer"] = handle_impossible_answer + + return preprocess_params, {}, postprocess_params + + def __call__( + self, + image: Union["Image.Image", str], + question: Optional[str] = None, + word_boxes: Tuple[str, List[float]] = None, + **kwargs, + ): + """ + Answer the question(s) given as inputs by using the document(s). A document is defined as an image and an + optional list of (word, box) tuples which represent the text in the document. If the `word_boxes` are not + provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for + LayoutLM-like models which require them as input. For Donut, no OCR is run. + + You can invoke the pipeline several ways: + + - `pipeline(image=image, question=question)` + - `pipeline(image=image, question=question, word_boxes=word_boxes)` + - `pipeline([{"image": image, "question": question}])` + - `pipeline([{"image": image, "question": question, "word_boxes": word_boxes}])` + + Args: + image (`str` or `PIL.Image`): + The pipeline handles three types of images: + + - A string containing a http link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + The pipeline accepts either a single image or a batch of images. If given a single image, it can be + broadcasted to multiple questions. 
+ question (`str`): + A question to ask of the document. + word_boxes (`List[str, Tuple[float, float, float, float]]`, *optional*): + A list of words and bounding boxes (normalized 0->1000). If you provide this optional input, then the + pipeline will use these words and boxes instead of running OCR on the image to derive them for models + that need them (e.g. LayoutLM). This allows you to reuse OCR'd results across many invocations of the + pipeline without having to re-run it each time. + top_k (`int`, *optional*, defaults to 1): + The number of answers to return (will be chosen by order of likelihood). Note that we return less than + top_k answers if there are not enough options available within the context. + doc_stride (`int`, *optional*, defaults to 128): + If the words in the document are too long to fit with the question for the model, it will be split in + several chunks with some overlap. This argument controls the size of that overlap. + max_answer_len (`int`, *optional*, defaults to 15): + The maximum length of predicted answers (e.g., only answers with a shorter length are considered). + max_seq_len (`int`, *optional*, defaults to 384): + The maximum length of the total sentence (context + question) in tokens of each chunk passed to the + model. The context will be split in several chunks (using `doc_stride` as overlap) if needed. + max_question_len (`int`, *optional*, defaults to 64): + The maximum length of the question after tokenization. It will be truncated if needed. + handle_impossible_answer (`bool`, *optional*, defaults to `False`): + Whether or not we accept impossible as an answer. + lang (`str`, *optional*): + Language to use while running OCR. Defaults to english. + tesseract_config (`str`, *optional*): + Additional flags to pass to tesseract while running OCR. + timeout (`float`, *optional*, defaults to None): + The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and + the call may block forever. + + Return: + A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys: + + - **score** (`float`) -- The probability associated to the answer. + - **start** (`int`) -- The start word index of the answer (in the OCR'd version of the input or provided + `word_boxes`). + - **end** (`int`) -- The end word index of the answer (in the OCR'd version of the input or provided + `word_boxes`). + - **answer** (`str`) -- The answer to the question. 
+ - **words** (`list[int]`) -- The index of each word/box pair that is in the answer + """ + if isinstance(question, str): + inputs = {"question": question, "image": image} + if word_boxes is not None: + inputs["word_boxes"] = word_boxes + else: + inputs = image + return super().__call__(inputs, **kwargs) + + def preprocess( + self, + input, + padding="do_not_pad", + doc_stride=None, + max_seq_len=None, + word_boxes: Tuple[str, List[float]] = None, + lang=None, + tesseract_config="", + timeout=None, + ): + # NOTE: This code mirrors the code in question answering and will be implemented in a follow up PR + # to support documents with enough tokens that overflow the model's window + if max_seq_len is None: + max_seq_len = self.tokenizer.model_max_length + + if doc_stride is None: + doc_stride = min(max_seq_len // 2, 256) + + image = None + image_features = {} + if input.get("image", None) is not None: + image = load_image(input["image"], timeout=timeout) + if self.image_processor is not None: + image_features.update(self.image_processor(images=image, return_tensors=self.framework)) + elif self.feature_extractor is not None: + image_features.update(self.feature_extractor(images=image, return_tensors=self.framework)) + elif self.model_type == ModelType.VisionEncoderDecoder: + raise ValueError("If you are using a VisionEncoderDecoderModel, you must provide a feature extractor") + + words, boxes = None, None + if not self.model_type == ModelType.VisionEncoderDecoder: + if "word_boxes" in input: + words = [x[0] for x in input["word_boxes"]] + boxes = [x[1] for x in input["word_boxes"]] + elif "words" in image_features and "boxes" in image_features: + words = image_features.pop("words")[0] + boxes = image_features.pop("boxes")[0] + elif image is not None: + if not TESSERACT_LOADED: + raise ValueError( + "If you provide an image without word_boxes, then the pipeline will run OCR using Tesseract," + " but pytesseract is not available" + ) + if TESSERACT_LOADED: + words, boxes = apply_tesseract(image, lang=lang, tesseract_config=tesseract_config) + else: + raise ValueError( + "You must provide an image or word_boxes. 
If you provide an image, the pipeline will automatically" + " run OCR to derive words and boxes" + ) + + if self.tokenizer.padding_side != "right": + raise ValueError( + "Document question answering only supports tokenizers whose padding side is 'right', not" + f" {self.tokenizer.padding_side}" + ) + + if self.model_type == ModelType.VisionEncoderDecoder: + task_prompt = f'{input["question"]}' + # Adapted from https://huggingface.co/spaces/nielsr/donut-docvqa/blob/main/app.py + encoding = { + "inputs": image_features["pixel_values"], + "decoder_input_ids": self.tokenizer( + task_prompt, add_special_tokens=False, return_tensors=self.framework + ).input_ids, + "return_dict_in_generate": True, + } + yield { + **encoding, + "p_mask": None, + "word_ids": None, + "words": None, + "output_attentions": True, + "is_last": True, + } + else: + tokenizer_kwargs = {} + if self.model_type == ModelType.LayoutLM: + tokenizer_kwargs["text"] = input["question"].split() + tokenizer_kwargs["text_pair"] = words + tokenizer_kwargs["is_split_into_words"] = True + else: + tokenizer_kwargs["text"] = [input["question"]] + tokenizer_kwargs["text_pair"] = [words] + tokenizer_kwargs["boxes"] = [boxes] + + encoding = self.tokenizer( + padding=padding, + max_length=max_seq_len, + stride=doc_stride, + return_token_type_ids=True, + truncation="only_second", + return_overflowing_tokens=True, + **tokenizer_kwargs, + ) + # TODO: check why slower `LayoutLMTokenizer` and `LayoutLMv2Tokenizer` don't have this key in outputs + # FIXME: ydshieh and/or Narsil + encoding.pop("overflow_to_sample_mapping", None) # We do not use this + + num_spans = len(encoding["input_ids"]) + + # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) + # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens) + # This logic mirrors the logic in the question_answering pipeline + p_mask = [[tok != 1 for tok in encoding.sequence_ids(span_id)] for span_id in range(num_spans)] + for span_idx in range(num_spans): + if self.framework == "pt": + span_encoding = {k: torch.tensor(v[span_idx : span_idx + 1]) for (k, v) in encoding.items()} + if "pixel_values" in image_features: + span_encoding["image"] = image_features["pixel_values"] + else: + raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") + + input_ids_span_idx = encoding["input_ids"][span_idx] + # keep the cls_token unmasked (some models use it to indicate unanswerable questions) + if self.tokenizer.cls_token_id is not None: + cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0] + for cls_index in cls_indices: + p_mask[span_idx][cls_index] = 0 + + # For each span, place a bounding box [0,0,0,0] for question and CLS tokens, [1000,1000,1000,1000] + # for SEP tokens, and the word's bounding box for words in the original document. 
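+                # Illustrative note: this branch only applies to plain LayoutLM, because the LayoutLMv2/v3
+                # tokenizers already received `boxes` above. With placeholder values such as
+                # words = ["Invoice", "us-001"] and boxes = [[57, 42, 193, 70], [620, 42, 780, 70]], every
+                # sub-token produced from "us-001" is assigned the box [620, 42, 780, 70].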
+ if "boxes" not in tokenizer_kwargs: + bbox = [] + for input_id, sequence_id, word_id in zip( + encoding.input_ids[span_idx], + encoding.sequence_ids(span_idx), + encoding.word_ids(span_idx), + ): + if sequence_id == 1: + bbox.append(boxes[word_id]) + elif input_id == self.tokenizer.sep_token_id: + bbox.append([1000] * 4) + else: + bbox.append([0] * 4) + + if self.framework == "pt": + span_encoding["bbox"] = torch.tensor(bbox).unsqueeze(0) + elif self.framework == "tf": + raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") + yield { + **span_encoding, + "p_mask": p_mask[span_idx], + "word_ids": encoding.word_ids(span_idx), + "words": words, + "is_last": span_idx == num_spans - 1, + } + + def _forward(self, model_inputs, **generate_kwargs): + p_mask = model_inputs.pop("p_mask", None) + word_ids = model_inputs.pop("word_ids", None) + words = model_inputs.pop("words", None) + is_last = model_inputs.pop("is_last", False) + + if self.model_type == ModelType.VisionEncoderDecoder: + model_outputs = self.model.generate(**model_inputs, **generate_kwargs) + else: + model_outputs = self.model(**model_inputs) + + model_outputs = dict(model_outputs.items()) + model_outputs["p_mask"] = p_mask + model_outputs["word_ids"] = word_ids + model_outputs["words"] = words + model_outputs["attention_mask"] = model_inputs.get("attention_mask", None) + model_outputs["is_last"] = is_last + return model_outputs + + def postprocess(self, model_outputs, top_k=1, **kwargs): + if self.model_type == ModelType.VisionEncoderDecoder: + answers = [self.postprocess_encoder_decoder_single(o) for o in model_outputs] + else: + answers = self.postprocess_extractive_qa(model_outputs, top_k=top_k, **kwargs) + + answers = sorted(answers, key=lambda x: x.get("score", 0), reverse=True)[:top_k] + return answers + + def postprocess_encoder_decoder_single(self, model_outputs, **kwargs): + sequence = self.tokenizer.batch_decode(model_outputs["sequences"])[0] + + # TODO: A lot of this logic is specific to Donut and should probably be handled in the tokenizer + # (see https://github.com/huggingface/transformers/pull/18414/files#r961747408 for more context). 
+ sequence = sequence.replace(self.tokenizer.eos_token, "").replace(self.tokenizer.pad_token, "") + sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token + ret = { + "answer": None, + } + + answer = re.search(r"(.*)", sequence) + if answer is not None: + ret["answer"] = answer.group(1).strip() + return ret + + def postprocess_extractive_qa( + self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, **kwargs + ): + min_null_score = 1000000 # large and positive + answers = [] + for output in model_outputs: + words = output["words"] + + starts, ends, scores, min_null_score = select_starts_ends( + start=output["start_logits"], + end=output["end_logits"], + p_mask=output["p_mask"], + attention_mask=output["attention_mask"].numpy() + if output.get("attention_mask", None) is not None + else None, + min_null_score=min_null_score, + top_k=top_k, + handle_impossible_answer=handle_impossible_answer, + max_answer_len=max_answer_len, + ) + word_ids = output["word_ids"] + for start, end, score in zip(starts, ends, scores): + word_start, word_end = word_ids[start], word_ids[end] + if word_start is not None and word_end is not None: + answers.append( + { + "score": float(score), + "answer": " ".join(words[word_start : word_end + 1]), + "start": word_start, + "end": word_end, + } + ) + + if handle_impossible_answer: + answers.append({"score": min_null_score, "answer": "", "start": 0, "end": 0}) + + return answers diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/feature_extraction.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/feature_extraction.py new file mode 100644 index 0000000000000000000000000000000000000000..7d67a615ac02d29625f51242e1f747b39e6118bd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/feature_extraction.py @@ -0,0 +1,86 @@ +from typing import Dict + +from ..utils import add_end_docstrings +from .base import GenericTensor, Pipeline, build_pipeline_init_args + + +@add_end_docstrings( + build_pipeline_init_args(has_tokenizer=True, supports_binary_output=False), + r""" + tokenize_kwargs (`dict`, *optional*): + Additional dictionary of keyword arguments passed along to the tokenizer. + return_tensors (`bool`, *optional*): + If `True`, returns a tensor according to the specified framework, otherwise returns a list.""", +) +class FeatureExtractionPipeline(Pipeline): + """ + Feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base + transformer, which can be used as features in downstream tasks. + + Example: + + ```python + >>> from transformers import pipeline + + >>> extractor = pipeline(model="google-bert/bert-base-uncased", task="feature-extraction") + >>> result = extractor("This is a simple test.", return_tensors=True) + >>> result.shape # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input string. + torch.Size([1, 8, 768]) + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier: + `"feature-extraction"`. + + All models may be used for this pipeline. See a list of all models, including community-contributed models on + [huggingface.co/models](https://huggingface.co/models). 
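+
+    The pipeline returns one hidden-state vector per token. Reducing this to a single sentence vector is left
+    to the caller; a minimal sketch using mean pooling (one of several possible choices):
+
+    ```python
+    >>> import numpy as np
+
+    >>> features = extractor("This is a simple test.")  # nested list of shape [1, sequence_length, hidden_dimension]
+    >>> sentence_vector = np.array(features[0]).mean(axis=0)  # average over the token axis
+    ```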
+ """ + + def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs): + if tokenize_kwargs is None: + tokenize_kwargs = {} + + if truncation is not None: + if "truncation" in tokenize_kwargs: + raise ValueError( + "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)" + ) + tokenize_kwargs["truncation"] = truncation + + preprocess_params = tokenize_kwargs + + postprocess_params = {} + if return_tensors is not None: + postprocess_params["return_tensors"] = return_tensors + + return preprocess_params, {}, postprocess_params + + def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]: + model_inputs = self.tokenizer(inputs, return_tensors=self.framework, **tokenize_kwargs) + return model_inputs + + def _forward(self, model_inputs): + model_outputs = self.model(**model_inputs) + return model_outputs + + def postprocess(self, model_outputs, return_tensors=False): + # [0] is the first available tensor, logits or last_hidden_state. + if return_tensors: + return model_outputs[0] + if self.framework == "pt": + return model_outputs[0].tolist() + elif self.framework == "tf": + return model_outputs[0].numpy().tolist() + + def __call__(self, *args, **kwargs): + """ + Extract the features of the input(s). + + Args: + args (`str` or `List[str]`): One or several texts (or one list of texts) to get the features of. + + Return: + A nested list of `float`: The features computed by the model. + """ + return super().__call__(*args, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/fill_mask.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/fill_mask.py new file mode 100644 index 0000000000000000000000000000000000000000..a6f240822322f75012ffb592e89f0e3f59189008 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/fill_mask.py @@ -0,0 +1,273 @@ +from typing import Dict + +import numpy as np + +from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging +from .base import GenericTensor, Pipeline, PipelineException, build_pipeline_init_args + + +if is_tf_available(): + import tensorflow as tf + + from ..tf_utils import stable_softmax + + +if is_torch_available(): + import torch + + +logger = logging.get_logger(__name__) + + +@add_end_docstrings( + build_pipeline_init_args(has_tokenizer=True), + r""" + top_k (`int`, defaults to 5): + The number of predictions to return. + targets (`str` or `List[str]`, *optional*): + When passed, the model will limit the scores to the passed targets instead of looking up in the whole + vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting + token will be used (with a warning, and that might be slower). + tokenizer_kwargs (`dict`, *optional*): + Additional dictionary of keyword arguments passed along to the tokenizer.""", +) +class FillMaskPipeline(Pipeline): + """ + Masked language modeling prediction pipeline using any `ModelWithLMHead`. See the [masked language modeling + examples](../task_summary#masked-language-modeling) for more information. 
+ + Example: + + ```python + >>> from transformers import pipeline + + >>> fill_masker = pipeline(model="google-bert/bert-base-uncased") + >>> fill_masker("This is a simple [MASK].") + [{'score': 0.042, 'token': 3291, 'token_str': 'problem', 'sequence': 'this is a simple problem.'}, {'score': 0.031, 'token': 3160, 'token_str': 'question', 'sequence': 'this is a simple question.'}, {'score': 0.03, 'token': 8522, 'token_str': 'equation', 'sequence': 'this is a simple equation.'}, {'score': 0.027, 'token': 2028, 'token_str': 'one', 'sequence': 'this is a simple one.'}, {'score': 0.024, 'token': 3627, 'token_str': 'rule', 'sequence': 'this is a simple rule.'}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This mask filling pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"fill-mask"`. + + The models that this pipeline can use are models that have been trained with a masked language modeling objective, + which includes the bi-directional models in the library. See the up-to-date list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=fill-mask). + + + + This pipeline only works for inputs with exactly one token masked. Experimental: We added support for multiple + masks. The returned values are raw model output, and correspond to disjoint probabilities where one might expect + joint probabilities (See [discussion](https://github.com/huggingface/transformers/pull/10222)). + + + + + + This pipeline now supports tokenizer_kwargs. For example try: + + ```python + >>> from transformers import pipeline + + >>> fill_masker = pipeline(model="google-bert/bert-base-uncased") + >>> tokenizer_kwargs = {"truncation": True} + >>> fill_masker( + ... "This is a simple [MASK]. " + "...with a large amount of repeated text appended. " * 100, + ... tokenizer_kwargs=tokenizer_kwargs, + ... 
) + ``` + + + + + + """ + + def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray: + if self.framework == "tf": + masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy() + elif self.framework == "pt": + masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False) + else: + raise ValueError("Unsupported framework") + return masked_index + + def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray: + masked_index = self.get_masked_index(input_ids) + numel = np.prod(masked_index.shape) + if numel < 1: + raise PipelineException( + "fill-mask", + self.model.base_model_prefix, + f"No mask_token ({self.tokenizer.mask_token}) found on the input", + ) + + def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor): + if isinstance(model_inputs, list): + for model_input in model_inputs: + self._ensure_exactly_one_mask_token(model_input["input_ids"][0]) + else: + for input_ids in model_inputs["input_ids"]: + self._ensure_exactly_one_mask_token(input_ids) + + def preprocess( + self, inputs, return_tensors=None, tokenizer_kwargs=None, **preprocess_parameters + ) -> Dict[str, GenericTensor]: + if return_tensors is None: + return_tensors = self.framework + if tokenizer_kwargs is None: + tokenizer_kwargs = {} + + model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs) + self.ensure_exactly_one_mask_token(model_inputs) + return model_inputs + + def _forward(self, model_inputs): + model_outputs = self.model(**model_inputs) + model_outputs["input_ids"] = model_inputs["input_ids"] + return model_outputs + + def postprocess(self, model_outputs, top_k=5, target_ids=None): + # Cap top_k if there are targets + if target_ids is not None and target_ids.shape[0] < top_k: + top_k = target_ids.shape[0] + input_ids = model_outputs["input_ids"][0] + outputs = model_outputs["logits"] + + if self.framework == "tf": + masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0] + + outputs = outputs.numpy() + + logits = outputs[0, masked_index, :] + probs = stable_softmax(logits, axis=-1) + if target_ids is not None: + probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1)) + probs = tf.expand_dims(probs, 0) + + topk = tf.math.top_k(probs, k=top_k) + values, predictions = topk.values.numpy(), topk.indices.numpy() + else: + masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1) + # Fill mask pipeline supports only one ${mask_token} per sample + + logits = outputs[0, masked_index, :] + probs = logits.softmax(dim=-1) + if target_ids is not None: + probs = probs[..., target_ids] + + values, predictions = probs.topk(top_k) + + result = [] + single_mask = values.shape[0] == 1 + for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())): + row = [] + for v, p in zip(_values, _predictions): + # Copy is important since we're going to modify this array in place + tokens = input_ids.numpy().copy() + if target_ids is not None: + p = target_ids[p].tolist() + + tokens[masked_index[i]] = p + # Filter padding out: + tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)] + # Originally we skip special tokens to give readable output. 
+ # For multi masks though, the other [MASK] would be removed otherwise + # making the output look odd, so we add them back + sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask) + proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence} + row.append(proposition) + result.append(row) + if single_mask: + return result[0] + return result + + def get_target_ids(self, targets, top_k=None): + if isinstance(targets, str): + targets = [targets] + try: + vocab = self.tokenizer.get_vocab() + except Exception: + vocab = {} + target_ids = [] + for target in targets: + id_ = vocab.get(target, None) + if id_ is None: + input_ids = self.tokenizer( + target, + add_special_tokens=False, + return_attention_mask=False, + return_token_type_ids=False, + max_length=1, + truncation=True, + )["input_ids"] + if len(input_ids) == 0: + logger.warning( + f"The specified target token `{target}` does not exist in the model vocabulary. " + "We cannot replace it with anything meaningful, ignoring it" + ) + continue + id_ = input_ids[0] + # XXX: If users encounter this pass + # it becomes pretty slow, so let's make sure + # The warning enables them to fix the input to + # get faster performance. + logger.warning( + f"The specified target token `{target}` does not exist in the model vocabulary. " + f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`." + ) + target_ids.append(id_) + target_ids = list(set(target_ids)) + if len(target_ids) == 0: + raise ValueError("At least one target must be provided when passed.") + target_ids = np.array(target_ids) + return target_ids + + def _sanitize_parameters(self, top_k=None, targets=None, tokenizer_kwargs=None): + preprocess_params = {} + + if tokenizer_kwargs is not None: + preprocess_params["tokenizer_kwargs"] = tokenizer_kwargs + + postprocess_params = {} + + if targets is not None: + target_ids = self.get_target_ids(targets, top_k) + postprocess_params["target_ids"] = target_ids + + if top_k is not None: + postprocess_params["top_k"] = top_k + + if self.tokenizer.mask_token_id is None: + raise PipelineException( + "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`." + ) + return preprocess_params, {}, postprocess_params + + def __call__(self, inputs, *args, **kwargs): + """ + Fill the masked token in the text(s) given as inputs. + + Args: + args (`str` or `List[str]`): + One or several texts (or one list of prompts) with masked tokens. + targets (`str` or `List[str]`, *optional*): + When passed, the model will limit the scores to the passed targets instead of looking up in the whole + vocab. If the provided targets are not in the model vocab, they will be tokenized and the first + resulting token will be used (with a warning, and that might be slower). + top_k (`int`, *optional*): + When passed, overrides the number of predictions to return. + + Return: + A list or a list of list of `dict`: Each result comes as list of dictionaries with the following keys: + + - **sequence** (`str`) -- The corresponding input with the mask token prediction. + - **score** (`float`) -- The corresponding probability. + - **token** (`int`) -- The predicted token id (to replace the masked one). + - **token_str** (`str`) -- The predicted token (to replace the masked one). 
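+
+        For example, the scores can be restricted to a few candidate tokens with ``targets`` (an illustrative
+        sketch; the candidates below are arbitrary words from the model vocabulary):
+
+        ```python
+        >>> fill_masker("This is a simple [MASK].", targets=["problem", "question"], top_k=2)  # doctest: +SKIP
+        ```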
+ """ + outputs = super().__call__(inputs, **kwargs) + if isinstance(inputs, list) and len(inputs) == 1: + return outputs[0] + return outputs diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/image_classification.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/image_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..62793c252a6ba19f053d8d099dbac9bd60aa09b0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/image_classification.py @@ -0,0 +1,201 @@ +from typing import List, Union + +import numpy as np + +from ..utils import ( + ExplicitEnum, + add_end_docstrings, + is_tf_available, + is_torch_available, + is_vision_available, + logging, + requires_backends, +) +from .base import Pipeline, build_pipeline_init_args + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_tf_available(): + from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES + +if is_torch_available(): + from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES + +logger = logging.get_logger(__name__) + + +# Copied from transformers.pipelines.text_classification.sigmoid +def sigmoid(_outputs): + return 1.0 / (1.0 + np.exp(-_outputs)) + + +# Copied from transformers.pipelines.text_classification.softmax +def softmax(_outputs): + maxes = np.max(_outputs, axis=-1, keepdims=True) + shifted_exp = np.exp(_outputs - maxes) + return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) + + +# Copied from transformers.pipelines.text_classification.ClassificationFunction +class ClassificationFunction(ExplicitEnum): + SIGMOID = "sigmoid" + SOFTMAX = "softmax" + NONE = "none" + + +@add_end_docstrings( + build_pipeline_init_args(has_image_processor=True), + r""" + function_to_apply (`str`, *optional*, defaults to `"default"`): + The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: + + - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model + has several labels, will apply the softmax function on the output. + - `"sigmoid"`: Applies the sigmoid function on the output. + - `"softmax"`: Applies the softmax function on the output. + - `"none"`: Does not apply any function on the output.""", +) +class ImageClassificationPipeline(Pipeline): + """ + Image classification pipeline using any `AutoModelForImageClassification`. This pipeline predicts the class of an + image. + + Example: + + ```python + >>> from transformers import pipeline + + >>> classifier = pipeline(model="microsoft/beit-base-patch16-224-pt22k-ft22k") + >>> classifier("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") + [{'score': 0.442, 'label': 'macaw'}, {'score': 0.088, 'label': 'popinjay'}, {'score': 0.075, 'label': 'parrot'}, {'score': 0.073, 'label': 'parodist, lampooner'}, {'score': 0.046, 'label': 'poll, poll_parrot'}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This image classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"image-classification"`. + + See the list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=image-classification). 
+ """ + + function_to_apply: ClassificationFunction = ClassificationFunction.NONE + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + requires_backends(self, "vision") + self.check_model_type( + TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES + if self.framework == "tf" + else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES + ) + + def _sanitize_parameters(self, top_k=None, function_to_apply=None, timeout=None): + preprocess_params = {} + if timeout is not None: + preprocess_params["timeout"] = timeout + postprocess_params = {} + if top_k is not None: + postprocess_params["top_k"] = top_k + if isinstance(function_to_apply, str): + function_to_apply = ClassificationFunction(function_to_apply.lower()) + if function_to_apply is not None: + postprocess_params["function_to_apply"] = function_to_apply + return preprocess_params, {}, postprocess_params + + def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs): + """ + Assign labels to the image(s) passed as inputs. + + Args: + images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): + The pipeline handles three types of images: + + - A string containing a http link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + The pipeline accepts either a single image or a batch of images, which must then be passed as a string. + Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL + images. + function_to_apply (`str`, *optional*, defaults to `"default"`): + The function to apply to the model outputs in order to retrieve the scores. Accepts four different + values: + + If this argument is not specified, then it will apply the following functions according to the number + of labels: + + - If the model has a single label, will apply the sigmoid function on the output. + - If the model has several labels, will apply the softmax function on the output. + + Possible values are: + + - `"sigmoid"`: Applies the sigmoid function on the output. + - `"softmax"`: Applies the softmax function on the output. + - `"none"`: Does not apply any function on the output. + top_k (`int`, *optional*, defaults to 5): + The number of top labels that will be returned by the pipeline. If the provided number is higher than + the number of labels available in the model configuration, it will default to the number of labels. + timeout (`float`, *optional*, defaults to None): + The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and + the call may block forever. + + Return: + A dictionary or a list of dictionaries containing result. If the input is a single image, will return a + dictionary, if the input is a list of several images, will return a list of dictionaries corresponding to + the images. + + The dictionaries contain the following keys: + + - **label** (`str`) -- The label identified by the model. + - **score** (`int`) -- The score attributed by the model for that label. 
+ """ + return super().__call__(images, **kwargs) + + def preprocess(self, image, timeout=None): + image = load_image(image, timeout=timeout) + model_inputs = self.image_processor(images=image, return_tensors=self.framework) + return model_inputs + + def _forward(self, model_inputs): + model_outputs = self.model(**model_inputs) + return model_outputs + + def postprocess(self, model_outputs, function_to_apply=None, top_k=5): + if function_to_apply is None: + if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: + function_to_apply = ClassificationFunction.SIGMOID + elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: + function_to_apply = ClassificationFunction.SOFTMAX + elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None: + function_to_apply = self.model.config.function_to_apply + else: + function_to_apply = ClassificationFunction.NONE + + if top_k > self.model.config.num_labels: + top_k = self.model.config.num_labels + + outputs = model_outputs["logits"][0] + outputs = outputs.numpy() + + if function_to_apply == ClassificationFunction.SIGMOID: + scores = sigmoid(outputs) + elif function_to_apply == ClassificationFunction.SOFTMAX: + scores = softmax(outputs) + elif function_to_apply == ClassificationFunction.NONE: + scores = outputs + else: + raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}") + + dict_scores = [ + {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores) + ] + dict_scores.sort(key=lambda x: x["score"], reverse=True) + if top_k is not None: + dict_scores = dict_scores[:top_k] + + return dict_scores diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/image_feature_extraction.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/image_feature_extraction.py new file mode 100644 index 0000000000000000000000000000000000000000..3a361deabd797d98f87293acd236a05801ea0458 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/image_feature_extraction.py @@ -0,0 +1,110 @@ +from typing import Dict + +from ..utils import add_end_docstrings, is_vision_available +from .base import GenericTensor, Pipeline, build_pipeline_init_args + + +if is_vision_available(): + from ..image_utils import load_image + + +@add_end_docstrings( + build_pipeline_init_args(has_image_processor=True), + """ + image_processor_kwargs (`dict`, *optional*): + Additional dictionary of keyword arguments passed along to the image processor e.g. + {"size": {"height": 100, "width": 100}} + pool (`bool`, *optional*, defaults to `False`): + Whether or not to return the pooled output. If `False`, the model will return the raw hidden states. + """, +) +class ImageFeatureExtractionPipeline(Pipeline): + """ + Image feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base + transformer, which can be used as features in downstream tasks. + + Example: + + ```python + >>> from transformers import pipeline + + >>> extractor = pipeline(model="google/vit-base-patch16-224", task="image-feature-extraction") + >>> result = extractor("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", return_tensors=True) + >>> result.shape # This is a tensor of shape [1, sequence_lenth, hidden_dimension] representing the input image. 
+ torch.Size([1, 197, 768]) + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This image feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier: + `"image-feature-extraction"`. + + All vision models may be used for this pipeline. See a list of all models, including community-contributed models on + [huggingface.co/models](https://huggingface.co/models). + """ + + def _sanitize_parameters(self, image_processor_kwargs=None, return_tensors=None, pool=None, **kwargs): + preprocess_params = {} if image_processor_kwargs is None else image_processor_kwargs + + postprocess_params = {} + if pool is not None: + postprocess_params["pool"] = pool + if return_tensors is not None: + postprocess_params["return_tensors"] = return_tensors + + if "timeout" in kwargs: + preprocess_params["timeout"] = kwargs["timeout"] + + return preprocess_params, {}, postprocess_params + + def preprocess(self, image, timeout=None, **image_processor_kwargs) -> Dict[str, GenericTensor]: + image = load_image(image, timeout=timeout) + model_inputs = self.image_processor(image, return_tensors=self.framework, **image_processor_kwargs) + return model_inputs + + def _forward(self, model_inputs): + model_outputs = self.model(**model_inputs) + return model_outputs + + def postprocess(self, model_outputs, pool=None, return_tensors=False): + pool = pool if pool is not None else False + + if pool: + if "pooler_output" not in model_outputs: + raise ValueError( + "No pooled output was returned. Make sure the model has a `pooler` layer when using the `pool` option." + ) + outputs = model_outputs["pooler_output"] + else: + # [0] is the first available tensor, logits or last_hidden_state. + outputs = model_outputs[0] + + if return_tensors: + return outputs + if self.framework == "pt": + return outputs.tolist() + elif self.framework == "tf": + return outputs.numpy().tolist() + + def __call__(self, *args, **kwargs): + """ + Extract the features of the input(s). + + Args: + images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): + The pipeline handles three types of images: + + - A string containing a http link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + The pipeline accepts either a single image or a batch of images, which must then be passed as a string. + Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL + images. + timeout (`float`, *optional*, defaults to None): + The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and + the call may block forever. + Return: + A nested list of `float`: The features computed by the model. 
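+
+        For example, the pooled output can be requested instead of the raw hidden states, provided the
+        underlying model exposes a pooler (an illustrative sketch reusing the extractor above):
+
+        ```python
+        >>> extractor(
+        ...     "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
+        ...     pool=True,
+        ...     return_tensors=True,
+        ... )  # doctest: +SKIP
+        ```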
+ """ + return super().__call__(*args, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/image_segmentation.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/image_segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..23fbd4fb79b190cd571f8707f02186d39cccb393 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/image_segmentation.py @@ -0,0 +1,211 @@ +from typing import Any, Dict, List, Union + +import numpy as np + +from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends +from .base import Pipeline, build_pipeline_init_args + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_torch_available(): + from ..models.auto.modeling_auto import ( + MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES, + MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES, + MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, + MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES, + ) + + +logger = logging.get_logger(__name__) + + +Prediction = Dict[str, Any] +Predictions = List[Prediction] + + +@add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) +class ImageSegmentationPipeline(Pipeline): + """ + Image segmentation pipeline using any `AutoModelForXXXSegmentation`. This pipeline predicts masks of objects and + their classes. + + Example: + + ```python + >>> from transformers import pipeline + + >>> segmenter = pipeline(model="facebook/detr-resnet-50-panoptic") + >>> segments = segmenter("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") + >>> len(segments) + 2 + + >>> segments[0]["label"] + 'bird' + + >>> segments[1]["label"] + 'bird' + + >>> type(segments[0]["mask"]) # This is a black and white mask showing where is the bird on the original image. + + + >>> segments[0]["mask"].size + (768, 512) + ``` + + + This image segmentation pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"image-segmentation"`. + + See the list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=image-segmentation). 
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + if self.framework == "tf": + raise ValueError(f"The {self.__class__} is only available in PyTorch.") + + requires_backends(self, "vision") + mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES.copy() + mapping.update(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES) + mapping.update(MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES) + mapping.update(MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES) + self.check_model_type(mapping) + + def _sanitize_parameters(self, **kwargs): + preprocess_kwargs = {} + postprocess_kwargs = {} + if "subtask" in kwargs: + postprocess_kwargs["subtask"] = kwargs["subtask"] + preprocess_kwargs["subtask"] = kwargs["subtask"] + if "threshold" in kwargs: + postprocess_kwargs["threshold"] = kwargs["threshold"] + if "mask_threshold" in kwargs: + postprocess_kwargs["mask_threshold"] = kwargs["mask_threshold"] + if "overlap_mask_area_threshold" in kwargs: + postprocess_kwargs["overlap_mask_area_threshold"] = kwargs["overlap_mask_area_threshold"] + if "timeout" in kwargs: + preprocess_kwargs["timeout"] = kwargs["timeout"] + + return preprocess_kwargs, {}, postprocess_kwargs + + def __call__(self, images, **kwargs) -> Union[Predictions, List[Prediction]]: + """ + Perform segmentation (detect masks & classes) in the image(s) passed as inputs. + + Args: + images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): + The pipeline handles three types of images: + + - A string containing an HTTP(S) link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the + same format: all as HTTP(S) links, all as local paths, or all as PIL images. + subtask (`str`, *optional*): + Segmentation task to be performed, choose [`semantic`, `instance` and `panoptic`] depending on model + capabilities. If not set, the pipeline will attempt tp resolve in the following order: + `panoptic`, `instance`, `semantic`. + threshold (`float`, *optional*, defaults to 0.9): + Probability threshold to filter out predicted masks. + mask_threshold (`float`, *optional*, defaults to 0.5): + Threshold to use when turning the predicted masks into binary values. + overlap_mask_area_threshold (`float`, *optional*, defaults to 0.5): + Mask overlap threshold to eliminate small, disconnected segments. + timeout (`float`, *optional*, defaults to None): + The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and + the call may block forever. + + Return: + A dictionary or a list of dictionaries containing the result. If the input is a single image, will return a + list of dictionaries, if the input is a list of several images, will return a list of list of dictionaries + corresponding to each image. + + The dictionaries contain the mask, label and score (where applicable) of each detected object and contains + the following keys: + + - **label** (`str`) -- The class label identified by the model. + - **mask** (`PIL.Image`) -- A binary mask of the detected object as a Pil Image of shape (width, height) of + the original image. Returns a mask filled with zeros if no object is found. + - **score** (*optional* `float`) -- Optionally, when the model is capable of estimating a confidence of the + "object" described by the label and the mask. 
+ """ + return super().__call__(images, **kwargs) + + def preprocess(self, image, subtask=None, timeout=None): + image = load_image(image, timeout=timeout) + target_size = [(image.height, image.width)] + if self.model.config.__class__.__name__ == "OneFormerConfig": + if subtask is None: + kwargs = {} + else: + kwargs = {"task_inputs": [subtask]} + inputs = self.image_processor(images=[image], return_tensors="pt", **kwargs) + inputs["task_inputs"] = self.tokenizer( + inputs["task_inputs"], + padding="max_length", + max_length=self.model.config.task_seq_len, + return_tensors=self.framework, + )["input_ids"] + else: + inputs = self.image_processor(images=[image], return_tensors="pt") + inputs["target_size"] = target_size + return inputs + + def _forward(self, model_inputs): + target_size = model_inputs.pop("target_size") + model_outputs = self.model(**model_inputs) + model_outputs["target_size"] = target_size + return model_outputs + + def postprocess( + self, model_outputs, subtask=None, threshold=0.9, mask_threshold=0.5, overlap_mask_area_threshold=0.5 + ): + fn = None + if subtask in {"panoptic", None} and hasattr(self.image_processor, "post_process_panoptic_segmentation"): + fn = self.image_processor.post_process_panoptic_segmentation + elif subtask in {"instance", None} and hasattr(self.image_processor, "post_process_instance_segmentation"): + fn = self.image_processor.post_process_instance_segmentation + + if fn is not None: + outputs = fn( + model_outputs, + threshold=threshold, + mask_threshold=mask_threshold, + overlap_mask_area_threshold=overlap_mask_area_threshold, + target_sizes=model_outputs["target_size"], + )[0] + + annotation = [] + segmentation = outputs["segmentation"] + + for segment in outputs["segments_info"]: + mask = (segmentation == segment["id"]) * 255 + mask = Image.fromarray(mask.numpy().astype(np.uint8), mode="L") + label = self.model.config.id2label[segment["label_id"]] + score = segment["score"] + annotation.append({"score": score, "label": label, "mask": mask}) + + elif subtask in {"semantic", None} and hasattr(self.image_processor, "post_process_semantic_segmentation"): + outputs = self.image_processor.post_process_semantic_segmentation( + model_outputs, target_sizes=model_outputs["target_size"] + )[0] + + annotation = [] + segmentation = outputs.numpy() + labels = np.unique(segmentation) + + for label in labels: + mask = (segmentation == label) * 255 + mask = Image.fromarray(mask.astype(np.uint8), mode="L") + label = self.model.config.id2label[label] + annotation.append({"score": None, "label": label, "mask": mask}) + else: + raise ValueError(f"Subtask {subtask} is not supported for model {type(self.model)}") + return annotation diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/image_to_image.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/image_to_image.py new file mode 100644 index 0000000000000000000000000000000000000000..8c34ee8dd3c80c87dd53c05596a12e2c6bf4c166 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/image_to_image.py @@ -0,0 +1,134 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import List, Union + +import numpy as np + +from ..utils import ( + add_end_docstrings, + is_torch_available, + is_vision_available, + logging, + requires_backends, +) +from .base import Pipeline, build_pipeline_init_args + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_torch_available(): + from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES + +logger = logging.get_logger(__name__) + + +@add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) +class ImageToImagePipeline(Pipeline): + """ + Image to Image pipeline using any `AutoModelForImageToImage`. This pipeline generates an image based on a previous + image input. + + Example: + + ```python + >>> from PIL import Image + >>> import requests + + >>> from transformers import pipeline + + >>> upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64") + >>> img = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw) + >>> img = img.resize((64, 64)) + >>> upscaled_img = upscaler(img) + >>> img.size + (64, 64) + + >>> upscaled_img.size + (144, 144) + ``` + + This image to image pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"image-to-image"`. + + See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=image-to-image). + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + requires_backends(self, "vision") + self.check_model_type(MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES) + + def _sanitize_parameters(self, **kwargs): + preprocess_params = {} + postprocess_params = {} + forward_params = {} + + if "timeout" in kwargs: + preprocess_params["timeout"] = kwargs["timeout"] + if "head_mask" in kwargs: + forward_params["head_mask"] = kwargs["head_mask"] + + return preprocess_params, forward_params, postprocess_params + + def __call__( + self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs + ) -> Union["Image.Image", List["Image.Image"]]: + """ + Transform the image(s) passed as inputs. + + Args: + images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): + The pipeline handles three types of images: + + - A string containing a http link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + The pipeline accepts either a single image or a batch of images, which must then be passed as a string. + Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL + images. + timeout (`float`, *optional*, defaults to None): + The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and + the call may block forever. + + Return: + An image (Image.Image) or a list of images (List["Image.Image"]) containing result(s). If the input is a + single image, the return will be also a single image, if the input is a list of several images, it will + return a list of transformed images. 
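+
+        The returned `PIL.Image` objects behave like any other PIL image; for instance, an upscaled result can
+        be written straight to disk (an illustrative sketch reusing the upscaler above; the filename is a
+        placeholder):
+
+        ```python
+        >>> upscaled_img = upscaler(img)
+        >>> upscaled_img.save("upscaled.png")  # doctest: +SKIP
+        ```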
+ """ + return super().__call__(images, **kwargs) + + def _forward(self, model_inputs): + model_outputs = self.model(**model_inputs) + return model_outputs + + def preprocess(self, image, timeout=None): + image = load_image(image, timeout=timeout) + inputs = self.image_processor(images=[image], return_tensors="pt") + return inputs + + def postprocess(self, model_outputs): + images = [] + if "reconstruction" in model_outputs.keys(): + outputs = model_outputs.reconstruction + for output in outputs: + output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy() + output = np.moveaxis(output, source=0, destination=-1) + output = (output * 255.0).round().astype(np.uint8) # float32 to uint8 + images.append(Image.fromarray(output)) + + return images if len(images) > 1 else images[0] diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/image_to_text.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/image_to_text.py new file mode 100644 index 0000000000000000000000000000000000000000..4a9a3744d841a05da26b978b7432248fb8f4313e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/image_to_text.py @@ -0,0 +1,194 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Union + +from ..utils import ( + add_end_docstrings, + is_tf_available, + is_torch_available, + is_vision_available, + logging, + requires_backends, +) +from .base import Pipeline, build_pipeline_init_args + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_tf_available(): + from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES + +logger = logging.get_logger(__name__) + + +@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True, has_image_processor=True)) +class ImageToTextPipeline(Pipeline): + """ + Image To Text pipeline using a `AutoModelForVision2Seq`. This pipeline predicts a caption for a given image. + + Example: + + ```python + >>> from transformers import pipeline + + >>> captioner = pipeline(model="ydshieh/vit-gpt2-coco-en") + >>> captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") + [{'generated_text': 'two birds are standing next to each other '}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This image to text pipeline can currently be loaded from pipeline() using the following task identifier: + "image-to-text". + + See the list of available models on + [huggingface.co/models](https://huggingface.co/models?pipeline_tag=image-to-text). 
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + requires_backends(self, "vision") + self.check_model_type( + TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES + ) + + def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None, timeout=None): + forward_params = {} + preprocess_params = {} + + if prompt is not None: + preprocess_params["prompt"] = prompt + if timeout is not None: + preprocess_params["timeout"] = timeout + + if max_new_tokens is not None: + forward_params["max_new_tokens"] = max_new_tokens + if generate_kwargs is not None: + if max_new_tokens is not None and "max_new_tokens" in generate_kwargs: + raise ValueError( + "`max_new_tokens` is defined both as an argument and inside `generate_kwargs` argument, please use" + " only 1 version" + ) + forward_params.update(generate_kwargs) + + return preprocess_params, forward_params, {} + + def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs): + """ + Assign labels to the image(s) passed as inputs. + + Args: + images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): + The pipeline handles three types of images: + + - A string containing a HTTP(s) link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + The pipeline accepts either a single image or a batch of images. + + max_new_tokens (`int`, *optional*): + The amount of maximum tokens to generate. By default it will use `generate` default. + + generate_kwargs (`Dict`, *optional*): + Pass it to send all of these arguments directly to `generate` allowing full control of this function. + timeout (`float`, *optional*, defaults to None): + The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and + the call may block forever. + + Return: + A list or a list of list of `dict`: Each result comes as a dictionary with the following key: + + - **generated_text** (`str`) -- The generated text. + """ + return super().__call__(images, **kwargs) + + def preprocess(self, image, prompt=None, timeout=None): + image = load_image(image, timeout=timeout) + + if prompt is not None: + if not isinstance(prompt, str): + raise ValueError( + f"Received an invalid text input, got - {type(prompt)} - but expected a single string. " + "Note also that one single text can be provided for conditional image to text generation." 
+ ) + + model_type = self.model.config.model_type + + if model_type == "git": + model_inputs = self.image_processor(images=image, return_tensors=self.framework) + input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids + input_ids = [self.tokenizer.cls_token_id] + input_ids + input_ids = torch.tensor(input_ids).unsqueeze(0) + model_inputs.update({"input_ids": input_ids}) + + elif model_type == "pix2struct": + model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework) + + elif model_type != "vision-encoder-decoder": + # vision-encoder-decoder does not support conditional generation + model_inputs = self.image_processor(images=image, return_tensors=self.framework) + text_inputs = self.tokenizer(prompt, return_tensors=self.framework) + model_inputs.update(text_inputs) + + else: + raise ValueError(f"Model type {model_type} does not support conditional text generation") + + else: + model_inputs = self.image_processor(images=image, return_tensors=self.framework) + + if self.model.config.model_type == "git" and prompt is None: + model_inputs["input_ids"] = None + + return model_inputs + + def _forward(self, model_inputs, **generate_kwargs): + # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the + # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. + if ( + "input_ids" in model_inputs + and isinstance(model_inputs["input_ids"], list) + and all(x is None for x in model_inputs["input_ids"]) + ): + model_inputs["input_ids"] = None + + # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` + # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas + # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` + # in the `_prepare_model_inputs` method. + inputs = model_inputs.pop(self.model.main_input_name) + model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs) + return model_outputs + + def postprocess(self, model_outputs): + records = [] + for output_ids in model_outputs: + record = { + "generated_text": self.tokenizer.decode( + output_ids, + skip_special_tokens=True, + ) + } + records.append(record) + return records diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/mask_generation.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/mask_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..68d407aff2d4e4853e6b7a5ff3d3c90ed075305a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/mask_generation.py @@ -0,0 +1,285 @@ +from collections import defaultdict +from typing import Optional + +from ..image_utils import load_image +from ..utils import ( + add_end_docstrings, + is_torch_available, + logging, + requires_backends, +) +from .base import ChunkPipeline, build_pipeline_init_args + + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING_NAMES + +logger = logging.get_logger(__name__) + + +@add_end_docstrings( + build_pipeline_init_args(has_image_processor=True), + r""" + points_per_batch (*optional*, int, default to 64): + Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU + memory. 
+ output_bboxes_mask (`bool`, *optional*, defaults to `False`):
+ Whether or not to output the bounding box predictions.
+ output_rle_masks (`bool`, *optional*, defaults to `False`):
+ Whether or not to output the masks in `RLE` format""",
+)
+class MaskGenerationPipeline(ChunkPipeline):
+ """
+ Automatic mask generation for images using `SamForMaskGeneration`. This pipeline predicts binary masks for an
+ image. It is a `ChunkPipeline` because the points are separated into mini-batches in order to
+ avoid OOM issues. Use the `points_per_batch` argument to control the number of points that will be processed at the
+ same time. Default is `64`.
+
+ The pipeline works in 3 steps:
+ 1. `preprocess`: A grid of 1024 points evenly separated is generated along with bounding boxes and point
+ labels.
+ For more details on how the points and bounding boxes are created, check the `_generate_crop_boxes`
+ function. The image is also preprocessed using the `image_processor`. This function `yields` a minibatch of
+ `points_per_batch`.
+
+ 2. `forward`: feeds the outputs of `preprocess` to the model. The image embedding is computed only once.
+ Calls both `self.model.get_image_embeddings` and makes sure that the gradients are not computed, and the
+ tensors and models are on the same device.
+
+ 3. `postprocess`: The most important part of the automatic mask generation happens here. Three steps
+ are performed:
+ - image_processor.postprocess_masks (run on each minibatch loop): takes in the raw output masks,
+ resizes them according
+ to the image size, and transforms them to binary masks.
+ - image_processor.filter_masks (on each minibatch loop): uses both `pred_iou_thresh` and
+ `stability_scores`. Also
+ applies a variety of filters based on non-maximum suppression to remove bad masks.
+ - image_processor.postprocess_masks_for_amg applies NMS (non-maximum suppression) on the masks to keep only
+ the relevant ones.
+
+ Example:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> generator = pipeline(model="facebook/sam-vit-base", task="mask-generation")
+ >>> outputs = generator(
+ ... "http://images.cocodataset.org/val2017/000000039769.jpg",
+ ... )
+
+ >>> outputs = generator(
+ ... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", points_per_batch=128
+ ... )
+ ```
+
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
+
+ This segmentation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
+ `"mask-generation"`.
+
+ See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=mask-generation).
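+
+ For illustration, a minimal sketch (untested) of requesting the optional outputs documented above; it assumes the
+ same `facebook/sam-vit-base` checkpoint and only adds the `output_rle_mask` and `output_bboxes_mask` flags, which
+ put extra `rle_mask` and `bounding_boxes` keys into the returned dictionary:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> generator = pipeline(model="facebook/sam-vit-base", task="mask-generation")
+ >>> outputs = generator(
+ ... "http://images.cocodataset.org/val2017/000000039769.jpg",
+ ... points_per_batch=64,
+ ... output_rle_mask=True,
+ ... output_bboxes_mask=True,
+ ... )
+ >>> masks, scores = outputs["masks"], outputs["scores"]
+ ```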
+ """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + requires_backends(self, "vision") + requires_backends(self, "torch") + + if self.framework != "pt": + raise ValueError(f"The {self.__class__} is only available in PyTorch.") + + self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING_NAMES) + + def _sanitize_parameters(self, **kwargs): + preprocess_kwargs = {} + postprocess_kwargs = {} + forward_params = {} + # preprocess args + if "points_per_batch" in kwargs: + preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"] + if "points_per_crop" in kwargs: + preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"] + if "crops_n_layers" in kwargs: + preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"] + if "crop_overlap_ratio" in kwargs: + preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"] + if "crop_n_points_downscale_factor" in kwargs: + preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"] + if "timeout" in kwargs: + preprocess_kwargs["timeout"] = kwargs["timeout"] + # postprocess args + if "pred_iou_thresh" in kwargs: + forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"] + if "stability_score_offset" in kwargs: + forward_params["stability_score_offset"] = kwargs["stability_score_offset"] + if "mask_threshold" in kwargs: + forward_params["mask_threshold"] = kwargs["mask_threshold"] + if "stability_score_thresh" in kwargs: + forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"] + if "crops_nms_thresh" in kwargs: + postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"] + if "output_rle_mask" in kwargs: + postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"] + if "output_bboxes_mask" in kwargs: + postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"] + return preprocess_kwargs, forward_params, postprocess_kwargs + + def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs): + """ + Generates binary segmentation masks + + Args: + inputs (`np.ndarray` or `bytes` or `str` or `dict`): + Image or list of images. + mask_threshold (`float`, *optional*, defaults to 0.0): + Threshold to use when turning the predicted masks into binary values. + pred_iou_thresh (`float`, *optional*, defaults to 0.88): + A filtering threshold in `[0,1]` applied on the model's predicted mask quality. + stability_score_thresh (`float`, *optional*, defaults to 0.95): + A filtering threshold in `[0,1]`, using the stability of the mask under changes to the cutoff used to + binarize the model's mask predictions. + stability_score_offset (`int`, *optional*, defaults to 1): + The amount to shift the cutoff when calculated the stability score. + crops_nms_thresh (`float`, *optional*, defaults to 0.7): + The box IoU cutoff used by non-maximal suppression to filter duplicate masks. + crops_n_layers (`int`, *optional*, defaults to 0): + If `crops_n_layers>0`, mask prediction will be run again on crops of the image. Sets the number of + layers to run, where each layer has 2**i_layer number of image crops. + crop_overlap_ratio (`float`, *optional*, defaults to `512 / 1500`): + Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of + the image length. Later layers with more crops scale down this overlap. + crop_n_points_downscale_factor (`int`, *optional*, defaults to `1`): + The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. 
+ timeout (`float`, *optional*, defaults to None):
+ The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
+ the call may block forever.
+
+ Return:
+ `Dict`: A dictionary with the following keys:
+ - **masks** -- The binary masks predicted for the image, resized to the original image size, after
+ duplicates have been removed by non-maximum suppression.
+ - **scores** -- The model's predicted quality (IoU) score for each returned mask.
+ - **rle_mask** (*optional*) -- The masks in `RLE` format, only present when `output_rle_mask=True`.
+ - **bounding_boxes** (*optional*) -- The bounding boxes of the masks, only present when
+ `output_bboxes_mask=True`.
+
+ """
+ return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
+
+ def preprocess(
+ self,
+ image,
+ points_per_batch=64,
+ crops_n_layers: int = 0,
+ crop_overlap_ratio: float = 512 / 1500,
+ points_per_crop: Optional[int] = 32,
+ crop_n_points_downscale_factor: Optional[int] = 1,
+ timeout: Optional[float] = None,
+ ):
+ image = load_image(image, timeout=timeout)
+ target_size = self.image_processor.size["longest_edge"]
+ crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
+ image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
+ )
+ model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
+
+ with self.device_placement():
+ if self.framework == "pt":
+ inference_context = self.get_inference_context()
+ with inference_context():
+ model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
+ image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
+ model_inputs["image_embeddings"] = image_embeddings
+
+ n_points = grid_points.shape[1]
+ points_per_batch = points_per_batch if points_per_batch is not None else n_points
+
+ if points_per_batch <= 0:
+ raise ValueError(
+ "Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. 
" + "To return all points at once, set points_per_batch to None" + ) + + for i in range(0, n_points, points_per_batch): + batched_points = grid_points[:, i : i + points_per_batch, :, :] + labels = input_labels[:, i : i + points_per_batch] + is_last = i == n_points - points_per_batch + yield { + "input_points": batched_points, + "input_labels": labels, + "input_boxes": crop_boxes, + "is_last": is_last, + **model_inputs, + } + + def _forward( + self, + model_inputs, + pred_iou_thresh=0.88, + stability_score_thresh=0.95, + mask_threshold=0, + stability_score_offset=1, + ): + input_boxes = model_inputs.pop("input_boxes") + is_last = model_inputs.pop("is_last") + original_sizes = model_inputs.pop("original_sizes").tolist() + reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist() + + model_outputs = self.model(**model_inputs) + + # post processing happens here in order to avoid CPU GPU copies of ALL the masks + low_resolution_masks = model_outputs["pred_masks"] + masks = self.image_processor.post_process_masks( + low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False + ) + iou_scores = model_outputs["iou_scores"] + masks, iou_scores, boxes = self.image_processor.filter_masks( + masks[0], + iou_scores[0], + original_sizes[0], + input_boxes[0], + pred_iou_thresh, + stability_score_thresh, + mask_threshold, + stability_score_offset, + ) + return { + "masks": masks, + "is_last": is_last, + "boxes": boxes, + "iou_scores": iou_scores, + } + + def postprocess( + self, + model_outputs, + output_rle_mask=False, + output_bboxes_mask=False, + crops_nms_thresh=0.7, + ): + all_scores = [] + all_masks = [] + all_boxes = [] + for model_output in model_outputs: + all_scores.append(model_output.pop("iou_scores")) + all_masks.extend(model_output.pop("masks")) + all_boxes.append(model_output.pop("boxes")) + + all_scores = torch.cat(all_scores) + all_boxes = torch.cat(all_boxes) + output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation( + all_masks, all_scores, all_boxes, crops_nms_thresh + ) + + extra = defaultdict(list) + for output in model_outputs: + for k, v in output.items(): + extra[k].append(v) + + optional = {} + if output_rle_mask: + optional["rle_mask"] = rle_mask + + if output_bboxes_mask: + optional["bounding_boxes"] = bounding_boxes + + return {"masks": output_masks, "scores": iou_scores, **optional, **extra} diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/object_detection.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/object_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..d6ae63f4bd19f384ed660f6648362bd6e5b47965 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/object_detection.py @@ -0,0 +1,187 @@ +from typing import Any, Dict, List, Union + +from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends +from .base import Pipeline, build_pipeline_init_args + + +if is_vision_available(): + from ..image_utils import load_image + + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import ( + MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, + ) + +logger = logging.get_logger(__name__) + + +Prediction = Dict[str, Any] +Predictions = List[Prediction] + + +@add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) +class ObjectDetectionPipeline(Pipeline): + """ 
+ Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of objects + and their classes. + + Example: + + ```python + >>> from transformers import pipeline + + >>> detector = pipeline(model="facebook/detr-resnet-50") + >>> detector("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") + [{'score': 0.997, 'label': 'bird', 'box': {'xmin': 69, 'ymin': 171, 'xmax': 396, 'ymax': 507}}, {'score': 0.999, 'label': 'bird', 'box': {'xmin': 398, 'ymin': 105, 'xmax': 767, 'ymax': 507}}] + + >>> # x, y are expressed relative to the top left hand corner. + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"object-detection"`. + + See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=object-detection). + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + if self.framework == "tf": + raise ValueError(f"The {self.__class__} is only available in PyTorch.") + + requires_backends(self, "vision") + mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES.copy() + mapping.update(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES) + self.check_model_type(mapping) + + def _sanitize_parameters(self, **kwargs): + preprocess_params = {} + if "timeout" in kwargs: + preprocess_params["timeout"] = kwargs["timeout"] + postprocess_kwargs = {} + if "threshold" in kwargs: + postprocess_kwargs["threshold"] = kwargs["threshold"] + return preprocess_params, {}, postprocess_kwargs + + def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]: + """ + Detect objects (bounding boxes & classes) in the image(s) passed as inputs. + + Args: + images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): + The pipeline handles three types of images: + + - A string containing an HTTP(S) link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the + same format: all as HTTP(S) links, all as local paths, or all as PIL images. + threshold (`float`, *optional*, defaults to 0.9): + The probability necessary to make a prediction. + timeout (`float`, *optional*, defaults to None): + The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and + the call may block forever. + + Return: + A list of dictionaries or a list of list of dictionaries containing the result. If the input is a single + image, will return a list of dictionaries, if the input is a list of several images, will return a list of + list of dictionaries corresponding to each image. + + The dictionaries contain the following keys: + + - **label** (`str`) -- The class label identified by the model. + - **score** (`float`) -- The score attributed by the model for that label. + - **box** (`List[Dict[str, int]]`) -- The bounding box of detected object in image's original size. 
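+
+ For illustration, a minimal sketch (untested) of reading these fields, reusing the checkpoint and image from the
+ example above; the `threshold` value is only an arbitrary example:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> detector = pipeline(model="facebook/detr-resnet-50", task="object-detection")
+ >>> for prediction in detector(
+ ... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", threshold=0.95
+ ... ):
+ ... box = prediction["box"]
+ ... print(prediction["label"], round(prediction["score"], 3), box["xmin"], box["ymin"], box["xmax"], box["ymax"])
+ ```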
+ """ + + return super().__call__(*args, **kwargs) + + def preprocess(self, image, timeout=None): + image = load_image(image, timeout=timeout) + target_size = torch.IntTensor([[image.height, image.width]]) + inputs = self.image_processor(images=[image], return_tensors="pt") + if self.tokenizer is not None: + inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt") + inputs["target_size"] = target_size + return inputs + + def _forward(self, model_inputs): + target_size = model_inputs.pop("target_size") + outputs = self.model(**model_inputs) + model_outputs = outputs.__class__({"target_size": target_size, **outputs}) + if self.tokenizer is not None: + model_outputs["bbox"] = model_inputs["bbox"] + return model_outputs + + def postprocess(self, model_outputs, threshold=0.9): + target_size = model_outputs["target_size"] + if self.tokenizer is not None: + # This is a LayoutLMForTokenClassification variant. + # The OCR got the boxes and the model classified the words. + height, width = target_size[0].tolist() + + def unnormalize(bbox): + return self._get_bounding_box( + torch.Tensor( + [ + (width * bbox[0] / 1000), + (height * bbox[1] / 1000), + (width * bbox[2] / 1000), + (height * bbox[3] / 1000), + ] + ) + ) + + scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1) + labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()] + boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)] + keys = ["score", "label", "box"] + annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold] + else: + # This is a regular ForObjectDetectionModel + raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size) + raw_annotation = raw_annotations[0] + scores = raw_annotation["scores"] + labels = raw_annotation["labels"] + boxes = raw_annotation["boxes"] + + raw_annotation["scores"] = scores.tolist() + raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels] + raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes] + + # {"scores": [...], ...} --> [{"score":x, ...}, ...] + keys = ["score", "label", "box"] + annotation = [ + dict(zip(keys, vals)) + for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"]) + ] + + return annotation + + def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]: + """ + Turns list [xmin, xmax, ymin, ymax] into dict { "xmin": xmin, ... } + + Args: + box (`torch.Tensor`): Tensor containing the coordinates in corners format. + + Returns: + bbox (`Dict[str, int]`): Dict containing the coordinates in corners format. 
+ """ + if self.framework != "pt": + raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.") + xmin, ymin, xmax, ymax = box.int().tolist() + bbox = { + "xmin": xmin, + "ymin": ymin, + "xmax": xmax, + "ymax": ymax, + } + return bbox diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/pt_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/pt_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..652d1eb544ef93fe1fa46f3d192cd735cd1d1819 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/pt_utils.py @@ -0,0 +1,321 @@ +import numpy as np +import torch +from torch.utils.data import Dataset, IterableDataset + +from ..utils.generic import ModelOutput + + +class PipelineDataset(Dataset): + def __init__(self, dataset, process, params): + self.dataset = dataset + self.process = process + self.params = params + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, i): + item = self.dataset[i] + processed = self.process(item, **self.params) + return processed + + +class PipelineIterator(IterableDataset): + def __init__(self, loader, infer, params, loader_batch_size=None): + """ + Roughly equivalent to + + ``` + for item in loader: + yield infer(item, **params) + ``` + + Arguments: + loader (`torch.utils.data.DataLoader` or any iterator): + The iterator that will be used to apply `infer` on. + infer (any function): + The function to apply of each element of `loader`. + params (`dict`): + The parameters passed to `infer` along with every item + loader_batch_size (`int`, *optional*): + If specified, the items of `loader` are supposed to come as batch, and are loader_batched here + making it roughly behave as + + + ``` + for items in loader: + for i in loader_batch_size: + item = items[i] + yield infer(item, **params) + ```""" + self.loader = loader + self.infer = infer + self.params = params + if loader_batch_size == 1: + # Let's spare some time by deactivating altogether + loader_batch_size = None + self.loader_batch_size = loader_batch_size + + # Internal bookkeeping + self._loader_batch_index = None + self._loader_batch_data = None + + def __len__(self): + return len(self.loader) + + def __iter__(self): + self.iterator = iter(self.loader) + return self + + def loader_batch_item(self): + """ + Return item located at `loader_batch_index` within the current `loader_batch_data`. + """ + if isinstance(self._loader_batch_data, torch.Tensor): + # Batch data is simple tensor, just fetch the slice + result = self._loader_batch_data[self._loader_batch_index].unsqueeze(0) + else: + # Batch data is assumed to be BaseModelOutput (or dict) + loader_batched = {} + for k, element in self._loader_batch_data.items(): + if isinstance(element, ModelOutput): + # Convert ModelOutput to tuple first + element = element.to_tuple() + if isinstance(element[0], torch.Tensor): + loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element) + elif isinstance(element[0], np.ndarray): + loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element) + continue + if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple): + # Those are stored as lists of tensors so need specific unbatching. 
+ if isinstance(element[0], torch.Tensor): + loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element) + elif isinstance(element[0], np.ndarray): + loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element) + continue + if element is None: + # This can happen for optional data that get passed around + loader_batched[k] = None + elif isinstance(element[self._loader_batch_index], torch.Tensor): + # Take correct batch data, but make it looked like batch_size=1 + # For compatibility with other methods within transformers + + loader_batched[k] = element[self._loader_batch_index].unsqueeze(0) + elif isinstance(element[self._loader_batch_index], np.ndarray): + # Take correct batch data, but make it looked like batch_size=1 + # For compatibility with other methods within transformers + loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0) + else: + # This is typically a list, so no need to `unsqueeze`. + loader_batched[k] = element[self._loader_batch_index] + # Recreate the element by reusing the original class to make it look + # batch_size=1 + result = self._loader_batch_data.__class__(loader_batched) + self._loader_batch_index += 1 + return result + + def __next__(self): + if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: + # We are currently unrolling a batch so we just need to return + # the current item within a batch + return self.loader_batch_item() + + # We're out of items within a batch + item = next(self.iterator) + processed = self.infer(item, **self.params) + # We now have a batch of "inferred things". + if self.loader_batch_size is not None: + # Try to infer the size of the batch + if isinstance(processed, torch.Tensor): + first_tensor = processed + elif isinstance(processed, tuple): + first_tensor = processed[0] + else: + key = list(processed.keys())[0] + first_tensor = processed[key] + + if isinstance(first_tensor, list): + observed_batch_size = len(first_tensor) + else: + observed_batch_size = first_tensor.shape[0] + if 0 < observed_batch_size < self.loader_batch_size: + # could be last batch so we can't unroll as many + # elements. + self.loader_batch_size = observed_batch_size + # Setting internal index to unwrap the batch + self._loader_batch_data = processed[0] if isinstance(processed, tuple) else processed + self._loader_batch_index = 0 + return self.loader_batch_item() + else: + # We're not unrolling batches + return processed + + +class PipelineChunkIterator(PipelineIterator): + def __init__(self, loader, infer, params, loader_batch_size=None): + """ + Roughly equivalent to + + ``` + for iterator in loader: + for item in iterator: + yield infer(item, **params) + ``` + + Arguments: + loader (`torch.utils.data.DataLoader` or any iterator): + The iterator that will be used to apply `infer` on. + infer (any function): + The function to apply of each element of `loader`. + params (`dict`): + The parameters passed to `infer` along with every item + """ + super().__init__(loader, infer, params) + + def __iter__(self): + self.iterator = iter(self.loader) + self.subiterator = None + return self + + def __next__(self): + if self.subiterator is None: + "Subiterator None means we haven't started a `preprocess` iterator. 
so start it" + self.subiterator = self.infer(next(self.iterator), **self.params) + try: + # Try to return next item + processed = next(self.subiterator) + except StopIteration: + # When a preprocess iterator ends, we can start lookig at the next item + # ChunkIterator will keep feeding until ALL elements of iterator + # all have created their subiterator and have been iterating against. + # + # Another way to look at it, is we're basically flattening lists of lists + # into a single list, but with generators + self.subiterator = self.infer(next(self.iterator), **self.params) + processed = next(self.subiterator) + return processed + + +class PipelinePackIterator(PipelineIterator): + """ + Roughly equivalent to + + ``` + packed = [] + for item in loader: + packed.append(item) + if item["is_last"]: + yield packed + packed = [] + ``` + + but it also handles cases where `item` are batched (meaning it's a dict of Tensor with first dimension > 1. In + that case it does + + ``` + packed = [] + for batch in loader: + # item is batched + for item in batch: + packed.append(item) + if item["is_last"]: + yield packed + packed = [] + ``` + + Arguments: + loader (`torch.utils.data.DataLoader` or any iterator): + The iterator that will be used to apply `infer` on. + infer (any function): + The function to apply of each element of `loader`. + params (`dict`): + The parameters passed to `infer` along with every item + loader_batch_size (`int`, *optional*): + If specified, the items of `loader` are supposed to come as batch, and are loader_batched here making + it roughly behave as + + + ``` + for items in loader: + for i in loader_batch_size: + item = items[i] + yield infer(item, **params) + ```""" + + def __iter__(self): + self.iterator = iter(self.loader) + return self + + def __next__(self): + # Extremely similar to PipelineIterator in its unpacking mechanism + # BUT, we have an extra required item which is the presence of `is_last` + # That is because everything is flattened by `PipelineChunkIterator` we + # need to keep track of how to regroup here in the original `process` + # boundaries so that `process` and `postprocess` see the same data. + + # This iterator accumulates items (possibly while unbatching) until it + # its a `is_last` and then just passes it on to the caller. + is_last = False + accumulator = [] + if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: + while self._loader_batch_index < self.loader_batch_size: + item = self.loader_batch_item() + is_last = item.pop("is_last") + accumulator.append(item) + if is_last: + return accumulator + + while not is_last: + processed = self.infer(next(self.iterator), **self.params) + if self.loader_batch_size is not None: + if isinstance(processed, torch.Tensor): + first_tensor = processed + else: + key = list(processed.keys())[0] + first_tensor = processed[key] + if isinstance(first_tensor, list): + observed_batch_size = len(first_tensor) + else: + observed_batch_size = first_tensor.shape[0] + if 0 < observed_batch_size < self.loader_batch_size: + # could be last batch so we can't unroll as many + # elements. 
+ self.loader_batch_size = observed_batch_size + self._loader_batch_data = processed + self._loader_batch_index = 0 + while self._loader_batch_index < self.loader_batch_size: + item = self.loader_batch_item() + is_last = item.pop("is_last") + accumulator.append(item) + if is_last: + return accumulator + else: + item = processed + is_last = item.pop("is_last") + accumulator.append(item) + return accumulator + + +class KeyDataset(Dataset): + def __init__(self, dataset: Dataset, key: str): + self.dataset = dataset + self.key = key + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, i): + return self.dataset[i][self.key] + + +class KeyPairDataset(Dataset): + def __init__(self, dataset: Dataset, key1: str, key2: str): + self.dataset = dataset + self.key1 = key1 + self.key2 = key2 + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, i): + return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]} diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/table_question_answering.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/table_question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..702a47b7c3cbeddadebd8a98d5ad644f7608ec6d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/table_question_answering.py @@ -0,0 +1,432 @@ +import collections +import types + +import numpy as np + +from ..utils import ( + add_end_docstrings, + is_tf_available, + is_torch_available, + requires_backends, +) +from .base import ArgumentHandler, Dataset, Pipeline, PipelineException, build_pipeline_init_args + + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import ( + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, + MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, + ) + +if is_tf_available(): + import tensorflow as tf + + from ..models.auto.modeling_tf_auto import ( + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, + TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, + ) + + +class TableQuestionAnsweringArgumentHandler(ArgumentHandler): + """ + Handles arguments for the TableQuestionAnsweringPipeline + """ + + def __call__(self, table=None, query=None, **kwargs): + # Returns tqa_pipeline_inputs of shape: + # [ + # {"table": pd.DataFrame, "query": List[str]}, + # ..., + # {"table": pd.DataFrame, "query" : List[str]} + # ] + requires_backends(self, "pandas") + import pandas as pd + + if table is None: + raise ValueError("Keyword argument `table` cannot be None.") + elif query is None: + if isinstance(table, dict) and table.get("query") is not None and table.get("table") is not None: + tqa_pipeline_inputs = [table] + elif isinstance(table, list) and len(table) > 0: + if not all(isinstance(d, dict) for d in table): + raise ValueError( + f"Keyword argument `table` should be a list of dict, but is {(type(d) for d in table)}" + ) + + if table[0].get("query") is not None and table[0].get("table") is not None: + tqa_pipeline_inputs = table + else: + raise ValueError( + "If keyword argument `table` is a list of dictionaries, each dictionary should have a `table`" + f" and `query` key, but only dictionary has keys {table[0].keys()} `table` and `query` keys." + ) + elif Dataset is not None and isinstance(table, Dataset) or isinstance(table, types.GeneratorType): + return table + else: + raise ValueError( + "Invalid input. 
Keyword argument `table` should be either of type `dict` or `list`, but " + f"is {type(table)})" + ) + else: + tqa_pipeline_inputs = [{"table": table, "query": query}] + + for tqa_pipeline_input in tqa_pipeline_inputs: + if not isinstance(tqa_pipeline_input["table"], pd.DataFrame): + if tqa_pipeline_input["table"] is None: + raise ValueError("Table cannot be None.") + + tqa_pipeline_input["table"] = pd.DataFrame(tqa_pipeline_input["table"]) + + return tqa_pipeline_inputs + + +@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True)) +class TableQuestionAnsweringPipeline(Pipeline): + """ + Table Question Answering pipeline using a `ModelForTableQuestionAnswering`. This pipeline is only available in + PyTorch. + + Example: + + ```python + >>> from transformers import pipeline + + >>> oracle = pipeline(model="google/tapas-base-finetuned-wtq") + >>> table = { + ... "Repository": ["Transformers", "Datasets", "Tokenizers"], + ... "Stars": ["36542", "4512", "3934"], + ... "Contributors": ["651", "77", "34"], + ... "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], + ... } + >>> oracle(query="How many stars does the transformers repository have?", table=table) + {'answer': 'AVERAGE > 36542', 'coordinates': [(0, 1)], 'cells': ['36542'], 'aggregator': 'AVERAGE'} + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This tabular question answering pipeline can currently be loaded from [`pipeline`] using the following task + identifier: `"table-question-answering"`. + + The models that this pipeline can use are models that have been fine-tuned on a tabular question answering task. + See the up-to-date list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=table-question-answering). + """ + + default_input_names = "table,query" + + def __init__(self, args_parser=TableQuestionAnsweringArgumentHandler(), *args, **kwargs): + super().__init__(*args, **kwargs) + self._args_parser = args_parser + + if self.framework == "tf": + mapping = TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES.copy() + mapping.update(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES) + else: + mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES.copy() + mapping.update(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES) + self.check_model_type(mapping) + + self.aggregate = bool(getattr(self.model.config, "aggregation_labels", None)) and bool( + getattr(self.model.config, "num_aggregation_labels", None) + ) + self.type = "tapas" if hasattr(self.model.config, "aggregation_labels") else None + + def batch_inference(self, **inputs): + return self.model(**inputs) + + def sequential_inference(self, **inputs): + """ + Inference used for models that need to process sequences in a sequential fashion, like the SQA models which + handle conversational query related to a table. + """ + if self.framework == "pt": + all_logits = [] + all_aggregations = [] + prev_answers = None + batch_size = inputs["input_ids"].shape[0] + + input_ids = inputs["input_ids"].to(self.device) + attention_mask = inputs["attention_mask"].to(self.device) + token_type_ids = inputs["token_type_ids"].to(self.device) + token_type_ids_example = None + + for index in range(batch_size): + # If sequences have already been processed, the token type IDs will be created according to the previous + # answer. 
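+ # Layout note: with the TAPAS tokenizer, `token_type_ids` carries 7 token type dimensions per token;
+ # the ones used below are index 0 (segment id), index 1 (column id), index 2 (row id) and index 3
+ # (`prev_labels`, which feeds the previous answer back in for conversational follow-up queries).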
+ if prev_answers is not None: + prev_labels_example = token_type_ids_example[:, 3] # shape (seq_len,) + model_labels = np.zeros_like(prev_labels_example.cpu().numpy()) # shape (seq_len,) + + token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) + for i in range(model_labels.shape[0]): + segment_id = token_type_ids_example[:, 0].tolist()[i] + col_id = token_type_ids_example[:, 1].tolist()[i] - 1 + row_id = token_type_ids_example[:, 2].tolist()[i] - 1 + + if row_id >= 0 and col_id >= 0 and segment_id == 1: + model_labels[i] = int(prev_answers[(col_id, row_id)]) + + token_type_ids_example[:, 3] = torch.from_numpy(model_labels).type(torch.long).to(self.device) + + input_ids_example = input_ids[index] + attention_mask_example = attention_mask[index] # shape (seq_len,) + token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) + outputs = self.model( + input_ids=input_ids_example.unsqueeze(0), + attention_mask=attention_mask_example.unsqueeze(0), + token_type_ids=token_type_ids_example.unsqueeze(0), + ) + logits = outputs.logits + + if self.aggregate: + all_aggregations.append(outputs.logits_aggregation) + + all_logits.append(logits) + + dist_per_token = torch.distributions.Bernoulli(logits=logits) + probabilities = dist_per_token.probs * attention_mask_example.type(torch.float32).to( + dist_per_token.probs.device + ) + + coords_to_probs = collections.defaultdict(list) + for i, p in enumerate(probabilities.squeeze().tolist()): + segment_id = token_type_ids_example[:, 0].tolist()[i] + col = token_type_ids_example[:, 1].tolist()[i] - 1 + row = token_type_ids_example[:, 2].tolist()[i] - 1 + if col >= 0 and row >= 0 and segment_id == 1: + coords_to_probs[(col, row)].append(p) + + prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs} + + logits_batch = torch.cat(tuple(all_logits), 0) + + return (logits_batch,) if not self.aggregate else (logits_batch, torch.cat(tuple(all_aggregations), 0)) + else: + all_logits = [] + all_aggregations = [] + prev_answers = None + batch_size = inputs["input_ids"].shape[0] + + input_ids = inputs["input_ids"] + attention_mask = inputs["attention_mask"] + token_type_ids = inputs["token_type_ids"].numpy() + token_type_ids_example = None + + for index in range(batch_size): + # If sequences have already been processed, the token type IDs will be created according to the previous + # answer. 
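+ # Same bookkeeping as the PyTorch branch above, except that `token_type_ids` was converted to a NumPy
+ # array, so the `prev_labels` column (index 3) can simply be assigned in place.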
+ if prev_answers is not None: + prev_labels_example = token_type_ids_example[:, 3] # shape (seq_len,) + model_labels = np.zeros_like(prev_labels_example, dtype=np.int32) # shape (seq_len,) + + token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) + for i in range(model_labels.shape[0]): + segment_id = token_type_ids_example[:, 0].tolist()[i] + col_id = token_type_ids_example[:, 1].tolist()[i] - 1 + row_id = token_type_ids_example[:, 2].tolist()[i] - 1 + + if row_id >= 0 and col_id >= 0 and segment_id == 1: + model_labels[i] = int(prev_answers[(col_id, row_id)]) + + token_type_ids_example[:, 3] = model_labels + + input_ids_example = input_ids[index] + attention_mask_example = attention_mask[index] # shape (seq_len,) + token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) + outputs = self.model( + input_ids=np.expand_dims(input_ids_example, axis=0), + attention_mask=np.expand_dims(attention_mask_example, axis=0), + token_type_ids=np.expand_dims(token_type_ids_example, axis=0), + ) + logits = outputs.logits + + if self.aggregate: + all_aggregations.append(outputs.logits_aggregation) + + all_logits.append(logits) + + probabilities = tf.math.sigmoid(tf.cast(logits, tf.float32)) * tf.cast( + attention_mask_example, tf.float32 + ) + + coords_to_probs = collections.defaultdict(list) + token_type_ids_example = token_type_ids_example + for i, p in enumerate(tf.squeeze(probabilities).numpy().tolist()): + segment_id = token_type_ids_example[:, 0].tolist()[i] + col = token_type_ids_example[:, 1].tolist()[i] - 1 + row = token_type_ids_example[:, 2].tolist()[i] - 1 + if col >= 0 and row >= 0 and segment_id == 1: + coords_to_probs[(col, row)].append(p) + + prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs} + + logits_batch = tf.concat(tuple(all_logits), 0) + + return (logits_batch,) if not self.aggregate else (logits_batch, tf.concat(tuple(all_aggregations), 0)) + + def __call__(self, *args, **kwargs): + r""" + Answers queries according to a table. The pipeline accepts several types of inputs which are detailed below: + + - `pipeline(table, query)` + - `pipeline(table, [query])` + - `pipeline(table=table, query=query)` + - `pipeline(table=table, query=[query])` + - `pipeline({"table": table, "query": query})` + - `pipeline({"table": table, "query": [query]})` + - `pipeline([{"table": table, "query": query}, {"table": table, "query": query}])` + + The `table` argument should be a dict or a DataFrame built from that dict, containing the whole table: + + Example: + + ```python + data = { + "actors": ["brad pitt", "leonardo di caprio", "george clooney"], + "age": ["56", "45", "59"], + "number of movies": ["87", "53", "69"], + "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], + } + ``` + + This dictionary can be passed in as such, or can be converted to a pandas DataFrame: + + Example: + + ```python + import pandas as pd + + table = pd.DataFrame.from_dict(data) + ``` + + Args: + table (`pd.DataFrame` or `Dict`): + Pandas DataFrame or dictionary that will be converted to a DataFrame containing all the table values. + See above for an example of dictionary. + query (`str` or `List[str]`): + Query or list of queries that will be sent to the model alongside the table. + sequential (`bool`, *optional*, defaults to `False`): + Whether to do inference sequentially or as a batch. 
Batching is faster, but models like SQA require the
+ inference to be done sequentially to extract relations within sequences, given their conversational
+ nature.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
+ Activates and controls padding. Accepts the following values:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+
+ truncation (`bool`, `str` or [`TapasTruncationStrategy`], *optional*, defaults to `False`):
+ Activates and controls truncation. Accepts the following values:
+
+ - `True` or `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length`
+ or to the maximum acceptable input length for the model if that argument is not provided. This will
+ truncate row by row, removing rows from the table.
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
+ greater than the model maximum admissible input size).
+
+
+ Return:
+ A dictionary or a list of dictionaries containing results: Each result is a dictionary with the following
+ keys:
+
+ - **answer** (`str`) -- The answer to the query given the table. If there is an aggregator, the answer will
+ be preceded by `AGGREGATOR >`.
+ - **coordinates** (`List[Tuple[int, int]]`) -- Coordinates of the cells of the answers.
+ - **cells** (`List[str]`) -- List of strings made up of the answer cell values.
+ - **aggregator** (`str`) -- If the model has an aggregator, this returns the aggregator.
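+
+ For illustration, a minimal sketch (untested) of passing several queries at once, reusing the checkpoint and a
+ trimmed version of the table from the example above; `sequential=True` is only needed for conversational
+ checkpoints such as the SQA models:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> oracle = pipeline(model="google/tapas-base-finetuned-wtq")
+ >>> table = {
+ ... "Repository": ["Transformers", "Datasets", "Tokenizers"],
+ ... "Stars": ["36542", "4512", "3934"],
+ ... }
+ >>> answers = oracle(
+ ... table=table,
+ ... query=["How many stars does the transformers repository have?", "Which repository has the least stars?"],
+ ... )
+ ```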
+ """ + pipeline_inputs = self._args_parser(*args, **kwargs) + + results = super().__call__(pipeline_inputs, **kwargs) + if len(results) == 1: + return results[0] + return results + + def _sanitize_parameters(self, sequential=None, padding=None, truncation=None, **kwargs): + preprocess_params = {} + if padding is not None: + preprocess_params["padding"] = padding + if truncation is not None: + preprocess_params["truncation"] = truncation + + forward_params = {} + if sequential is not None: + forward_params["sequential"] = sequential + return preprocess_params, forward_params, {} + + def preprocess(self, pipeline_input, sequential=None, padding=True, truncation=None): + if truncation is None: + if self.type == "tapas": + truncation = "drop_rows_to_fit" + else: + truncation = "do_not_truncate" + + table, query = pipeline_input["table"], pipeline_input["query"] + if table.empty: + raise ValueError("table is empty") + if query is None or query == "": + raise ValueError("query is empty") + inputs = self.tokenizer(table, query, return_tensors=self.framework, truncation=truncation, padding=padding) + inputs["table"] = table + return inputs + + def _forward(self, model_inputs, sequential=False, **generate_kwargs): + table = model_inputs.pop("table") + + if self.type == "tapas": + if sequential: + outputs = self.sequential_inference(**model_inputs) + else: + outputs = self.batch_inference(**model_inputs) + else: + outputs = self.model.generate(**model_inputs, **generate_kwargs) + model_outputs = {"model_inputs": model_inputs, "table": table, "outputs": outputs} + return model_outputs + + def postprocess(self, model_outputs): + inputs = model_outputs["model_inputs"] + table = model_outputs["table"] + outputs = model_outputs["outputs"] + if self.type == "tapas": + if self.aggregate: + logits, logits_agg = outputs[:2] + predictions = self.tokenizer.convert_logits_to_predictions(inputs, logits, logits_agg) + answer_coordinates_batch, agg_predictions = predictions + aggregators = {i: self.model.config.aggregation_labels[pred] for i, pred in enumerate(agg_predictions)} + + no_agg_label_index = self.model.config.no_aggregation_label_index + aggregators_prefix = { + i: aggregators[i] + " > " for i, pred in enumerate(agg_predictions) if pred != no_agg_label_index + } + else: + logits = outputs[0] + predictions = self.tokenizer.convert_logits_to_predictions(inputs, logits) + answer_coordinates_batch = predictions[0] + aggregators = {} + aggregators_prefix = {} + answers = [] + for index, coordinates in enumerate(answer_coordinates_batch): + cells = [table.iat[coordinate] for coordinate in coordinates] + aggregator = aggregators.get(index, "") + aggregator_prefix = aggregators_prefix.get(index, "") + answer = { + "answer": aggregator_prefix + ", ".join(cells), + "coordinates": coordinates, + "cells": [table.iat[coordinate] for coordinate in coordinates], + } + if aggregator: + answer["aggregator"] = aggregator + + answers.append(answer) + if len(answer) == 0: + raise PipelineException("Empty answer") + else: + answers = [{"answer": answer} for answer in self.tokenizer.batch_decode(outputs, skip_special_tokens=True)] + + return answers if len(answers) > 1 else answers[0] diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/text2text_generation.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/text2text_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..bb8abdfcf7f500d314fc7d73a031e6b563a39b43 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/text2text_generation.py @@ -0,0 +1,371 @@ +import enum +import warnings + +from ..tokenization_utils import TruncationStrategy +from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging +from .base import Pipeline, build_pipeline_init_args + + +if is_tf_available(): + import tensorflow as tf + + from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES + +if is_torch_available(): + from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES + +logger = logging.get_logger(__name__) + + +class ReturnType(enum.Enum): + TENSORS = 0 + TEXT = 1 + + +@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True)) +class Text2TextGenerationPipeline(Pipeline): + """ + Pipeline for text to text generation using seq2seq models. + + Example: + + ```python + >>> from transformers import pipeline + + >>> generator = pipeline(model="mrm8488/t5-base-finetuned-question-generation-ap") + >>> generator( + ... "answer: Manuel context: Manuel has created RuPERTa-base with the support of HF-Transformers and Google" + ... ) + [{'generated_text': 'question: Who created the RuPERTa-base?'}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text + generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about + text generation parameters in [Text generation strategies](../generation_strategies) and [Text + generation](text_generation). + + This Text2TextGenerationPipeline pipeline can currently be loaded from [`pipeline`] using the following task + identifier: `"text2text-generation"`. + + The models that this pipeline can use are models that have been fine-tuned on a translation task. See the + up-to-date list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=text2text-generation). For a list of available + parameters, see the [following + documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate) + + Usage: + + ```python + text2text_generator = pipeline("text2text-generation") + text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything") + ```""" + + # Used in the return key of the pipeline. 
+ return_name = "generated" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.check_model_type( + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES + if self.framework == "tf" + else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES + ) + + def _sanitize_parameters( + self, + return_tensors=None, + return_text=None, + return_type=None, + clean_up_tokenization_spaces=None, + truncation=None, + stop_sequence=None, + **generate_kwargs, + ): + preprocess_params = {} + if truncation is not None: + preprocess_params["truncation"] = truncation + + forward_params = generate_kwargs + + postprocess_params = {} + if return_tensors is not None and return_type is None: + return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT + if return_type is not None: + postprocess_params["return_type"] = return_type + + if clean_up_tokenization_spaces is not None: + postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces + + if stop_sequence is not None: + stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False) + if len(stop_sequence_ids) > 1: + warnings.warn( + "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" + " the stop sequence will be used as the stop sequence string in the interim." + ) + generate_kwargs["eos_token_id"] = stop_sequence_ids[0] + + return preprocess_params, forward_params, postprocess_params + + def check_inputs(self, input_length: int, min_length: int, max_length: int): + """ + Checks whether there might be something wrong with given input with regard to the model. + """ + return True + + def _parse_and_tokenize(self, *args, truncation): + prefix = self.model.config.prefix if self.model.config.prefix is not None else "" + if isinstance(args[0], list): + if self.tokenizer.pad_token_id is None: + raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input") + args = ([prefix + arg for arg in args[0]],) + padding = True + + elif isinstance(args[0], str): + args = (prefix + args[0],) + padding = False + else: + raise ValueError( + f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`" + ) + inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework) + # This is produced by tokenizers but is an invalid generate kwargs + if "token_type_ids" in inputs: + del inputs["token_type_ids"] + return inputs + + def __call__(self, *args, **kwargs): + r""" + Generate the output text(s) using text(s) given as inputs. + + Args: + args (`str` or `List[str]`): + Input text for the encoder. + return_tensors (`bool`, *optional*, defaults to `False`): + Whether or not to include the tensors of predictions (as token indices) in the outputs. + return_text (`bool`, *optional*, defaults to `True`): + Whether or not to include the decoded texts in the outputs. + clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): + Whether or not to clean up the potential extra spaces in the text output. + truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`): + The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE` + (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's + max_length instead of throwing an error down the line. 
+ generate_kwargs: + Additional keyword arguments to pass along to the generate method of the model (see the generate method + corresponding to your framework [here](./model#generative-models)). + + Return: + A list or a list of list of `dict`: Each result comes as a dictionary with the following keys: + + - **generated_text** (`str`, present when `return_text=True`) -- The generated text. + - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token + ids of the generated text. + """ + + result = super().__call__(*args, **kwargs) + if ( + isinstance(args[0], list) + and all(isinstance(el, str) for el in args[0]) + and all(len(res) == 1 for res in result) + ): + return [res[0] for res in result] + return result + + def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs): + inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs) + return inputs + + def _forward(self, model_inputs, **generate_kwargs): + if self.framework == "pt": + in_b, input_length = model_inputs["input_ids"].shape + elif self.framework == "tf": + in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy() + + self.check_inputs( + input_length, + generate_kwargs.get("min_length", self.model.config.min_length), + generate_kwargs.get("max_length", self.model.config.max_length), + ) + output_ids = self.model.generate(**model_inputs, **generate_kwargs) + out_b = output_ids.shape[0] + if self.framework == "pt": + output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:]) + elif self.framework == "tf": + output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:])) + return {"output_ids": output_ids} + + def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False): + records = [] + for output_ids in model_outputs["output_ids"][0]: + if return_type == ReturnType.TENSORS: + record = {f"{self.return_name}_token_ids": output_ids} + elif return_type == ReturnType.TEXT: + record = { + f"{self.return_name}_text": self.tokenizer.decode( + output_ids, + skip_special_tokens=True, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + ) + } + records.append(record) + return records + + +@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True)) +class SummarizationPipeline(Text2TextGenerationPipeline): + """ + Summarize news articles and other documents. + + This summarizing pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"summarization"`. + + The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is + currently, '*bart-large-cnn*', '*google-t5/t5-small*', '*google-t5/t5-base*', '*google-t5/t5-large*', '*google-t5/t5-3b*', '*google-t5/t5-11b*'. See the up-to-date + list of available models on [huggingface.co/models](https://huggingface.co/models?filter=summarization). 
For a list + of available parameters, see the [following + documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate) + + Usage: + + ```python + # use bart in pytorch + summarizer = pipeline("summarization") + summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20) + + # use t5 in tf + summarizer = pipeline("summarization", model="google-t5/t5-base", tokenizer="google-t5/t5-base", framework="tf") + summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20) + ```""" + + # Used in the return key of the pipeline. + return_name = "summary" + + def __call__(self, *args, **kwargs): + r""" + Summarize the text(s) given as inputs. + + Args: + documents (*str* or `List[str]`): + One or several articles (or one list of articles) to summarize. + return_text (`bool`, *optional*, defaults to `True`): + Whether or not to include the decoded texts in the outputs + return_tensors (`bool`, *optional*, defaults to `False`): + Whether or not to include the tensors of predictions (as token indices) in the outputs. + clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): + Whether or not to clean up the potential extra spaces in the text output. + generate_kwargs: + Additional keyword arguments to pass along to the generate method of the model (see the generate method + corresponding to your framework [here](./model#generative-models)). + + Return: + A list or a list of list of `dict`: Each result comes as a dictionary with the following keys: + + - **summary_text** (`str`, present when `return_text=True`) -- The summary of the corresponding input. + - **summary_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token + ids of the summary. + """ + return super().__call__(*args, **kwargs) + + def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool: + """ + Checks whether there might be something wrong with given input with regard to the model. + """ + if max_length < min_length: + logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.") + + if input_length < max_length: + logger.warning( + f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is " + "a summarization task, where outputs shorter than the input are typically wanted, you might " + f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})" + ) + + +@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True)) +class TranslationPipeline(Text2TextGenerationPipeline): + """ + Translates from one language to another. + + This translation pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"translation_xx_to_yy"`. + + The models that this pipeline can use are models that have been fine-tuned on a translation task. See the + up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=translation). + For a list of available parameters, see the [following + documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate) + + Usage: + + ```python + en_fr_translator = pipeline("translation_en_to_fr") + en_fr_translator("How old are you?") + ```""" + + # Used in the return key of the pipeline. 
+ return_name = "translation" + + def check_inputs(self, input_length: int, min_length: int, max_length: int): + if input_length > 0.9 * max_length: + logger.warning( + f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider " + "increasing your max_length manually, e.g. translator('...', max_length=400)" + ) + return True + + def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None): + if getattr(self.tokenizer, "_build_translation_inputs", None): + return self.tokenizer._build_translation_inputs( + *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang + ) + else: + return super()._parse_and_tokenize(*args, truncation=truncation) + + def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs): + preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs) + if src_lang is not None: + preprocess_params["src_lang"] = src_lang + if tgt_lang is not None: + preprocess_params["tgt_lang"] = tgt_lang + if src_lang is None and tgt_lang is None: + # Backward compatibility, direct arguments use is preferred. + task = kwargs.get("task", self.task) + items = task.split("_") + if task and len(items) == 4: + # translation, XX, to YY + preprocess_params["src_lang"] = items[1] + preprocess_params["tgt_lang"] = items[3] + return preprocess_params, forward_params, postprocess_params + + def __call__(self, *args, **kwargs): + r""" + Translate the text(s) given as inputs. + + Args: + args (`str` or `List[str]`): + Texts to be translated. + return_tensors (`bool`, *optional*, defaults to `False`): + Whether or not to include the tensors of predictions (as token indices) in the outputs. + return_text (`bool`, *optional*, defaults to `True`): + Whether or not to include the decoded texts in the outputs. + clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): + Whether or not to clean up the potential extra spaces in the text output. + src_lang (`str`, *optional*): + The language of the input. Might be required for multilingual models. Will not have any effect for + single pair translation models + tgt_lang (`str`, *optional*): + The language of the desired output. Might be required for multilingual models. Will not have any effect + for single pair translation models + generate_kwargs: + Additional keyword arguments to pass along to the generate method of the model (see the generate method + corresponding to your framework [here](./model#generative-models)). + + Return: + A list or a list of list of `dict`: Each result comes as a dictionary with the following keys: + + - **translation_text** (`str`, present when `return_text=True`) -- The translation. + - **translation_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The + token ids of the translation. 
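
Editor's note: `_sanitize_parameters` above falls back to deriving the language pair from the task name when `src_lang`/`tgt_lang` are not passed explicitly. A standalone sketch of that parsing rule (the helper name is mine, purely illustrative):

```python
def parse_translation_task(task):
    """Mimic the backward-compatible task-name parsing, e.g. 'translation_en_to_fr'."""
    items = task.split("_")
    if task and len(items) == 4:
        # translation, XX, to, YY
        return {"src_lang": items[1], "tgt_lang": items[3]}
    return {}

print(parse_translation_task("translation_en_to_fr"))  # {'src_lang': 'en', 'tgt_lang': 'fr'}
print(parse_translation_task("translation"))           # {} -> languages must come from kwargs
```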
+ """ + return super().__call__(*args, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/text_classification.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/text_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..6521da098d4cdf379d6c338041d33cb21f44907a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/text_classification.py @@ -0,0 +1,226 @@ +import inspect +import warnings +from typing import Dict + +import numpy as np + +from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available +from .base import GenericTensor, Pipeline, build_pipeline_init_args + + +if is_tf_available(): + from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES + +if is_torch_available(): + from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES + + +def sigmoid(_outputs): + return 1.0 / (1.0 + np.exp(-_outputs)) + + +def softmax(_outputs): + maxes = np.max(_outputs, axis=-1, keepdims=True) + shifted_exp = np.exp(_outputs - maxes) + return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) + + +class ClassificationFunction(ExplicitEnum): + SIGMOID = "sigmoid" + SOFTMAX = "softmax" + NONE = "none" + + +@add_end_docstrings( + build_pipeline_init_args(has_tokenizer=True), + r""" + return_all_scores (`bool`, *optional*, defaults to `False`): + Whether to return all prediction scores or just the one of the predicted class. + function_to_apply (`str`, *optional*, defaults to `"default"`): + The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: + + - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model + has several labels, will apply the softmax function on the output. + - `"sigmoid"`: Applies the sigmoid function on the output. + - `"softmax"`: Applies the softmax function on the output. + - `"none"`: Does not apply any function on the output.""", +) +class TextClassificationPipeline(Pipeline): + """ + Text classification pipeline using any `ModelForSequenceClassification`. See the [sequence classification + examples](../task_summary#sequence-classification) for more information. + + Example: + + ```python + >>> from transformers import pipeline + + >>> classifier = pipeline(model="distilbert/distilbert-base-uncased-finetuned-sst-2-english") + >>> classifier("This movie is disgustingly good !") + [{'label': 'POSITIVE', 'score': 1.0}] + + >>> classifier("Director tried too much.") + [{'label': 'NEGATIVE', 'score': 0.996}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This text classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"sentiment-analysis"` (for classifying sequences according to positive or negative sentiments). + + If multiple classification labels are available (`model.config.num_labels >= 2`), the pipeline will run a softmax + over the results. If there is a single label, the pipeline will run a sigmoid over the result. + + The models that this pipeline can use are models that have been fine-tuned on a sequence classification task. See + the up-to-date list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=text-classification). 
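
Editor's note: the module-level `sigmoid` and `softmax` helpers defined above are plain numpy implementations; the softmax subtracts the row maximum so large logits do not overflow. A quick self-contained check on example logits:

```python
import numpy as np

def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))

def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)

logits = np.array([[2.0, -1.0, 0.5]])
print(softmax(logits))         # rows sum to 1, roughly [[0.79, 0.04, 0.18]]
print(sigmoid(logits))         # elementwise, used for single-label / multi-label heads
print(softmax(logits + 1000))  # still finite thanks to the max subtraction
```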
+ """ + + return_all_scores = False + function_to_apply = ClassificationFunction.NONE + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + self.check_model_type( + TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES + if self.framework == "tf" + else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES + ) + + def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs): + # Using "" as default argument because we're going to use `top_k=None` in user code to declare + # "No top_k" + preprocess_params = tokenizer_kwargs + + postprocess_params = {} + if hasattr(self.model.config, "return_all_scores") and return_all_scores is None: + return_all_scores = self.model.config.return_all_scores + + if isinstance(top_k, int) or top_k is None: + postprocess_params["top_k"] = top_k + postprocess_params["_legacy"] = False + elif return_all_scores is not None: + warnings.warn( + "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" + " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.", + UserWarning, + ) + if return_all_scores: + postprocess_params["top_k"] = None + else: + postprocess_params["top_k"] = 1 + + if isinstance(function_to_apply, str): + function_to_apply = ClassificationFunction[function_to_apply.upper()] + + if function_to_apply is not None: + postprocess_params["function_to_apply"] = function_to_apply + return preprocess_params, {}, postprocess_params + + def __call__(self, inputs, **kwargs): + """ + Classify the text(s) given as inputs. + + Args: + inputs (`str` or `List[str]` or `Dict[str]`, or `List[Dict[str]]`): + One or several texts to classify. In order to use text pairs for your classification, you can send a + dictionary containing `{"text", "text_pair"}` keys, or a list of those. + top_k (`int`, *optional*, defaults to `1`): + How many results to return. + function_to_apply (`str`, *optional*, defaults to `"default"`): + The function to apply to the model outputs in order to retrieve the scores. Accepts four different + values: + + If this argument is not specified, then it will apply the following functions according to the number + of labels: + + - If the model has a single label, will apply the sigmoid function on the output. + - If the model has several labels, will apply the softmax function on the output. + + Possible values are: + + - `"sigmoid"`: Applies the sigmoid function on the output. + - `"softmax"`: Applies the softmax function on the output. + - `"none"`: Does not apply any function on the output. + + Return: + A list or a list of list of `dict`: Each result comes as list of dictionaries with the following keys: + + - **label** (`str`) -- The label predicted. + - **score** (`float`) -- The corresponding probability. + + If `top_k` is used, one such dictionary is returned per label. + """ + inputs = (inputs,) + result = super().__call__(*inputs, **kwargs) + # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
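
Editor's note: `_sanitize_parameters` above uses an empty-string sentinel for `top_k` so that an explicit `top_k=None` (return every label) can be told apart from "not passed" (legacy single-dict output), and `__call__` also accepts `{"text", "text_pair"}` dictionaries. A hedged usage sketch; the checkpoint is the one from the class docstring, outputs are only indicative, and the text-pair call merely illustrates the input format:

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="distilbert/distilbert-base-uncased-finetuned-sst-2-english",
)

# Legacy behaviour (no top_k passed): one {"label", "score"} dict per input.
print(classifier("This movie was great!"))

# top_k=None returns every label with its score, sorted by score.
print(classifier("This movie was great!", top_k=None))

# Text pairs are sent as a dict rather than a list of two strings.
print(classifier({"text": "A man is playing guitar.", "text_pair": "Someone makes music."}))
```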
+ _legacy = "top_k" not in kwargs + if isinstance(inputs[0], str) and _legacy: + # This pipeline is odd, and return a list when single item is run + return [result] + else: + return result + + def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]: + return_tensors = self.framework + if isinstance(inputs, dict): + return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs) + elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2: + # It used to be valid to use a list of list of list for text pairs, keeping this path for BC + return self.tokenizer( + text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs + ) + elif isinstance(inputs, list): + # This is likely an invalid usage of the pipeline attempting to pass text pairs. + raise ValueError( + "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" + ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' + ) + return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs) + + def _forward(self, model_inputs): + # `XXXForSequenceClassification` models should not use `use_cache=True` even if it's supported + model_forward = self.model.forward if self.framework == "pt" else self.model.call + if "use_cache" in inspect.signature(model_forward).parameters.keys(): + model_inputs["use_cache"] = False + return self.model(**model_inputs) + + def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True): + # `_legacy` is used to determine if we're running the naked pipeline and in backward + # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running + # the more natural result containing the list. 
+ # Default value before `set_parameters` + if function_to_apply is None: + if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: + function_to_apply = ClassificationFunction.SIGMOID + elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: + function_to_apply = ClassificationFunction.SOFTMAX + elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None: + function_to_apply = self.model.config.function_to_apply + else: + function_to_apply = ClassificationFunction.NONE + + outputs = model_outputs["logits"][0] + outputs = outputs.numpy() + + if function_to_apply == ClassificationFunction.SIGMOID: + scores = sigmoid(outputs) + elif function_to_apply == ClassificationFunction.SOFTMAX: + scores = softmax(outputs) + elif function_to_apply == ClassificationFunction.NONE: + scores = outputs + else: + raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}") + + if top_k == 1 and _legacy: + return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()} + + dict_scores = [ + {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores) + ] + if not _legacy: + dict_scores.sort(key=lambda x: x["score"], reverse=True) + if top_k is not None: + dict_scores = dict_scores[:top_k] + return dict_scores diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/token_classification.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/token_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..e1d763eafa8b71ee8953faee949102e7cb4dbac8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/token_classification.py @@ -0,0 +1,570 @@ +import types +import warnings +from typing import List, Optional, Tuple, Union + +import numpy as np + +from ..models.bert.tokenization_bert import BasicTokenizer +from ..utils import ( + ExplicitEnum, + add_end_docstrings, + is_tf_available, + is_torch_available, +) +from .base import ArgumentHandler, ChunkPipeline, Dataset, build_pipeline_init_args + + +if is_tf_available(): + import tensorflow as tf + + from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES +if is_torch_available(): + from ..models.auto.modeling_auto import MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES + + +class TokenClassificationArgumentHandler(ArgumentHandler): + """ + Handles arguments for token classification. 
+ """ + + def __call__(self, inputs: Union[str, List[str]], **kwargs): + if inputs is not None and isinstance(inputs, (list, tuple)) and len(inputs) > 0: + inputs = list(inputs) + batch_size = len(inputs) + elif isinstance(inputs, str): + inputs = [inputs] + batch_size = 1 + elif Dataset is not None and isinstance(inputs, Dataset) or isinstance(inputs, types.GeneratorType): + return inputs, None + else: + raise ValueError("At least one input is required.") + + offset_mapping = kwargs.get("offset_mapping") + if offset_mapping: + if isinstance(offset_mapping, list) and isinstance(offset_mapping[0], tuple): + offset_mapping = [offset_mapping] + if len(offset_mapping) != batch_size: + raise ValueError("offset_mapping should have the same batch size as the input") + return inputs, offset_mapping + + +class AggregationStrategy(ExplicitEnum): + """All the valid aggregation strategies for TokenClassificationPipeline""" + + NONE = "none" + SIMPLE = "simple" + FIRST = "first" + AVERAGE = "average" + MAX = "max" + + +@add_end_docstrings( + build_pipeline_init_args(has_tokenizer=True), + r""" + ignore_labels (`List[str]`, defaults to `["O"]`): + A list of labels to ignore. + grouped_entities (`bool`, *optional*, defaults to `False`): + DEPRECATED, use `aggregation_strategy` instead. Whether or not to group the tokens corresponding to the + same entity together in the predictions or not. + stride (`int`, *optional*): + If stride is provided, the pipeline is applied on all the text. The text is split into chunks of size + model_max_length. Works only with fast tokenizers and `aggregation_strategy` different from `NONE`. The + value of this argument defines the number of overlapping tokens between chunks. In other words, the model + will shift forward by `tokenizer.model_max_length - stride` tokens each step. + aggregation_strategy (`str`, *optional*, defaults to `"none"`): + The strategy to fuse (or not) tokens based on the model prediction. + + - "none" : Will simply not do any aggregation and simply return raw results from the model + - "simple" : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C, + I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{"word": ABC, "entity": "TAG"}, {"word": "D", + "entity": "TAG2"}, {"word": "E", "entity": "TAG2"}] Notice that two consecutive B tags will end up as + different entities. On word based languages, we might end up splitting words undesirably : Imagine + Microsoft being tagged as [{"word": "Micro", "entity": "ENTERPRISE"}, {"word": "soft", "entity": + "NAME"}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages + that support that meaning, which is basically tokens separated by a space). These mitigations will + only work on real words, "New york" might still be tagged with two different entities. + - "first" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot + end up with different tags. Words will simply use the tag of the first token of the word when there + is ambiguity. + - "average" : (works only on word based models) Will use the `SIMPLE` strategy except that words, + cannot end up with different tags. scores will be averaged first across tokens, and then the maximum + label is applied. + - "max" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot + end up with different tags. 
Word entity will simply be the token with the maximum score.""", +) +class TokenClassificationPipeline(ChunkPipeline): + """ + Named Entity Recognition pipeline using any `ModelForTokenClassification`. See the [named entity recognition + examples](../task_summary#named-entity-recognition) for more information. + + Example: + + ```python + >>> from transformers import pipeline + + >>> token_classifier = pipeline(model="Jean-Baptiste/camembert-ner", aggregation_strategy="simple") + >>> sentence = "Je m'appelle jean-baptiste et je vis à montréal" + >>> tokens = token_classifier(sentence) + >>> tokens + [{'entity_group': 'PER', 'score': 0.9931, 'word': 'jean-baptiste', 'start': 12, 'end': 26}, {'entity_group': 'LOC', 'score': 0.998, 'word': 'montréal', 'start': 38, 'end': 47}] + + >>> token = tokens[0] + >>> # Start and end provide an easy way to highlight words in the original text. + >>> sentence[token["start"] : token["end"]] + ' jean-baptiste' + + >>> # Some models use the same idea to do part of speech. + >>> syntaxer = pipeline(model="vblagoje/bert-english-uncased-finetuned-pos", aggregation_strategy="simple") + >>> syntaxer("My name is Sarah and I live in London") + [{'entity_group': 'PRON', 'score': 0.999, 'word': 'my', 'start': 0, 'end': 2}, {'entity_group': 'NOUN', 'score': 0.997, 'word': 'name', 'start': 3, 'end': 7}, {'entity_group': 'AUX', 'score': 0.994, 'word': 'is', 'start': 8, 'end': 10}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'sarah', 'start': 11, 'end': 16}, {'entity_group': 'CCONJ', 'score': 0.999, 'word': 'and', 'start': 17, 'end': 20}, {'entity_group': 'PRON', 'score': 0.999, 'word': 'i', 'start': 21, 'end': 22}, {'entity_group': 'VERB', 'score': 0.998, 'word': 'live', 'start': 23, 'end': 27}, {'entity_group': 'ADP', 'score': 0.999, 'word': 'in', 'start': 28, 'end': 30}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'london', 'start': 31, 'end': 37}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This token recognition pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"ner"` (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous). + + The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the + up-to-date list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=token-classification). 
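
Editor's note: combining the `stride` and `aggregation_strategy` options described above lets the pipeline chunk texts longer than `model_max_length` and merge entities on word boundaries. A hedged usage sketch (checkpoint taken from the class docstring; the long text is synthetic):

```python
from transformers import pipeline

token_classifier = pipeline(
    "token-classification",
    model="Jean-Baptiste/camembert-ner",
    aggregation_strategy="first",  # word-aware merging; requires a fast tokenizer
    stride=64,                     # overlap between chunks when the text is long
)

long_text = "Je m'appelle jean-baptiste et je vis à montréal. " * 50
entities = token_classifier(long_text)
for entity in entities[:2]:
    # start/end index into the original string, so the surface form can be recovered:
    print(entity["entity_group"], long_text[entity["start"]:entity["end"]])
```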
+ """ + + default_input_names = "sequences" + + def __init__(self, args_parser=TokenClassificationArgumentHandler(), *args, **kwargs): + super().__init__(*args, **kwargs) + self.check_model_type( + TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES + if self.framework == "tf" + else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES + ) + + self._basic_tokenizer = BasicTokenizer(do_lower_case=False) + self._args_parser = args_parser + + def _sanitize_parameters( + self, + ignore_labels=None, + grouped_entities: Optional[bool] = None, + ignore_subwords: Optional[bool] = None, + aggregation_strategy: Optional[AggregationStrategy] = None, + offset_mapping: Optional[List[Tuple[int, int]]] = None, + stride: Optional[int] = None, + ): + preprocess_params = {} + if offset_mapping is not None: + preprocess_params["offset_mapping"] = offset_mapping + + postprocess_params = {} + if grouped_entities is not None or ignore_subwords is not None: + if grouped_entities and ignore_subwords: + aggregation_strategy = AggregationStrategy.FIRST + elif grouped_entities and not ignore_subwords: + aggregation_strategy = AggregationStrategy.SIMPLE + else: + aggregation_strategy = AggregationStrategy.NONE + + if grouped_entities is not None: + warnings.warn( + "`grouped_entities` is deprecated and will be removed in version v5.0.0, defaulted to" + f' `aggregation_strategy="{aggregation_strategy}"` instead.' + ) + if ignore_subwords is not None: + warnings.warn( + "`ignore_subwords` is deprecated and will be removed in version v5.0.0, defaulted to" + f' `aggregation_strategy="{aggregation_strategy}"` instead.' + ) + + if aggregation_strategy is not None: + if isinstance(aggregation_strategy, str): + aggregation_strategy = AggregationStrategy[aggregation_strategy.upper()] + if ( + aggregation_strategy + in {AggregationStrategy.FIRST, AggregationStrategy.MAX, AggregationStrategy.AVERAGE} + and not self.tokenizer.is_fast + ): + raise ValueError( + "Slow tokenizers cannot handle subwords. Please set the `aggregation_strategy` option" + ' to `"simple"` or use a fast tokenizer.' + ) + postprocess_params["aggregation_strategy"] = aggregation_strategy + if ignore_labels is not None: + postprocess_params["ignore_labels"] = ignore_labels + if stride is not None: + if stride >= self.tokenizer.model_max_length: + raise ValueError( + "`stride` must be less than `tokenizer.model_max_length` (or even lower if the tokenizer adds special tokens)" + ) + if aggregation_strategy == AggregationStrategy.NONE: + raise ValueError( + "`stride` was provided to process all the text but `aggregation_strategy=" + f'"{aggregation_strategy}"`, please select another one instead.' + ) + else: + if self.tokenizer.is_fast: + tokenizer_params = { + "return_overflowing_tokens": True, + "padding": True, + "stride": stride, + } + preprocess_params["tokenizer_params"] = tokenizer_params + else: + raise ValueError( + "`stride` was provided to process all the text but you're using a slow tokenizer." + " Please use a fast tokenizer." + ) + return preprocess_params, {}, postprocess_params + + def __call__(self, inputs: Union[str, List[str]], **kwargs): + """ + Classify each token of the text(s) given as inputs. + + Args: + inputs (`str` or `List[str]`): + One or several texts (or one list of texts) for token classification. 
+ + Return: + A list or a list of list of `dict`: Each result comes as a list of dictionaries (one for each token in the + corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy) with + the following keys: + + - **word** (`str`) -- The token/word classified. This is obtained by decoding the selected tokens. If you + want to have the exact string in the original sentence, use `start` and `end`. + - **score** (`float`) -- The corresponding probability for `entity`. + - **entity** (`str`) -- The entity predicted for that token/word (it is named *entity_group* when + *aggregation_strategy* is not `"none"`. + - **index** (`int`, only present when `aggregation_strategy="none"`) -- The index of the corresponding + token in the sentence. + - **start** (`int`, *optional*) -- The index of the start of the corresponding entity in the sentence. Only + exists if the offsets are available within the tokenizer + - **end** (`int`, *optional*) -- The index of the end of the corresponding entity in the sentence. Only + exists if the offsets are available within the tokenizer + """ + + _inputs, offset_mapping = self._args_parser(inputs, **kwargs) + if offset_mapping: + kwargs["offset_mapping"] = offset_mapping + + return super().__call__(inputs, **kwargs) + + def preprocess(self, sentence, offset_mapping=None, **preprocess_params): + tokenizer_params = preprocess_params.pop("tokenizer_params", {}) + truncation = True if self.tokenizer.model_max_length and self.tokenizer.model_max_length > 0 else False + inputs = self.tokenizer( + sentence, + return_tensors=self.framework, + truncation=truncation, + return_special_tokens_mask=True, + return_offsets_mapping=self.tokenizer.is_fast, + **tokenizer_params, + ) + inputs.pop("overflow_to_sample_mapping", None) + num_chunks = len(inputs["input_ids"]) + + for i in range(num_chunks): + if self.framework == "tf": + model_inputs = {k: tf.expand_dims(v[i], 0) for k, v in inputs.items()} + else: + model_inputs = {k: v[i].unsqueeze(0) for k, v in inputs.items()} + if offset_mapping is not None: + model_inputs["offset_mapping"] = offset_mapping + model_inputs["sentence"] = sentence if i == 0 else None + model_inputs["is_last"] = i == num_chunks - 1 + + yield model_inputs + + def _forward(self, model_inputs): + # Forward + special_tokens_mask = model_inputs.pop("special_tokens_mask") + offset_mapping = model_inputs.pop("offset_mapping", None) + sentence = model_inputs.pop("sentence") + is_last = model_inputs.pop("is_last") + if self.framework == "tf": + logits = self.model(**model_inputs)[0] + else: + output = self.model(**model_inputs) + logits = output["logits"] if isinstance(output, dict) else output[0] + + return { + "logits": logits, + "special_tokens_mask": special_tokens_mask, + "offset_mapping": offset_mapping, + "sentence": sentence, + "is_last": is_last, + **model_inputs, + } + + def postprocess(self, all_outputs, aggregation_strategy=AggregationStrategy.NONE, ignore_labels=None): + if ignore_labels is None: + ignore_labels = ["O"] + all_entities = [] + for model_outputs in all_outputs: + logits = model_outputs["logits"][0].numpy() + sentence = all_outputs[0]["sentence"] + input_ids = model_outputs["input_ids"][0] + offset_mapping = ( + model_outputs["offset_mapping"][0] if model_outputs["offset_mapping"] is not None else None + ) + special_tokens_mask = model_outputs["special_tokens_mask"][0].numpy() + + maxes = np.max(logits, axis=-1, keepdims=True) + shifted_exp = np.exp(logits - maxes) + scores = shifted_exp / 
shifted_exp.sum(axis=-1, keepdims=True) + + if self.framework == "tf": + input_ids = input_ids.numpy() + offset_mapping = offset_mapping.numpy() if offset_mapping is not None else None + + pre_entities = self.gather_pre_entities( + sentence, input_ids, scores, offset_mapping, special_tokens_mask, aggregation_strategy + ) + grouped_entities = self.aggregate(pre_entities, aggregation_strategy) + # Filter anything that is in self.ignore_labels + entities = [ + entity + for entity in grouped_entities + if entity.get("entity", None) not in ignore_labels + and entity.get("entity_group", None) not in ignore_labels + ] + all_entities.extend(entities) + num_chunks = len(all_outputs) + if num_chunks > 1: + all_entities = self.aggregate_overlapping_entities(all_entities) + return all_entities + + def aggregate_overlapping_entities(self, entities): + if len(entities) == 0: + return entities + entities = sorted(entities, key=lambda x: x["start"]) + aggregated_entities = [] + previous_entity = entities[0] + for entity in entities: + if previous_entity["start"] <= entity["start"] < previous_entity["end"]: + current_length = entity["end"] - entity["start"] + previous_length = previous_entity["end"] - previous_entity["start"] + if current_length > previous_length: + previous_entity = entity + elif current_length == previous_length and entity["score"] > previous_entity["score"]: + previous_entity = entity + else: + aggregated_entities.append(previous_entity) + previous_entity = entity + aggregated_entities.append(previous_entity) + return aggregated_entities + + def gather_pre_entities( + self, + sentence: str, + input_ids: np.ndarray, + scores: np.ndarray, + offset_mapping: Optional[List[Tuple[int, int]]], + special_tokens_mask: np.ndarray, + aggregation_strategy: AggregationStrategy, + ) -> List[dict]: + """Fuse various numpy arrays into dicts with all the information needed for aggregation""" + pre_entities = [] + for idx, token_scores in enumerate(scores): + # Filter special_tokens + if special_tokens_mask[idx]: + continue + + word = self.tokenizer.convert_ids_to_tokens(int(input_ids[idx])) + if offset_mapping is not None: + start_ind, end_ind = offset_mapping[idx] + if not isinstance(start_ind, int): + if self.framework == "pt": + start_ind = start_ind.item() + end_ind = end_ind.item() + word_ref = sentence[start_ind:end_ind] + if getattr(self.tokenizer, "_tokenizer", None) and getattr( + self.tokenizer._tokenizer.model, "continuing_subword_prefix", None + ): + # This is a BPE, word aware tokenizer, there is a correct way + # to fuse tokens + is_subword = len(word) != len(word_ref) + else: + # This is a fallback heuristic. This will fail most likely on any kind of text + punctuation mixtures that will be considered "words". Non word aware models cannot do better than this unfortunately. 
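
Editor's note: to make the fallback concrete before the code that applies it just below, the heuristic flags a token as a subword when the character immediately before its span is not a space. A toy illustration (the sentence and offsets are made up):

```python
sentence = "Hello Microsoft"

# Offsets a non word-aware tokenizer might produce for "Hello" + "Micro" + "soft".
offsets = [(0, 5), (6, 11), (11, 15)]

for start_ind, end_ind in offsets:
    is_subword = start_ind > 0 and " " not in sentence[start_ind - 1 : start_ind + 1]
    print(sentence[start_ind:end_ind], is_subword)
# Hello False   (start_ind == 0)
# Micro False   (preceded by a space)
# soft  True    (glued to the previous token)
```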
+ if aggregation_strategy in { + AggregationStrategy.FIRST, + AggregationStrategy.AVERAGE, + AggregationStrategy.MAX, + }: + warnings.warn( + "Tokenizer does not support real words, using fallback heuristic", + UserWarning, + ) + is_subword = start_ind > 0 and " " not in sentence[start_ind - 1 : start_ind + 1] + + if int(input_ids[idx]) == self.tokenizer.unk_token_id: + word = word_ref + is_subword = False + else: + start_ind = None + end_ind = None + is_subword = False + + pre_entity = { + "word": word, + "scores": token_scores, + "start": start_ind, + "end": end_ind, + "index": idx, + "is_subword": is_subword, + } + pre_entities.append(pre_entity) + return pre_entities + + def aggregate(self, pre_entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]: + if aggregation_strategy in {AggregationStrategy.NONE, AggregationStrategy.SIMPLE}: + entities = [] + for pre_entity in pre_entities: + entity_idx = pre_entity["scores"].argmax() + score = pre_entity["scores"][entity_idx] + entity = { + "entity": self.model.config.id2label[entity_idx], + "score": score, + "index": pre_entity["index"], + "word": pre_entity["word"], + "start": pre_entity["start"], + "end": pre_entity["end"], + } + entities.append(entity) + else: + entities = self.aggregate_words(pre_entities, aggregation_strategy) + + if aggregation_strategy == AggregationStrategy.NONE: + return entities + return self.group_entities(entities) + + def aggregate_word(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> dict: + word = self.tokenizer.convert_tokens_to_string([entity["word"] for entity in entities]) + if aggregation_strategy == AggregationStrategy.FIRST: + scores = entities[0]["scores"] + idx = scores.argmax() + score = scores[idx] + entity = self.model.config.id2label[idx] + elif aggregation_strategy == AggregationStrategy.MAX: + max_entity = max(entities, key=lambda entity: entity["scores"].max()) + scores = max_entity["scores"] + idx = scores.argmax() + score = scores[idx] + entity = self.model.config.id2label[idx] + elif aggregation_strategy == AggregationStrategy.AVERAGE: + scores = np.stack([entity["scores"] for entity in entities]) + average_scores = np.nanmean(scores, axis=0) + entity_idx = average_scores.argmax() + entity = self.model.config.id2label[entity_idx] + score = average_scores[entity_idx] + else: + raise ValueError("Invalid aggregation_strategy") + new_entity = { + "entity": entity, + "score": score, + "word": word, + "start": entities[0]["start"], + "end": entities[-1]["end"], + } + return new_entity + + def aggregate_words(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]: + """ + Override tokens from a given word that disagree to force agreement on word boundaries. 
+ + Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft| + company| B-ENT I-ENT + """ + if aggregation_strategy in { + AggregationStrategy.NONE, + AggregationStrategy.SIMPLE, + }: + raise ValueError("NONE and SIMPLE strategies are invalid for word aggregation") + + word_entities = [] + word_group = None + for entity in entities: + if word_group is None: + word_group = [entity] + elif entity["is_subword"]: + word_group.append(entity) + else: + word_entities.append(self.aggregate_word(word_group, aggregation_strategy)) + word_group = [entity] + # Last item + if word_group is not None: + word_entities.append(self.aggregate_word(word_group, aggregation_strategy)) + return word_entities + + def group_sub_entities(self, entities: List[dict]) -> dict: + """ + Group together the adjacent tokens with the same entity predicted. + + Args: + entities (`dict`): The entities predicted by the pipeline. + """ + # Get the first entity in the entity group + entity = entities[0]["entity"].split("-", 1)[-1] + scores = np.nanmean([entity["score"] for entity in entities]) + tokens = [entity["word"] for entity in entities] + + entity_group = { + "entity_group": entity, + "score": np.mean(scores), + "word": self.tokenizer.convert_tokens_to_string(tokens), + "start": entities[0]["start"], + "end": entities[-1]["end"], + } + return entity_group + + def get_tag(self, entity_name: str) -> Tuple[str, str]: + if entity_name.startswith("B-"): + bi = "B" + tag = entity_name[2:] + elif entity_name.startswith("I-"): + bi = "I" + tag = entity_name[2:] + else: + # It's not in B-, I- format + # Default to I- for continuation. + bi = "I" + tag = entity_name + return bi, tag + + def group_entities(self, entities: List[dict]) -> List[dict]: + """ + Find and group together the adjacent tokens with the same entity predicted. + + Args: + entities (`dict`): The entities predicted by the pipeline. 
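
Editor's note: `get_tag` above splits an IOB-style label into its prefix and tag, and the grouping rule implemented just below only extends the running group when the tag matches and the prefix is not `"B"`. A small mirror of that helper on a toy label sequence (the function here is a standalone copy for illustration):

```python
def get_tag(entity_name):
    """Mirror of TokenClassificationPipeline.get_tag: split an IOB label into (prefix, tag)."""
    if entity_name.startswith("B-"):
        return "B", entity_name[2:]
    if entity_name.startswith("I-"):
        return "I", entity_name[2:]
    # Labels without a prefix default to a continuation.
    return "I", entity_name

labels = ["B-PER", "I-PER", "B-PER", "I-LOC"]
print([get_tag(label) for label in labels])
# [('B', 'PER'), ('I', 'PER'), ('B', 'PER'), ('I', 'LOC')]
# The two B-PER spans stay separate entities because the third label starts with "B".
```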
+ """ + + entity_groups = [] + entity_group_disagg = [] + + for entity in entities: + if not entity_group_disagg: + entity_group_disagg.append(entity) + continue + + # If the current entity is similar and adjacent to the previous entity, + # append it to the disaggregated entity group + # The split is meant to account for the "B" and "I" prefixes + # Shouldn't merge if both entities are B-type + bi, tag = self.get_tag(entity["entity"]) + last_bi, last_tag = self.get_tag(entity_group_disagg[-1]["entity"]) + + if tag == last_tag and bi != "B": + # Modify subword type to be previous_type + entity_group_disagg.append(entity) + else: + # If the current entity is different from the previous entity + # aggregate the disaggregated entity group + entity_groups.append(self.group_sub_entities(entity_group_disagg)) + entity_group_disagg = [entity] + if entity_group_disagg: + # it's the last entity, add it to the entity groups + entity_groups.append(self.group_sub_entities(entity_group_disagg)) + + return entity_groups + + +NerPipeline = TokenClassificationPipeline diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/video_classification.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/video_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..5702f23c5f60908740ac94f088efb4df5e86af3d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/video_classification.py @@ -0,0 +1,140 @@ +from io import BytesIO +from typing import List, Union + +import requests + +from ..utils import ( + add_end_docstrings, + is_av_available, + is_torch_available, + logging, + requires_backends, +) +from .base import Pipeline, build_pipeline_init_args + + +if is_av_available(): + import av + import numpy as np + + +if is_torch_available(): + from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES + +logger = logging.get_logger(__name__) + + +@add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) +class VideoClassificationPipeline(Pipeline): + """ + Video classification pipeline using any `AutoModelForVideoClassification`. This pipeline predicts the class of a + video. + + This video classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"video-classification"`. + + See the list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=video-classification). + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + requires_backends(self, "av") + self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES) + + def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None): + preprocess_params = {} + if frame_sampling_rate is not None: + preprocess_params["frame_sampling_rate"] = frame_sampling_rate + if num_frames is not None: + preprocess_params["num_frames"] = num_frames + + postprocess_params = {} + if top_k is not None: + postprocess_params["top_k"] = top_k + return preprocess_params, {}, postprocess_params + + def __call__(self, videos: Union[str, List[str]], **kwargs): + """ + Assign labels to the video(s) passed as inputs. + + Args: + videos (`str`, `List[str]`): + The pipeline handles three types of videos: + + - A string containing a http link pointing to a video + - A string containing a local path to a video + + The pipeline accepts either a single video or a batch of videos, which must then be passed as a string. 
+ Videos in a batch must all be in the same format: all as http links or all as local paths. + top_k (`int`, *optional*, defaults to 5): + The number of top labels that will be returned by the pipeline. If the provided number is higher than + the number of labels available in the model configuration, it will default to the number of labels. + num_frames (`int`, *optional*, defaults to `self.model.config.num_frames`): + The number of frames sampled from the video to run the classification on. If not provided, will default + to the number of frames specified in the model configuration. + frame_sampling_rate (`int`, *optional*, defaults to 1): + The sampling rate used to select frames from the video. If not provided, will default to 1, i.e. every + frame will be used. + + Return: + A dictionary or a list of dictionaries containing result. If the input is a single video, will return a + dictionary, if the input is a list of several videos, will return a list of dictionaries corresponding to + the videos. + + The dictionaries contain the following keys: + + - **label** (`str`) -- The label identified by the model. + - **score** (`int`) -- The score attributed by the model for that label. + """ + return super().__call__(videos, **kwargs) + + def preprocess(self, video, num_frames=None, frame_sampling_rate=1): + if num_frames is None: + num_frames = self.model.config.num_frames + + if video.startswith("http://") or video.startswith("https://"): + video = BytesIO(requests.get(video).content) + + container = av.open(video) + + start_idx = 0 + end_idx = num_frames * frame_sampling_rate - 1 + indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64) + + video = read_video_pyav(container, indices) + video = list(video) + + model_inputs = self.image_processor(video, return_tensors=self.framework) + return model_inputs + + def _forward(self, model_inputs): + model_outputs = self.model(**model_inputs) + return model_outputs + + def postprocess(self, model_outputs, top_k=5): + if top_k > self.model.config.num_labels: + top_k = self.model.config.num_labels + + if self.framework == "pt": + probs = model_outputs.logits.softmax(-1)[0] + scores, ids = probs.topk(top_k) + else: + raise ValueError(f"Unsupported framework: {self.framework}") + + scores = scores.tolist() + ids = ids.tolist() + return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)] + + +def read_video_pyav(container, indices): + frames = [] + container.seek(0) + start_index = indices[0] + end_index = indices[-1] + for i, frame in enumerate(container.decode(video=0)): + if i > end_index: + break + if i >= start_index and i in indices: + frames.append(frame) + return np.stack([x.to_ndarray(format="rgb24") for x in frames]) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/zero_shot_audio_classification.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/zero_shot_audio_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..c3606e3c2b83dfa8e1dc1e7bc065933ee1cba7ae --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/zero_shot_audio_classification.py @@ -0,0 +1,161 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections import UserDict +from typing import Union + +import numpy as np +import requests + +from ..utils import ( + add_end_docstrings, + logging, +) +from .audio_classification import ffmpeg_read +from .base import Pipeline, build_pipeline_init_args + + +logger = logging.get_logger(__name__) + + +@add_end_docstrings(build_pipeline_init_args(has_feature_extractor=True, has_tokenizer=True)) +class ZeroShotAudioClassificationPipeline(Pipeline): + """ + Zero shot audio classification pipeline using `ClapModel`. This pipeline predicts the class of an audio when you + provide an audio and a set of `candidate_labels`. + + + + The default `hypothesis_template` is : `"This is a sound of {}."`. Make sure you update it for your usage. + + + + Example: + ```python + >>> from transformers import pipeline + >>> from datasets import load_dataset + + >>> dataset = load_dataset("ashraq/esc50") + >>> audio = next(iter(dataset["train"]["audio"]))["array"] + >>> classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused") + >>> classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]) + [{'score': 0.9996, 'label': 'Sound of a dog'}, {'score': 0.0004, 'label': 'Sound of vaccum cleaner'}] + ``` + + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This audio + classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"zero-shot-audio-classification"`. See the list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=zero-shot-audio-classification). + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + if self.framework != "pt": + raise ValueError(f"The {self.__class__} is only available in PyTorch.") + # No specific FOR_XXX available yet + + def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs): + """ + Assign labels to the audio(s) passed as inputs. + + Args: + audios (`str`, `List[str]`, `np.array` or `List[np.array]`): + The pipeline handles three types of inputs: + - A string containing a http link pointing to an audio + - A string containing a local path to an audio + - An audio loaded in numpy + candidate_labels (`List[str]`): + The candidate labels for this audio + hypothesis_template (`str`, *optional*, defaults to `"This is a sound of {}"`): + The sentence used in cunjunction with *candidate_labels* to attempt the audio classification by + replacing the placeholder with the candidate_labels. Then likelihood is estimated by using + logits_per_audio + Return: + A list of dictionaries containing result, one dictionary per proposed label. The dictionaries contain the + following keys: + - **label** (`str`) -- The label identified by the model. It is one of the suggested `candidate_label`. + - **score** (`float`) -- The score attributed by the model for that label (between 0 and 1). 
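
Editor's note: beyond the dataset-based docstring example above, the pipeline also accepts a local file path or raw bytes, and `hypothesis_template` can be customised at call time. A hedged sketch; the file path and labels are placeholders:

```python
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-audio-classification",
    model="laion/clap-htsat-unfused",
)

# A local path works too; it is read as bytes and decoded with ffmpeg_read.
results = classifier(
    "dog_bark.wav",  # placeholder path
    candidate_labels=["dog barking", "vacuum cleaner", "rain"],
    hypothesis_template="This is the sound of {}.",
)
print(results)  # list of {"score": ..., "label": ...} dicts, sorted by score
```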
+ """ + return super().__call__(audios, **kwargs) + + def _sanitize_parameters(self, **kwargs): + preprocess_params = {} + if "candidate_labels" in kwargs: + preprocess_params["candidate_labels"] = kwargs["candidate_labels"] + if "hypothesis_template" in kwargs: + preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"] + + return preprocess_params, {}, {} + + def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."): + if isinstance(audio, str): + if audio.startswith("http://") or audio.startswith("https://"): + # We need to actually check for a real protocol, otherwise it's impossible to use a local file + # like http_huggingface_co.png + audio = requests.get(audio).content + else: + with open(audio, "rb") as f: + audio = f.read() + + if isinstance(audio, bytes): + audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate) + + if not isinstance(audio, np.ndarray): + raise ValueError("We expect a numpy ndarray as input") + if len(audio.shape) != 1: + raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline") + + inputs = self.feature_extractor( + [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt" + ) + inputs["candidate_labels"] = candidate_labels + sequences = [hypothesis_template.format(x) for x in candidate_labels] + text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True) + inputs["text_inputs"] = [text_inputs] + return inputs + + def _forward(self, model_inputs): + candidate_labels = model_inputs.pop("candidate_labels") + text_inputs = model_inputs.pop("text_inputs") + if isinstance(text_inputs[0], UserDict): + text_inputs = text_inputs[0] + else: + # Batching case. + text_inputs = text_inputs[0][0] + + outputs = self.model(**text_inputs, **model_inputs) + + model_outputs = { + "candidate_labels": candidate_labels, + "logits": outputs.logits_per_audio, + } + return model_outputs + + def postprocess(self, model_outputs): + candidate_labels = model_outputs.pop("candidate_labels") + logits = model_outputs["logits"][0] + + if self.framework == "pt": + probs = logits.softmax(dim=0) + scores = probs.tolist() + else: + raise ValueError("`tf` framework not supported.") + + result = [ + {"score": score, "label": candidate_label} + for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0]) + ] + return result diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/zero_shot_classification.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/zero_shot_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..9a600bc8ad0fb850a29e53710238437d168521d0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/zero_shot_classification.py @@ -0,0 +1,265 @@ +import inspect +from typing import List, Union + +import numpy as np + +from ..tokenization_utils import TruncationStrategy +from ..utils import add_end_docstrings, logging +from .base import ArgumentHandler, ChunkPipeline, build_pipeline_init_args + + +logger = logging.get_logger(__name__) + + +class ZeroShotClassificationArgumentHandler(ArgumentHandler): + """ + Handles arguments for zero-shot for text classification by turning each possible label into an NLI + premise/hypothesis pair. 
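
Editor's note: a compact sketch of the expansion the handler below performs: every (sequence, label) combination becomes one premise/hypothesis pair fed to the NLI model, using the pipeline's default template:

```python
hypothesis_template = "This example is {}."
sequences = ["I have a problem with my iphone that needs to be resolved asap!!"]
labels = ["urgent", "not urgent"]

sequence_pairs = []
for sequence in sequences:
    sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

print(sequence_pairs)
# [['I have a problem ...', 'This example is urgent.'],
#  ['I have a problem ...', 'This example is not urgent.']]
```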
+ """ + + def _parse_labels(self, labels): + if isinstance(labels, str): + labels = [label.strip() for label in labels.split(",") if label.strip()] + return labels + + def __call__(self, sequences, labels, hypothesis_template): + if len(labels) == 0 or len(sequences) == 0: + raise ValueError("You must include at least one label and at least one sequence.") + if hypothesis_template.format(labels[0]) == hypothesis_template: + raise ValueError( + ( + 'The provided hypothesis_template "{}" was not able to be formatted with the target labels. ' + "Make sure the passed template includes formatting syntax such as {{}} where the label should go." + ).format(hypothesis_template) + ) + + if isinstance(sequences, str): + sequences = [sequences] + + sequence_pairs = [] + for sequence in sequences: + sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels]) + + return sequence_pairs, sequences + + +@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True)) +class ZeroShotClassificationPipeline(ChunkPipeline): + """ + NLI-based zero-shot classification pipeline using a `ModelForSequenceClassification` trained on NLI (natural + language inference) tasks. Equivalent of `text-classification` pipelines, but these models don't require a + hardcoded number of potential classes, they can be chosen at runtime. It usually means it's slower but it is + **much** more flexible. + + Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis + pair and passed to the pretrained model. Then, the logit for *entailment* is taken as the logit for the candidate + label being valid. Any NLI model can be used, but the id of the *entailment* label must be included in the model + config's :attr:*~transformers.PretrainedConfig.label2id*. + + Example: + + ```python + >>> from transformers import pipeline + + >>> oracle = pipeline(model="facebook/bart-large-mnli") + >>> oracle( + ... "I have a problem with my iphone that needs to be resolved asap!!", + ... candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"], + ... ) + {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]} + + >>> oracle( + ... "I have a problem with my iphone that needs to be resolved asap!!", + ... candidate_labels=["english", "german"], + ... ) + {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['english', 'german'], 'scores': [0.814, 0.186]} + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This NLI pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"zero-shot-classification"`. + + The models that this pipeline can use are models that have been fine-tuned on an NLI task. See the up-to-date list + of available models on [huggingface.co/models](https://huggingface.co/models?search=nli). + """ + + def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs): + self._args_parser = args_parser + super().__init__(*args, **kwargs) + if self.entailment_id == -1: + logger.warning( + "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to " + "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." 
+ ) + + @property + def entailment_id(self): + for label, ind in self.model.config.label2id.items(): + if label.lower().startswith("entail"): + return ind + return -1 + + def _parse_and_tokenize( + self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs + ): + """ + Parse arguments and tokenize only_first so that hypothesis (label) is not truncated + """ + return_tensors = self.framework + if self.tokenizer.pad_token is None: + # Override for tokenizers not supporting padding + logger.error( + "Tokenizer was not supporting padding necessary for zero-shot, attempting to use " + " `pad_token=eos_token`" + ) + self.tokenizer.pad_token = self.tokenizer.eos_token + try: + inputs = self.tokenizer( + sequence_pairs, + add_special_tokens=add_special_tokens, + return_tensors=return_tensors, + padding=padding, + truncation=truncation, + ) + except Exception as e: + if "too short" in str(e): + # tokenizers might yell that we want to truncate + # to a value that is not even reached by the input. + # In that case we don't want to truncate. + # It seems there's not a really better way to catch that + # exception. + + inputs = self.tokenizer( + sequence_pairs, + add_special_tokens=add_special_tokens, + return_tensors=return_tensors, + padding=padding, + truncation=TruncationStrategy.DO_NOT_TRUNCATE, + ) + else: + raise e + + return inputs + + def _sanitize_parameters(self, **kwargs): + if kwargs.get("multi_class", None) is not None: + kwargs["multi_label"] = kwargs["multi_class"] + logger.warning( + "The `multi_class` argument has been deprecated and renamed to `multi_label`. " + "`multi_class` will be removed in a future version of Transformers." + ) + preprocess_params = {} + if "candidate_labels" in kwargs: + preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"]) + if "hypothesis_template" in kwargs: + preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"] + + postprocess_params = {} + if "multi_label" in kwargs: + postprocess_params["multi_label"] = kwargs["multi_label"] + return preprocess_params, {}, postprocess_params + + def __call__( + self, + sequences: Union[str, List[str]], + *args, + **kwargs, + ): + """ + Classify the sequence(s) given as inputs. See the [`ZeroShotClassificationPipeline`] documentation for more + information. + + Args: + sequences (`str` or `List[str]`): + The sequence(s) to classify, will be truncated if the model input is too large. + candidate_labels (`str` or `List[str]`): + The set of possible class labels to classify each sequence into. Can be a single label, a string of + comma-separated labels, or a list of labels. + hypothesis_template (`str`, *optional*, defaults to `"This example is {}."`): + The template used to turn each label into an NLI-style hypothesis. This template must include a {} or + similar syntax for the candidate label to be inserted into the template. For example, the default + template is `"This example is {}."` With the candidate label `"sports"`, this would be fed into the + model like `" sequence to classify This example is sports . "`. The default template + works well in many cases, but it may be worthwhile to experiment with different templates depending on + the task setting. + multi_label (`bool`, *optional*, defaults to `False`): + Whether or not multiple candidate labels can be true. If `False`, the scores are normalized such that + the sum of the label likelihoods for each sequence is 1. 
If `True`, the labels are considered + independent and probabilities are normalized for each candidate by doing a softmax of the entailment + score vs. the contradiction score. + + Return: + A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys: + + - **sequence** (`str`) -- The sequence for which this is the output. + - **labels** (`List[str]`) -- The labels sorted by order of likelihood. + - **scores** (`List[float]`) -- The probabilities for each of the labels. + """ + if len(args) == 0: + pass + elif len(args) == 1 and "candidate_labels" not in kwargs: + kwargs["candidate_labels"] = args[0] + else: + raise ValueError(f"Unable to understand extra arguments {args}") + + return super().__call__(sequences, **kwargs) + + def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."): + sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template) + + for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)): + model_input = self._parse_and_tokenize([sequence_pair]) + + yield { + "candidate_label": candidate_label, + "sequence": sequences[0], + "is_last": i == len(candidate_labels) - 1, + **model_input, + } + + def _forward(self, inputs): + candidate_label = inputs["candidate_label"] + sequence = inputs["sequence"] + model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names} + # `XXXForSequenceClassification` models should not use `use_cache=True` even if it's supported + model_forward = self.model.forward if self.framework == "pt" else self.model.call + if "use_cache" in inspect.signature(model_forward).parameters.keys(): + model_inputs["use_cache"] = False + outputs = self.model(**model_inputs) + + model_outputs = { + "candidate_label": candidate_label, + "sequence": sequence, + "is_last": inputs["is_last"], + **outputs, + } + return model_outputs + + def postprocess(self, model_outputs, multi_label=False): + candidate_labels = [outputs["candidate_label"] for outputs in model_outputs] + sequences = [outputs["sequence"] for outputs in model_outputs] + logits = np.concatenate([output["logits"].numpy() for output in model_outputs]) + N = logits.shape[0] + n = len(candidate_labels) + num_sequences = N // n + reshaped_outputs = logits.reshape((num_sequences, n, -1)) + + if multi_label or len(candidate_labels) == 1: + # softmax over the entailment vs. 
contradiction dim for each label independently + entailment_id = self.entailment_id + contradiction_id = -1 if entailment_id == 0 else 0 + entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]] + scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True) + scores = scores[..., 1] + else: + # softmax the "entailment" logits over all candidate labels + entail_logits = reshaped_outputs[..., self.entailment_id] + scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True) + + top_inds = list(reversed(scores[0].argsort())) + return { + "sequence": sequences[0], + "labels": [candidate_labels[i] for i in top_inds], + "scores": scores[0, top_inds].tolist(), + } diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/zero_shot_image_classification.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/zero_shot_image_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..8e40d0e6a5cbfa4bb5938dfc9d7883071485aa77 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/zero_shot_image_classification.py @@ -0,0 +1,170 @@ +from collections import UserDict +from typing import List, Union + +from ..utils import ( + add_end_docstrings, + is_tf_available, + is_torch_available, + is_vision_available, + logging, + requires_backends, +) +from .base import Pipeline, build_pipeline_init_args + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES + +if is_tf_available(): + from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES + from ..tf_utils import stable_softmax + +logger = logging.get_logger(__name__) + + +@add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) +class ZeroShotImageClassificationPipeline(Pipeline): + """ + Zero shot image classification pipeline using `CLIPModel`. This pipeline predicts the class of an image when you + provide an image and a set of `candidate_labels`. + + Example: + + ```python + >>> from transformers import pipeline + + >>> classifier = pipeline(model="google/siglip-so400m-patch14-384") + >>> classifier( + ... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", + ... candidate_labels=["animals", "humans", "landscape"], + ... ) + [{'score': 0.965, 'label': 'animals'}, {'score': 0.03, 'label': 'humans'}, {'score': 0.005, 'label': 'landscape'}] + + >>> classifier( + ... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", + ... candidate_labels=["black and white", "photorealist", "painting"], + ... ) + [{'score': 0.996, 'label': 'black and white'}, {'score': 0.003, 'label': 'photorealist'}, {'score': 0.0, 'label': 'painting'}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This image classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"zero-shot-image-classification"`. + + See the list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=zero-shot-image-classification). 
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        requires_backends(self, "vision")
+        self.check_model_type(
+            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES
+            if self.framework == "tf"
+            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES
+        )
+
+    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
+        """
+        Assign labels to the image(s) passed as inputs.
+
+        Args:
+            images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
+                The pipeline handles three types of images:
+
+                - A string containing an http link pointing to an image
+                - A string containing a local path to an image
+                - An image loaded in PIL directly
+
+            candidate_labels (`List[str]`):
+                The candidate labels for this image.
+
+            hypothesis_template (`str`, *optional*, defaults to `"This is a photo of {}"`):
+                The sentence used in conjunction with *candidate_labels* to attempt the image classification by
+                replacing the placeholder with the candidate labels. The likelihood is then estimated from
+                `logits_per_image`.
+
+            timeout (`float`, *optional*, defaults to None):
+                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
+                the call may block forever.
+
+        Return:
+            A list of dictionaries containing one result per proposed label. The dictionaries contain the
+            following keys:
+
+            - **label** (`str`) -- The label identified by the model. It is one of the suggested `candidate_label`.
+            - **score** (`float`) -- The score attributed by the model to that label (between 0 and 1).
+        """
+        return super().__call__(images, **kwargs)
+
+    def _sanitize_parameters(self, **kwargs):
+        preprocess_params = {}
+        if "candidate_labels" in kwargs:
+            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
+        if "timeout" in kwargs:
+            preprocess_params["timeout"] = kwargs["timeout"]
+        if "hypothesis_template" in kwargs:
+            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
+
+        return preprocess_params, {}, {}
+
+    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}.", timeout=None):
+        image = load_image(image, timeout=timeout)
+        inputs = self.image_processor(images=[image], return_tensors=self.framework)
+        inputs["candidate_labels"] = candidate_labels
+        sequences = [hypothesis_template.format(x) for x in candidate_labels]
+        padding = "max_length" if self.model.config.model_type == "siglip" else True
+        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=padding)
+        inputs["text_inputs"] = [text_inputs]
+        return inputs
+
+    def _forward(self, model_inputs):
+        candidate_labels = model_inputs.pop("candidate_labels")
+        text_inputs = model_inputs.pop("text_inputs")
+        if isinstance(text_inputs[0], UserDict):
+            text_inputs = text_inputs[0]
+        else:
+            # Batching case.
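+            # `preprocess` wrapped the encoding in a list; with pipeline batching the collated
+            # structure is nested one level deeper, so it is unwrapped twice on the next line
+            # to recover the BatchEncoding.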
+ text_inputs = text_inputs[0][0] + + outputs = self.model(**text_inputs, **model_inputs) + + model_outputs = { + "candidate_labels": candidate_labels, + "logits": outputs.logits_per_image, + } + return model_outputs + + def postprocess(self, model_outputs): + candidate_labels = model_outputs.pop("candidate_labels") + logits = model_outputs["logits"][0] + if self.framework == "pt" and self.model.config.model_type == "siglip": + probs = torch.sigmoid(logits).squeeze(-1) + scores = probs.tolist() + if not isinstance(scores, list): + scores = [scores] + elif self.framework == "pt": + probs = logits.softmax(dim=-1).squeeze(-1) + scores = probs.tolist() + if not isinstance(scores, list): + scores = [scores] + elif self.framework == "tf": + probs = stable_softmax(logits, axis=-1) + scores = probs.numpy().tolist() + else: + raise ValueError(f"Unsupported framework: {self.framework}") + + result = [ + {"score": score, "label": candidate_label} + for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0]) + ] + return result diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/zero_shot_object_detection.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/zero_shot_object_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..5be89332cbd910fe182600c11a58d5f1994dd46b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/zero_shot_object_detection.py @@ -0,0 +1,218 @@ +from typing import Any, Dict, List, Union + +from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends +from .base import ChunkPipeline, build_pipeline_init_args + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_torch_available(): + import torch + + from transformers.modeling_outputs import BaseModelOutput + + from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES + +logger = logging.get_logger(__name__) + + +@add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) +class ZeroShotObjectDetectionPipeline(ChunkPipeline): + """ + Zero shot object detection pipeline using `OwlViTForObjectDetection`. This pipeline predicts bounding boxes of + objects when you provide an image and a set of `candidate_labels`. + + Example: + + ```python + >>> from transformers import pipeline + + >>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection") + >>> detector( + ... "http://images.cocodataset.org/val2017/000000039769.jpg", + ... candidate_labels=["cat", "couch"], + ... ) + [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}] + + >>> detector( + ... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", + ... candidate_labels=["head", "bird"], + ... ) + [{'score': 0.119, 'label': 'bird', 'box': {'xmin': 71, 'ymin': 170, 'xmax': 410, 'ymax': 508}}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"zero-shot-object-detection"`. 
+
+    See the list of available models on
+    [huggingface.co/models](https://huggingface.co/models?filter=zero-shot-object-detection).
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        if self.framework == "tf":
+            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
+
+        requires_backends(self, "vision")
+        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES)
+
+    def __call__(
+        self,
+        image: Union[str, "Image.Image", List[Dict[str, Any]]],
+        candidate_labels: Union[str, List[str]] = None,
+        **kwargs,
+    ):
+        """
+        Detect objects (bounding boxes & classes) in the image(s) passed as inputs.
+
+        Args:
+            image (`str`, `PIL.Image` or `List[Dict[str, Any]]`):
+                The pipeline handles three types of images:
+
+                - A string containing an http url pointing to an image
+                - A string containing a local path to an image
+                - An image loaded in PIL directly
+
+                You can use this parameter to send a list of images, a dataset, or a generator directly, like so:
+
+                ```python
+                >>> from transformers import pipeline
+
+                >>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
+                >>> detector(
+                ...     [
+                ...         {
+                ...             "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
+                ...             "candidate_labels": ["cat", "couch"],
+                ...         },
+                ...         {
+                ...             "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
+                ...             "candidate_labels": ["cat", "couch"],
+                ...         },
+                ...     ]
+                ... )
+                [[{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.25, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}], [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]]
+                ```
+
+
+            candidate_labels (`str` or `List[str]` or `List[List[str]]`):
+                What the model should recognize in the image.
+
+            threshold (`float`, *optional*, defaults to 0.1):
+                The probability necessary to make a prediction.
+
+            top_k (`int`, *optional*, defaults to None):
+                The number of top predictions that will be returned by the pipeline. If the provided number is `None`
+                or higher than the number of predictions available, it will default to the number of predictions.
+
+            timeout (`float`, *optional*, defaults to None):
+                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
+                the call may block forever.
+
+
+        Return:
+            A list of lists containing prediction results, one list per input image. Each list contains dictionaries
+            with the following keys:
+
+            - **label** (`str`) -- Text query corresponding to the found object.
+            - **score** (`float`) -- Score corresponding to the object (between 0 and 1).
+            - **box** (`Dict[str,int]`) -- Bounding box of the detected object in the image's original size. It is a
+                dictionary with `xmin`, `ymin`, `xmax`, `ymax` keys.
+        """
+        if "text_queries" in kwargs:
+            candidate_labels = kwargs.pop("text_queries")
+
+        if isinstance(image, (str, Image.Image)):
+            inputs = {"image": image, "candidate_labels": candidate_labels}
+        else:
+            inputs = image
+        results = super().__call__(inputs, **kwargs)
+        return results
+
+    def _sanitize_parameters(self, **kwargs):
+        preprocess_params = {}
+        if "timeout" in kwargs:
+            preprocess_params["timeout"] = kwargs["timeout"]
+        postprocess_params = {}
+        if "threshold" in kwargs:
+            postprocess_params["threshold"] = kwargs["threshold"]
+        if "top_k" in kwargs:
+            postprocess_params["top_k"] = kwargs["top_k"]
+        return preprocess_params, {}, postprocess_params
+
+    def preprocess(self, inputs, timeout=None):
+        image = load_image(inputs["image"], timeout=timeout)
+        candidate_labels = inputs["candidate_labels"]
+        if isinstance(candidate_labels, str):
+            candidate_labels = candidate_labels.split(",")
+
+        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
+        for i, candidate_label in enumerate(candidate_labels):
+            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
+            image_features = self.image_processor(image, return_tensors=self.framework)
+            yield {
+                "is_last": i == len(candidate_labels) - 1,
+                "target_size": target_size,
+                "candidate_label": candidate_label,
+                **text_inputs,
+                **image_features,
+            }
+
+    def _forward(self, model_inputs):
+        target_size = model_inputs.pop("target_size")
+        candidate_label = model_inputs.pop("candidate_label")
+        is_last = model_inputs.pop("is_last")
+
+        outputs = self.model(**model_inputs)
+
+        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
+        return model_outputs
+
+    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
+        results = []
+        for model_output in model_outputs:
+            label = model_output["candidate_label"]
+            model_output = BaseModelOutput(model_output)
+            outputs = self.image_processor.post_process_object_detection(
+                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
+            )[0]
+
+            for index in outputs["scores"].nonzero():
+                score = outputs["scores"][index].item()
+                box = self._get_bounding_box(outputs["boxes"][index][0])
+
+                result = {"score": score, "label": label, "box": box}
+                results.append(result)
+
+        results = sorted(results, key=lambda x: x["score"], reverse=True)
+        if top_k:
+            results = results[:top_k]
+
+        return results
+
+    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
+        """
+        Turns list [xmin, ymin, xmax, ymax] into dict { "xmin": xmin, ... }
+
+        Args:
+            box (`torch.Tensor`): Tensor containing the coordinates in corners format.
+
+        Returns:
+            bbox (`Dict[str, int]`): Dict containing the coordinates in corners format.
+        """
+        if self.framework != "pt":
+            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
+        xmin, ymin, xmax, ymax = box.int().tolist()
+        bbox = {
+            "xmin": xmin,
+            "ymin": ymin,
+            "xmax": xmax,
+            "ymax": ymax,
+        }
+        return bbox
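
The score computation in `ZeroShotClassificationPipeline.postprocess` above is compact, so here is a standalone sketch (an editor's illustration, not part of these files) that replays the same math on toy NLI logits with NumPy. The indices `entailment_id = 2` and `contradiction_id = 0` are assumed from a typical MNLI-style `label2id` mapping; the real pipeline reads them from `model.config.label2id`.

```python
import numpy as np

# Toy NLI logits for one sequence and two candidate labels.
# The last axis is assumed to be [contradiction, neutral, entailment].
logits = np.array(
    [
        [
            [1.2, 0.1, 3.4],  # premise + hypothesis built from label A
            [2.5, 0.3, 0.9],  # premise + hypothesis built from label B
        ]
    ]
)  # shape: (num_sequences, num_candidate_labels, 3)

entailment_id, contradiction_id = 2, 0  # assumed MNLI-style label ordering

# multi_label=False: softmax the entailment logit across candidate labels,
# so the scores for each sequence sum to 1.
entail_logits = logits[..., entailment_id]
single_label_scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

# multi_label=True: for each label independently, softmax entailment vs.
# contradiction and keep the entailment probability.
pair_logits = logits[..., [contradiction_id, entailment_id]]
multi_label_scores = (np.exp(pair_logits) / np.exp(pair_logits).sum(-1, keepdims=True))[..., 1]

print(single_label_scores[0])  # scores over labels, summing to 1
print(multi_label_scores[0])   # independent per-label probabilities
```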