diff --git a/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/bdist_msi.py b/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/bdist_msi.py
new file mode 100644
index 0000000000000000000000000000000000000000..0863a1883e72058a8701a946c644276f047f837e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/bdist_msi.py
@@ -0,0 +1,749 @@
+# Copyright (C) 2005, 2006 Martin von Löwis
+# Licensed to PSF under a Contributor Agreement.
+# The bdist_wininst command proper
+# based on bdist_wininst
+"""
+Implements the bdist_msi command.
+"""
+
+import os
+import sys
+import warnings
+from distutils.core import Command
+from distutils.dir_util import remove_tree
+from distutils.sysconfig import get_python_version
+from distutils.version import StrictVersion
+from distutils.errors import DistutilsOptionError
+from distutils.util import get_platform
+from distutils import log
+import msilib
+from msilib import schema, sequence, text
+from msilib import Directory, Feature, Dialog, add_data
+
+class PyDialog(Dialog):
+    """Dialog class with a fixed layout: controls at the top, then a ruler,
+    then a list of buttons: back, next, cancel. Optionally a bitmap at the
+    left."""
+    def __init__(self, *args, **kw):
+        """Dialog(database, name, x, y, w, h, attributes, title, first,
+        default, cancel, bitmap=true)"""
+        Dialog.__init__(self, *args)
+        ruler = self.h - 36
+        bmwidth = 152*ruler/328
+        #if kw.get("bitmap", True):
+        #    self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin")
+        self.line("BottomLine", 0, ruler, self.w, 0)
+
+    def title(self, title):
+        "Set the title text of the dialog at the top."
+        # name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix,
+        # text, in VerdanaBold10
+        self.text("Title", 15, 10, 320, 60, 0x30003,
+                  r"{\VerdanaBold10}%s" % title)
+
+    def back(self, title, next, name = "Back", active = 1):
+        """Add a back button with a given title, the tab-next button,
+        its name in the Control table, possibly initially disabled.
+ + Return the button, so that events can be associated""" + if active: + flags = 3 # Visible|Enabled + else: + flags = 1 # Visible + return self.pushbutton(name, 180, self.h-27 , 56, 17, flags, title, next) + + def cancel(self, title, next, name = "Cancel", active = 1): + """Add a cancel button with a given title, the tab-next button, + its name in the Control table, possibly initially disabled. + + Return the button, so that events can be associated""" + if active: + flags = 3 # Visible|Enabled + else: + flags = 1 # Visible + return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next) + + def next(self, title, next, name = "Next", active = 1): + """Add a Next button with a given title, the tab-next button, + its name in the Control table, possibly initially disabled. + + Return the button, so that events can be associated""" + if active: + flags = 3 # Visible|Enabled + else: + flags = 1 # Visible + return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next) + + def xbutton(self, name, title, next, xpos): + """Add a button with a given title, the tab-next button, + its name in the Control table, giving its x position; the + y-position is aligned with the other buttons. + + Return the button, so that events can be associated""" + return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next) + +class bdist_msi(Command): + + description = "create a Microsoft Installer (.msi) binary distribution" + + user_options = [('bdist-dir=', None, + "temporary directory for creating the distribution"), + ('plat-name=', 'p', + "platform name to embed in generated filenames " + "(default: %s)" % get_platform()), + ('keep-temp', 'k', + "keep the pseudo-installation tree around after " + + "creating the distribution archive"), + ('target-version=', None, + "require a specific python version" + + " on the target system"), + ('no-target-compile', 'c', + "do not compile .py to .pyc on the target system"), + ('no-target-optimize', 'o', + "do not compile .py to .pyo (optimized) " + "on the target system"), + ('dist-dir=', 'd', + "directory to put final built distributions in"), + ('skip-build', None, + "skip rebuilding everything (for testing/debugging)"), + ('install-script=', None, + "basename of installation script to be run after " + "installation or before deinstallation"), + ('pre-install-script=', None, + "Fully qualified filename of a script to be run before " + "any files are installed. 
This script need not be in the " + "distribution"), + ] + + boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize', + 'skip-build'] + + all_versions = ['2.0', '2.1', '2.2', '2.3', '2.4', + '2.5', '2.6', '2.7', '2.8', '2.9', + '3.0', '3.1', '3.2', '3.3', '3.4', + '3.5', '3.6', '3.7', '3.8', '3.9'] + other_version = 'X' + + def __init__(self, *args, **kw): + super().__init__(*args, **kw) + warnings.warn("bdist_msi command is deprecated since Python 3.9, " + "use bdist_wheel (wheel packages) instead", + DeprecationWarning, 2) + + def initialize_options(self): + self.bdist_dir = None + self.plat_name = None + self.keep_temp = 0 + self.no_target_compile = 0 + self.no_target_optimize = 0 + self.target_version = None + self.dist_dir = None + self.skip_build = None + self.install_script = None + self.pre_install_script = None + self.versions = None + + def finalize_options(self): + self.set_undefined_options('bdist', ('skip_build', 'skip_build')) + + if self.bdist_dir is None: + bdist_base = self.get_finalized_command('bdist').bdist_base + self.bdist_dir = os.path.join(bdist_base, 'msi') + + short_version = get_python_version() + if (not self.target_version) and self.distribution.has_ext_modules(): + self.target_version = short_version + + if self.target_version: + self.versions = [self.target_version] + if not self.skip_build and self.distribution.has_ext_modules()\ + and self.target_version != short_version: + raise DistutilsOptionError( + "target version can only be %s, or the '--skip-build'" + " option must be specified" % (short_version,)) + else: + self.versions = list(self.all_versions) + + self.set_undefined_options('bdist', + ('dist_dir', 'dist_dir'), + ('plat_name', 'plat_name'), + ) + + if self.pre_install_script: + raise DistutilsOptionError( + "the pre-install-script feature is not yet implemented") + + if self.install_script: + for script in self.distribution.scripts: + if self.install_script == os.path.basename(script): + break + else: + raise DistutilsOptionError( + "install_script '%s' not found in scripts" + % self.install_script) + self.install_script_key = None + + def run(self): + if not self.skip_build: + self.run_command('build') + + install = self.reinitialize_command('install', reinit_subcommands=1) + install.prefix = self.bdist_dir + install.skip_build = self.skip_build + install.warn_dir = 0 + + install_lib = self.reinitialize_command('install_lib') + # we do not want to include pyc or pyo files + install_lib.compile = 0 + install_lib.optimize = 0 + + if self.distribution.has_ext_modules(): + # If we are building an installer for a Python version other + # than the one we are currently running, then we need to ensure + # our build_lib reflects the other Python version rather than ours. + # Note that for target_version!=sys.version, we must have skipped the + # build step, so there is no issue with enforcing the build of this + # version. 
+ target_version = self.target_version + if not target_version: + assert self.skip_build, "Should have already checked this" + target_version = '%d.%d' % sys.version_info[:2] + plat_specifier = ".%s-%s" % (self.plat_name, target_version) + build = self.get_finalized_command('build') + build.build_lib = os.path.join(build.build_base, + 'lib' + plat_specifier) + + log.info("installing to %s", self.bdist_dir) + install.ensure_finalized() + + # avoid warning of 'install_lib' about installing + # into a directory not in sys.path + sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB')) + + install.run() + + del sys.path[0] + + self.mkpath(self.dist_dir) + fullname = self.distribution.get_fullname() + installer_name = self.get_installer_filename(fullname) + installer_name = os.path.abspath(installer_name) + if os.path.exists(installer_name): os.unlink(installer_name) + + metadata = self.distribution.metadata + author = metadata.author + if not author: + author = metadata.maintainer + if not author: + author = "UNKNOWN" + version = metadata.get_version() + # ProductVersion must be strictly numeric + # XXX need to deal with prerelease versions + sversion = "%d.%d.%d" % StrictVersion(version).version + # Prefix ProductName with Python x.y, so that + # it sorts together with the other Python packages + # in Add-Remove-Programs (APR) + fullname = self.distribution.get_fullname() + if self.target_version: + product_name = "Python %s %s" % (self.target_version, fullname) + else: + product_name = "Python %s" % (fullname) + self.db = msilib.init_database(installer_name, schema, + product_name, msilib.gen_uuid(), + sversion, author) + msilib.add_tables(self.db, sequence) + props = [('DistVersion', version)] + email = metadata.author_email or metadata.maintainer_email + if email: + props.append(("ARPCONTACT", email)) + if metadata.url: + props.append(("ARPURLINFOABOUT", metadata.url)) + if props: + add_data(self.db, 'Property', props) + + self.add_find_python() + self.add_files() + self.add_scripts() + self.add_ui() + self.db.Commit() + + if hasattr(self.distribution, 'dist_files'): + tup = 'bdist_msi', self.target_version or 'any', fullname + self.distribution.dist_files.append(tup) + + if not self.keep_temp: + remove_tree(self.bdist_dir, dry_run=self.dry_run) + + def add_files(self): + db = self.db + cab = msilib.CAB("distfiles") + rootdir = os.path.abspath(self.bdist_dir) + + root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir") + f = Feature(db, "Python", "Python", "Everything", + 0, 1, directory="TARGETDIR") + + items = [(f, root, '')] + for version in self.versions + [self.other_version]: + target = "TARGETDIR" + version + name = default = "Python" + version + desc = "Everything" + if version is self.other_version: + title = "Python from another location" + level = 2 + else: + title = "Python %s from registry" % version + level = 1 + f = Feature(db, name, title, desc, 1, level, directory=target) + dir = Directory(db, cab, root, rootdir, target, default) + items.append((f, dir, version)) + db.Commit() + + seen = {} + for feature, dir, version in items: + todo = [dir] + while todo: + dir = todo.pop() + for file in os.listdir(dir.absolute): + afile = os.path.join(dir.absolute, file) + if os.path.isdir(afile): + short = "%s|%s" % (dir.make_short(file), file) + default = file + version + newdir = Directory(db, cab, dir, file, default, short) + todo.append(newdir) + else: + if not dir.component: + dir.start_component(dir.logical, feature, 0) + if afile not in seen: + key = seen[afile] = 
dir.add_file(file) + if file==self.install_script: + if self.install_script_key: + raise DistutilsOptionError( + "Multiple files with name %s" % file) + self.install_script_key = '[#%s]' % key + else: + key = seen[afile] + add_data(self.db, "DuplicateFile", + [(key + version, dir.component, key, None, dir.logical)]) + db.Commit() + cab.commit(db) + + def add_find_python(self): + """Adds code to the installer to compute the location of Python. + + Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the + registry for each version of Python. + + Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined, + else from PYTHON.MACHINE.X.Y. + + Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe""" + + start = 402 + for ver in self.versions: + install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver + machine_reg = "python.machine." + ver + user_reg = "python.user." + ver + machine_prop = "PYTHON.MACHINE." + ver + user_prop = "PYTHON.USER." + ver + machine_action = "PythonFromMachine" + ver + user_action = "PythonFromUser" + ver + exe_action = "PythonExe" + ver + target_dir_prop = "TARGETDIR" + ver + exe_prop = "PYTHON" + ver + if msilib.Win64: + # type: msidbLocatorTypeRawValue + msidbLocatorType64bit + Type = 2+16 + else: + Type = 2 + add_data(self.db, "RegLocator", + [(machine_reg, 2, install_path, None, Type), + (user_reg, 1, install_path, None, Type)]) + add_data(self.db, "AppSearch", + [(machine_prop, machine_reg), + (user_prop, user_reg)]) + add_data(self.db, "CustomAction", + [(machine_action, 51+256, target_dir_prop, "[" + machine_prop + "]"), + (user_action, 51+256, target_dir_prop, "[" + user_prop + "]"), + (exe_action, 51+256, exe_prop, "[" + target_dir_prop + "]\\python.exe"), + ]) + add_data(self.db, "InstallExecuteSequence", + [(machine_action, machine_prop, start), + (user_action, user_prop, start + 1), + (exe_action, None, start + 2), + ]) + add_data(self.db, "InstallUISequence", + [(machine_action, machine_prop, start), + (user_action, user_prop, start + 1), + (exe_action, None, start + 2), + ]) + add_data(self.db, "Condition", + [("Python" + ver, 0, "NOT TARGETDIR" + ver)]) + start += 4 + assert start < 500 + + def add_scripts(self): + if self.install_script: + start = 6800 + for ver in self.versions + [self.other_version]: + install_action = "install_script." + ver + exe_prop = "PYTHON" + ver + add_data(self.db, "CustomAction", + [(install_action, 50, exe_prop, self.install_script_key)]) + add_data(self.db, "InstallExecuteSequence", + [(install_action, "&Python%s=3" % ver, start)]) + start += 1 + # XXX pre-install scripts are currently refused in finalize_options() + # but if this feature is completed, it will also need to add + # entries for each version as the above code does + if self.pre_install_script: + scriptfn = os.path.join(self.bdist_dir, "preinstall.bat") + with open(scriptfn, "w") as f: + # The batch file will be executed with [PYTHON], so that %1 + # is the path to the Python interpreter; %0 will be the path + # of the batch file. 
+ # rem =""" + # %1 %0 + # exit + # """ + # + f.write('rem ="""\n%1 %0\nexit\n"""\n') + with open(self.pre_install_script) as fin: + f.write(fin.read()) + add_data(self.db, "Binary", + [("PreInstall", msilib.Binary(scriptfn)) + ]) + add_data(self.db, "CustomAction", + [("PreInstall", 2, "PreInstall", None) + ]) + add_data(self.db, "InstallExecuteSequence", + [("PreInstall", "NOT Installed", 450)]) + + + def add_ui(self): + db = self.db + x = y = 50 + w = 370 + h = 300 + title = "[ProductName] Setup" + + # see "Dialog Style Bits" + modal = 3 # visible | modal + modeless = 1 # visible + track_disk_space = 32 + + # UI customization properties + add_data(db, "Property", + # See "DefaultUIFont Property" + [("DefaultUIFont", "DlgFont8"), + # See "ErrorDialog Style Bit" + ("ErrorDialog", "ErrorDlg"), + ("Progress1", "Install"), # modified in maintenance type dlg + ("Progress2", "installs"), + ("MaintenanceForm_Action", "Repair"), + # possible values: ALL, JUSTME + ("WhichUsers", "ALL") + ]) + + # Fonts, see "TextStyle Table" + add_data(db, "TextStyle", + [("DlgFont8", "Tahoma", 9, None, 0), + ("DlgFontBold8", "Tahoma", 8, None, 1), #bold + ("VerdanaBold10", "Verdana", 10, None, 1), + ("VerdanaRed9", "Verdana", 9, 255, 0), + ]) + + # UI Sequences, see "InstallUISequence Table", "Using a Sequence Table" + # Numbers indicate sequence; see sequence.py for how these action integrate + add_data(db, "InstallUISequence", + [("PrepareDlg", "Not Privileged or Windows9x or Installed", 140), + ("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141), + # In the user interface, assume all-users installation if privileged. + ("SelectFeaturesDlg", "Not Installed", 1230), + # XXX no support for resume installations yet + #("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240), + ("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250), + ("ProgressDlg", None, 1280)]) + + add_data(db, 'ActionText', text.ActionText) + add_data(db, 'UIText', text.UIText) + ##################################################################### + # Standard dialogs: FatalError, UserExit, ExitDialog + fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title, + "Finish", "Finish", "Finish") + fatal.title("[ProductName] Installer ended prematurely") + fatal.back("< Back", "Finish", active = 0) + fatal.cancel("Cancel", "Back", active = 0) + fatal.text("Description1", 15, 70, 320, 80, 0x30003, + "[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.") + fatal.text("Description2", 15, 155, 320, 20, 0x30003, + "Click the Finish button to exit the Installer.") + c=fatal.next("Finish", "Cancel", name="Finish") + c.event("EndDialog", "Exit") + + user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title, + "Finish", "Finish", "Finish") + user_exit.title("[ProductName] Installer was interrupted") + user_exit.back("< Back", "Finish", active = 0) + user_exit.cancel("Cancel", "Back", active = 0) + user_exit.text("Description1", 15, 70, 320, 80, 0x30003, + "[ProductName] setup was interrupted. Your system has not been modified. 
" + "To install this program at a later time, please run the installation again.") + user_exit.text("Description2", 15, 155, 320, 20, 0x30003, + "Click the Finish button to exit the Installer.") + c = user_exit.next("Finish", "Cancel", name="Finish") + c.event("EndDialog", "Exit") + + exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title, + "Finish", "Finish", "Finish") + exit_dialog.title("Completing the [ProductName] Installer") + exit_dialog.back("< Back", "Finish", active = 0) + exit_dialog.cancel("Cancel", "Back", active = 0) + exit_dialog.text("Description", 15, 235, 320, 20, 0x30003, + "Click the Finish button to exit the Installer.") + c = exit_dialog.next("Finish", "Cancel", name="Finish") + c.event("EndDialog", "Return") + + ##################################################################### + # Required dialog: FilesInUse, ErrorDlg + inuse = PyDialog(db, "FilesInUse", + x, y, w, h, + 19, # KeepModeless|Modal|Visible + title, + "Retry", "Retry", "Retry", bitmap=False) + inuse.text("Title", 15, 6, 200, 15, 0x30003, + r"{\DlgFontBold8}Files in Use") + inuse.text("Description", 20, 23, 280, 20, 0x30003, + "Some files that need to be updated are currently in use.") + inuse.text("Text", 20, 55, 330, 50, 3, + "The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.") + inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess", + None, None, None) + c=inuse.back("Exit", "Ignore", name="Exit") + c.event("EndDialog", "Exit") + c=inuse.next("Ignore", "Retry", name="Ignore") + c.event("EndDialog", "Ignore") + c=inuse.cancel("Retry", "Exit", name="Retry") + c.event("EndDialog","Retry") + + # See "Error Dialog". See "ICE20" for the required names of the controls. 
+ error = Dialog(db, "ErrorDlg", + 50, 10, 330, 101, + 65543, # Error|Minimize|Modal|Visible + title, + "ErrorText", None, None) + error.text("ErrorText", 50,9,280,48,3, "") + #error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None) + error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo") + error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes") + error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort") + error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel") + error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore") + error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk") + error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry") + + ##################################################################### + # Global "Query Cancel" dialog + cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title, + "No", "No", "No") + cancel.text("Text", 48, 15, 194, 30, 3, + "Are you sure you want to cancel [ProductName] installation?") + #cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None, + # "py.ico", None, None) + c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No") + c.event("EndDialog", "Exit") + + c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes") + c.event("EndDialog", "Return") + + ##################################################################### + # Global "Wait for costing" dialog + costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title, + "Return", "Return", "Return") + costing.text("Text", 48, 15, 194, 30, 3, + "Please wait while the installer finishes determining your disk space requirements.") + c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None) + c.event("EndDialog", "Exit") + + ##################################################################### + # Preparation dialog: no user input except cancellation + prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title, + "Cancel", "Cancel", "Cancel") + prep.text("Description", 15, 70, 320, 40, 0x30003, + "Please wait while the Installer prepares to guide you through the installation.") + prep.title("Welcome to the [ProductName] Installer") + c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...") + c.mapping("ActionText", "Text") + c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None) + c.mapping("ActionData", "Text") + prep.back("Back", None, active=0) + prep.next("Next", None, active=0) + c=prep.cancel("Cancel", None) + c.event("SpawnDialog", "CancelDlg") + + ##################################################################### + # Feature (Python directory) selection + seldlg = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal, title, + "Next", "Next", "Cancel") + seldlg.title("Select Python Installations") + + seldlg.text("Hint", 15, 30, 300, 20, 3, + "Select the Python locations where %s should be installed." 
+ % self.distribution.get_fullname()) + + seldlg.back("< Back", None, active=0) + c = seldlg.next("Next >", "Cancel") + order = 1 + c.event("[TARGETDIR]", "[SourceDir]", ordering=order) + for version in self.versions + [self.other_version]: + order += 1 + c.event("[TARGETDIR]", "[TARGETDIR%s]" % version, + "FEATURE_SELECTED AND &Python%s=3" % version, + ordering=order) + c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=order + 1) + c.event("EndDialog", "Return", ordering=order + 2) + c = seldlg.cancel("Cancel", "Features") + c.event("SpawnDialog", "CancelDlg") + + c = seldlg.control("Features", "SelectionTree", 15, 60, 300, 120, 3, + "FEATURE", None, "PathEdit", None) + c.event("[FEATURE_SELECTED]", "1") + ver = self.other_version + install_other_cond = "FEATURE_SELECTED AND &Python%s=3" % ver + dont_install_other_cond = "FEATURE_SELECTED AND &Python%s<>3" % ver + + c = seldlg.text("Other", 15, 200, 300, 15, 3, + "Provide an alternate Python location") + c.condition("Enable", install_other_cond) + c.condition("Show", install_other_cond) + c.condition("Disable", dont_install_other_cond) + c.condition("Hide", dont_install_other_cond) + + c = seldlg.control("PathEdit", "PathEdit", 15, 215, 300, 16, 1, + "TARGETDIR" + ver, None, "Next", None) + c.condition("Enable", install_other_cond) + c.condition("Show", install_other_cond) + c.condition("Disable", dont_install_other_cond) + c.condition("Hide", dont_install_other_cond) + + ##################################################################### + # Disk cost + cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title, + "OK", "OK", "OK", bitmap=False) + cost.text("Title", 15, 6, 200, 15, 0x30003, + r"{\DlgFontBold8}Disk Space Requirements") + cost.text("Description", 20, 20, 280, 20, 0x30003, + "The disk space required for the installation of the selected features.") + cost.text("Text", 20, 53, 330, 60, 3, + "The highlighted volumes (if any) do not have enough disk space " + "available for the currently selected features. You can either " + "remove some files from the highlighted volumes, or choose to " + "install less features onto local drive(s), or select different " + "destination drive(s).") + cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223, + None, "{120}{70}{70}{70}{70}", None, None) + cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return") + + ##################################################################### + # WhichUsers Dialog. Only available on NT, and for privileged users. + # This must be run before FindRelatedProducts, because that will + # take into account whether the previous installation was per-user + # or per-machine. We currently don't support going back to this + # dialog after "Next" was selected; to support this, we would need to + # find how to reset the ALLUSERS property, and how to re-run + # FindRelatedProducts. + # On Windows9x, the ALLUSERS property is ignored on the command line + # and in the Property table, but installer fails according to the documentation + # if a dialog attempts to set ALLUSERS. 
+ whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title, + "AdminInstall", "Next", "Cancel") + whichusers.title("Select whether to install [ProductName] for all users of this computer.") + # A radio group with two options: allusers, justme + g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3, + "WhichUsers", "", "Next") + g.add("ALL", 0, 5, 150, 20, "Install for all users") + g.add("JUSTME", 0, 25, 150, 20, "Install just for me") + + whichusers.back("Back", None, active=0) + + c = whichusers.next("Next >", "Cancel") + c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1) + c.event("EndDialog", "Return", ordering = 2) + + c = whichusers.cancel("Cancel", "AdminInstall") + c.event("SpawnDialog", "CancelDlg") + + ##################################################################### + # Installation Progress dialog (modeless) + progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title, + "Cancel", "Cancel", "Cancel", bitmap=False) + progress.text("Title", 20, 15, 200, 15, 0x30003, + r"{\DlgFontBold8}[Progress1] [ProductName]") + progress.text("Text", 35, 65, 300, 30, 3, + "Please wait while the Installer [Progress2] [ProductName]. " + "This may take several minutes.") + progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:") + + c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...") + c.mapping("ActionText", "Text") + + #c=progress.text("ActionData", 35, 140, 300, 20, 3, None) + #c.mapping("ActionData", "Text") + + c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537, + None, "Progress done", None, None) + c.mapping("SetProgress", "Progress") + + progress.back("< Back", "Next", active=False) + progress.next("Next >", "Cancel", active=False) + progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg") + + ################################################################### + # Maintenance type: repair/uninstall + maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title, + "Next", "Next", "Cancel") + maint.title("Welcome to the [ProductName] Setup Wizard") + maint.text("BodyText", 15, 63, 330, 42, 3, + "Select whether you want to repair or remove [ProductName].") + g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3, + "MaintenanceForm_Action", "", "Next") + #g.add("Change", 0, 0, 200, 17, "&Change [ProductName]") + g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]") + g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]") + + maint.back("< Back", None, active=False) + c=maint.next("Finish", "Cancel") + # Change installation: Change progress dialog to "Change", then ask + # for feature selection + #c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1) + #c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2) + + # Reinstall: Change progress dialog to "Repair", then invoke reinstall + # Also set list of reinstalled features to "ALL" + c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5) + c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6) + c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7) + c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8) + + # Uninstall: Change progress to "Remove", then invoke uninstall + # Also set list of removed features to "ALL" + c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11) + c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12) + c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13) + c.event("Remove", 
"ALL", 'MaintenanceForm_Action="Remove"', 14) + + # Close dialog when maintenance action scheduled + c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20) + #c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21) + + maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg") + + def get_installer_filename(self, fullname): + # Factored out to allow overriding in subclasses + if self.target_version: + base_name = "%s.%s-py%s.msi" % (fullname, self.plat_name, + self.target_version) + else: + base_name = "%s.%s.msi" % (fullname, self.plat_name) + installer_name = os.path.join(self.dist_dir, base_name) + return installer_name diff --git a/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/clean.py b/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/clean.py new file mode 100644 index 0000000000000000000000000000000000000000..0cb270166211fe2b24b6ec636f632a77a5ca6b8f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/clean.py @@ -0,0 +1,76 @@ +"""distutils.command.clean + +Implements the Distutils 'clean' command.""" + +# contributed by Bastian Kleineidam , added 2000-03-18 + +import os +from distutils.core import Command +from distutils.dir_util import remove_tree +from distutils import log + +class clean(Command): + + description = "clean up temporary files from 'build' command" + user_options = [ + ('build-base=', 'b', + "base build directory (default: 'build.build-base')"), + ('build-lib=', None, + "build directory for all modules (default: 'build.build-lib')"), + ('build-temp=', 't', + "temporary build directory (default: 'build.build-temp')"), + ('build-scripts=', None, + "build directory for scripts (default: 'build.build-scripts')"), + ('bdist-base=', None, + "temporary directory for built distributions"), + ('all', 'a', + "remove all build output, not just temporary by-products") + ] + + boolean_options = ['all'] + + def initialize_options(self): + self.build_base = None + self.build_lib = None + self.build_temp = None + self.build_scripts = None + self.bdist_base = None + self.all = None + + def finalize_options(self): + self.set_undefined_options('build', + ('build_base', 'build_base'), + ('build_lib', 'build_lib'), + ('build_scripts', 'build_scripts'), + ('build_temp', 'build_temp')) + self.set_undefined_options('bdist', + ('bdist_base', 'bdist_base')) + + def run(self): + # remove the build/temp. 
directory (unless it's already + # gone) + if os.path.exists(self.build_temp): + remove_tree(self.build_temp, dry_run=self.dry_run) + else: + log.debug("'%s' does not exist -- can't clean it", + self.build_temp) + + if self.all: + # remove build directories + for directory in (self.build_lib, + self.bdist_base, + self.build_scripts): + if os.path.exists(directory): + remove_tree(directory, dry_run=self.dry_run) + else: + log.warn("'%s' does not exist -- can't clean it", + directory) + + # just for the heck of it, try to remove the base build directory: + # we might have emptied it right now, but if not we don't care + if not self.dry_run: + try: + os.rmdir(self.build_base) + log.info("removing '%s'", self.build_base) + except OSError: + pass diff --git a/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/install.py b/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/install.py new file mode 100644 index 0000000000000000000000000000000000000000..18b352fac06f68adfaa4dd3927a0dc7bb3349408 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/install.py @@ -0,0 +1,721 @@ +"""distutils.command.install + +Implements the Distutils 'install' command.""" + +import sys +import os +import contextlib +import sysconfig +import itertools + +from distutils import log +from distutils.core import Command +from distutils.debug import DEBUG +from distutils.sysconfig import get_config_vars +from distutils.errors import DistutilsPlatformError +from distutils.file_util import write_file +from distutils.util import convert_path, subst_vars, change_root +from distutils.util import get_platform +from distutils.errors import DistutilsOptionError + +from site import USER_BASE +from site import USER_SITE +HAS_USER_SITE = True + +WINDOWS_SCHEME = { + 'purelib': '{base}/Lib/site-packages', + 'platlib': '{base}/Lib/site-packages', + 'headers': '{base}/Include/{dist_name}', + 'scripts': '{base}/Scripts', + 'data' : '{base}', +} + +INSTALL_SCHEMES = { + 'posix_prefix': { + 'purelib': '{base}/lib/{implementation_lower}{py_version_short}/site-packages', + 'platlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}/site-packages', + 'headers': '{base}/include/{implementation_lower}{py_version_short}{abiflags}/{dist_name}', + 'scripts': '{base}/bin', + 'data' : '{base}', + }, + 'posix_home': { + 'purelib': '{base}/lib/{implementation_lower}', + 'platlib': '{base}/{platlibdir}/{implementation_lower}', + 'headers': '{base}/include/{implementation_lower}/{dist_name}', + 'scripts': '{base}/bin', + 'data' : '{base}', + }, + 'nt': WINDOWS_SCHEME, + 'pypy': { + 'purelib': '{base}/site-packages', + 'platlib': '{base}/site-packages', + 'headers': '{base}/include/{dist_name}', + 'scripts': '{base}/bin', + 'data' : '{base}', + }, + 'pypy_nt': { + 'purelib': '{base}/site-packages', + 'platlib': '{base}/site-packages', + 'headers': '{base}/include/{dist_name}', + 'scripts': '{base}/Scripts', + 'data' : '{base}', + }, + } + +# user site schemes +if HAS_USER_SITE: + INSTALL_SCHEMES['nt_user'] = { + 'purelib': '{usersite}', + 'platlib': '{usersite}', + 'headers': '{userbase}/{implementation}{py_version_nodot}/Include/{dist_name}', + 'scripts': '{userbase}/{implementation}{py_version_nodot}/Scripts', + 'data' : '{userbase}', + } + + INSTALL_SCHEMES['posix_user'] = { + 'purelib': '{usersite}', + 'platlib': '{usersite}', + 'headers': + '{userbase}/include/{implementation_lower}{py_version_short}{abiflags}/{dist_name}', + 'scripts': '{userbase}/bin', + 
'data' : '{userbase}', + } + +# The keys to an installation scheme; if any new types of files are to be +# installed, be sure to add an entry to every installation scheme above, +# and to SCHEME_KEYS here. +SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data') + + +def _load_sysconfig_schemes(): + with contextlib.suppress(AttributeError): + return { + scheme: sysconfig.get_paths(scheme, expand=False) + for scheme in sysconfig.get_scheme_names() + } + + +def _load_schemes(): + """ + Extend default schemes with schemes from sysconfig. + """ + + sysconfig_schemes = _load_sysconfig_schemes() or {} + + return { + scheme: { + **INSTALL_SCHEMES.get(scheme, {}), + **sysconfig_schemes.get(scheme, {}), + } + for scheme in set(itertools.chain(INSTALL_SCHEMES, sysconfig_schemes)) + } + + +def _get_implementation(): + if hasattr(sys, 'pypy_version_info'): + return 'PyPy' + else: + return 'Python' + + +class install(Command): + + description = "install everything from build directory" + + user_options = [ + # Select installation scheme and set base director(y|ies) + ('prefix=', None, + "installation prefix"), + ('exec-prefix=', None, + "(Unix only) prefix for platform-specific files"), + ('home=', None, + "(Unix only) home directory to install under"), + + # Or, just set the base director(y|ies) + ('install-base=', None, + "base installation directory (instead of --prefix or --home)"), + ('install-platbase=', None, + "base installation directory for platform-specific files " + + "(instead of --exec-prefix or --home)"), + ('root=', None, + "install everything relative to this alternate root directory"), + + # Or, explicitly set the installation scheme + ('install-purelib=', None, + "installation directory for pure Python module distributions"), + ('install-platlib=', None, + "installation directory for non-pure module distributions"), + ('install-lib=', None, + "installation directory for all module distributions " + + "(overrides --install-purelib and --install-platlib)"), + + ('install-headers=', None, + "installation directory for C/C++ headers"), + ('install-scripts=', None, + "installation directory for Python scripts"), + ('install-data=', None, + "installation directory for data files"), + + # Byte-compilation options -- see install_lib.py for details, as + # these are duplicated from there (but only install_lib does + # anything with them). + ('compile', 'c', "compile .py to .pyc [default]"), + ('no-compile', None, "don't compile .py files"), + ('optimize=', 'O', + "also compile with optimization: -O1 for \"python -O\", " + "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), + + # Miscellaneous control options + ('force', 'f', + "force installation (overwrite any existing files)"), + ('skip-build', None, + "skip rebuilding everything (for testing/debugging)"), + + # Where to install documentation (eventually!) 
+ #('doc-format=', None, "format of documentation to generate"), + #('install-man=', None, "directory for Unix man pages"), + #('install-html=', None, "directory for HTML documentation"), + #('install-info=', None, "directory for GNU info files"), + + ('record=', None, + "filename in which to record list of installed files"), + ] + + boolean_options = ['compile', 'force', 'skip-build'] + + if HAS_USER_SITE: + user_options.append(('user', None, + "install in user site-package '%s'" % USER_SITE)) + boolean_options.append('user') + + negative_opt = {'no-compile' : 'compile'} + + + def initialize_options(self): + """Initializes options.""" + # High-level options: these select both an installation base + # and scheme. + self.prefix = None + self.exec_prefix = None + self.home = None + self.user = 0 + + # These select only the installation base; it's up to the user to + # specify the installation scheme (currently, that means supplying + # the --install-{platlib,purelib,scripts,data} options). + self.install_base = None + self.install_platbase = None + self.root = None + + # These options are the actual installation directories; if not + # supplied by the user, they are filled in using the installation + # scheme implied by prefix/exec-prefix/home and the contents of + # that installation scheme. + self.install_purelib = None # for pure module distributions + self.install_platlib = None # non-pure (dists w/ extensions) + self.install_headers = None # for C/C++ headers + self.install_lib = None # set to either purelib or platlib + self.install_scripts = None + self.install_data = None + self.install_userbase = USER_BASE + self.install_usersite = USER_SITE + + self.compile = None + self.optimize = None + + # Deprecated + # These two are for putting non-packagized distributions into their + # own directory and creating a .pth file if it makes sense. + # 'extra_path' comes from the setup file; 'install_path_file' can + # be turned off if it makes no sense to install a .pth file. (But + # better to install it uselessly than to guess wrong and not + # install it when it's necessary and would be used!) Currently, + # 'install_path_file' is always true unless some outsider meddles + # with it. + self.extra_path = None + self.install_path_file = 1 + + # 'force' forces installation, even if target files are not + # out-of-date. 'skip_build' skips running the "build" command, + # handy if you know it's not necessary. 'warn_dir' (which is *not* + # a user option, it's just there so the bdist_* commands can turn + # it off) determines whether we warn about installing to a + # directory not in sys.path. + self.force = 0 + self.skip_build = 0 + self.warn_dir = 1 + + # These are only here as a conduit from the 'build' command to the + # 'install_*' commands that do the real work. ('build_base' isn't + # actually used anywhere, but it might be useful in future.) They + # are not user options, because if the user told the install + # command where the build directory is, that wouldn't affect the + # build command. + self.build_base = None + self.build_lib = None + + # Not defined yet because we don't know anything about + # documentation yet. 
+ #self.install_man = None + #self.install_html = None + #self.install_info = None + + self.record = None + + + # -- Option finalizing methods ------------------------------------- + # (This is rather more involved than for most commands, + # because this is where the policy for installing third- + # party Python modules on various platforms given a wide + # array of user input is decided. Yes, it's quite complex!) + + def finalize_options(self): + """Finalizes options.""" + # This method (and its helpers, like 'finalize_unix()', + # 'finalize_other()', and 'select_scheme()') is where the default + # installation directories for modules, extension modules, and + # anything else we care to install from a Python module + # distribution. Thus, this code makes a pretty important policy + # statement about how third-party stuff is added to a Python + # installation! Note that the actual work of installation is done + # by the relatively simple 'install_*' commands; they just take + # their orders from the installation directory options determined + # here. + + # Check for errors/inconsistencies in the options; first, stuff + # that's wrong on any platform. + + if ((self.prefix or self.exec_prefix or self.home) and + (self.install_base or self.install_platbase)): + raise DistutilsOptionError( + "must supply either prefix/exec-prefix/home or " + + "install-base/install-platbase -- not both") + + if self.home and (self.prefix or self.exec_prefix): + raise DistutilsOptionError( + "must supply either home or prefix/exec-prefix -- not both") + + if self.user and (self.prefix or self.exec_prefix or self.home or + self.install_base or self.install_platbase): + raise DistutilsOptionError("can't combine user with prefix, " + "exec_prefix/home, or install_(plat)base") + + # Next, stuff that's wrong (or dubious) only on certain platforms. + if os.name != "posix": + if self.exec_prefix: + self.warn("exec-prefix option ignored on this platform") + self.exec_prefix = None + + # Now the interesting logic -- so interesting that we farm it out + # to other methods. The goal of these methods is to set the final + # values for the install_{lib,scripts,data,...} options, using as + # input a heady brew of prefix, exec_prefix, home, install_base, + # install_platbase, user-supplied versions of + # install_{purelib,platlib,lib,scripts,data,...}, and the + # install schemes. Phew! + + self.dump_dirs("pre-finalize_{unix,other}") + + if os.name == 'posix': + self.finalize_unix() + else: + self.finalize_other() + + self.dump_dirs("post-finalize_{unix,other}()") + + # Expand configuration variables, tilde, etc. in self.install_base + # and self.install_platbase -- that way, we can use $base or + # $platbase in the other installation directories and not worry + # about needing recursive variable expansion (shudder). + + py_version = sys.version.split()[0] + (prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix') + try: + abiflags = sys.abiflags + except AttributeError: + # sys.abiflags may not be defined on all platforms. 
+ abiflags = '' + self.config_vars = {'dist_name': self.distribution.get_name(), + 'dist_version': self.distribution.get_version(), + 'dist_fullname': self.distribution.get_fullname(), + 'py_version': py_version, + 'py_version_short': '%d.%d' % sys.version_info[:2], + 'py_version_nodot': '%d%d' % sys.version_info[:2], + 'sys_prefix': prefix, + 'prefix': prefix, + 'sys_exec_prefix': exec_prefix, + 'exec_prefix': exec_prefix, + 'abiflags': abiflags, + 'platlibdir': getattr(sys, 'platlibdir', 'lib'), + 'implementation_lower': _get_implementation().lower(), + 'implementation': _get_implementation(), + } + + if HAS_USER_SITE: + self.config_vars['userbase'] = self.install_userbase + self.config_vars['usersite'] = self.install_usersite + + self.expand_basedirs() + + self.dump_dirs("post-expand_basedirs()") + + # Now define config vars for the base directories so we can expand + # everything else. + self.config_vars['base'] = self.install_base + self.config_vars['platbase'] = self.install_platbase + self.config_vars['installed_base'] = ( + sysconfig.get_config_vars()['installed_base']) + + if DEBUG: + from pprint import pprint + print("config vars:") + pprint(self.config_vars) + + # Expand "~" and configuration variables in the installation + # directories. + self.expand_dirs() + + self.dump_dirs("post-expand_dirs()") + + # Create directories in the home dir: + if self.user: + self.create_home_path() + + # Pick the actual directory to install all modules to: either + # install_purelib or install_platlib, depending on whether this + # module distribution is pure or not. Of course, if the user + # already specified install_lib, use their selection. + if self.install_lib is None: + if self.distribution.has_ext_modules(): # has extensions: non-pure + self.install_lib = self.install_platlib + else: + self.install_lib = self.install_purelib + + + # Convert directories from Unix /-separated syntax to the local + # convention. + self.convert_paths('lib', 'purelib', 'platlib', + 'scripts', 'data', 'headers', + 'userbase', 'usersite') + + # Deprecated + # Well, we're not actually fully completely finalized yet: we still + # have to deal with 'extra_path', which is the hack for allowing + # non-packagized module distributions (hello, Numerical Python!) to + # get their own directories. + self.handle_extra_path() + self.install_libbase = self.install_lib # needed for .pth file + self.install_lib = os.path.join(self.install_lib, self.extra_dirs) + + # If a new root directory was supplied, make all the installation + # dirs relative to it. + if self.root is not None: + self.change_roots('libbase', 'lib', 'purelib', 'platlib', + 'scripts', 'data', 'headers') + + self.dump_dirs("after prepending root") + + # Find out the build directories, ie. where to install from. + self.set_undefined_options('build', + ('build_base', 'build_base'), + ('build_lib', 'build_lib')) + + # Punt on doc directories for now -- after all, we're punting on + # documentation completely! 
+ + def dump_dirs(self, msg): + """Dumps the list of user options.""" + if not DEBUG: + return + from distutils.fancy_getopt import longopt_xlate + log.debug(msg + ":") + for opt in self.user_options: + opt_name = opt[0] + if opt_name[-1] == "=": + opt_name = opt_name[0:-1] + if opt_name in self.negative_opt: + opt_name = self.negative_opt[opt_name] + opt_name = opt_name.translate(longopt_xlate) + val = not getattr(self, opt_name) + else: + opt_name = opt_name.translate(longopt_xlate) + val = getattr(self, opt_name) + log.debug(" %s: %s", opt_name, val) + + def finalize_unix(self): + """Finalizes options for posix platforms.""" + if self.install_base is not None or self.install_platbase is not None: + if ((self.install_lib is None and + self.install_purelib is None and + self.install_platlib is None) or + self.install_headers is None or + self.install_scripts is None or + self.install_data is None): + raise DistutilsOptionError( + "install-base or install-platbase supplied, but " + "installation scheme is incomplete") + return + + if self.user: + if self.install_userbase is None: + raise DistutilsPlatformError( + "User base directory is not specified") + self.install_base = self.install_platbase = self.install_userbase + self.select_scheme("posix_user") + elif self.home is not None: + self.install_base = self.install_platbase = self.home + self.select_scheme("posix_home") + else: + if self.prefix is None: + if self.exec_prefix is not None: + raise DistutilsOptionError( + "must not supply exec-prefix without prefix") + + # Allow Fedora to add components to the prefix + _prefix_addition = getattr(sysconfig, '_prefix_addition', "") + + self.prefix = ( + os.path.normpath(sys.prefix) + _prefix_addition) + self.exec_prefix = ( + os.path.normpath(sys.exec_prefix) + _prefix_addition) + + else: + if self.exec_prefix is None: + self.exec_prefix = self.prefix + + self.install_base = self.prefix + self.install_platbase = self.exec_prefix + self.select_scheme("posix_prefix") + + def finalize_other(self): + """Finalizes options for non-posix platforms""" + if self.user: + if self.install_userbase is None: + raise DistutilsPlatformError( + "User base directory is not specified") + self.install_base = self.install_platbase = self.install_userbase + self.select_scheme(os.name + "_user") + elif self.home is not None: + self.install_base = self.install_platbase = self.home + self.select_scheme("posix_home") + else: + if self.prefix is None: + self.prefix = os.path.normpath(sys.prefix) + + self.install_base = self.install_platbase = self.prefix + try: + self.select_scheme(os.name) + except KeyError: + raise DistutilsPlatformError( + "I don't know how to install stuff on '%s'" % os.name) + + def select_scheme(self, name): + """Sets the install directories by applying the install schemes.""" + # it's the caller's problem if they supply a bad name! 
+ if (hasattr(sys, 'pypy_version_info') and + sys.version_info < (3, 8) and + not name.endswith(('_user', '_home'))): + if os.name == 'nt': + name = 'pypy_nt' + else: + name = 'pypy' + scheme = _load_schemes()[name] + for key in SCHEME_KEYS: + attrname = 'install_' + key + if getattr(self, attrname) is None: + setattr(self, attrname, scheme[key]) + + def _expand_attrs(self, attrs): + for attr in attrs: + val = getattr(self, attr) + if val is not None: + if os.name == 'posix' or os.name == 'nt': + val = os.path.expanduser(val) + val = subst_vars(val, self.config_vars) + setattr(self, attr, val) + + def expand_basedirs(self): + """Calls `os.path.expanduser` on install_base, install_platbase and + root.""" + self._expand_attrs(['install_base', 'install_platbase', 'root']) + + def expand_dirs(self): + """Calls `os.path.expanduser` on install dirs.""" + self._expand_attrs(['install_purelib', 'install_platlib', + 'install_lib', 'install_headers', + 'install_scripts', 'install_data',]) + + def convert_paths(self, *names): + """Call `convert_path` over `names`.""" + for name in names: + attr = "install_" + name + setattr(self, attr, convert_path(getattr(self, attr))) + + def handle_extra_path(self): + """Set `path_file` and `extra_dirs` using `extra_path`.""" + if self.extra_path is None: + self.extra_path = self.distribution.extra_path + + if self.extra_path is not None: + log.warn( + "Distribution option extra_path is deprecated. " + "See issue27919 for details." + ) + if isinstance(self.extra_path, str): + self.extra_path = self.extra_path.split(',') + + if len(self.extra_path) == 1: + path_file = extra_dirs = self.extra_path[0] + elif len(self.extra_path) == 2: + path_file, extra_dirs = self.extra_path + else: + raise DistutilsOptionError( + "'extra_path' option must be a list, tuple, or " + "comma-separated string with 1 or 2 elements") + + # convert to local form in case Unix notation used (as it + # should be in setup scripts) + extra_dirs = convert_path(extra_dirs) + else: + path_file = None + extra_dirs = '' + + # XXX should we warn if path_file and not extra_dirs? (in which + # case the path file would be harmless but pointless) + self.path_file = path_file + self.extra_dirs = extra_dirs + + def change_roots(self, *names): + """Change the install directories pointed by name using root.""" + for name in names: + attr = "install_" + name + setattr(self, attr, change_root(self.root, getattr(self, attr))) + + def create_home_path(self): + """Create directories under ~.""" + if not self.user: + return + home = convert_path(os.path.expanduser("~")) + for name, path in self.config_vars.items(): + if path.startswith(home) and not os.path.isdir(path): + self.debug_print("os.makedirs('%s', 0o700)" % path) + os.makedirs(path, 0o700) + + # -- Command execution methods ------------------------------------- + + def run(self): + """Runs the command.""" + # Obviously have to build before we can install + if not self.skip_build: + self.run_command('build') + # If we built for any other platform, we can't install. + build_plat = self.distribution.get_command_obj('build').plat_name + # check warn_dir - it is a clue that the 'install' is happening + # internally, and not to sys.path, so we don't check the platform + # matches what we are running. 
+ if self.warn_dir and build_plat != get_platform(): + raise DistutilsPlatformError("Can't install when " + "cross-compiling") + + # Run all sub-commands (at least those that need to be run) + for cmd_name in self.get_sub_commands(): + self.run_command(cmd_name) + + if self.path_file: + self.create_path_file() + + # write list of installed files, if requested. + if self.record: + outputs = self.get_outputs() + if self.root: # strip any package prefix + root_len = len(self.root) + for counter in range(len(outputs)): + outputs[counter] = outputs[counter][root_len:] + self.execute(write_file, + (self.record, outputs), + "writing list of installed files to '%s'" % + self.record) + + sys_path = map(os.path.normpath, sys.path) + sys_path = map(os.path.normcase, sys_path) + install_lib = os.path.normcase(os.path.normpath(self.install_lib)) + if (self.warn_dir and + not (self.path_file and self.install_path_file) and + install_lib not in sys_path): + log.debug(("modules installed to '%s', which is not in " + "Python's module search path (sys.path) -- " + "you'll have to change the search path yourself"), + self.install_lib) + + def create_path_file(self): + """Creates the .pth file""" + filename = os.path.join(self.install_libbase, + self.path_file + ".pth") + if self.install_path_file: + self.execute(write_file, + (filename, [self.extra_dirs]), + "creating %s" % filename) + else: + self.warn("path file '%s' not created" % filename) + + + # -- Reporting methods --------------------------------------------- + + def get_outputs(self): + """Assembles the outputs of all the sub-commands.""" + outputs = [] + for cmd_name in self.get_sub_commands(): + cmd = self.get_finalized_command(cmd_name) + # Add the contents of cmd.get_outputs(), ensuring + # that outputs doesn't contain duplicate entries + for filename in cmd.get_outputs(): + if filename not in outputs: + outputs.append(filename) + + if self.path_file and self.install_path_file: + outputs.append(os.path.join(self.install_libbase, + self.path_file + ".pth")) + + return outputs + + def get_inputs(self): + """Returns the inputs of all the sub-commands""" + # XXX gee, this looks familiar ;-( + inputs = [] + for cmd_name in self.get_sub_commands(): + cmd = self.get_finalized_command(cmd_name) + inputs.extend(cmd.get_inputs()) + + return inputs + + # -- Predicates for sub-command list ------------------------------- + + def has_lib(self): + """Returns true if the current distribution has any Python + modules to install.""" + return (self.distribution.has_pure_modules() or + self.distribution.has_ext_modules()) + + def has_headers(self): + """Returns true if the current distribution has any headers to + install.""" + return self.distribution.has_headers() + + def has_scripts(self): + """Returns true if the current distribution has any scripts to + install.""" + return self.distribution.has_scripts() + + def has_data(self): + """Returns true if the current distribution has any data to + install.""" + return self.distribution.has_data_files() + + # 'sub_commands': a list of commands this command might have to run to + # get its work done. See cmd.py for more info. 
+ sub_commands = [('install_lib', has_lib), + ('install_headers', has_headers), + ('install_scripts', has_scripts), + ('install_data', has_data), + ('install_egg_info', lambda self:True), + ] diff --git a/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/install_egg_info.py b/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/install_egg_info.py new file mode 100644 index 0000000000000000000000000000000000000000..adc0323f98fde748a70aada6930ea29fd22724b1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/install_egg_info.py @@ -0,0 +1,84 @@ +"""distutils.command.install_egg_info + +Implements the Distutils 'install_egg_info' command, for installing +a package's PKG-INFO metadata.""" + + +from distutils.cmd import Command +from distutils import log, dir_util +import os, sys, re + +class install_egg_info(Command): + """Install an .egg-info file for the package""" + + description = "Install package's PKG-INFO metadata as an .egg-info file" + user_options = [ + ('install-dir=', 'd', "directory to install to"), + ] + + def initialize_options(self): + self.install_dir = None + + @property + def basename(self): + """ + Allow basename to be overridden by child class. + Ref pypa/distutils#2. + """ + return "%s-%s-py%d.%d.egg-info" % ( + to_filename(safe_name(self.distribution.get_name())), + to_filename(safe_version(self.distribution.get_version())), + *sys.version_info[:2] + ) + + def finalize_options(self): + self.set_undefined_options('install_lib',('install_dir','install_dir')) + self.target = os.path.join(self.install_dir, self.basename) + self.outputs = [self.target] + + def run(self): + target = self.target + if os.path.isdir(target) and not os.path.islink(target): + dir_util.remove_tree(target, dry_run=self.dry_run) + elif os.path.exists(target): + self.execute(os.unlink,(self.target,),"Removing "+target) + elif not os.path.isdir(self.install_dir): + self.execute(os.makedirs, (self.install_dir,), + "Creating "+self.install_dir) + log.info("Writing %s", target) + if not self.dry_run: + with open(target, 'w', encoding='UTF-8') as f: + self.distribution.metadata.write_pkg_file(f) + + def get_outputs(self): + return self.outputs + + +# The following routines are taken from setuptools' pkg_resources module and +# can be replaced by importing them from pkg_resources once it is included +# in the stdlib. + +def safe_name(name): + """Convert an arbitrary string to a standard distribution name + + Any runs of non-alphanumeric/. characters are replaced with a single '-'. + """ + return re.sub('[^A-Za-z0-9.]+', '-', name) + + +def safe_version(version): + """Convert an arbitrary string to a standard version string + + Spaces become dots, and all other non-alphanumeric characters become + dashes, with runs of multiple dashes condensed to a single dash. + """ + version = version.replace(' ','.') + return re.sub('[^A-Za-z0-9.]+', '-', version) + + +def to_filename(name): + """Convert a project or version name to its filename-escaped form + + Any '-' characters are currently replaced with '_'. 
+ """ + return name.replace('-','_') diff --git a/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/py37compat.py b/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/py37compat.py new file mode 100644 index 0000000000000000000000000000000000000000..754715a5084a9e4f04544ac8a4426d0871a0eb88 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/py37compat.py @@ -0,0 +1,30 @@ +import sys + + +def _pythonlib_compat(): + """ + On Python 3.7 and earlier, distutils would include the Python + library. See pypa/distutils#9. + """ + from distutils import sysconfig + if not sysconfig.get_config_var('Py_ENABLED_SHARED'): + return + + yield 'python{}.{}{}'.format( + sys.hexversion >> 24, + (sys.hexversion >> 16) & 0xff, + sysconfig.get_config_var('ABIFLAGS'), + ) + + +def compose(f1, f2): + return lambda *args, **kwargs: f1(f2(*args, **kwargs)) + + +pythonlib = ( + compose(list, _pythonlib_compat) + if sys.version_info < (3, 8) + and sys.platform != 'darwin' + and sys.platform[:3] != 'aix' + else list +) diff --git a/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/register.py b/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/register.py new file mode 100644 index 0000000000000000000000000000000000000000..0fac94e9e54905688d0e359fc5a9b96b703afab5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/register.py @@ -0,0 +1,304 @@ +"""distutils.command.register + +Implements the Distutils 'register' command (register with the repository). +""" + +# created 2002/10/21, Richard Jones + +import getpass +import io +import urllib.parse, urllib.request +from warnings import warn + +from distutils.core import PyPIRCCommand +from distutils.errors import * +from distutils import log + +class register(PyPIRCCommand): + + description = ("register the distribution with the Python package index") + user_options = PyPIRCCommand.user_options + [ + ('list-classifiers', None, + 'list the valid Trove classifiers'), + ('strict', None , + 'Will stop the registering if the meta-data are not fully compliant') + ] + boolean_options = PyPIRCCommand.boolean_options + [ + 'verify', 'list-classifiers', 'strict'] + + sub_commands = [('check', lambda self: True)] + + def initialize_options(self): + PyPIRCCommand.initialize_options(self) + self.list_classifiers = 0 + self.strict = 0 + + def finalize_options(self): + PyPIRCCommand.finalize_options(self) + # setting options for the `check` subcommand + check_options = {'strict': ('register', self.strict), + 'restructuredtext': ('register', 1)} + self.distribution.command_options['check'] = check_options + + def run(self): + self.finalize_options() + self._set_config() + + # Run sub commands + for cmd_name in self.get_sub_commands(): + self.run_command(cmd_name) + + if self.dry_run: + self.verify_metadata() + elif self.list_classifiers: + self.classifiers() + else: + self.send_metadata() + + def check_metadata(self): + """Deprecated API.""" + warn("distutils.command.register.check_metadata is deprecated, \ + use the check command instead", PendingDeprecationWarning) + check = self.distribution.get_command_obj('check') + check.ensure_finalized() + check.strict = self.strict + check.restructuredtext = 1 + check.run() + + def _set_config(self): + ''' Reads the configuration file and set attributes. 
+ ''' + config = self._read_pypirc() + if config != {}: + self.username = config['username'] + self.password = config['password'] + self.repository = config['repository'] + self.realm = config['realm'] + self.has_config = True + else: + if self.repository not in ('pypi', self.DEFAULT_REPOSITORY): + raise ValueError('%s not found in .pypirc' % self.repository) + if self.repository == 'pypi': + self.repository = self.DEFAULT_REPOSITORY + self.has_config = False + + def classifiers(self): + ''' Fetch the list of classifiers from the server. + ''' + url = self.repository+'?:action=list_classifiers' + response = urllib.request.urlopen(url) + log.info(self._read_pypi_response(response)) + + def verify_metadata(self): + ''' Send the metadata to the package index server to be checked. + ''' + # send the info to the server and report the result + (code, result) = self.post_to_server(self.build_post_data('verify')) + log.info('Server response (%s): %s', code, result) + + def send_metadata(self): + ''' Send the metadata to the package index server. + + Well, do the following: + 1. figure who the user is, and then + 2. send the data as a Basic auth'ed POST. + + First we try to read the username/password from $HOME/.pypirc, + which is a ConfigParser-formatted file with a section + [distutils] containing username and password entries (both + in clear text). Eg: + + [distutils] + index-servers = + pypi + + [pypi] + username: fred + password: sekrit + + Otherwise, to figure who the user is, we offer the user three + choices: + + 1. use existing login, + 2. register as a new user, or + 3. set the password to a random string and email the user. + + ''' + # see if we can short-cut and get the username/password from the + # config + if self.has_config: + choice = '1' + username = self.username + password = self.password + else: + choice = 'x' + username = password = '' + + # get the user's login info + choices = '1 2 3 4'.split() + while choice not in choices: + self.announce('''\ +We need to know who you are, so please choose either: + 1. use your existing login, + 2. register as a new user, + 3. have the server generate a new password for you (and email it to you), or + 4. 
quit +Your selection [default 1]: ''', log.INFO) + choice = input() + if not choice: + choice = '1' + elif choice not in choices: + print('Please choose one of the four options!') + + if choice == '1': + # get the username and password + while not username: + username = input('Username: ') + while not password: + password = getpass.getpass('Password: ') + + # set up the authentication + auth = urllib.request.HTTPPasswordMgr() + host = urllib.parse.urlparse(self.repository)[1] + auth.add_password(self.realm, host, username, password) + # send the info to the server and report the result + code, result = self.post_to_server(self.build_post_data('submit'), + auth) + self.announce('Server response (%s): %s' % (code, result), + log.INFO) + + # possibly save the login + if code == 200: + if self.has_config: + # sharing the password in the distribution instance + # so the upload command can reuse it + self.distribution.password = password + else: + self.announce(('I can store your PyPI login so future ' + 'submissions will be faster.'), log.INFO) + self.announce('(the login will be stored in %s)' % \ + self._get_rc_file(), log.INFO) + choice = 'X' + while choice.lower() not in 'yn': + choice = input('Save your login (y/N)?') + if not choice: + choice = 'n' + if choice.lower() == 'y': + self._store_pypirc(username, password) + + elif choice == '2': + data = {':action': 'user'} + data['name'] = data['password'] = data['email'] = '' + data['confirm'] = None + while not data['name']: + data['name'] = input('Username: ') + while data['password'] != data['confirm']: + while not data['password']: + data['password'] = getpass.getpass('Password: ') + while not data['confirm']: + data['confirm'] = getpass.getpass(' Confirm: ') + if data['password'] != data['confirm']: + data['password'] = '' + data['confirm'] = None + print("Password and confirm don't match!") + while not data['email']: + data['email'] = input(' EMail: ') + code, result = self.post_to_server(data) + if code != 200: + log.info('Server response (%s): %s', code, result) + else: + log.info('You will receive an email shortly.') + log.info(('Follow the instructions in it to ' + 'complete registration.')) + elif choice == '3': + data = {':action': 'password_reset'} + data['email'] = '' + while not data['email']: + data['email'] = input('Your email address: ') + code, result = self.post_to_server(data) + log.info('Server response (%s): %s', code, result) + + def build_post_data(self, action): + # figure the data to send - the metadata plus some additional + # information used by the package server + meta = self.distribution.metadata + data = { + ':action': action, + 'metadata_version' : '1.0', + 'name': meta.get_name(), + 'version': meta.get_version(), + 'summary': meta.get_description(), + 'home_page': meta.get_url(), + 'author': meta.get_contact(), + 'author_email': meta.get_contact_email(), + 'license': meta.get_licence(), + 'description': meta.get_long_description(), + 'keywords': meta.get_keywords(), + 'platform': meta.get_platforms(), + 'classifiers': meta.get_classifiers(), + 'download_url': meta.get_download_url(), + # PEP 314 + 'provides': meta.get_provides(), + 'requires': meta.get_requires(), + 'obsoletes': meta.get_obsoletes(), + } + if data['provides'] or data['requires'] or data['obsoletes']: + data['metadata_version'] = '1.1' + return data + + def post_to_server(self, data, auth=None): + ''' Post a query to the server, and return a string response. 
+ ''' + if 'name' in data: + self.announce('Registering %s to %s' % (data['name'], + self.repository), + log.INFO) + # Build up the MIME payload for the urllib2 POST data + boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' + sep_boundary = '\n--' + boundary + end_boundary = sep_boundary + '--' + body = io.StringIO() + for key, value in data.items(): + # handle multiple entries for the same name + if type(value) not in (type([]), type( () )): + value = [value] + for value in value: + value = str(value) + body.write(sep_boundary) + body.write('\nContent-Disposition: form-data; name="%s"'%key) + body.write("\n\n") + body.write(value) + if value and value[-1] == '\r': + body.write('\n') # write an extra newline (lurve Macs) + body.write(end_boundary) + body.write("\n") + body = body.getvalue().encode("utf-8") + + # build the Request + headers = { + 'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary, + 'Content-length': str(len(body)) + } + req = urllib.request.Request(self.repository, body, headers) + + # handle HTTP and include the Basic Auth handler + opener = urllib.request.build_opener( + urllib.request.HTTPBasicAuthHandler(password_mgr=auth) + ) + data = '' + try: + result = opener.open(req) + except urllib.error.HTTPError as e: + if self.show_response: + data = e.fp.read() + result = e.code, e.msg + except urllib.error.URLError as e: + result = 500, str(e) + else: + if self.show_response: + data = self._read_pypi_response(result) + result = 200, 'OK' + if self.show_response: + msg = '\n'.join(('-' * 75, data, '-' * 75)) + self.announce(msg, log.INFO) + return result diff --git a/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/upload.py b/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/upload.py new file mode 100644 index 0000000000000000000000000000000000000000..95e9fda186fc8f5b884215f7bea251b515e72cae --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/setuptools/_distutils/command/upload.py @@ -0,0 +1,214 @@ +""" +distutils.command.upload + +Implements the Distutils 'upload' subcommand (upload package to a package +index). 
+""" + +import os +import io +import hashlib +from base64 import standard_b64encode +from urllib.request import urlopen, Request, HTTPError +from urllib.parse import urlparse +from distutils.errors import DistutilsError, DistutilsOptionError +from distutils.core import PyPIRCCommand +from distutils.spawn import spawn +from distutils import log + + +# PyPI Warehouse supports MD5, SHA256, and Blake2 (blake2-256) +# https://bugs.python.org/issue40698 +_FILE_CONTENT_DIGESTS = { + "md5_digest": getattr(hashlib, "md5", None), + "sha256_digest": getattr(hashlib, "sha256", None), + "blake2_256_digest": getattr(hashlib, "blake2b", None), +} + + +class upload(PyPIRCCommand): + + description = "upload binary package to PyPI" + + user_options = PyPIRCCommand.user_options + [ + ('sign', 's', + 'sign files to upload using gpg'), + ('identity=', 'i', 'GPG identity used to sign files'), + ] + + boolean_options = PyPIRCCommand.boolean_options + ['sign'] + + def initialize_options(self): + PyPIRCCommand.initialize_options(self) + self.username = '' + self.password = '' + self.show_response = 0 + self.sign = False + self.identity = None + + def finalize_options(self): + PyPIRCCommand.finalize_options(self) + if self.identity and not self.sign: + raise DistutilsOptionError( + "Must use --sign for --identity to have meaning" + ) + config = self._read_pypirc() + if config != {}: + self.username = config['username'] + self.password = config['password'] + self.repository = config['repository'] + self.realm = config['realm'] + + # getting the password from the distribution + # if previously set by the register command + if not self.password and self.distribution.password: + self.password = self.distribution.password + + def run(self): + if not self.distribution.dist_files: + msg = ("Must create and upload files in one command " + "(e.g. 
setup.py sdist upload)") + raise DistutilsOptionError(msg) + for command, pyversion, filename in self.distribution.dist_files: + self.upload_file(command, pyversion, filename) + + def upload_file(self, command, pyversion, filename): + # Makes sure the repository URL is compliant + schema, netloc, url, params, query, fragments = \ + urlparse(self.repository) + if params or query or fragments: + raise AssertionError("Incompatible url %s" % self.repository) + + if schema not in ('http', 'https'): + raise AssertionError("unsupported schema " + schema) + + # Sign if requested + if self.sign: + gpg_args = ["gpg", "--detach-sign", "-a", filename] + if self.identity: + gpg_args[2:2] = ["--local-user", self.identity] + spawn(gpg_args, + dry_run=self.dry_run) + + # Fill in the data - send all the meta-data in case we need to + # register a new release + f = open(filename,'rb') + try: + content = f.read() + finally: + f.close() + + meta = self.distribution.metadata + data = { + # action + ':action': 'file_upload', + 'protocol_version': '1', + + # identify release + 'name': meta.get_name(), + 'version': meta.get_version(), + + # file content + 'content': (os.path.basename(filename),content), + 'filetype': command, + 'pyversion': pyversion, + + # additional meta-data + 'metadata_version': '1.0', + 'summary': meta.get_description(), + 'home_page': meta.get_url(), + 'author': meta.get_contact(), + 'author_email': meta.get_contact_email(), + 'license': meta.get_licence(), + 'description': meta.get_long_description(), + 'keywords': meta.get_keywords(), + 'platform': meta.get_platforms(), + 'classifiers': meta.get_classifiers(), + 'download_url': meta.get_download_url(), + # PEP 314 + 'provides': meta.get_provides(), + 'requires': meta.get_requires(), + 'obsoletes': meta.get_obsoletes(), + } + + data['comment'] = '' + + # file content digests + for digest_name, digest_cons in _FILE_CONTENT_DIGESTS.items(): + if digest_cons is None: + continue + try: + data[digest_name] = digest_cons(content).hexdigest() + except ValueError: + # hash digest not available or blocked by security policy + pass + + if self.sign: + with open(filename + ".asc", "rb") as f: + data['gpg_signature'] = (os.path.basename(filename) + ".asc", + f.read()) + + # set up the authentication + user_pass = (self.username + ":" + self.password).encode('ascii') + # The exact encoding of the authentication string is debated. + # Anyway PyPI only accepts ascii for both username or password. 
+ auth = "Basic " + standard_b64encode(user_pass).decode('ascii') + + # Build up the MIME payload for the POST data + boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' + sep_boundary = b'\r\n--' + boundary.encode('ascii') + end_boundary = sep_boundary + b'--\r\n' + body = io.BytesIO() + for key, value in data.items(): + title = '\r\nContent-Disposition: form-data; name="%s"' % key + # handle multiple entries for the same name + if not isinstance(value, list): + value = [value] + for value in value: + if type(value) is tuple: + title += '; filename="%s"' % value[0] + value = value[1] + else: + value = str(value).encode('utf-8') + body.write(sep_boundary) + body.write(title.encode('utf-8')) + body.write(b"\r\n\r\n") + body.write(value) + body.write(end_boundary) + body = body.getvalue() + + msg = "Submitting %s to %s" % (filename, self.repository) + self.announce(msg, log.INFO) + + # build the Request + headers = { + 'Content-type': 'multipart/form-data; boundary=%s' % boundary, + 'Content-length': str(len(body)), + 'Authorization': auth, + } + + request = Request(self.repository, data=body, + headers=headers) + # send the data + try: + result = urlopen(request) + status = result.getcode() + reason = result.msg + except HTTPError as e: + status = e.code + reason = e.msg + except OSError as e: + self.announce(str(e), log.ERROR) + raise + + if status == 200: + self.announce('Server response (%s): %s' % (status, reason), + log.INFO) + if self.show_response: + text = self._read_pypi_response(result) + msg = '\n'.join(('-' * 75, text, '-' * 75)) + self.announce(msg, log.INFO) + else: + msg = 'Upload failed (%s): %s' % (status, reason) + self.announce(msg, log.ERROR) + raise DistutilsError(msg) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/activations.py b/llmeval-env/lib/python3.10/site-packages/transformers/activations.py new file mode 100644 index 0000000000000000000000000000000000000000..2355fb5fed678d0de6e2c53f52644a35a691a34e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/activations.py @@ -0,0 +1,239 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from collections import OrderedDict + +import torch +from packaging import version +from torch import Tensor, nn + +from .utils import logging + + +logger = logging.get_logger(__name__) + + +class PytorchGELUTanh(nn.Module): + """ + A fast C implementation of the tanh approximation of the GeLU activation function. See + https://arxiv.org/abs/1606.08415. + + This implementation is equivalent to NewGELU and FastGELU but much faster. However, it is not an exact numerical + match due to rounding errors. + """ + + def __init__(self): + super().__init__() + if version.parse(torch.__version__) < version.parse("1.12.0"): + raise ImportError( + f"You are using torch=={torch.__version__}, but torch>=1.12.0 is required to use " + "PytorchGELUTanh. Please upgrade torch." 
+ ) + + def forward(self, input: Tensor) -> Tensor: + return nn.functional.gelu(input, approximate="tanh") + + +class NewGELUActivation(nn.Module): + """ + Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see + the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415 + """ + + def forward(self, input: Tensor) -> Tensor: + return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0)))) + + +class GELUActivation(nn.Module): + """ + Original Implementation of the GELU activation function in Google BERT repo when initially created. For + information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 + + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional + Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415 + """ + + def __init__(self, use_gelu_python: bool = False): + super().__init__() + if use_gelu_python: + self.act = self._gelu_python + else: + self.act = nn.functional.gelu + + def _gelu_python(self, input: Tensor) -> Tensor: + return input * 0.5 * (1.0 + torch.erf(input / math.sqrt(2.0))) + + def forward(self, input: Tensor) -> Tensor: + return self.act(input) + + +class FastGELUActivation(nn.Module): + """ + Applies GELU approximation that is slower than QuickGELU but more accurate. See: https://github.com/hendrycks/GELUs + """ + + def forward(self, input: Tensor) -> Tensor: + return 0.5 * input * (1.0 + torch.tanh(input * 0.7978845608 * (1.0 + 0.044715 * input * input))) + + +class QuickGELUActivation(nn.Module): + """ + Applies GELU approximation that is fast but somewhat inaccurate. See: https://github.com/hendrycks/GELUs + """ + + def forward(self, input: Tensor) -> Tensor: + return input * torch.sigmoid(1.702 * input) + + +class ClippedGELUActivation(nn.Module): + """ + Clip the range of possible GeLU outputs between [min, max]. This is especially useful for quantization purpose, as + it allows mapping negatives values in the GeLU spectrum. For more information on this trick, please refer to + https://arxiv.org/abs/2004.09602. + + Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when + initially created. + + For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))). See https://arxiv.org/abs/1606.08415 + """ + + def __init__(self, min: float, max: float): + if min > max: + raise ValueError(f"min should be < max (got min: {min}, max: {max})") + + super().__init__() + self.min = min + self.max = max + + def forward(self, x: Tensor) -> Tensor: + return torch.clip(gelu(x), self.min, self.max) + + +class AccurateGELUActivation(nn.Module): + """ + Applies GELU approximation that is faster than default and more accurate than QuickGELU. See: + https://github.com/hendrycks/GELUs + + Implemented along with MEGA (Moving Average Equipped Gated Attention) + """ + + def __init__(self): + super().__init__() + self.precomputed_constant = math.sqrt(2 / math.pi) + + def forward(self, input: Tensor) -> Tensor: + return 0.5 * input * (1 + torch.tanh(self.precomputed_constant * (input + 0.044715 * torch.pow(input, 3)))) + + +class MishActivation(nn.Module): + """ + See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). 
Also + visit the official repository for the paper: https://github.com/digantamisra98/Mish + """ + + def __init__(self): + super().__init__() + if version.parse(torch.__version__) < version.parse("1.9.0"): + self.act = self._mish_python + else: + self.act = nn.functional.mish + + def _mish_python(self, input: Tensor) -> Tensor: + return input * torch.tanh(nn.functional.softplus(input)) + + def forward(self, input: Tensor) -> Tensor: + return self.act(input) + + +class LinearActivation(nn.Module): + """ + Applies the linear activation function, i.e. forwarding input directly to output. + """ + + def forward(self, input: Tensor) -> Tensor: + return input + + +class LaplaceActivation(nn.Module): + """ + Applies elementwise activation based on Laplace function, introduced in MEGA as an attention activation. See + https://arxiv.org/abs/2209.10655 + + Inspired by squared relu, but with bounded range and gradient for better stability + """ + + def forward(self, input, mu=0.707107, sigma=0.282095): + input = (input - mu).div(sigma * math.sqrt(2.0)) + return 0.5 * (1.0 + torch.erf(input)) + + +class ReLUSquaredActivation(nn.Module): + """ + Applies the relu^2 activation introduced in https://arxiv.org/abs/2109.08668v2 + """ + + def forward(self, input): + relu_applied = nn.functional.relu(input) + squared = torch.square(relu_applied) + return squared + + +class ClassInstantier(OrderedDict): + def __getitem__(self, key): + content = super().__getitem__(key) + cls, kwargs = content if isinstance(content, tuple) else (content, {}) + return cls(**kwargs) + + +ACT2CLS = { + "gelu": GELUActivation, + "gelu_10": (ClippedGELUActivation, {"min": -10, "max": 10}), + "gelu_fast": FastGELUActivation, + "gelu_new": NewGELUActivation, + "gelu_python": (GELUActivation, {"use_gelu_python": True}), + "gelu_pytorch_tanh": PytorchGELUTanh, + "gelu_accurate": AccurateGELUActivation, + "laplace": LaplaceActivation, + "leaky_relu": nn.LeakyReLU, + "linear": LinearActivation, + "mish": MishActivation, + "quick_gelu": QuickGELUActivation, + "relu": nn.ReLU, + "relu2": ReLUSquaredActivation, + "relu6": nn.ReLU6, + "sigmoid": nn.Sigmoid, + "silu": nn.SiLU, + "swish": nn.SiLU, + "tanh": nn.Tanh, +} +ACT2FN = ClassInstantier(ACT2CLS) + + +def get_activation(activation_string): + if activation_string in ACT2FN: + return ACT2FN[activation_string] + else: + raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}") + + +# For backwards compatibility with: from activations import gelu_python +gelu_python = get_activation("gelu_python") +gelu_new = get_activation("gelu_new") +gelu = get_activation("gelu") +gelu_fast = get_activation("gelu_fast") +quick_gelu = get_activation("quick_gelu") +silu = get_activation("silu") +mish = get_activation("mish") +linear_act = get_activation("linear") diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/activations_tf.py b/llmeval-env/lib/python3.10/site-packages/transformers/activations_tf.py new file mode 100644 index 0000000000000000000000000000000000000000..d12b73ea45176f3a4bc42cdabe8b73078a3b90f2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/activations_tf.py @@ -0,0 +1,147 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math + +import tensorflow as tf +from packaging.version import parse + + +try: + import tf_keras as keras +except (ModuleNotFoundError, ImportError): + import keras + + if parse(keras.__version__).major > 2: + raise ValueError( + "Your currently installed version of Keras is Keras 3, but this is not yet supported in " + "Transformers. Please install the backwards-compatible tf-keras package with " + "`pip install tf-keras`." + ) + + +def _gelu(x): + """ + Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when + initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): + 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see + https://arxiv.org/abs/1606.08415 + """ + x = tf.convert_to_tensor(x) + cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype))) + + return x * cdf + + +def _gelu_new(x): + """ + Gaussian Error Linear Unit. This is a smoother version of the GELU. Original paper: https://arxiv.org/abs/1606.08415 + + Args: + x: float Tensor to perform activation + + Returns: + `x` with the GELU activation applied. + """ + x = tf.convert_to_tensor(x) + pi = tf.cast(math.pi, x.dtype) + coeff = tf.cast(0.044715, x.dtype) + cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3)))) + + return x * cdf + + +def mish(x): + x = tf.convert_to_tensor(x) + + return x * tf.tanh(tf.math.softplus(x)) + + +def gelu_fast(x): + x = tf.convert_to_tensor(x) + coeff1 = tf.cast(0.044715, x.dtype) + coeff2 = tf.cast(0.7978845608, x.dtype) + + return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x))) + + +def quick_gelu(x): + x = tf.convert_to_tensor(x) + coeff = tf.cast(1.702, x.dtype) + return x * tf.math.sigmoid(coeff * x) + + +def gelu_10(x): + """ + Clip the range of possible GeLU outputs between [-10, 10]. This is especially useful for quantization purpose, as + it allows mapping 2 negative values in the GeLU spectrum. For more information on this trick, please refer to + https://arxiv.org/abs/2004.09602 + + Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when + initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): + 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see + https://arxiv.org/abs/1606.08415 :param x: :return: + """ + return tf.clip_by_value(_gelu(x), -10, 10) + + +def glu(x, axis=-1): + """ + Gated Linear Unit. Implementation as defined in the original paper (see https://arxiv.org/abs/1612.08083), where + the input `x` is split in two halves across a dimension (`axis`), A and B, returning A * sigmoid(B). + + Args: + `x`: float Tensor to perform activation + `axis`: dimension across which `x` will be split in half + + Returns: + `x` with the GLU activation applied (with its size halved across the dimension `axis`). 
+ """ + a, b = tf.split(x, 2, axis=axis) + return a * tf.math.sigmoid(b) + + +if parse(tf.version.VERSION) >= parse("2.4"): + + def approximate_gelu_wrap(x): + return keras.activations.gelu(x, approximate=True) + + gelu = keras.activations.gelu + gelu_new = approximate_gelu_wrap +else: + gelu = _gelu + gelu_new = _gelu_new + + +ACT2FN = { + "gelu": gelu, + "gelu_10": gelu_10, + "gelu_fast": gelu_fast, + "gelu_new": gelu_new, + "glu": glu, + "mish": mish, + "quick_gelu": quick_gelu, + "relu": keras.activations.relu, + "sigmoid": keras.activations.sigmoid, + "silu": keras.activations.swish, + "swish": keras.activations.swish, + "tanh": keras.activations.tanh, +} + + +def get_tf_activation(activation_string): + if activation_string in ACT2FN: + return ACT2FN[activation_string] + else: + raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}") diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/audio_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/audio_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c5c73550c1c3465422b99b90654ec675c85bc11c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/audio_utils.py @@ -0,0 +1,825 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team and the librosa & torchaudio authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Audio processing functions to extract features from audio waveforms. This code is pure numpy to support all frameworks +and remove unnecessary dependencies. +""" +import warnings +from typing import Optional, Tuple, Union + +import numpy as np + + +def hertz_to_mel(freq: Union[float, np.ndarray], mel_scale: str = "htk") -> Union[float, np.ndarray]: + """ + Convert frequency from hertz to mels. + + Args: + freq (`float` or `np.ndarray`): + The frequency, or multiple frequencies, in hertz (Hz). + mel_scale (`str`, *optional*, defaults to `"htk"`): + The mel frequency scale to use, `"htk"`, `"kaldi"` or `"slaney"`. + + Returns: + `float` or `np.ndarray`: The frequencies on the mel scale. + """ + + if mel_scale not in ["slaney", "htk", "kaldi"]: + raise ValueError('mel_scale should be one of "htk", "slaney" or "kaldi".') + + if mel_scale == "htk": + return 2595.0 * np.log10(1.0 + (freq / 700.0)) + elif mel_scale == "kaldi": + return 1127.0 * np.log(1.0 + (freq / 700.0)) + + min_log_hertz = 1000.0 + min_log_mel = 15.0 + logstep = 27.0 / np.log(6.4) + mels = 3.0 * freq / 200.0 + + if isinstance(freq, np.ndarray): + log_region = freq >= min_log_hertz + mels[log_region] = min_log_mel + np.log(freq[log_region] / min_log_hertz) * logstep + elif freq >= min_log_hertz: + mels = min_log_mel + np.log(freq / min_log_hertz) * logstep + + return mels + + +def mel_to_hertz(mels: Union[float, np.ndarray], mel_scale: str = "htk") -> Union[float, np.ndarray]: + """ + Convert frequency from mels to hertz. + + Args: + mels (`float` or `np.ndarray`): + The frequency, or multiple frequencies, in mels. 
+ mel_scale (`str`, *optional*, defaults to `"htk"`): + The mel frequency scale to use, `"htk"`, `"kaldi"` or `"slaney"`. + + Returns: + `float` or `np.ndarray`: The frequencies in hertz. + """ + + if mel_scale not in ["slaney", "htk", "kaldi"]: + raise ValueError('mel_scale should be one of "htk", "slaney" or "kaldi".') + + if mel_scale == "htk": + return 700.0 * (np.power(10, mels / 2595.0) - 1.0) + elif mel_scale == "kaldi": + return 700.0 * (np.exp(mels / 1127.0) - 1.0) + + min_log_hertz = 1000.0 + min_log_mel = 15.0 + logstep = np.log(6.4) / 27.0 + freq = 200.0 * mels / 3.0 + + if isinstance(mels, np.ndarray): + log_region = mels >= min_log_mel + freq[log_region] = min_log_hertz * np.exp(logstep * (mels[log_region] - min_log_mel)) + elif mels >= min_log_mel: + freq = min_log_hertz * np.exp(logstep * (mels - min_log_mel)) + + return freq + + +def hertz_to_octave( + freq: Union[float, np.ndarray], tuning: Optional[float] = 0.0, bins_per_octave: Optional[int] = 12 +): + """ + Convert frequency from hertz to fractional octave numbers. + Adapted from *librosa*. + + Args: + freq (`float` or `np.ndarray`): + The frequency, or multiple frequencies, in hertz (Hz). + tuning (`float`, defaults to `0.`): + Tuning deviation from the Stuttgart pitch (A440) in (fractional) bins per octave. + bins_per_octave (`int`, defaults to `12`): + Number of bins per octave. + + Returns: + `float` or `np.ndarray`: The frequencies on the octave scale. + """ + stuttgart_pitch = 440.0 * 2.0 ** (tuning / bins_per_octave) + octave = np.log2(freq / (float(stuttgart_pitch) / 16)) + return octave + + +def _create_triangular_filter_bank(fft_freqs: np.ndarray, filter_freqs: np.ndarray) -> np.ndarray: + """ + Creates a triangular filter bank. + + Adapted from *torchaudio* and *librosa*. + + Args: + fft_freqs (`np.ndarray` of shape `(num_frequency_bins,)`): + Discrete frequencies of the FFT bins in Hz. + filter_freqs (`np.ndarray` of shape `(num_mel_filters,)`): + Center frequencies of the triangular filters to create, in Hz. + + Returns: + `np.ndarray` of shape `(num_frequency_bins, num_mel_filters)` + """ + filter_diff = np.diff(filter_freqs) + slopes = np.expand_dims(filter_freqs, 0) - np.expand_dims(fft_freqs, 1) + down_slopes = -slopes[:, :-2] / filter_diff[:-1] + up_slopes = slopes[:, 2:] / filter_diff[1:] + return np.maximum(np.zeros(1), np.minimum(down_slopes, up_slopes)) + + +def chroma_filter_bank( + num_frequency_bins: int, + num_chroma: int, + sampling_rate: int, + tuning: float = 0.0, + power: Optional[float] = 2.0, + weighting_parameters: Optional[Tuple[float]] = (5.0, 2), + start_at_c_chroma: Optional[bool] = True, +): + """ + Creates a chroma filter bank, i.e. a linear transformation to project spectrogram bins onto chroma bins. + + Adapted from *librosa*. + + Args: + num_frequency_bins (`int`): + Number of frequencies used to compute the spectrogram (should be the same as in `stft`). + num_chroma (`int`): + Number of chroma bins (i.e. pitch classes). + sampling_rate (`float`): + Sample rate of the audio waveform. + tuning (`float`): + Tuning deviation from A440 in fractions of a chroma bin. + power (`float`, *optional*, defaults to 2.0): + If 2.0, normalizes each column with its L2 norm. If 1.0, normalizes each column with its L1 norm. + weighting_parameters (`Tuple[float]`, *optional*, defaults to `(5., 2.)`): + If specified, apply a Gaussian weighting parameterized by the first element of the tuple being the center and + the second element being the Gaussian half-width. 
+ start_at_c_chroma (`float`, *optional*, defaults to `True`): + If True, the filter bank will start at the 'C' pitch class. Otherwise, it will start at 'A'. + Returns: + `np.ndarray` of shape `(num_frequency_bins, num_chroma)` + """ + # Get the FFT bins, not counting the DC component + frequencies = np.linspace(0, sampling_rate, num_frequency_bins, endpoint=False)[1:] + + freq_bins = num_chroma * hertz_to_octave(frequencies, tuning=tuning, bins_per_octave=num_chroma) + + # make up a value for the 0 Hz bin = 1.5 octaves below bin 1 + # (so chroma is 50% rotated from bin 1, and bin width is broad) + freq_bins = np.concatenate(([freq_bins[0] - 1.5 * num_chroma], freq_bins)) + + bins_width = np.concatenate((np.maximum(freq_bins[1:] - freq_bins[:-1], 1.0), [1])) + + chroma_filters = np.subtract.outer(freq_bins, np.arange(0, num_chroma, dtype="d")).T + + num_chroma2 = np.round(float(num_chroma) / 2) + + # Project into range -num_chroma/2 .. num_chroma/2 + # add on fixed offset of 10*num_chroma to ensure all values passed to + # rem are positive + chroma_filters = np.remainder(chroma_filters + num_chroma2 + 10 * num_chroma, num_chroma) - num_chroma2 + + # Gaussian bumps - 2*D to make them narrower + chroma_filters = np.exp(-0.5 * (2 * chroma_filters / np.tile(bins_width, (num_chroma, 1))) ** 2) + + # normalize each column + if power is not None: + chroma_filters = chroma_filters / np.sum(chroma_filters**power, axis=0, keepdims=True) ** (1.0 / power) + + # Maybe apply scaling for fft bins + if weighting_parameters is not None: + center, half_width = weighting_parameters + chroma_filters *= np.tile( + np.exp(-0.5 * (((freq_bins / num_chroma - center) / half_width) ** 2)), + (num_chroma, 1), + ) + + if start_at_c_chroma: + chroma_filters = np.roll(chroma_filters, -3 * (num_chroma // 12), axis=0) + + # remove aliasing columns, copy to ensure row-contiguity + return np.ascontiguousarray(chroma_filters[:, : int(1 + num_frequency_bins / 2)]) + + +def mel_filter_bank( + num_frequency_bins: int, + num_mel_filters: int, + min_frequency: float, + max_frequency: float, + sampling_rate: int, + norm: Optional[str] = None, + mel_scale: str = "htk", + triangularize_in_mel_space: bool = False, +) -> np.ndarray: + """ + Creates a frequency bin conversion matrix used to obtain a mel spectrogram. This is called a *mel filter bank*, and + various implementation exist, which differ in the number of filters, the shape of the filters, the way the filters + are spaced, the bandwidth of the filters, and the manner in which the spectrum is warped. The goal of these + features is to approximate the non-linear human perception of the variation in pitch with respect to the frequency. + + Different banks of mel filters were introduced in the literature. The following variations are supported: + + - MFCC FB-20: introduced in 1980 by Davis and Mermelstein, it assumes a sampling frequency of 10 kHz and a speech + bandwidth of `[0, 4600]` Hz. + - MFCC FB-24 HTK: from the Cambridge HMM Toolkit (HTK) (1995) uses a filter bank of 24 filters for a speech + bandwidth of `[0, 8000]` Hz. This assumes sampling rate ≥ 16 kHz. + - MFCC FB-40: from the Auditory Toolbox for MATLAB written by Slaney in 1998, assumes a sampling rate of 16 kHz and + speech bandwidth of `[133, 6854]` Hz. This version also includes area normalization. + - HFCC-E FB-29 (Human Factor Cepstral Coefficients) of Skowronski and Harris (2004), assumes a sampling rate of + 12.5 kHz and speech bandwidth of `[0, 6250]` Hz. 
+ + This code is adapted from *torchaudio* and *librosa*. Note that the default parameters of torchaudio's + `melscale_fbanks` implement the `"htk"` filters while librosa uses the `"slaney"` implementation. + + Args: + num_frequency_bins (`int`): + Number of frequencies used to compute the spectrogram (should be the same as in `stft`). + num_mel_filters (`int`): + Number of mel filters to generate. + min_frequency (`float`): + Lowest frequency of interest in Hz. + max_frequency (`float`): + Highest frequency of interest in Hz. This should not exceed `sampling_rate / 2`. + sampling_rate (`int`): + Sample rate of the audio waveform. + norm (`str`, *optional*): + If `"slaney"`, divide the triangular mel weights by the width of the mel band (area normalization). + mel_scale (`str`, *optional*, defaults to `"htk"`): + The mel frequency scale to use, `"htk"`, `"kaldi"` or `"slaney"`. + triangularize_in_mel_space (`bool`, *optional*, defaults to `False`): + If this option is enabled, the triangular filter is applied in mel space rather than frequency space. This + should be set to `True` in order to get the same results as `torchaudio` when computing mel filters. + + Returns: + `np.ndarray` of shape (`num_frequency_bins`, `num_mel_filters`): Triangular filter bank matrix. This is a + projection matrix to go from a spectrogram to a mel spectrogram. + """ + if norm is not None and norm != "slaney": + raise ValueError('norm must be one of None or "slaney"') + + # center points of the triangular mel filters + mel_min = hertz_to_mel(min_frequency, mel_scale=mel_scale) + mel_max = hertz_to_mel(max_frequency, mel_scale=mel_scale) + mel_freqs = np.linspace(mel_min, mel_max, num_mel_filters + 2) + filter_freqs = mel_to_hertz(mel_freqs, mel_scale=mel_scale) + + if triangularize_in_mel_space: + # frequencies of FFT bins in Hz, but filters triangularized in mel space + fft_bin_width = sampling_rate / (num_frequency_bins * 2) + fft_freqs = hertz_to_mel(fft_bin_width * np.arange(num_frequency_bins), mel_scale=mel_scale) + filter_freqs = mel_freqs + else: + # frequencies of FFT bins in Hz + fft_freqs = np.linspace(0, sampling_rate // 2, num_frequency_bins) + + mel_filters = _create_triangular_filter_bank(fft_freqs, filter_freqs) + + if norm is not None and norm == "slaney": + # Slaney-style mel is scaled to be approx constant energy per channel + enorm = 2.0 / (filter_freqs[2 : num_mel_filters + 2] - filter_freqs[:num_mel_filters]) + mel_filters *= np.expand_dims(enorm, 0) + + if (mel_filters.max(axis=0) == 0.0).any(): + warnings.warn( + "At least one mel filter has all zero values. " + f"The value for `num_mel_filters` ({num_mel_filters}) may be set too high. " + f"Or, the value for `num_frequency_bins` ({num_frequency_bins}) may be set too low." + ) + + return mel_filters + + +def optimal_fft_length(window_length: int) -> int: + """ + Finds the best FFT input size for a given `window_length`. If the window length is not + already a power of two, it is rounded up to the next power of two. + + The FFT algorithm works fastest when the length of the input is a power of two, which may be larger than the size + of the window or analysis frame. For example, if the window is 400 samples, using an FFT input size of 512 samples + is more efficient than an FFT size of 400 samples. Using a larger FFT size does not affect the detected frequencies, + it simply gives a higher frequency resolution (i.e. the frequency bins are smaller). 
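+
+    Example (illustrative; a 400-sample window is rounded up to 512, while 1024 is already a power of two):
+
+    ```python
+    >>> from transformers.audio_utils import optimal_fft_length
+
+    >>> optimal_fft_length(400)
+    512
+    >>> optimal_fft_length(1024)
+    1024
+    ```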
+ """ + return 2 ** int(np.ceil(np.log2(window_length))) + + +def window_function( + window_length: int, + name: str = "hann", + periodic: bool = True, + frame_length: Optional[int] = None, + center: bool = True, +) -> np.ndarray: + """ + Returns an array containing the specified window. This window is intended to be used with `stft`. + + The following window types are supported: + + - `"boxcar"`: a rectangular window + - `"hamming"`: the Hamming window + - `"hann"`: the Hann window + - `"povey"`: the Povey window + + Args: + window_length (`int`): + The length of the window in samples. + name (`str`, *optional*, defaults to `"hann"`): + The name of the window function. + periodic (`bool`, *optional*, defaults to `True`): + Whether the window is periodic or symmetric. + frame_length (`int`, *optional*): + The length of the analysis frames in samples. Provide a value for `frame_length` if the window is smaller + than the frame length, so that it will be zero-padded. + center (`bool`, *optional*, defaults to `True`): + Whether to center the window inside the FFT buffer. Only used when `frame_length` is provided. + + Returns: + `np.ndarray` of shape `(window_length,)` or `(frame_length,)` containing the window. + """ + length = window_length + 1 if periodic else window_length + + if name == "boxcar": + window = np.ones(length) + elif name in ["hamming", "hamming_window"]: + window = np.hamming(length) + elif name in ["hann", "hann_window"]: + window = np.hanning(length) + elif name in ["povey"]: + window = np.power(np.hanning(length), 0.85) + else: + raise ValueError(f"Unknown window function '{name}'") + + if periodic: + window = window[:-1] + + if frame_length is None: + return window + + if window_length > frame_length: + raise ValueError( + f"Length of the window ({window_length}) may not be larger than frame_length ({frame_length})" + ) + + padded_window = np.zeros(frame_length) + offset = (frame_length - window_length) // 2 if center else 0 + padded_window[offset : offset + window_length] = window + return padded_window + + +# TODO This method does not support batching yet as we are mainly focused on inference. +def spectrogram( + waveform: np.ndarray, + window: np.ndarray, + frame_length: int, + hop_length: int, + fft_length: Optional[int] = None, + power: Optional[float] = 1.0, + center: bool = True, + pad_mode: str = "reflect", + onesided: bool = True, + preemphasis: Optional[float] = None, + mel_filters: Optional[np.ndarray] = None, + mel_floor: float = 1e-10, + log_mel: Optional[str] = None, + reference: float = 1.0, + min_value: float = 1e-10, + db_range: Optional[float] = None, + remove_dc_offset: Optional[bool] = None, + dtype: np.dtype = np.float32, +) -> np.ndarray: + """ + Calculates a spectrogram over one waveform using the Short-Time Fourier Transform. + + This function can create the following kinds of spectrograms: + + - amplitude spectrogram (`power = 1.0`) + - power spectrogram (`power = 2.0`) + - complex-valued spectrogram (`power = None`) + - log spectrogram (use `log_mel` argument) + - mel spectrogram (provide `mel_filters`) + - log-mel spectrogram (provide `mel_filters` and `log_mel`) + + How this works: + + 1. The input waveform is split into frames of size `frame_length` that are partially overlapping by `frame_length + - hop_length` samples. + 2. Each frame is multiplied by the window and placed into a buffer of size `fft_length`. + 3. The DFT is taken of each windowed frame. + 4. The results are stacked into a spectrogram. 
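+
+    As a rough sketch (all parameter values below are illustrative, not prescriptive), a log-mel spectrogram of one
+    second of 16 kHz audio could be computed with:
+
+    ```python
+    >>> import numpy as np
+    >>> from transformers.audio_utils import mel_filter_bank, spectrogram, window_function
+
+    >>> waveform = np.random.rand(16000)
+    >>> window = window_function(400, "hann")
+    >>> mel_filters = mel_filter_bank(257, 80, 0.0, 8000.0, 16000)
+    >>> log_mel = spectrogram(
+    ...     waveform, window, frame_length=400, hop_length=160, fft_length=512, mel_filters=mel_filters, log_mel="log"
+    ... )
+    ```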
+ + We make a distinction between the following "blocks" of sample data, each of which may have a different length: + + - The analysis frame. This is the size of the time slices that the input waveform is split into. + - The window. Each analysis frame is multiplied by the window to avoid spectral leakage. + - The FFT input buffer. The length of this determines how many frequency bins are in the spectrogram. + + In this implementation, the window is assumed to be zero-padded to have the same size as the analysis frame. A + padded window can be obtained from `window_function()`. The FFT input buffer may be larger than the analysis frame, + typically the next power of two. + + Note: This function is not optimized for speed yet. It should be mostly compatible with `librosa.stft` and + `torchaudio.functional.transforms.Spectrogram`, although it is more flexible due to the different ways spectrograms + can be constructed. + + Args: + waveform (`np.ndarray` of shape `(length,)`): + The input waveform. This must be a single real-valued, mono waveform. + window (`np.ndarray` of shape `(frame_length,)`): + The windowing function to apply, including zero-padding if necessary. The actual window length may be + shorter than `frame_length`, but we're assuming the array has already been zero-padded. + frame_length (`int`): + The length of the analysis frames in samples. With librosa this is always equal to `fft_length` but we also + allow smaller sizes. + hop_length (`int`): + The stride between successive analysis frames in samples. + fft_length (`int`, *optional*): + The size of the FFT buffer in samples. This determines how many frequency bins the spectrogram will have. + For optimal speed, this should be a power of two. If `None`, uses `frame_length`. + power (`float`, *optional*, defaults to 1.0): + If 1.0, returns the amplitude spectrogram. If 2.0, returns the power spectrogram. If `None`, returns + complex numbers. + center (`bool`, *optional*, defaults to `True`): + Whether to pad the waveform so that frame `t` is centered around time `t * hop_length`. If `False`, frame + `t` will start at time `t * hop_length`. + pad_mode (`str`, *optional*, defaults to `"reflect"`): + Padding mode used when `center` is `True`. Possible values are: `"constant"` (pad with zeros), `"edge"` + (pad with edge values), `"reflect"` (pads with mirrored values). + onesided (`bool`, *optional*, defaults to `True`): + If True, only computes the positive frequencies and returns a spectrogram containing `fft_length // 2 + 1` + frequency bins. If False, also computes the negative frequencies and returns `fft_length` frequency bins. + preemphasis (`float`, *optional*): + Coefficient for the high-pass pre-emphasis filter applied before the DFT. + mel_filters (`np.ndarray` of shape `(num_freq_bins, num_mel_filters)`, *optional*): + The mel filter bank. If supplied, applies this filter bank to create a mel spectrogram. + mel_floor (`float`, *optional*, defaults to 1e-10): + Minimum value of mel frequency banks. + log_mel (`str`, *optional*): + How to convert the spectrogram to log scale. Possible options are: `None` (don't convert), `"log"` (take + the natural logarithm), `"log10"` (take the base-10 logarithm), `"dB"` (convert to decibels). Can only be + used when `power` is not `None`. + reference (`float`, *optional*, defaults to 1.0): + Sets the input spectrogram value that corresponds to 0 dB. For example, use `np.max(spectrogram)` to set + the loudest part to 0 dB. Must be greater than zero. 
+ min_value (`float`, *optional*, defaults to `1e-10`): + The spectrogram will be clipped to this minimum value before conversion to decibels, to avoid taking + `log(0)`. For a power spectrogram, the default of `1e-10` corresponds to a minimum of -100 dB. For an + amplitude spectrogram, the value `1e-5` corresponds to -100 dB. Must be greater than zero. + db_range (`float`, *optional*): + Sets the maximum dynamic range in decibels. For example, if `db_range = 80`, the difference between the + peak value and the smallest value will never be more than 80 dB. Must be greater than zero. + remove_dc_offset (`bool`, *optional*): + Whether to subtract the mean from the waveform on each frame, applied before pre-emphasis. This should be set to `True` in + order to get the same results as `torchaudio.compliance.kaldi.fbank` when computing mel filters. + dtype (`np.dtype`, *optional*, defaults to `np.float32`): + Data type of the spectrogram tensor. If `power` is `None`, this argument is ignored and the dtype will be + `np.complex64`. + + Returns: + `np.ndarray` containing a spectrogram of shape `(num_frequency_bins, length)` for a regular spectrogram or shape + `(num_mel_filters, length)` for a mel spectrogram. + """ + window_length = len(window) + + if fft_length is None: + fft_length = frame_length + + if frame_length > fft_length: + raise ValueError(f"frame_length ({frame_length}) may not be larger than fft_length ({fft_length})") + + if window_length != frame_length: + raise ValueError(f"Length of the window ({window_length}) must equal frame_length ({frame_length})") + + if hop_length <= 0: + raise ValueError("hop_length must be greater than zero") + + if waveform.ndim != 1: + raise ValueError(f"Input waveform must have only one dimension, shape is {waveform.shape}") + + if np.iscomplexobj(waveform): + raise ValueError("Complex-valued input waveforms are not currently supported") + + if power is None and mel_filters is not None: + raise ValueError( + "You have provided `mel_filters` but `power` is `None`. Mel spectrogram computation is not yet supported for complex-valued spectrograms. " + "Specify `power` to fix this issue." 
+ ) + + # center pad the waveform + if center: + padding = [(int(frame_length // 2), int(frame_length // 2))] + waveform = np.pad(waveform, padding, mode=pad_mode) + + # promote to float64, since np.fft uses float64 internally + waveform = waveform.astype(np.float64) + window = window.astype(np.float64) + + # split waveform into frames of frame_length size + num_frames = int(1 + np.floor((waveform.size - frame_length) / hop_length)) + + num_frequency_bins = (fft_length // 2) + 1 if onesided else fft_length + spectrogram = np.empty((num_frames, num_frequency_bins), dtype=np.complex64) + + # rfft is faster than fft + fft_func = np.fft.rfft if onesided else np.fft.fft + buffer = np.zeros(fft_length) + + timestep = 0 + for frame_idx in range(num_frames): + buffer[:frame_length] = waveform[timestep : timestep + frame_length] + + if remove_dc_offset: + buffer[:frame_length] = buffer[:frame_length] - buffer[:frame_length].mean() + + if preemphasis is not None: + buffer[1:frame_length] -= preemphasis * buffer[: frame_length - 1] + buffer[0] *= 1 - preemphasis + + buffer[:frame_length] *= window + + spectrogram[frame_idx] = fft_func(buffer) + timestep += hop_length + + # note: ** is much faster than np.power + if power is not None: + spectrogram = np.abs(spectrogram, dtype=np.float64) ** power + + spectrogram = spectrogram.T + + if mel_filters is not None: + spectrogram = np.maximum(mel_floor, np.dot(mel_filters.T, spectrogram)) + + if power is not None and log_mel is not None: + if log_mel == "log": + spectrogram = np.log(spectrogram) + elif log_mel == "log10": + spectrogram = np.log10(spectrogram) + elif log_mel == "dB": + if power == 1.0: + spectrogram = amplitude_to_db(spectrogram, reference, min_value, db_range) + elif power == 2.0: + spectrogram = power_to_db(spectrogram, reference, min_value, db_range) + else: + raise ValueError(f"Cannot use log_mel option '{log_mel}' with power {power}") + else: + raise ValueError(f"Unknown log_mel option: {log_mel}") + + spectrogram = np.asarray(spectrogram, dtype) + + return spectrogram + + +def power_to_db( + spectrogram: np.ndarray, + reference: float = 1.0, + min_value: float = 1e-10, + db_range: Optional[float] = None, +) -> np.ndarray: + """ + Converts a power spectrogram to the decibel scale. This computes `10 * log10(spectrogram / reference)`, using basic + logarithm properties for numerical stability. + + The motivation behind applying the log function on the (mel) spectrogram is that humans do not hear loudness on a + linear scale. Generally to double the perceived volume of a sound we need to put 8 times as much energy into it. + This means that large variations in energy may not sound all that different if the sound is loud to begin with. + This compression operation makes the (mel) spectrogram features match more closely what humans actually hear. + + Based on the implementation of `librosa.power_to_db`. + + Args: + spectrogram (`np.ndarray`): + The input power (mel) spectrogram. Note that a power spectrogram has the amplitudes squared! + reference (`float`, *optional*, defaults to 1.0): + Sets the input spectrogram value that corresponds to 0 dB. For example, use `np.max(spectrogram)` to set + the loudest part to 0 dB. Must be greater than zero. + min_value (`float`, *optional*, defaults to `1e-10`): + The spectrogram will be clipped to this minimum value before conversion to decibels, to avoid taking + `log(0)`. The default of `1e-10` corresponds to a minimum of -100 dB. Must be greater than zero. 
+ db_range (`float`, *optional*): + Sets the maximum dynamic range in decibels. For example, if `db_range = 80`, the difference between the + peak value and the smallest value will never be more than 80 dB. Must be greater than zero. + + Returns: + `np.ndarray`: the spectrogram in decibels + """ + if reference <= 0.0: + raise ValueError("reference must be greater than zero") + if min_value <= 0.0: + raise ValueError("min_value must be greater than zero") + + reference = max(min_value, reference) + + spectrogram = np.clip(spectrogram, a_min=min_value, a_max=None) + spectrogram = 10.0 * (np.log10(spectrogram) - np.log10(reference)) + + if db_range is not None: + if db_range <= 0.0: + raise ValueError("db_range must be greater than zero") + spectrogram = np.clip(spectrogram, a_min=spectrogram.max() - db_range, a_max=None) + + return spectrogram + + +def amplitude_to_db( + spectrogram: np.ndarray, + reference: float = 1.0, + min_value: float = 1e-5, + db_range: Optional[float] = None, +) -> np.ndarray: + """ + Converts an amplitude spectrogram to the decibel scale. This computes `20 * log10(spectrogram / reference)`, using + basic logarithm properties for numerical stability. + + The motivation behind applying the log function on the (mel) spectrogram is that humans do not hear loudness on a + linear scale. Generally to double the perceived volume of a sound we need to put 8 times as much energy into it. + This means that large variations in energy may not sound all that different if the sound is loud to begin with. + This compression operation makes the (mel) spectrogram features match more closely what humans actually hear. + + Args: + spectrogram (`np.ndarray`): + The input amplitude (mel) spectrogram. + reference (`float`, *optional*, defaults to 1.0): + Sets the input spectrogram value that corresponds to 0 dB. For example, use `np.max(spectrogram)` to set + the loudest part to 0 dB. Must be greater than zero. + min_value (`float`, *optional*, defaults to `1e-5`): + The spectrogram will be clipped to this minimum value before conversion to decibels, to avoid taking + `log(0)`. The default of `1e-5` corresponds to a minimum of -100 dB. Must be greater than zero. + db_range (`float`, *optional*): + Sets the maximum dynamic range in decibels. For example, if `db_range = 80`, the difference between the + peak value and the smallest value will never be more than 80 dB. Must be greater than zero. 
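+
+    Example (illustrative; values chosen arbitrarily, using the default `reference` of 1.0):
+
+    ```python
+    >>> import numpy as np
+    >>> from transformers.audio_utils import amplitude_to_db
+
+    >>> amplitude_to_db(np.array([1.0, 10.0, 100.0]))
+    array([ 0., 20., 40.])
+    ```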
+ + Returns: + `np.ndarray`: the spectrogram in decibels + """ + if reference <= 0.0: + raise ValueError("reference must be greater than zero") + if min_value <= 0.0: + raise ValueError("min_value must be greater than zero") + + reference = max(min_value, reference) + + spectrogram = np.clip(spectrogram, a_min=min_value, a_max=None) + spectrogram = 20.0 * (np.log10(spectrogram) - np.log10(reference)) + + if db_range is not None: + if db_range <= 0.0: + raise ValueError("db_range must be greater than zero") + spectrogram = np.clip(spectrogram, a_min=spectrogram.max() - db_range, a_max=None) + + return spectrogram + + +### deprecated functions below this line ### + + +def get_mel_filter_banks( + nb_frequency_bins: int, + nb_mel_filters: int, + frequency_min: float, + frequency_max: float, + sample_rate: int, + norm: Optional[str] = None, + mel_scale: str = "htk", +) -> np.array: + warnings.warn( + "The function `get_mel_filter_banks` is deprecated and will be removed in version 4.31.0 of Transformers", + FutureWarning, + ) + return mel_filter_bank( + num_frequency_bins=nb_frequency_bins, + num_mel_filters=nb_mel_filters, + min_frequency=frequency_min, + max_frequency=frequency_max, + sampling_rate=sample_rate, + norm=norm, + mel_scale=mel_scale, + ) + + +def fram_wave(waveform: np.array, hop_length: int = 160, fft_window_size: int = 400, center: bool = True): + """ + In order to compute the short time fourier transform, the waveform needs to be split in overlapping windowed + segments called `frames`. + + The window length (window_length) defines how much of the signal is contained in each frame, while the hop length + defines the step between the beginning of each new frame. + + + Args: + waveform (`np.array` of shape `(sample_length,)`): + The raw waveform which will be split into smaller chunks. + hop_length (`int`, *optional*, defaults to 160): + Step between each window of the waveform. + fft_window_size (`int`, *optional*, defaults to 400): + Defines the size of the window. + center (`bool`, defaults to `True`): + Whether or not to center each frame around the middle of the frame. Centering is done by reflecting the + waveform on the left and on the right. + + Return: + framed_waveform (`np.array` of shape `(waveform.shape // hop_length , fft_window_size)`): + The framed waveforms that can be fed to `np.fft`. 
+ """ + warnings.warn( + "The function `fram_wave` is deprecated and will be removed in version 4.31.0 of Transformers", + FutureWarning, + ) + frames = [] + for i in range(0, waveform.shape[0] + 1, hop_length): + if center: + half_window = (fft_window_size - 1) // 2 + 1 + start = i - half_window if i > half_window else 0 + end = i + half_window if i < waveform.shape[0] - half_window else waveform.shape[0] + frame = waveform[start:end] + if start == 0: + padd_width = (-i + half_window, 0) + frame = np.pad(frame, pad_width=padd_width, mode="reflect") + + elif end == waveform.shape[0]: + padd_width = (0, (i - waveform.shape[0] + half_window)) + frame = np.pad(frame, pad_width=padd_width, mode="reflect") + + else: + frame = waveform[i : i + fft_window_size] + frame_width = frame.shape[0] + if frame_width < waveform.shape[0]: + frame = np.lib.pad( + frame, pad_width=(0, fft_window_size - frame_width), mode="constant", constant_values=0 + ) + frames.append(frame) + + frames = np.stack(frames, 0) + return frames + + +def stft(frames: np.array, windowing_function: np.array, fft_window_size: int = None): + """ + Calculates the complex Short-Time Fourier Transform (STFT) of the given framed signal. Should give the same results + as `torch.stft`. + + Args: + frames (`np.array` of dimension `(num_frames, fft_window_size)`): + A framed audio signal obtained using `audio_utils.fram_wave`. + windowing_function (`np.array` of dimension `(fft_window_size,)`): + An array representing the window function that will be used to reduce the amplitude of the discontinuities at the + boundaries of each frame when computing the STFT. Each frame will be multiplied by the `windowing_function`. + For more information on the discontinuities, called *spectral leakage*, refer to [this + tutorial](https://download.ni.com/evaluation/pxi/Understanding%20FFTs%20and%20Windowing.pdf). + fft_window_size (`int`, *optional*): + Size of the window on which the Fourier transform is applied. This controls the frequency resolution of the + spectrogram. 400 means that the Fourier transform is computed on windows of 400 samples. The number of + frequency bins (`nb_frequency_bins`) used to divide the window into equal strips is equal to + `(1+fft_window_size)//2`. Increasing `fft_window_size` slows the computation time proportionally. 
+ + Example: + + ```python + >>> from transformers.audio_utils import stft, fram_wave + >>> import numpy as np + + >>> audio = np.random.rand(50) + >>> fft_window_size = 10 + >>> hop_length = 2 + >>> framed_audio = fram_wave(audio, hop_length, fft_window_size) + >>> spectrogram = stft(framed_audio, np.hanning(fft_window_size + 1)) + ``` + + Returns: + spectrogram (`np.ndarray`): + A spectrogram of shape `(num_frames, nb_frequency_bins)` obtained using the STFT algorithm + """ + warnings.warn( + "The function `stft` is deprecated and will be removed in version 4.31.0 of Transformers", + FutureWarning, + ) + frame_size = frames.shape[1] + + if fft_window_size is None: + fft_window_size = frame_size + + if fft_window_size < frame_size: + raise ValueError("FFT size must greater or equal the frame size") + # number of FFT bins to store + nb_frequency_bins = (fft_window_size >> 1) + 1 + + spectrogram = np.empty((len(frames), nb_frequency_bins), dtype=np.complex64) + fft_signal = np.zeros(fft_window_size) + + for f, frame in enumerate(frames): + if windowing_function is not None: + np.multiply(frame, windowing_function, out=fft_signal[:frame_size]) + else: + fft_signal[:frame_size] = frame + spectrogram[f] = np.fft.fft(fft_signal, axis=0)[:nb_frequency_bins] + return spectrogram.T diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/convert_pytorch_checkpoint_to_tf2.py b/llmeval-env/lib/python3.10/site-packages/transformers/convert_pytorch_checkpoint_to_tf2.py new file mode 100644 index 0000000000000000000000000000000000000000..c544c8c9e10ca9ed4c44e73c066a37ee39b9d0a6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/convert_pytorch_checkpoint_to_tf2.py @@ -0,0 +1,448 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Convert pytorch checkpoints to TensorFlow""" + + +import argparse +import os + +from . 
import ( + AlbertConfig, + BartConfig, + BertConfig, + CamembertConfig, + CTRLConfig, + DistilBertConfig, + DPRConfig, + ElectraConfig, + FlaubertConfig, + GPT2Config, + LayoutLMConfig, + LxmertConfig, + OpenAIGPTConfig, + RobertaConfig, + T5Config, + TFAlbertForPreTraining, + TFBartForConditionalGeneration, + TFBartForSequenceClassification, + TFBertForPreTraining, + TFBertForQuestionAnswering, + TFBertForSequenceClassification, + TFCamembertForMaskedLM, + TFCTRLLMHeadModel, + TFDistilBertForMaskedLM, + TFDistilBertForQuestionAnswering, + TFDPRContextEncoder, + TFDPRQuestionEncoder, + TFDPRReader, + TFElectraForPreTraining, + TFFlaubertWithLMHeadModel, + TFGPT2LMHeadModel, + TFLayoutLMForMaskedLM, + TFLxmertForPreTraining, + TFLxmertVisualFeatureEncoder, + TFOpenAIGPTLMHeadModel, + TFRobertaForCausalLM, + TFRobertaForMaskedLM, + TFRobertaForSequenceClassification, + TFT5ForConditionalGeneration, + TFTransfoXLLMHeadModel, + TFWav2Vec2Model, + TFXLMRobertaForMaskedLM, + TFXLMWithLMHeadModel, + TFXLNetLMHeadModel, + TransfoXLConfig, + Wav2Vec2Config, + Wav2Vec2Model, + XLMConfig, + XLMRobertaConfig, + XLNetConfig, + is_torch_available, + load_pytorch_checkpoint_in_tf2_model, +) +from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging + + +if is_torch_available(): + import numpy as np + import torch + + from . import ( + AlbertForPreTraining, + BartForConditionalGeneration, + BertForPreTraining, + BertForQuestionAnswering, + BertForSequenceClassification, + CamembertForMaskedLM, + CTRLLMHeadModel, + DistilBertForMaskedLM, + DistilBertForQuestionAnswering, + DPRContextEncoder, + DPRQuestionEncoder, + DPRReader, + ElectraForPreTraining, + FlaubertWithLMHeadModel, + GPT2LMHeadModel, + LayoutLMForMaskedLM, + LxmertForPreTraining, + LxmertVisualFeatureEncoder, + OpenAIGPTLMHeadModel, + RobertaForMaskedLM, + RobertaForSequenceClassification, + T5ForConditionalGeneration, + TransfoXLLMHeadModel, + XLMRobertaForMaskedLM, + XLMWithLMHeadModel, + XLNetLMHeadModel, + ) + from .pytorch_utils import is_torch_greater_or_equal_than_1_13 + + +logging.set_verbosity_info() + +MODEL_CLASSES = { + "bart": ( + BartConfig, + TFBartForConditionalGeneration, + TFBartForSequenceClassification, + BartForConditionalGeneration, + ), + "bert": ( + BertConfig, + TFBertForPreTraining, + BertForPreTraining, + ), + "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad": ( + BertConfig, + TFBertForQuestionAnswering, + BertForQuestionAnswering, + ), + "google-bert/bert-large-cased-whole-word-masking-finetuned-squad": ( + BertConfig, + TFBertForQuestionAnswering, + BertForQuestionAnswering, + ), + "google-bert/bert-base-cased-finetuned-mrpc": ( + BertConfig, + TFBertForSequenceClassification, + BertForSequenceClassification, + ), + "dpr": ( + DPRConfig, + TFDPRQuestionEncoder, + TFDPRContextEncoder, + TFDPRReader, + DPRQuestionEncoder, + DPRContextEncoder, + DPRReader, + ), + "openai-community/gpt2": ( + GPT2Config, + TFGPT2LMHeadModel, + GPT2LMHeadModel, + ), + "xlnet": ( + XLNetConfig, + TFXLNetLMHeadModel, + XLNetLMHeadModel, + ), + "xlm": ( + XLMConfig, + TFXLMWithLMHeadModel, + XLMWithLMHeadModel, + ), + "xlm-roberta": ( + XLMRobertaConfig, + TFXLMRobertaForMaskedLM, + XLMRobertaForMaskedLM, + ), + "transfo-xl": ( + TransfoXLConfig, + TFTransfoXLLMHeadModel, + TransfoXLLMHeadModel, + ), + "openai-community/openai-gpt": ( + OpenAIGPTConfig, + TFOpenAIGPTLMHeadModel, + OpenAIGPTLMHeadModel, + ), + "roberta": ( + RobertaConfig, + TFRobertaForCausalLM, + TFRobertaForMaskedLM, + RobertaForMaskedLM, + ), 
+ "layoutlm": ( + LayoutLMConfig, + TFLayoutLMForMaskedLM, + LayoutLMForMaskedLM, + ), + "FacebookAI/roberta-large-mnli": ( + RobertaConfig, + TFRobertaForSequenceClassification, + RobertaForSequenceClassification, + ), + "camembert": ( + CamembertConfig, + TFCamembertForMaskedLM, + CamembertForMaskedLM, + ), + "flaubert": ( + FlaubertConfig, + TFFlaubertWithLMHeadModel, + FlaubertWithLMHeadModel, + ), + "distilbert": ( + DistilBertConfig, + TFDistilBertForMaskedLM, + DistilBertForMaskedLM, + ), + "distilbert-base-distilled-squad": ( + DistilBertConfig, + TFDistilBertForQuestionAnswering, + DistilBertForQuestionAnswering, + ), + "lxmert": ( + LxmertConfig, + TFLxmertForPreTraining, + LxmertForPreTraining, + ), + "lxmert-visual-feature-encoder": ( + LxmertConfig, + TFLxmertVisualFeatureEncoder, + LxmertVisualFeatureEncoder, + ), + "Salesforce/ctrl": ( + CTRLConfig, + TFCTRLLMHeadModel, + CTRLLMHeadModel, + ), + "albert": ( + AlbertConfig, + TFAlbertForPreTraining, + AlbertForPreTraining, + ), + "t5": ( + T5Config, + TFT5ForConditionalGeneration, + T5ForConditionalGeneration, + ), + "electra": ( + ElectraConfig, + TFElectraForPreTraining, + ElectraForPreTraining, + ), + "wav2vec2": ( + Wav2Vec2Config, + TFWav2Vec2Model, + Wav2Vec2Model, + ), +} + + +def convert_pt_checkpoint_to_tf( + model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True +): + if model_type not in MODEL_CLASSES: + raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.") + + config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type] + + # Initialise TF model + if config_file in aws_config_map: + config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models) + config = config_class.from_json_file(config_file) + config.output_hidden_states = True + config.output_attentions = True + print(f"Building TensorFlow model from configuration: {config}") + tf_model = model_class(config) + + # Load weights from tf checkpoint + if pytorch_checkpoint_path in aws_config_map.keys(): + pytorch_checkpoint_path = cached_file( + pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models + ) + # Load PyTorch checkpoint in tf2 model: + tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path) + + if compare_with_pt_model: + tfo = tf_model(tf_model.dummy_inputs, training=False) # build the network + + weights_only_kwarg = {"weights_only": True} if is_torch_greater_or_equal_than_1_13 else {} + state_dict = torch.load( + pytorch_checkpoint_path, + map_location="cpu", + **weights_only_kwarg, + ) + pt_model = pt_model_class.from_pretrained( + pretrained_model_name_or_path=None, config=config, state_dict=state_dict + ) + + with torch.no_grad(): + pto = pt_model(**pt_model.dummy_inputs) + + np_pt = pto[0].numpy() + np_tf = tfo[0].numpy() + diff = np.amax(np.abs(np_pt - np_tf)) + print(f"Max absolute difference between models outputs {diff}") + assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}" + + # Save pytorch-model + print(f"Save TensorFlow model to {tf_dump_path}") + tf_model.save_weights(tf_dump_path, save_format="h5") + + +def convert_all_pt_checkpoints_to_tf( + args_model_type, + tf_dump_path, + model_shortcut_names_or_path=None, + config_shortcut_names_or_path=None, + compare_with_pt_model=False, + use_cached_models=False, + remove_cached_files=False, + only_convert_finetuned_models=False, +): + if args_model_type is None: + 
model_types = list(MODEL_CLASSES.keys()) + else: + model_types = [args_model_type] + + for j, model_type in enumerate(model_types, start=1): + print("=" * 100) + print(f" Converting model type {j}/{len(model_types)}: {model_type}") + print("=" * 100) + if model_type not in MODEL_CLASSES: + raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.") + + config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type] + + if model_shortcut_names_or_path is None: + model_shortcut_names_or_path = list(aws_model_maps.keys()) + if config_shortcut_names_or_path is None: + config_shortcut_names_or_path = model_shortcut_names_or_path + + for i, (model_shortcut_name, config_shortcut_name) in enumerate( + zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1 + ): + print("-" * 100) + if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: + if not only_convert_finetuned_models: + print(f" Skipping finetuned checkpoint {model_shortcut_name}") + continue + model_type = model_shortcut_name + elif only_convert_finetuned_models: + print(f" Skipping not finetuned checkpoint {model_shortcut_name}") + continue + print( + f" Converting checkpoint {i}/{len(aws_config_map)}: {model_shortcut_name} - model_type {model_type}" + ) + print("-" * 100) + + if config_shortcut_name in aws_config_map: + config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models) + else: + config_file = config_shortcut_name + + if model_shortcut_name in aws_model_maps: + model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models) + else: + model_file = model_shortcut_name + + if os.path.isfile(model_shortcut_name): + model_shortcut_name = "converted_model" + + convert_pt_checkpoint_to_tf( + model_type=model_type, + pytorch_checkpoint_path=model_file, + config_file=config_file, + tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"), + compare_with_pt_model=compare_with_pt_model, + ) + if remove_cached_files: + os.remove(config_file) + os.remove(model_file) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file." + ) + parser.add_argument( + "--model_type", + default=None, + type=str, + help=( + f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and " + "convert all the models from AWS." + ), + ) + parser.add_argument( + "--pytorch_checkpoint_path", + default=None, + type=str, + help=( + "Path to the PyTorch checkpoint path or shortcut name to download from AWS. " + "If not given, will download and convert all the checkpoints from AWS." + ), + ) + parser.add_argument( + "--config_file", + default=None, + type=str, + help=( + "The config json file corresponding to the pre-trained model. \n" + "This specifies the model architecture. If not given and " + "--pytorch_checkpoint_path is not given or is a shortcut name " + "use the configuration associated to the shortcut name on the AWS" + ), + ) + parser.add_argument( + "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions." 
+ ) + parser.add_argument( + "--use_cached_models", + action="store_true", + help="Use cached models if possible instead of updating to latest checkpoint versions.", + ) + parser.add_argument( + "--remove_cached_files", + action="store_true", + help="Remove pytorch models after conversion (save memory when converting in batches).", + ) + parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.") + args = parser.parse_args() + + # if args.pytorch_checkpoint_path is not None: + # convert_pt_checkpoint_to_tf(args.model_type.lower(), + # args.pytorch_checkpoint_path, + # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, + # args.tf_dump_path, + # compare_with_pt_model=args.compare_with_pt_model, + # use_cached_models=args.use_cached_models) + # else: + convert_all_pt_checkpoints_to_tf( + args.model_type.lower() if args.model_type is not None else None, + args.tf_dump_path, + model_shortcut_names_or_path=[args.pytorch_checkpoint_path] + if args.pytorch_checkpoint_path is not None + else None, + config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, + compare_with_pt_model=args.compare_with_pt_model, + use_cached_models=args.use_cached_models, + remove_cached_files=args.remove_cached_files, + only_convert_finetuned_models=args.only_convert_finetuned_models, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/convert_slow_tokenizer.py b/llmeval-env/lib/python3.10/site-packages/transformers/convert_slow_tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..88f9e5f19a5c061ee66a5f37f7a17b5bab60f560 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/convert_slow_tokenizer.py @@ -0,0 +1,1534 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Utilities to convert slow tokenizers in their fast tokenizers counterparts. + +All the conversions are grouped here to gather SentencePiece dependencies outside of the fast tokenizers files and +allow to make our dependency on SentencePiece optional. 
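+
+For example, a fast `tokenizers.Tokenizer` backend can be derived from a slow tokenizer roughly as follows (a
+minimal sketch; the checkpoint name is only illustrative):
+
+```python
+from transformers import BertTokenizer
+from transformers.convert_slow_tokenizer import BertConverter
+
+slow_tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
+fast_backend = BertConverter(slow_tokenizer).converted()  # returns a `tokenizers.Tokenizer`
+```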
+""" + +import warnings +from typing import Dict, List, Tuple + +from packaging import version +from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors +from tokenizers.models import BPE, Unigram, WordPiece + +from .utils import is_protobuf_available, requires_backends +from .utils.import_utils import PROTOBUF_IMPORT_ERROR + + +def import_protobuf(error_message=""): + if is_protobuf_available(): + import google.protobuf + + if version.parse(google.protobuf.__version__) < version.parse("4.0.0"): + from transformers.utils import sentencepiece_model_pb2 + else: + from transformers.utils import sentencepiece_model_pb2_new as sentencepiece_model_pb2 + return sentencepiece_model_pb2 + else: + raise ImportError(PROTOBUF_IMPORT_ERROR.format(error_message)) + + +def _get_prepend_scheme(add_prefix_space: bool, original_tokenizer) -> str: + if add_prefix_space: + prepend_scheme = "always" + if hasattr(original_tokenizer, "legacy") and not original_tokenizer.legacy: + prepend_scheme = "first" + else: + prepend_scheme = "never" + return prepend_scheme + + +class SentencePieceExtractor: + """ + Extractor implementation for SentencePiece trained models. https://github.com/google/sentencepiece + """ + + def __init__(self, model: str): + requires_backends(self, "sentencepiece") + from sentencepiece import SentencePieceProcessor + + self.sp = SentencePieceProcessor() + self.sp.Load(model) + + def extract(self, vocab_scores=None) -> Tuple[Dict[str, int], List[Tuple]]: + """ + By default will return vocab and merges with respect to their order, by sending `vocab_scores` we're going to + order the merges with respect to the piece scores instead. + """ + sp = self.sp + vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())} + + if vocab_scores is not None: + vocab_scores, reverse = dict(vocab_scores), True + else: + vocab_scores, reverse = vocab, False + + # Merges + merges = [] + for merge, piece_score in vocab_scores.items(): + local = [] + for index in range(1, len(merge)): + piece_l, piece_r = merge[:index], merge[index:] + if piece_l in vocab and piece_r in vocab: + local.append((piece_l, piece_r, piece_score)) + local = sorted(local, key=lambda x: (vocab[x[0]], vocab[x[1]])) + merges.extend(local) + + merges = sorted(merges, key=lambda val: val[2], reverse=reverse) + merges = [(val[0], val[1]) for val in merges] + return vocab, merges + + +class GemmaSentencePieceExtractor(SentencePieceExtractor): + def extract(self, vocab_scores=None) -> Tuple[Dict[str, int], List[Tuple]]: + """ + By default will return vocab and merges with respect to their order, by sending `vocab_scores` we're going to + order the merges with respect to the piece scores instead. + """ + sp = self.sp + vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())} + + # there is a missing token in the vocab. 
We have to do this to support merges + # "<0x09>" is the bytefallback for `\t` + vocab["\t"] = vocab.pop("<0x09>") + + if vocab_scores is not None: + vocab_scores, reverse = dict(vocab_scores), True + else: + vocab_scores, reverse = vocab, False + + # Merges + merges = [] + for merge, piece_score in vocab_scores.items(): + local = [] + for index in range(1, len(merge)): + piece_l, piece_r = merge[:index], merge[index:] + if piece_l in vocab and piece_r in vocab: + local.append((piece_l, piece_r, piece_score)) + local = sorted(local, key=lambda x: (vocab[x[0]], vocab[x[1]])) + merges.extend(local) + + merges = sorted(merges, key=lambda val: val[2], reverse=reverse) + merges = [(val[0], val[1]) for val in merges] + return vocab, merges + + +def check_number_comma(piece: str) -> bool: + return len(piece) < 2 or piece[-1] != "," or not piece[-2].isdigit() + + +class Converter: + def __init__(self, original_tokenizer): + self.original_tokenizer = original_tokenizer + + def converted(self) -> Tokenizer: + raise NotImplementedError() + + +class BertConverter(Converter): + def converted(self) -> Tokenizer: + vocab = self.original_tokenizer.vocab + tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) + + tokenize_chinese_chars = False + strip_accents = False + do_lower_case = False + if hasattr(self.original_tokenizer, "basic_tokenizer"): + tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars + strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents + do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case + + tokenizer.normalizer = normalizers.BertNormalizer( + clean_text=True, + handle_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + lowercase=do_lower_case, + ) + tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() + + cls = str(self.original_tokenizer.cls_token) + sep = str(self.original_tokenizer.sep_token) + cls_token_id = self.original_tokenizer.cls_token_id + sep_token_id = self.original_tokenizer.sep_token_id + + tokenizer.post_processor = processors.TemplateProcessing( + single=f"{cls}:0 $A:0 {sep}:0", + pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1", + special_tokens=[ + (cls, cls_token_id), + (sep, sep_token_id), + ], + ) + tokenizer.decoder = decoders.WordPiece(prefix="##") + + return tokenizer + + +class SplinterConverter(Converter): + def converted(self) -> Tokenizer: + vocab = self.original_tokenizer.vocab + tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) + + tokenize_chinese_chars = False + strip_accents = False + do_lower_case = False + if hasattr(self.original_tokenizer, "basic_tokenizer"): + tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars + strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents + do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case + + tokenizer.normalizer = normalizers.BertNormalizer( + clean_text=True, + handle_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + lowercase=do_lower_case, + ) + tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() + + cls = str(self.original_tokenizer.cls_token) + sep = str(self.original_tokenizer.sep_token) + question = str(self.original_tokenizer.question_token) + dot = "." 
+ cls_token_id = self.original_tokenizer.cls_token_id + sep_token_id = self.original_tokenizer.sep_token_id + question_token_id = self.original_tokenizer.question_token_id + dot_token_id = self.original_tokenizer.convert_tokens_to_ids(".") + + if self.original_tokenizer.padding_side == "right": + pair = f"{cls}:0 $A:0 {question} {dot} {sep}:0 $B:1 {sep}:1" + else: + pair = f"{cls}:0 $A:0 {sep}:0 $B:1 {question} {dot} {sep}:1" + + tokenizer.post_processor = processors.TemplateProcessing( + single=f"{cls}:0 $A:0 {sep}:0", + pair=pair, + special_tokens=[ + (cls, cls_token_id), + (sep, sep_token_id), + (question, question_token_id), + (dot, dot_token_id), + ], + ) + tokenizer.decoder = decoders.WordPiece(prefix="##") + + return tokenizer + + +class FunnelConverter(Converter): + def converted(self) -> Tokenizer: + vocab = self.original_tokenizer.vocab + tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) + + tokenize_chinese_chars = False + strip_accents = False + do_lower_case = False + if hasattr(self.original_tokenizer, "basic_tokenizer"): + tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars + strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents + do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case + + tokenizer.normalizer = normalizers.BertNormalizer( + clean_text=True, + handle_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + lowercase=do_lower_case, + ) + tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() + + cls = str(self.original_tokenizer.cls_token) + sep = str(self.original_tokenizer.sep_token) + cls_token_id = self.original_tokenizer.cls_token_id + sep_token_id = self.original_tokenizer.sep_token_id + + tokenizer.post_processor = processors.TemplateProcessing( + single=f"{cls}:2 $A:0 {sep}:0", # token_type_id is 2 for Funnel transformer + pair=f"{cls}:2 $A:0 {sep}:0 $B:1 {sep}:1", + special_tokens=[ + (cls, cls_token_id), + (sep, sep_token_id), + ], + ) + tokenizer.decoder = decoders.WordPiece(prefix="##") + + return tokenizer + + +class MPNetConverter(Converter): + def converted(self) -> Tokenizer: + vocab = self.original_tokenizer.vocab + tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) + + tokenize_chinese_chars = False + strip_accents = False + do_lower_case = False + if hasattr(self.original_tokenizer, "basic_tokenizer"): + tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars + strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents + do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case + + tokenizer.normalizer = normalizers.BertNormalizer( + clean_text=True, + handle_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + lowercase=do_lower_case, + ) + tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() + + cls = str(self.original_tokenizer.cls_token) + sep = str(self.original_tokenizer.sep_token) + cls_token_id = self.original_tokenizer.cls_token_id + sep_token_id = self.original_tokenizer.sep_token_id + + tokenizer.post_processor = processors.TemplateProcessing( + single=f"{cls}:0 $A:0 {sep}:0", + pair=f"{cls}:0 $A:0 {sep}:0 {sep}:0 $B:1 {sep}:1", # MPNet uses two [SEP] tokens + special_tokens=[ + (cls, cls_token_id), + (sep, sep_token_id), + ], + ) + tokenizer.decoder = decoders.WordPiece(prefix="##") + + return tokenizer + + +class OpenAIGPTConverter(Converter): + def converted(self) -> Tokenizer: + vocab = 
self.original_tokenizer.encoder + merges = list(self.original_tokenizer.bpe_ranks.keys()) + unk_token = self.original_tokenizer.unk_token + + tokenizer = Tokenizer( + BPE( + vocab=vocab, + merges=merges, + dropout=None, + unk_token=str(unk_token), + end_of_word_suffix="", + fuse_unk=False, + ) + ) + + if tokenizer.token_to_id(str(unk_token)) is not None: + tokenizer.add_special_tokens([str(unk_token)]) + + tokenizer.normalizer = normalizers.BertNormalizer(lowercase=True) + tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() + tokenizer.decoder = decoders.BPEDecoder(suffix="") + + return tokenizer + + +class GPT2Converter(Converter): + def converted(self) -> Tokenizer: + vocab = self.original_tokenizer.encoder + merges = list(self.original_tokenizer.bpe_ranks.keys()) + + tokenizer = Tokenizer( + BPE( + vocab=vocab, + merges=merges, + dropout=None, + continuing_subword_prefix="", + end_of_word_suffix="", + fuse_unk=False, + ) + ) + + tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=self.original_tokenizer.add_prefix_space) + tokenizer.decoder = decoders.ByteLevel() + if self.original_tokenizer.add_bos_token: + bos = self.original_tokenizer.bos_token + bos_token_id = self.original_tokenizer.bos_token_id + tokenizer.post_processor = processors.TemplateProcessing( + single=f"{bos}:0 $A:0", + pair=f"{bos}:0 $A:0 $B:1", + special_tokens=[ + (bos, bos_token_id), + ], + ) + else: + # XXX trim_offsets=False actually means this post_processor doesn't + # really do anything. + tokenizer.post_processor = processors.ByteLevel(trim_offsets=False) + return tokenizer + + +class HerbertConverter(Converter): + def converted(self) -> Tokenizer: + tokenizer_info_str = "#version:" + token_suffix = "" + + vocab = self.original_tokenizer.encoder + merges = list(self.original_tokenizer.bpe_ranks.keys()) + if tokenizer_info_str in merges[0][0]: + merges = merges[1:] + + tokenizer = Tokenizer( + BPE( + vocab, + merges, + dropout=None, + unk_token=self.original_tokenizer.unk_token, + end_of_word_suffix=token_suffix, + ) + ) + + tokenizer.normalizer = normalizers.BertNormalizer(lowercase=False, strip_accents=False) + tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() + tokenizer.decoder = decoders.BPEDecoder(suffix=token_suffix) + tokenizer.post_processor = processors.BertProcessing( + sep=(self.original_tokenizer.sep_token, self.original_tokenizer.sep_token_id), + cls=(self.original_tokenizer.cls_token, self.original_tokenizer.cls_token_id), + ) + + return tokenizer + + +class Qwen2Converter(Converter): + def converted(self) -> Tokenizer: + vocab = self.original_tokenizer.encoder + merges = list(self.original_tokenizer.bpe_ranks.keys()) + + tokenizer = Tokenizer( + BPE( + vocab=vocab, + merges=merges, + dropout=None, + unk_token=None, + continuing_subword_prefix="", + end_of_word_suffix="", + fuse_unk=False, + byte_fallback=False, + ) + ) + + tokenizer.normalizer = normalizers.NFC() + + tokenizer.pre_tokenizer = pre_tokenizers.Sequence( + [ + pre_tokenizers.Split( + Regex( + r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+""" + ), + behavior="isolated", + invert=False, + ), + pre_tokenizers.ByteLevel( + add_prefix_space=getattr(self.original_tokenizer, "add_prefix_space", False), + use_regex=False, + ), + ] + ) + + tokenizer.decoder = decoders.ByteLevel() + tokenizer.post_processor = processors.ByteLevel(trim_offsets=False) + + return tokenizer + + +class RobertaConverter(Converter): + def converted(self) -> 
Tokenizer: + ot = self.original_tokenizer + vocab = ot.encoder + merges = list(ot.bpe_ranks.keys()) + + tokenizer = Tokenizer( + BPE( + vocab=vocab, + merges=merges, + dropout=None, + continuing_subword_prefix="", + end_of_word_suffix="", + fuse_unk=False, + ) + ) + + tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space) + tokenizer.decoder = decoders.ByteLevel() + tokenizer.post_processor = processors.RobertaProcessing( + sep=(ot.sep_token, ot.sep_token_id), + cls=(ot.cls_token, ot.cls_token_id), + add_prefix_space=ot.add_prefix_space, + trim_offsets=True, # True by default on Roberta (historical) + ) + + return tokenizer + + +class RoFormerConverter(Converter): + def converted(self) -> Tokenizer: + from .models.roformer.tokenization_utils import JiebaPreTokenizer + + vocab = self.original_tokenizer.vocab + tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) + + strip_accents = False + do_lower_case = False + if hasattr(self.original_tokenizer, "basic_tokenizer"): + strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents + do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case + + tokenizer.normalizer = normalizers.BertNormalizer( + clean_text=True, + handle_chinese_chars=False, + strip_accents=strip_accents, + lowercase=do_lower_case, + ) + tokenizer.pre_tokenizer = pre_tokenizers.PreTokenizer.custom(JiebaPreTokenizer(vocab)) + + cls = str(self.original_tokenizer.cls_token) + sep = str(self.original_tokenizer.sep_token) + cls_token_id = self.original_tokenizer.cls_token_id + sep_token_id = self.original_tokenizer.sep_token_id + + tokenizer.post_processor = processors.TemplateProcessing( + single=f"{cls}:0 $A:0 {sep}:0", + pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1", + special_tokens=[ + (cls, cls_token_id), + (sep, sep_token_id), + ], + ) + tokenizer.decoder = decoders.WordPiece(prefix="##") + + return tokenizer + + +class DebertaConverter(Converter): + def converted(self) -> Tokenizer: + ot = self.original_tokenizer + vocab = ot.encoder + merges = list(ot.bpe_ranks.keys()) + + tokenizer = Tokenizer( + BPE( + vocab=vocab, + merges=merges, + dropout=None, + continuing_subword_prefix="", + end_of_word_suffix="", + fuse_unk=False, + ) + ) + + tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space) + tokenizer.decoder = decoders.ByteLevel() + tokenizer.post_processor = processors.TemplateProcessing( + single="[CLS]:0 $A:0 [SEP]:0", + pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", + special_tokens=[ + ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), + ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), + ], + ) + + return tokenizer + + +class SpmConverter(Converter): + def __init__(self, *args): + requires_backends(self, "protobuf") + + super().__init__(*args) + + # from .utils import sentencepiece_model_pb2 as model_pb2 + model_pb2 = import_protobuf() + + m = model_pb2.ModelProto() + with open(self.original_tokenizer.vocab_file, "rb") as f: + m.ParseFromString(f.read()) + self.proto = m + + if self.proto.trainer_spec.byte_fallback: + if not getattr(self, "handle_byte_fallback", None): + warnings.warn( + "The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option" + " which is not implemented in the fast tokenizers. 
In practice this means that the fast version of the" + " tokenizer can produce unknown tokens whereas the sentencepiece version would have converted these " + "unknown tokens into a sequence of byte tokens matching the original piece of text." + ) + + def vocab(self, proto): + return [(piece.piece, piece.score) for piece in proto.pieces] + + def unk_id(self, proto): + return proto.trainer_spec.unk_id + + def tokenizer(self, proto): + model_type = proto.trainer_spec.model_type + vocab_scores = self.vocab(proto) + unk_id = self.unk_id(proto) + + if model_type == 1: + tokenizer = Tokenizer(Unigram(vocab_scores, unk_id)) + elif model_type == 2: + _, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract() + bpe_vocab = {word: i for i, (word, score) in enumerate(vocab_scores)} + tokenizer = Tokenizer( + BPE( + bpe_vocab, + merges, + unk_token=proto.trainer_spec.unk_piece, + fuse_unk=True, + ) + ) + else: + raise Exception( + "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" + ) + + return tokenizer + + def normalizer(self, proto): + precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap + _normalizers = [ + normalizers.Strip(left=False, right=True), # stripping is important + normalizers.Replace(Regex(" {2,}"), "▁"), + ] + if not precompiled_charsmap: + return normalizers.Sequence(_normalizers) + else: + return normalizers.Sequence([normalizers.Precompiled(precompiled_charsmap)] + _normalizers) + + def pre_tokenizer(self, replacement, add_prefix_space): + prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer) + return pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) + + def post_processor(self): + return None + + def decoder(self, replacement, add_prefix_space): + prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer) + return decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) + + def converted(self) -> Tokenizer: + tokenizer = self.tokenizer(self.proto) + + # Tokenizer assemble + normalizer = self.normalizer(self.proto) + if normalizer is not None: + tokenizer.normalizer = normalizer + + replacement = "▁" + add_prefix_space = True + if hasattr(self.original_tokenizer, "add_prefix_space"): + add_prefix_space = self.original_tokenizer.add_prefix_space + + pre_tokenizer = self.pre_tokenizer(replacement, add_prefix_space) + if pre_tokenizer is not None: + tokenizer.pre_tokenizer = pre_tokenizer + + tokenizer.decoder = self.decoder(replacement, add_prefix_space) + post_processor = self.post_processor() + if post_processor: + tokenizer.post_processor = post_processor + + return tokenizer + + +class AlbertConverter(SpmConverter): + def vocab(self, proto): + return [ + (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) + for piece in proto.pieces + ] + + def normalizer(self, proto): + list_normalizers = [ + normalizers.Replace("``", '"'), + normalizers.Replace("''", '"'), + ] + if not self.original_tokenizer.keep_accents: + list_normalizers.append(normalizers.NFKD()) + list_normalizers.append(normalizers.StripAccents()) + if self.original_tokenizer.do_lower_case: + list_normalizers.append(normalizers.Lowercase()) + + precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap + + if precompiled_charsmap: + list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) + + list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " ")) + return 
normalizers.Sequence(list_normalizers) + + def post_processor(self): + return processors.TemplateProcessing( + single="[CLS]:0 $A:0 [SEP]:0", + pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", + special_tokens=[ + ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), + ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), + ], + ) + + +class BarthezConverter(SpmConverter): + def unk_id(self, proto): + unk_id = 3 + return unk_id + + def post_processor(self): + return processors.TemplateProcessing( + single=" $A ", + pair=" $A $B ", + special_tokens=[ + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ], + ) + + +class CamembertConverter(SpmConverter): + def vocab(self, proto): + vocab = [ + ("NOTUSED", 0.0), + ("", 0.0), + ("NOTUSED", 0.0), + ("", 0.0), + ("NOTUSED", -100), + ] + # We down-grade the original SentencePiece by -100 to avoid using it and use our added token instead + vocab += [(piece.piece, piece.score) for piece in proto.pieces[1:]] + vocab += [("", 0.0)] + return vocab + + def unk_id(self, proto): + # See vocab unk position + return 3 + + def post_processor(self): + return processors.TemplateProcessing( + single=" $A ", + pair=" $A $B ", + special_tokens=[ + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ], + ) + + +class DebertaV2Converter(SpmConverter): + def pre_tokenizer(self, replacement, add_prefix_space): + list_pretokenizers = [] + if self.original_tokenizer.split_by_punct: + list_pretokenizers.append(pre_tokenizers.Punctuation(behavior="isolated")) + prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer) + list_pretokenizers.append(pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)) + return pre_tokenizers.Sequence(list_pretokenizers) + + def normalizer(self, proto): + list_normalizers = [] + if self.original_tokenizer.do_lower_case: + list_normalizers.append(normalizers.Lowercase()) + list_normalizers.append(normalizers.Strip()) + + precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap + if precompiled_charsmap: + list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) + list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " ")) + + return normalizers.Sequence(list_normalizers) + + def post_processor(self): + return processors.TemplateProcessing( + single="[CLS]:0 $A:0 [SEP]:0", + pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", + special_tokens=[ + ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), + ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), + ], + ) + + +class MBartConverter(SpmConverter): + def vocab(self, proto): + vocab = [ + ("", 0.0), + ("", 0.0), + ("", 0.0), + ("", 0.0), + ] + vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] + vocab += [ + ("ar_AR", 0.0), + ("cs_CZ", 0.0), + ("de_DE", 0.0), + ("en_XX", 0.0), + ("es_XX", 0.0), + ("et_EE", 0.0), + ("fi_FI", 0.0), + ("fr_XX", 0.0), + ("gu_IN", 0.0), + ("hi_IN", 0.0), + ("it_IT", 0.0), + ("ja_XX", 0.0), + ("kk_KZ", 0.0), + ("ko_KR", 0.0), + ("lt_LT", 0.0), + ("lv_LV", 0.0), + ("my_MM", 0.0), + ("ne_NP", 0.0), + ("nl_XX", 0.0), + ("ro_RO", 0.0), + ("ru_RU", 0.0), + ("si_LK", 0.0), + ("tr_TR", 0.0), + ("vi_VN", 0.0), + ("zh_CN", 0.0), + ] + vocab += [("", 0.0)] + return vocab + + def unk_id(self, proto): + return 3 + + def post_processor(self): + return processors.TemplateProcessing( + single="$A en_XX", + pair="$A 
$B en_XX", + special_tokens=[ + ("en_XX", self.original_tokenizer.convert_tokens_to_ids("en_XX")), + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ], + ) + + +class MBart50Converter(SpmConverter): + def vocab(self, proto): + vocab = [ + ("", 0.0), + ("", 0.0), + ("", 0.0), + ("", 0.0), + ] + vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] + vocab += [("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ("af_ZA", 0.0), ("az_AZ", 0.0), ("bn_IN", 0.0), ("fa_IR", 0.0), ("he_IL", 0.0), ("hr_HR", 0.0), ("id_ID", 0.0), ("ka_GE", 0.0), ("km_KH", 0.0), ("mk_MK", 0.0), ("ml_IN", 0.0), ("mn_MN", 0.0), ("mr_IN", 0.0), ("pl_PL", 0.0), ("ps_AF", 0.0), ("pt_XX", 0.0), ("sv_SE", 0.0), ("sw_KE", 0.0), ("ta_IN", 0.0), ("te_IN", 0.0), ("th_TH", 0.0), ("tl_XX", 0.0), ("uk_UA", 0.0), ("ur_PK", 0.0), ("xh_ZA", 0.0), ("gl_ES", 0.0), ("sl_SI", 0.0)] # fmt: skip + vocab += [("", 0.0)] + return vocab + + def unk_id(self, proto): + return 3 + + def post_processor(self): + return processors.TemplateProcessing( + single="en_XX $A ", + pair="en_XX $A $B ", + special_tokens=[ + ("en_XX", self.original_tokenizer.convert_tokens_to_ids("en_XX")), + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ], + ) + + +class NllbConverter(SpmConverter): + def vocab(self, proto): + vocab = [ + ("", 0.0), + ("", 0.0), + ("", 0.0), + ("", 0.0), + ] + vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] + return vocab + + def unk_id(self, proto): + return 3 + + def post_processor(self): + return processors.TemplateProcessing( + single="eng_Latn $A ", + pair="eng_Latn $A $B ", + special_tokens=[ + ("eng_Latn", self.original_tokenizer.convert_tokens_to_ids("eng_Latn")), + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ], + ) + + +class SeamlessM4TConverter(SpmConverter): + def vocab(self, proto): + vocab = [ + ("", 0.0), + ("", 0.0), + ("", 0.0), + ("", 0.0), + ] + vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] + return vocab + + def unk_id(self, proto): + return self.original_tokenizer.unk_token_id + + def post_processor(self): + return processors.TemplateProcessing( + single="__eng__ $A ", + pair="__eng__ $A $B ", + special_tokens=[ + ("__eng__", self.original_tokenizer.convert_tokens_to_ids("__eng__")), + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ], + ) + + +class XLMRobertaConverter(SpmConverter): + def vocab(self, proto): + vocab = [ + ("", 0.0), + ("", 0.0), + ("", 0.0), + ("", 0.0), + ] + vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] + vocab += [("", 0.0)] + return vocab + + def unk_id(self, proto): + unk_id = 3 + return unk_id + + def post_processor(self): + return processors.TemplateProcessing( + single=" $A ", + pair=" $A $B ", + special_tokens=[ + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ], + ) + + +class XLNetConverter(SpmConverter): + def vocab(self, proto): + return [ + (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) + for piece in proto.pieces + ] + + def normalizer(self, proto): + list_normalizers = [ + normalizers.Replace("``", 
'"'), + normalizers.Replace("''", '"'), + ] + if not self.original_tokenizer.keep_accents: + list_normalizers.append(normalizers.NFKD()) + list_normalizers.append(normalizers.StripAccents()) + if self.original_tokenizer.do_lower_case: + list_normalizers.append(normalizers.Lowercase()) + + precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap + + if precompiled_charsmap: + list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) + + list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " ")) + return normalizers.Sequence(list_normalizers) + + def post_processor(self): + return processors.TemplateProcessing( + single="$A:0 :0 :2", + pair="$A:0 :0 $B:1 :1 :2", + special_tokens=[ + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ], + ) + + +class ReformerConverter(SpmConverter): + pass + + +class RemBertConverter(SpmConverter): + # Inspired from AlbertConverter + def normalizer(self, proto): + list_normalizers = [ + normalizers.Replace("``", '"'), + normalizers.Replace("''", '"'), + normalizers.Replace(Regex(" {2,}"), " "), + ] + if not self.original_tokenizer.keep_accents: + list_normalizers.append(normalizers.NFKD()) + list_normalizers.append(normalizers.StripAccents()) + if self.original_tokenizer.do_lower_case: + list_normalizers.append(normalizers.Lowercase()) + + precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap + + if precompiled_charsmap: + list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) + + return normalizers.Sequence(list_normalizers) + + def post_processor(self): + return processors.TemplateProcessing( + single="[CLS]:0 $A:0 [SEP]:0", + pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", + special_tokens=[ + ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), + ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), + ], + ) + + +class BertGenerationConverter(SpmConverter): + pass + + +class PegasusConverter(SpmConverter): + def vocab(self, proto): + vocab = [ + (self.original_tokenizer.pad_token, 0.0), + (self.original_tokenizer.eos_token, 0.0), + ] + + if self.original_tokenizer.mask_token_sent is not None: + vocab += [(self.original_tokenizer.mask_token_sent, 0.0)] + + if ( + self.original_tokenizer.mask_token is not None + and self.original_tokenizer.mask_token_id < self.original_tokenizer.offset + ): + vocab += [(self.original_tokenizer.mask_token, 0.0)] + + vocab += [(f"", -100.0) for i in range(2, self.original_tokenizer.offset)] + vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]] + return vocab + + def unk_id(self, proto): + return proto.trainer_spec.unk_id + self.original_tokenizer.offset + + def pre_tokenizer(self, replacement, add_prefix_space): + prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer) + return pre_tokenizers.Sequence( + [ + pre_tokenizers.WhitespaceSplit(), + pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme), + ] + ) + + def post_processor(self): + eos = self.original_tokenizer.eos_token + special_tokens = [ + (eos, self.original_tokenizer.eos_token_id), + ] + return processors.TemplateProcessing(single=["$A", eos], pair=["$A", "$B", eos], special_tokens=special_tokens) + + +class T5Converter(SpmConverter): + def vocab(self, proto): + num_extra_ids = self.original_tokenizer._extra_ids + vocab = [(piece.piece, piece.score) for piece in proto.pieces] + vocab += [(f"", 0.0) for i in range(num_extra_ids - 1, -1, -1)] + return vocab 
+ + def post_processor(self): + return processors.TemplateProcessing( + single=["$A", ""], + pair=["$A", "", "$B", ""], + special_tokens=[ + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ], + ) + + +class UdopConverter(SpmConverter): + def post_processor(self): + return processors.TemplateProcessing( + single=["$A", ""], + pair=["$A", "", "$B", ""], + special_tokens=[ + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ], + ) + + +class WhisperConverter(Converter): + def converted(self) -> Tokenizer: + vocab = self.original_tokenizer.encoder + merges = list(self.original_tokenizer.bpe_ranks.keys()) + + tokenizer = Tokenizer( + BPE( + vocab=vocab, + merges=merges, + dropout=None, + continuing_subword_prefix="", + end_of_word_suffix="", + fuse_unk=False, + ) + ) + + tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=self.original_tokenizer.add_prefix_space) + tokenizer.decoder = decoders.ByteLevel() + + prefix_token_ids = self.original_tokenizer.prefix_tokens + prefixes = self.original_tokenizer.convert_ids_to_tokens(prefix_token_ids) + eos = self.original_tokenizer.eos_token + eos_token_id = self.original_tokenizer.eos_token_id + prefix_template = " ".join([f"{token}:0" for token in prefixes]) + tokenizer.post_processor = processors.TemplateProcessing( + single=f"{prefix_template} $A:0 {eos}:0", + pair=f"{prefix_template} $A:0 $B:1 {eos}:1", + special_tokens=[ + (eos, eos_token_id), + *zip(prefixes, prefix_token_ids), + ], + ) + + return tokenizer + + +class BigBirdConverter(SpmConverter): + def post_processor(self): + return processors.TemplateProcessing( + single="[CLS]:0 $A:0 [SEP]:0", + pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", + special_tokens=[ + ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), + ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), + ], + ) + + +class CLIPConverter(Converter): + def converted(self) -> Tokenizer: + vocab = self.original_tokenizer.encoder + merges = list(self.original_tokenizer.bpe_ranks.keys()) + unk_token = self.original_tokenizer.unk_token + + tokenizer = Tokenizer( + BPE( + vocab=vocab, + merges=merges, + dropout=None, + continuing_subword_prefix="", + end_of_word_suffix="", + fuse_unk=False, + unk_token=str(unk_token), + ) + ) + + tokenizer.normalizer = normalizers.Sequence( + [normalizers.NFC(), normalizers.Replace(Regex(r"\s+"), " "), normalizers.Lowercase()] + ) + tokenizer.pre_tokenizer = pre_tokenizers.Sequence( + [ + pre_tokenizers.Split( + Regex(r"""'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+"""), + behavior="removed", + invert=True, + ), + pre_tokenizers.ByteLevel(add_prefix_space=False), + ] + ) + tokenizer.decoder = decoders.ByteLevel() + + # Hack to have a ByteLevel and TemplaceProcessor + tokenizer.post_processor = processors.RobertaProcessing( + sep=(self.original_tokenizer.eos_token, self.original_tokenizer.eos_token_id), + cls=(self.original_tokenizer.bos_token, self.original_tokenizer.bos_token_id), + add_prefix_space=False, + trim_offsets=False, + ) + return tokenizer + + +class LayoutLMv2Converter(Converter): + def converted(self) -> Tokenizer: + vocab = self.original_tokenizer.vocab + tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) + + tokenize_chinese_chars = False + strip_accents = False + do_lower_case = True + if hasattr(self.original_tokenizer, "basic_tokenizer"): + tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars + strip_accents = 
self.original_tokenizer.basic_tokenizer.strip_accents + do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case + + tokenizer.normalizer = normalizers.BertNormalizer( + clean_text=True, + handle_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + lowercase=do_lower_case, + ) + tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() + + cls = str(self.original_tokenizer.cls_token) + sep = str(self.original_tokenizer.sep_token) + cls_token_id = self.original_tokenizer.cls_token_id + sep_token_id = self.original_tokenizer.sep_token_id + + tokenizer.post_processor = processors.TemplateProcessing( + single=f"{cls}:0 $A:0 {sep}:0", + pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1", + special_tokens=[ + (cls, cls_token_id), + (sep, sep_token_id), + ], + ) + tokenizer.decoder = decoders.WordPiece(prefix="##") + + return tokenizer + + +class BlenderbotConverter(Converter): + def converted(self) -> Tokenizer: + ot = self.original_tokenizer + vocab = ot.encoder + merges = list(ot.bpe_ranks.keys()) + + tokenizer = Tokenizer( + BPE( + vocab=vocab, + merges=merges, + dropout=None, + continuing_subword_prefix="", + end_of_word_suffix="", + fuse_unk=False, + ) + ) + + tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space) + tokenizer.decoder = decoders.ByteLevel() + tokenizer.post_processor = processors.TemplateProcessing( + single=f"$A:0 {ot.eos_token}:0", + special_tokens=[ + (ot.eos_token, ot.eos_token_id), + ], + ) + + return tokenizer + + +class XGLMConverter(SpmConverter): + def vocab(self, proto): + vocab = [ + ("", 0.0), + ("", 0.0), + ("", 0.0), + ("", 0.0), + ] + vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] + vocab += [("", 0.0), ("", 0.0), ("", 0.0), ("", 0.0), ("", 0.0), ("", 0.0), ("", 0.0)] # fmt: skip + return vocab + + def unk_id(self, proto): + unk_id = 3 + return unk_id + + def post_processor(self): + return processors.TemplateProcessing( + single=" $A", + pair=" $A $B", + special_tokens=[ + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ("", self.original_tokenizer.convert_tokens_to_ids("")), + ], + ) + + +class GemmaConvert(SpmConverter): + handle_byte_fallback = True + + """" + split_by_unicode_script: true + split_by_number: true + split_by_whitespace: true + treat_whitespace_as_suffix: false + allow_whitespace_only_pieces: true + split_digits: true + byte_fallback: true + """ + + def normalizer(self, proto): + return normalizers.Replace(" ", "▁") + + def vocab(self, proto): + vocab = [ + (self.original_tokenizer.pad_token, 0.0), + (self.original_tokenizer.eos_token, 0.0), + (self.original_tokenizer.bos_token, 0.0), + ] + for piece in proto.pieces[3:]: + if piece.piece == "<0x09>": + vocab += [("\t", piece.score)] + else: + vocab += [(piece.piece, piece.score)] + # vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] + return vocab + + def pre_tokenizer(self, replacement, add_prefix_space): + return None + + def unk_id(self, proto): + unk_id = 3 + return unk_id + + def decoder(self, replacement, add_prefix_space): + return decoders.Sequence( + [ + decoders.Replace("▁", " "), + decoders.ByteFallback(), + decoders.Fuse(), + ] + ) + + def tokenizer(self, proto): + model_type = proto.trainer_spec.model_type + vocab_scores = self.vocab(proto) + if model_type == 1: + import tokenizers + + if version.parse(tokenizers.__version__) < version.parse("0.14.0"): + tokenizer = Tokenizer(Unigram(vocab_scores, 0)) + else: + tokenizer = Tokenizer(Unigram(vocab_scores, 0, 
byte_fallback=True)) + + elif model_type == 2: + _, merges = GemmaSentencePieceExtractor(self.original_tokenizer.vocab_file).extract(vocab_scores) + bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)} + + tokenizer = Tokenizer( + BPE( + bpe_vocab, + merges, + unk_token=proto.trainer_spec.unk_piece, + fuse_unk=True, + byte_fallback=True, + dropout=None, + ) + ) + tokenizer.add_special_tokens( + [ + AddedToken("", normalized=False, special=True), + AddedToken("", normalized=False, special=True), + AddedToken("", normalized=False, special=True), + AddedToken("", normalized=False, special=True), + ] + ) + else: + raise Exception( + "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" + ) + user_defined_symbols = [ + AddedToken(token, normalized=False, special=False) for token in proto.trainer_spec.user_defined_symbols + ] + tokenizer.add_tokens(user_defined_symbols) + return tokenizer + + +class LlamaConverter(SpmConverter): + handle_byte_fallback = True + + def vocab(self, proto): + vocab = [ + (self.original_tokenizer.convert_ids_to_tokens(0), 0.0), + (self.original_tokenizer.convert_ids_to_tokens(1), 0.0), + (self.original_tokenizer.convert_ids_to_tokens(2), 0.0), + ] + vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] + return vocab + + def unk_id(self, proto): + unk_id = 0 + return unk_id + + def decoder(self, replacement, add_prefix_space): + sequence = [ + decoders.Replace("▁", " "), + decoders.ByteFallback(), + decoders.Fuse(), + ] + if add_prefix_space: + sequence += [decoders.Strip(content=" ", left=1)] + return decoders.Sequence(sequence) + + def tokenizer(self, proto): + model_type = proto.trainer_spec.model_type + vocab_scores = self.vocab(proto) + if model_type == 1: + import tokenizers + + if version.parse(tokenizers.__version__) < version.parse("0.14.0"): + tokenizer = Tokenizer(Unigram(vocab_scores, 0)) + else: + tokenizer = Tokenizer(Unigram(vocab_scores, 0, byte_fallback=True)) + + elif model_type == 2: + _, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract(vocab_scores) + bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)} + tokenizer = Tokenizer( + BPE(bpe_vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True, byte_fallback=True) + ) + tokenizer.add_special_tokens( + [ + AddedToken(self.original_tokenizer.convert_ids_to_tokens(0), normalized=False, special=True), + AddedToken(self.original_tokenizer.convert_ids_to_tokens(1), normalized=False, special=True), + AddedToken(self.original_tokenizer.convert_ids_to_tokens(2), normalized=False, special=True), + ] + ) + else: + raise Exception( + "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" + ) + + return tokenizer + + def normalizer(self, proto): + sequence = [] + if hasattr(self.original_tokenizer, "add_prefix_space"): + if self.original_tokenizer.add_prefix_space: + sequence += [normalizers.Prepend(prepend="▁")] + sequence += [normalizers.Replace(pattern=" ", content="▁")] + return normalizers.Sequence(sequence) + + def pre_tokenizer(self, replacement, add_prefix_space): + return None + + def post_processor(self): + # the processor is defined in the LlamaTokenizerFast class. 
+ return None + + +class MarkupLMConverter(Converter): + def converted(self) -> Tokenizer: + ot = self.original_tokenizer + vocab = ot.encoder + merges = list(ot.bpe_ranks.keys()) + + tokenizer = Tokenizer( + BPE( + vocab=vocab, + merges=merges, + dropout=None, + continuing_subword_prefix="", + end_of_word_suffix="", + fuse_unk=False, + unk_token=self.original_tokenizer.unk_token, + ) + ) + + tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space) + tokenizer.decoder = decoders.ByteLevel() + + cls = str(self.original_tokenizer.cls_token) + sep = str(self.original_tokenizer.sep_token) + cls_token_id = self.original_tokenizer.cls_token_id + sep_token_id = self.original_tokenizer.sep_token_id + + tokenizer.post_processor = processors.TemplateProcessing( + single=f"{cls} $A {sep}", + pair=f"{cls} $A {sep} $B {sep}", + special_tokens=[ + (cls, cls_token_id), + (sep, sep_token_id), + ], + ) + + return tokenizer + + +SLOW_TO_FAST_CONVERTERS = { + "AlbertTokenizer": AlbertConverter, + "BartTokenizer": RobertaConverter, + "BarthezTokenizer": BarthezConverter, + "BertTokenizer": BertConverter, + "BigBirdTokenizer": BigBirdConverter, + "BlenderbotTokenizer": BlenderbotConverter, + "CamembertTokenizer": CamembertConverter, + "CLIPTokenizer": CLIPConverter, + "CodeGenTokenizer": GPT2Converter, + "ConvBertTokenizer": BertConverter, + "DebertaTokenizer": DebertaConverter, + "DebertaV2Tokenizer": DebertaV2Converter, + "DistilBertTokenizer": BertConverter, + "DPRReaderTokenizer": BertConverter, + "DPRQuestionEncoderTokenizer": BertConverter, + "DPRContextEncoderTokenizer": BertConverter, + "ElectraTokenizer": BertConverter, + "FNetTokenizer": AlbertConverter, + "FunnelTokenizer": FunnelConverter, + "GPT2Tokenizer": GPT2Converter, + "HerbertTokenizer": HerbertConverter, + "LayoutLMTokenizer": BertConverter, + "LayoutLMv2Tokenizer": BertConverter, + "LayoutLMv3Tokenizer": RobertaConverter, + "LayoutXLMTokenizer": XLMRobertaConverter, + "LongformerTokenizer": RobertaConverter, + "LEDTokenizer": RobertaConverter, + "LxmertTokenizer": BertConverter, + "MarkupLMTokenizer": MarkupLMConverter, + "MBartTokenizer": MBartConverter, + "MBart50Tokenizer": MBart50Converter, + "MPNetTokenizer": MPNetConverter, + "MobileBertTokenizer": BertConverter, + "MvpTokenizer": RobertaConverter, + "NllbTokenizer": NllbConverter, + "OpenAIGPTTokenizer": OpenAIGPTConverter, + "PegasusTokenizer": PegasusConverter, + "Qwen2Tokenizer": Qwen2Converter, + "RealmTokenizer": BertConverter, + "ReformerTokenizer": ReformerConverter, + "RemBertTokenizer": RemBertConverter, + "RetriBertTokenizer": BertConverter, + "RobertaTokenizer": RobertaConverter, + "RoFormerTokenizer": RoFormerConverter, + "SeamlessM4TTokenizer": SeamlessM4TConverter, + "SqueezeBertTokenizer": BertConverter, + "T5Tokenizer": T5Converter, + "UdopTokenizer": UdopConverter, + "WhisperTokenizer": WhisperConverter, + "XLMRobertaTokenizer": XLMRobertaConverter, + "XLNetTokenizer": XLNetConverter, + "SplinterTokenizer": SplinterConverter, + "XGLMTokenizer": XGLMConverter, + "LlamaTokenizer": LlamaConverter, + "CodeLlamaTokenizer": LlamaConverter, + "GemmaTokenizer": GemmaConvert, +} + + +def convert_slow_tokenizer(transformer_tokenizer) -> Tokenizer: + """ + Utilities to convert a slow tokenizer instance in a fast tokenizer instance. + + Args: + transformer_tokenizer ([`~tokenization_utils_base.PreTrainedTokenizer`]): + Instance of a slow tokenizer to convert in the backend tokenizer for + [`~tokenization_utils_base.PreTrainedTokenizerFast`]. 
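To make the purpose of this entry point concrete, a short usage sketch; the checkpoint name is chosen only for illustration and downloading it requires network access:

```python
from transformers import BertTokenizer
from transformers.convert_slow_tokenizer import convert_slow_tokenizer

# Load a slow (pure-Python) tokenizer and convert it into a `tokenizers.Tokenizer` backend.
slow = BertTokenizer.from_pretrained("bert-base-uncased")
backend = convert_slow_tokenizer(slow)

print(backend.encode("Hello world!").tokens)
# e.g. ['[CLS]', 'hello', 'world', '!', '[SEP]']
```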
+ + Return: + A instance of [`~tokenizers.Tokenizer`] to be used as the backend tokenizer of a + [`~tokenization_utils_base.PreTrainedTokenizerFast`] + """ + + tokenizer_class_name = transformer_tokenizer.__class__.__name__ + + if tokenizer_class_name not in SLOW_TO_FAST_CONVERTERS: + raise ValueError( + f"An instance of tokenizer class {tokenizer_class_name} cannot be converted in a Fast tokenizer instance." + " No converter was found. Currently available slow->fast convertors:" + f" {list(SLOW_TO_FAST_CONVERTERS.keys())}" + ) + + converter_class = SLOW_TO_FAST_CONVERTERS[tokenizer_class_name] + + return converter_class(transformer_tokenizer).converted() diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/convert_slow_tokenizers_checkpoints_to_fast.py b/llmeval-env/lib/python3.10/site-packages/transformers/convert_slow_tokenizers_checkpoints_to_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..a032ee93b03db82216f29e2ce20f9af833980851 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/convert_slow_tokenizers_checkpoints_to_fast.py @@ -0,0 +1,126 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Convert slow tokenizers checkpoints in fast (serialization format of the `tokenizers` library)""" + +import argparse +import os + +import transformers + +from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS +from .utils import logging + + +logging.set_verbosity_info() + +logger = logging.get_logger(__name__) + + +TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS} + + +def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download): + if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: + raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.") + + if tokenizer_name is None: + tokenizer_names = TOKENIZER_CLASSES + else: + tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")} + + logger.info(f"Loading tokenizer classes: {tokenizer_names}") + + for tokenizer_name in tokenizer_names: + tokenizer_class = TOKENIZER_CLASSES[tokenizer_name] + + add_prefix = True + if checkpoint_name is None: + checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys()) + else: + checkpoint_names = [checkpoint_name] + + logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}") + + for checkpoint in checkpoint_names: + logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}") + + # Load tokenizer + tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download) + + # Save fast tokenizer + logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}") + + # For organization names we create sub-directories + if "/" in checkpoint: + checkpoint_directory, checkpoint_prefix_name = 
checkpoint.split("/") + dump_path_full = os.path.join(dump_path, checkpoint_directory) + elif add_prefix: + checkpoint_prefix_name = checkpoint + dump_path_full = dump_path + else: + checkpoint_prefix_name = None + dump_path_full = dump_path + + logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}") + + if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]: + file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint] + next_char = file_path.split(checkpoint)[-1][0] + if next_char == "/": + dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name) + checkpoint_prefix_name = None + + logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}") + + file_names = tokenizer.save_pretrained( + dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name + ) + logger.info(f"=> File names {file_names}") + + for file_name in file_names: + if not file_name.endswith("tokenizer.json"): + os.remove(file_name) + logger.info(f"=> removing {file_name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files." + ) + parser.add_argument( + "--tokenizer_name", + default=None, + type=str, + help=( + f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will " + "download and convert all the checkpoints from AWS." + ), + ) + parser.add_argument( + "--checkpoint_name", + default=None, + type=str, + help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.", + ) + parser.add_argument( + "--force_download", + action="store_true", + help="Re-download checkpoints.", + ) + args = parser.parse_args() + + convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/debug_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/debug_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dbceb1d849076999c6821556accaea05e53a9ff9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/debug_utils.py @@ -0,0 +1,346 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections + +from .utils import ExplicitEnum, is_torch_available, logging + + +if is_torch_available(): + import torch + + +logger = logging.get_logger(__name__) + + +class DebugUnderflowOverflow: + """ + This debug class helps detect and understand where the model starts getting very large or very small, and more + importantly `nan` or `inf` weight and activation elements. + + There are 2 working modes: + + 1. Underflow/overflow detection (default) + 2. 
Specific batch absolute min/max tracing without detection + + Mode 1: Underflow/overflow detection + + To activate the underflow/overflow detection, initialize the object with the model : + + ```python + debug_overflow = DebugUnderflowOverflow(model) + ``` + + then run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or output + elements this module will throw an exception and will print `max_frames_to_save` frames that lead to this event, + each frame reporting + + 1. the fully qualified module name plus the class name whose `forward` was run + 2. the absolute min and max value of all elements for each module weights, and the inputs and output + + For example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16 + mixed precision : + + ``` + Detected inf/nan during batch_number=0 + Last 21 forward frames: + abs min abs max metadata + [...] + encoder.block.2.layer.1.DenseReluDense.wi_0 Linear + 2.17e-07 4.50e+00 weight + 1.79e-06 4.65e+00 input[0] + 2.68e-06 3.70e+01 output + encoder.block.2.layer.1.DenseReluDense.wi_1 Linear + 8.08e-07 2.66e+01 weight + 1.79e-06 4.65e+00 input[0] + 1.27e-04 2.37e+02 output + encoder.block.2.layer.1.DenseReluDense.wo Linear + 1.01e-06 6.44e+00 weight + 0.00e+00 9.74e+03 input[0] + 3.18e-04 6.27e+04 output + encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense + 1.79e-06 4.65e+00 input[0] + 3.18e-04 6.27e+04 output + encoder.block.2.layer.1.dropout Dropout + 3.18e-04 6.27e+04 input[0] + 0.00e+00 inf output + ``` + + You can see here, that `T5DenseGatedGeluDense.forward` resulted in output activations, whose absolute max value was + around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which + renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than + 64K, and we get an overlow. + + As you can see it's the previous frames that we need to look into when the numbers start going into very large for + fp16 numbers. + + The tracking is done in a forward hook, which gets invoked immediately after `forward` has completed. + + By default the last 21 frames are printed. You can change the default to adjust for your needs. For example : + + ```python + debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100) + ``` + + To validate that you have set up this debugging feature correctly, and you intend to use it in a training that + may take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in + the next section. + + + Mode 2. Specific batch absolute min/max tracing without detection + + The second work mode is per-batch tracing with the underflow/overflow detection feature turned off. + + Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a + given batch, and only do that for batches 1 and 3. Then you instantiate this class as : + + ```python + debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3]) + ``` + + And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed. + + This is helpful if you know that the program starts misbehaving after a certain batch number, so you can + fast-forward right to that area. 
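A tiny self-contained sketch of this tracing mode outside of a real training loop; the model below is a throwaway stand-in, not anything from the library:

```python
import torch
from torch import nn
from transformers.debug_utils import DebugUnderflowOverflow

# Throwaway model, used only to show what a traced batch looks like.
model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[0])

x = torch.randn(4, 8)
_ = model(x)  # batch 0 is traced: abs min/max of every weight, input and output is printed
```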
+ + + Early stopping: + + You can also specify the batch number after which to stop the training, with : + + ```python + debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3) + ``` + + This feature is mainly useful in the tracing mode, but you can use it for any mode. + + + **Performance**: + + As this module measures absolute `min`/``max` of each weight of the model on every forward it'll slow the training + down. Therefore remember to turn it off once the debugging needs have been met. + + Args: + model (`nn.Module`): + The model to debug. + max_frames_to_save (`int`, *optional*, defaults to 21): + How many frames back to record + trace_batch_nums(`List[int]`, *optional*, defaults to `[]`): + Which batch numbers to trace (turns detection off) + abort_after_batch_num (`int``, *optional*): + Whether to abort after a certain batch number has finished + """ + + def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None): + self.model = model + self.trace_batch_nums = trace_batch_nums + self.abort_after_batch_num = abort_after_batch_num + + # keep a LIFO buffer of frames to dump as soon as inf/nan is encountered to give context to the problem emergence + self.frames = collections.deque([], max_frames_to_save) + self.frame = [] + self.batch_number = 0 + self.total_calls = 0 + self.detected_overflow = False + self.prefix = " " + + self.analyse_model() + + self.register_forward_hook() + + def save_frame(self, frame=None): + if frame is not None: + self.expand_frame(frame) + self.frames.append("\n".join(self.frame)) + self.frame = [] # start a new frame + + def expand_frame(self, line): + self.frame.append(line) + + def trace_frames(self): + print("\n".join(self.frames)) + self.frames = [] + + def reset_saved_frames(self): + self.frames = [] + + def dump_saved_frames(self): + print(f"\nDetected inf/nan during batch_number={self.batch_number}") + print(f"Last {len(self.frames)} forward frames:") + print(f"{'abs min':8} {'abs max':8} metadata") + print("\n".join(self.frames)) + print("\n\n") + self.frames = [] + + def analyse_model(self): + # extract the fully qualified module names, to be able to report at run time. 
e.g.: + # encoder.block.2.layer.0.SelfAttention.o + # + # for shared weights only the first shared module name will be registered + self.module_names = {m: name for name, m in self.model.named_modules()} + # self.longest_module_name = max(len(v) for v in self.module_names.values()) + + def analyse_variable(self, var, ctx): + if torch.is_tensor(var): + self.expand_frame(get_abs_min_max(var, ctx)) + if detect_overflow(var, ctx): + self.detected_overflow = True + elif var is None: + self.expand_frame(f"{'None':>17} {ctx}") + else: + self.expand_frame(f"{'not a tensor':>17} {ctx}") + + def batch_start_frame(self): + self.expand_frame(f"\n\n{self.prefix} *** Starting batch number={self.batch_number} ***") + self.expand_frame(f"{'abs min':8} {'abs max':8} metadata") + + def batch_end_frame(self): + self.expand_frame(f"{self.prefix} *** Finished batch number={self.batch_number-1} ***\n\n") + + def create_frame(self, module, input, output): + self.expand_frame(f"{self.prefix} {self.module_names[module]} {module.__class__.__name__}") + + # params + for name, p in module.named_parameters(recurse=False): + self.analyse_variable(p, name) + + # inputs + if isinstance(input, tuple): + for i, x in enumerate(input): + self.analyse_variable(x, f"input[{i}]") + else: + self.analyse_variable(input, "input") + + # outputs + if isinstance(output, tuple): + for i, x in enumerate(output): + # possibly a tuple of tuples + if isinstance(x, tuple): + for j, y in enumerate(x): + self.analyse_variable(y, f"output[{i}][{j}]") + else: + self.analyse_variable(x, f"output[{i}]") + else: + self.analyse_variable(output, "output") + + self.save_frame() + + def register_forward_hook(self): + self.model.apply(self._register_forward_hook) + + def _register_forward_hook(self, module): + module.register_forward_hook(self.forward_hook) + + def forward_hook(self, module, input, output): + # - input is a tuple of packed inputs (could be non-Tensors) + # - output could be a Tensor or a tuple of Tensors and non-Tensors + + last_frame_of_batch = False + + trace_mode = True if self.batch_number in self.trace_batch_nums else False + if trace_mode: + self.reset_saved_frames() + + if self.total_calls == 0: + self.batch_start_frame() + self.total_calls += 1 + + # count batch numbers - the very first forward hook of the batch will be called when the + # batch completes - i.e. it gets called very last - we know this batch has finished + if module == self.model: + self.batch_number += 1 + last_frame_of_batch = True + + self.create_frame(module, input, output) + + # if last_frame_of_batch: + # self.batch_end_frame() + + if trace_mode: + self.trace_frames() + + if last_frame_of_batch: + self.batch_start_frame() + + if self.detected_overflow and not trace_mode: + self.dump_saved_frames() + + # now we can abort, as it's pointless to continue running + raise ValueError( + "DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. " + "Please scroll up above this traceback to see the activation values prior to this event." 
+ ) + + # abort after certain batch if requested to do so + if self.abort_after_batch_num is not None and self.batch_number > self.abort_after_batch_num: + raise ValueError( + f"DebugUnderflowOverflow: aborting after {self.batch_number} batches due to" + f" `abort_after_batch_num={self.abort_after_batch_num}` arg" + ) + + +def get_abs_min_max(var, ctx): + abs_var = var.abs() + return f"{abs_var.min():8.2e} {abs_var.max():8.2e} {ctx}" + + +def detect_overflow(var, ctx): + """ + Report whether the tensor contains any `nan` or `inf` entries. + + This is useful for detecting overflows/underflows and best to call right after the function that did some math that + modified the tensor in question. + + This function contains a few other helper features that you can enable and tweak directly if you want to track + various other things. + + Args: + var: the tensor variable to check + ctx: the message to print as a context + + Return: + `True` if `inf` or `nan` was detected, `False` otherwise + """ + detected = False + if torch.isnan(var).any().item(): + detected = True + print(f"{ctx} has nans") + if torch.isinf(var).any().item(): + detected = True + print(f"{ctx} has infs") + + # if needed to monitor large elements can enable the following + if 0: # and detected: + n100 = var[torch.ge(var.abs(), 100)] + if n100.numel() > 0: + print(f"{ctx}: n100={n100.numel()}") + n1000 = var[torch.ge(var.abs(), 1000)] + if n1000.numel() > 0: + print(f"{ctx}: n1000={n1000.numel()}") + n10000 = var[torch.ge(var.abs(), 10000)] + if n10000.numel() > 0: + print(f"{ctx}: n10000={n10000.numel()}") + + if 0: + print(f"min={var.min():9.2e} max={var.max():9.2e}") + + if 0: + print(f"min={var.min():9.2e} max={var.max():9.2e} var={var.var():9.2e} mean={var.mean():9.2e} ({ctx})") + + return detected + + +class DebugOption(ExplicitEnum): + UNDERFLOW_OVERFLOW = "underflow_overflow" + TPU_METRICS_DEBUG = "tpu_metrics_debug" diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/dependency_versions_check.py b/llmeval-env/lib/python3.10/site-packages/transformers/dependency_versions_check.py new file mode 100644 index 0000000000000000000000000000000000000000..82d07850847ec357f36ff51088ddec36aceff093 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/dependency_versions_check.py @@ -0,0 +1,63 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
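Before the dependency checks that follow, a short illustrative call to the module-level `detect_overflow` helper defined above; the values are deliberately pushed past fp16's representable range:

```python
import torch
from transformers.debug_utils import detect_overflow

x = (torch.randn(4, 4) * 1e6).half()  # most elements exceed fp16's ~65504 limit and become inf
overflowed = detect_overflow(x, "after scaling")  # prints "after scaling has infs"
print(overflowed)  # True
```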
+ +from .dependency_versions_table import deps +from .utils.versions import require_version, require_version_core + + +# define which module versions we always want to check at run time +# (usually the ones defined in `install_requires` in setup.py) +# +# order specific notes: +# - tqdm must be checked before tokenizers + +pkgs_to_check_at_runtime = [ + "python", + "tqdm", + "regex", + "requests", + "packaging", + "filelock", + "numpy", + "tokenizers", + "huggingface-hub", + "safetensors", + "accelerate", + "pyyaml", +] + +for pkg in pkgs_to_check_at_runtime: + if pkg in deps: + if pkg == "tokenizers": + # must be loaded here, or else tqdm check may fail + from .utils import is_tokenizers_available + + if not is_tokenizers_available(): + continue # not required, check version only if installed + elif pkg == "accelerate": + # must be loaded here, or else tqdm check may fail + from .utils import is_accelerate_available + + # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of + # Transformers with PyTorch + if not is_accelerate_available(): + continue # not required, check version only if installed + + require_version_core(deps[pkg]) + else: + raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") + + +def dep_version_check(pkg, hint=None): + require_version(deps[pkg], hint) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/dynamic_module_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/dynamic_module_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a89105029868a15047562b387ffd50ac1a630bed --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/dynamic_module_utils.py @@ -0,0 +1,633 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities to dynamically load objects from the Hub.""" +import filecmp +import importlib +import os +import re +import shutil +import signal +import sys +import typing +import warnings +from pathlib import Path +from typing import Any, Dict, List, Optional, Union + +from huggingface_hub import try_to_load_from_cache + +from .utils import ( + HF_MODULES_CACHE, + TRANSFORMERS_DYNAMIC_MODULE_NAME, + cached_file, + extract_commit_hash, + is_offline_mode, + logging, +) + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def init_hf_modules(): + """ + Creates the cache directory for modules with an init, and adds it to the Python path. + """ + # This function has already been executed if HF_MODULES_CACHE already is in the Python path. + if HF_MODULES_CACHE in sys.path: + return + + sys.path.append(HF_MODULES_CACHE) + os.makedirs(HF_MODULES_CACHE, exist_ok=True) + init_path = Path(HF_MODULES_CACHE) / "__init__.py" + if not init_path.exists(): + init_path.touch() + importlib.invalidate_caches() + + +def create_dynamic_module(name: Union[str, os.PathLike]): + """ + Creates a dynamic module in the cache directory for modules. 
+ + Args: + name (`str` or `os.PathLike`): + The name of the dynamic module to create. + """ + init_hf_modules() + dynamic_module_path = (Path(HF_MODULES_CACHE) / name).resolve() + # If the parent module does not exist yet, recursively create it. + if not dynamic_module_path.parent.exists(): + create_dynamic_module(dynamic_module_path.parent) + os.makedirs(dynamic_module_path, exist_ok=True) + init_path = dynamic_module_path / "__init__.py" + if not init_path.exists(): + init_path.touch() + # It is extremely important to invalidate the cache when we change stuff in those modules, or users end up + # with errors about module that do not exist. Same for all other `invalidate_caches` in this file. + importlib.invalidate_caches() + + +def get_relative_imports(module_file: Union[str, os.PathLike]) -> List[str]: + """ + Get the list of modules that are relatively imported in a module file. + + Args: + module_file (`str` or `os.PathLike`): The module file to inspect. + + Returns: + `List[str]`: The list of relative imports in the module. + """ + with open(module_file, "r", encoding="utf-8") as f: + content = f.read() + + # Imports of the form `import .xxx` + relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE) + # Imports of the form `from .xxx import yyy` + relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE) + # Unique-ify + return list(set(relative_imports)) + + +def get_relative_import_files(module_file: Union[str, os.PathLike]) -> List[str]: + """ + Get the list of all files that are needed for a given module. Note that this function recurses through the relative + imports (if a imports b and b imports c, it will return module files for b and c). + + Args: + module_file (`str` or `os.PathLike`): The module file to inspect. + + Returns: + `List[str]`: The list of all relative imports a given module needs (recursively), which will give us the list + of module files a given module needs. + """ + no_change = False + files_to_check = [module_file] + all_relative_imports = [] + + # Let's recurse through all relative imports + while not no_change: + new_imports = [] + for f in files_to_check: + new_imports.extend(get_relative_imports(f)) + + module_path = Path(module_file).parent + new_import_files = [str(module_path / m) for m in new_imports] + new_import_files = [f for f in new_import_files if f not in all_relative_imports] + files_to_check = [f"{f}.py" for f in new_import_files] + + no_change = len(new_import_files) == 0 + all_relative_imports.extend(files_to_check) + + return all_relative_imports + + +def get_imports(filename: Union[str, os.PathLike]) -> List[str]: + """ + Extracts all the libraries (not relative imports this time) that are imported in a file. + + Args: + filename (`str` or `os.PathLike`): The module file to inspect. + + Returns: + `List[str]`: The list of all packages required to use the input module. 
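To make the regex-based scanning above concrete, an illustrative snippet (the module contents are made up) showing what `get_relative_imports` and `get_imports` return for a small file:

```python
import os
import tempfile

from transformers.dynamic_module_utils import get_imports, get_relative_imports

# Made-up module contents, used only to exercise the two helpers.
source = (
    "import torch\n"
    "from .configuration_my_model import MyConfig\n"
    "from transformers import PreTrainedModel\n"
)
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "modeling_my_model.py")
    with open(path, "w", encoding="utf-8") as f:
        f.write(source)
    print(sorted(get_imports(path)))   # ['torch', 'transformers']
    print(get_relative_imports(path))  # ['configuration_my_model']
```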
+ """ + with open(filename, "r", encoding="utf-8") as f: + content = f.read() + + # filter out try/except block so in custom code we can have try/except imports + content = re.sub(r"\s*try\s*:\s*.*?\s*except\s*.*?:", "", content, flags=re.MULTILINE | re.DOTALL) + + # Imports of the form `import xxx` + imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE) + # Imports of the form `from xxx import yyy` + imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE) + # Only keep the top-level module + imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")] + return list(set(imports)) + + +def check_imports(filename: Union[str, os.PathLike]) -> List[str]: + """ + Check if the current Python environment contains all the libraries that are imported in a file. Will raise if a + library is missing. + + Args: + filename (`str` or `os.PathLike`): The module file to check. + + Returns: + `List[str]`: The list of relative imports in the file. + """ + imports = get_imports(filename) + missing_packages = [] + for imp in imports: + try: + importlib.import_module(imp) + except ImportError: + missing_packages.append(imp) + + if len(missing_packages) > 0: + raise ImportError( + "This modeling file requires the following packages that were not found in your environment: " + f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`" + ) + + return get_relative_imports(filename) + + +def get_class_in_module(class_name: str, module_path: Union[str, os.PathLike]) -> typing.Type: + """ + Import a module on the cache directory for modules and extract a class from it. + + Args: + class_name (`str`): The name of the class to import. + module_path (`str` or `os.PathLike`): The path to the module to import. + + Returns: + `typing.Type`: The class looked for. + """ + name = os.path.normpath(module_path).replace(".py", "").replace(os.path.sep, ".") + module_path = str(Path(HF_MODULES_CACHE) / module_path) + module = importlib.machinery.SourceFileLoader(name, module_path).load_module() + return getattr(module, class_name) + + +def get_cached_module_file( + pretrained_model_name_or_path: Union[str, os.PathLike], + module_file: str, + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + repo_type: Optional[str] = None, + _commit_hash: Optional[str] = None, + **deprecated_kwargs, +) -> str: + """ + Prepares Downloads a module from a local folder or a distant repo and returns its path inside the cached + Transformers module. + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. + - a path to a *directory* containing a configuration file saved using the + [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + + module_file (`str`): + The name of the module file containing the class to look for. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. 
+ resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + repo_type (`str`, *optional*): + Specify the repo type (useful when downloading from a space for instance). + + + + Passing `token=True` is required when you want to use a private model. + + + + Returns: + `str`: The path to the module inside the cache. + """ + use_auth_token = deprecated_kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") + token = use_auth_token + + if is_offline_mode() and not local_files_only: + logger.info("Offline mode: forcing local_files_only=True") + local_files_only = True + + # Download and cache module_file from the repo `pretrained_model_name_or_path` of grab it if it's a local file. + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + is_local = os.path.isdir(pretrained_model_name_or_path) + if is_local: + submodule = os.path.basename(pretrained_model_name_or_path) + else: + submodule = pretrained_model_name_or_path.replace("/", os.path.sep) + cached_module = try_to_load_from_cache( + pretrained_model_name_or_path, module_file, cache_dir=cache_dir, revision=_commit_hash, repo_type=repo_type + ) + + new_files = [] + try: + # Load from URL or cache if already cached + resolved_module_file = cached_file( + pretrained_model_name_or_path, + module_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + revision=revision, + repo_type=repo_type, + _commit_hash=_commit_hash, + ) + if not is_local and cached_module != resolved_module_file: + new_files.append(module_file) + + except EnvironmentError: + logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.") + raise + + # Check we have all the requirements in our environment + modules_needed = check_imports(resolved_module_file) + + # Now we move the module inside our cached dynamic modules. + full_submodule = TRANSFORMERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule + create_dynamic_module(full_submodule) + submodule_path = Path(HF_MODULES_CACHE) / full_submodule + if submodule == os.path.basename(pretrained_model_name_or_path): + # We copy local files to avoid putting too many folders in sys.path. 
This copy is done when the file is new or + # has changed since last copy. + if not (submodule_path / module_file).exists() or not filecmp.cmp( + resolved_module_file, str(submodule_path / module_file) + ): + shutil.copy(resolved_module_file, submodule_path / module_file) + importlib.invalidate_caches() + for module_needed in modules_needed: + module_needed = f"{module_needed}.py" + module_needed_file = os.path.join(pretrained_model_name_or_path, module_needed) + if not (submodule_path / module_needed).exists() or not filecmp.cmp( + module_needed_file, str(submodule_path / module_needed) + ): + shutil.copy(module_needed_file, submodule_path / module_needed) + importlib.invalidate_caches() + else: + # Get the commit hash + commit_hash = extract_commit_hash(resolved_module_file, _commit_hash) + + # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the + # benefit of versioning. + submodule_path = submodule_path / commit_hash + full_submodule = full_submodule + os.path.sep + commit_hash + create_dynamic_module(full_submodule) + + if not (submodule_path / module_file).exists(): + shutil.copy(resolved_module_file, submodule_path / module_file) + importlib.invalidate_caches() + # Make sure we also have every file with relative + for module_needed in modules_needed: + if not (submodule_path / f"{module_needed}.py").exists(): + get_cached_module_file( + pretrained_model_name_or_path, + f"{module_needed}.py", + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + _commit_hash=commit_hash, + ) + new_files.append(f"{module_needed}.py") + + if len(new_files) > 0 and revision is None: + new_files = "\n".join([f"- {f}" for f in new_files]) + repo_type_str = "" if repo_type is None else f"{repo_type}s/" + url = f"https://huggingface.co/{repo_type_str}{pretrained_model_name_or_path}" + logger.warning( + f"A new version of the following files was downloaded from {url}:\n{new_files}" + "\n. Make sure to double-check they do not contain any added malicious code. To avoid downloading new " + "versions of the code file, you can pin a revision." + ) + + return os.path.join(full_submodule, module_file) + + +def get_class_from_dynamic_module( + class_reference: str, + pretrained_model_name_or_path: Union[str, os.PathLike], + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + repo_type: Optional[str] = None, + code_revision: Optional[str] = None, + **kwargs, +) -> typing.Type: + """ + Extracts a class from a module file, present in the local folder or repository of a model. + + + + Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should + therefore only be called on trusted repos. + + + + + + Args: + class_reference (`str`): + The full name of the class to load, including its module and optionally its repo. + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. + - a path to a *directory* containing a configuration file saved using the + [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. 
+ + This is used when `class_reference` does not specify another repo. + module_file (`str`): + The name of the module file containing the class to look for. + class_name (`str`): + The name of the class to import in the module. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + repo_type (`str`, *optional*): + Specify the repo type (useful when downloading from a space for instance). + code_revision (`str`, *optional*, defaults to `"main"`): + The specific revision to use for the code on the Hub, if the code leaves in a different repository than the + rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based system for + storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. + + + + Passing `token=True` is required when you want to use a private model. + + + + Returns: + `typing.Type`: The class, dynamically imported from the module. + + Examples: + + ```python + # Download module `modeling.py` from huggingface.co and cache then extract the class `MyBertModel` from this + # module. + cls = get_class_from_dynamic_module("modeling.MyBertModel", "sgugger/my-bert-model") + + # Download module `modeling.py` from a given repo and cache then extract the class `MyBertModel` from this + # module. + cls = get_class_from_dynamic_module("sgugger/my-bert-model--modeling.MyBertModel", "sgugger/another-bert-model") + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. 
Please set only the argument `token`.") + token = use_auth_token + + # Catch the name of the repo if it's specified in `class_reference` + if "--" in class_reference: + repo_id, class_reference = class_reference.split("--") + else: + repo_id = pretrained_model_name_or_path + module_file, class_name = class_reference.split(".") + + if code_revision is None and pretrained_model_name_or_path == repo_id: + code_revision = revision + # And lastly we get the class inside our newly created module + final_module = get_cached_module_file( + repo_id, + module_file + ".py", + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=code_revision, + local_files_only=local_files_only, + repo_type=repo_type, + ) + return get_class_in_module(class_name, final_module) + + +def custom_object_save(obj: Any, folder: Union[str, os.PathLike], config: Optional[Dict] = None) -> List[str]: + """ + Save the modeling files corresponding to a custom model/configuration/tokenizer etc. in a given folder. Optionally + adds the proper fields in a config. + + Args: + obj (`Any`): The object for which to save the module files. + folder (`str` or `os.PathLike`): The folder where to save. + config (`PretrainedConfig` or dictionary, `optional`): + A config in which to register the auto_map corresponding to this custom object. + + Returns: + `List[str]`: The list of files saved. + """ + if obj.__module__ == "__main__": + logger.warning( + f"We can't save the code defining {obj} in {folder} as it's been defined in __main__. You should put " + "this code in a separate module so we can include it in the saved folder and make it easier to share via " + "the Hub." + ) + return + + def _set_auto_map_in_config(_config): + module_name = obj.__class__.__module__ + last_module = module_name.split(".")[-1] + full_name = f"{last_module}.{obj.__class__.__name__}" + # Special handling for tokenizers + if "Tokenizer" in full_name: + slow_tokenizer_class = None + fast_tokenizer_class = None + if obj.__class__.__name__.endswith("Fast"): + # Fast tokenizer: we have the fast tokenizer class and we may have the slow one has an attribute. + fast_tokenizer_class = f"{last_module}.{obj.__class__.__name__}" + if getattr(obj, "slow_tokenizer_class", None) is not None: + slow_tokenizer = getattr(obj, "slow_tokenizer_class") + slow_tok_module_name = slow_tokenizer.__module__ + last_slow_tok_module = slow_tok_module_name.split(".")[-1] + slow_tokenizer_class = f"{last_slow_tok_module}.{slow_tokenizer.__name__}" + else: + # Slow tokenizer: no way to have the fast class + slow_tokenizer_class = f"{last_module}.{obj.__class__.__name__}" + + full_name = (slow_tokenizer_class, fast_tokenizer_class) + + if isinstance(_config, dict): + auto_map = _config.get("auto_map", {}) + auto_map[obj._auto_class] = full_name + _config["auto_map"] = auto_map + elif getattr(_config, "auto_map", None) is not None: + _config.auto_map[obj._auto_class] = full_name + else: + _config.auto_map = {obj._auto_class: full_name} + + # Add object class to the config auto_map + if isinstance(config, (list, tuple)): + for cfg in config: + _set_auto_map_in_config(cfg) + elif config is not None: + _set_auto_map_in_config(config) + + result = [] + # Copy module file to the output folder. 
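# [Editor's note] Sketch, not part of the patched file, of how custom_object_save
# (being defined here) is typically invoked. The module and class names are
# hypothetical, and the class is assumed to have been registered via
# register_for_auto_class so that `_auto_class` is set.
from my_configuration import MyCustomConfig  # hypothetical user-defined module

cfg = MyCustomConfig()
saved_files = custom_object_save(cfg, "./my_model_directory", config=cfg)
# `saved_files` lists my_configuration.py plus every file it pulls in through a
# relative import; cfg.auto_map now maps the registered auto class to
# "my_configuration.MyCustomConfig".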
+ object_file = sys.modules[obj.__module__].__file__ + dest_file = Path(folder) / (Path(object_file).name) + shutil.copy(object_file, dest_file) + result.append(dest_file) + + # Gather all relative imports recursively and make sure they are copied as well. + for needed_file in get_relative_import_files(object_file): + dest_file = Path(folder) / (Path(needed_file).name) + shutil.copy(needed_file, dest_file) + result.append(dest_file) + + return result + + +def _raise_timeout_error(signum, frame): + raise ValueError( + "Loading this model requires you to execute custom code contained in the model repository on your local " + "machine. Please set the option `trust_remote_code=True` to permit loading of this model." + ) + + +TIME_OUT_REMOTE_CODE = 15 + + +def resolve_trust_remote_code(trust_remote_code, model_name, has_local_code, has_remote_code): + if trust_remote_code is None: + if has_local_code: + trust_remote_code = False + elif has_remote_code and TIME_OUT_REMOTE_CODE > 0: + prev_sig_handler = None + try: + prev_sig_handler = signal.signal(signal.SIGALRM, _raise_timeout_error) + signal.alarm(TIME_OUT_REMOTE_CODE) + while trust_remote_code is None: + answer = input( + f"The repository for {model_name} contains custom code which must be executed to correctly " + f"load the model. You can inspect the repository content at https://hf.co/{model_name}.\n" + f"You can avoid this prompt in future by passing the argument `trust_remote_code=True`.\n\n" + f"Do you wish to run the custom code? [y/N] " + ) + if answer.lower() in ["yes", "y", "1"]: + trust_remote_code = True + elif answer.lower() in ["no", "n", "0", ""]: + trust_remote_code = False + signal.alarm(0) + except Exception: + # OS which does not support signal.SIGALRM + raise ValueError( + f"The repository for {model_name} contains custom code which must be executed to correctly " + f"load the model. You can inspect the repository content at https://hf.co/{model_name}.\n" + f"Please pass the argument `trust_remote_code=True` to allow custom code to be run." + ) + finally: + if prev_sig_handler is not None: + signal.signal(signal.SIGALRM, prev_sig_handler) + signal.alarm(0) + elif has_remote_code: + # For the CI which puts the timeout at 0 + _raise_timeout_error(None, None) + + if has_remote_code and not has_local_code and not trust_remote_code: + raise ValueError( + f"Loading {model_name} requires you to execute the configuration file in that" + " repo on your local machine. Make sure you have read the code there to avoid malicious use, then" + " set the option `trust_remote_code=True` to remove this error." + ) + + return trust_remote_code diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/hf_argparser.py b/llmeval-env/lib/python3.10/site-packages/transformers/hf_argparser.py new file mode 100644 index 0000000000000000000000000000000000000000..045bf798050e93969eef60b71c944545251f7428 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/hf_argparser.py @@ -0,0 +1,424 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
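# [Editor's note] Sketch, not part of the patched file, of the decision table
# implemented by resolve_trust_remote_code above. The model name is hypothetical.
from transformers.dynamic_module_utils import resolve_trust_remote_code

# Local code exists and no explicit choice was made: remote code is not trusted.
assert resolve_trust_remote_code(None, "my-org/my-model",
                                 has_local_code=True, has_remote_code=False) is False
# An explicit user decision is passed through unchanged.
assert resolve_trust_remote_code(True, "my-org/my-model",
                                 has_local_code=False, has_remote_code=True) is True
# With only remote code available and trust_remote_code explicitly False,
# the function raises a ValueError instead of returning.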
+# See the License for the specific language governing permissions and +# limitations under the License. + +import dataclasses +import json +import os +import sys +import types +from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError +from copy import copy +from enum import Enum +from inspect import isclass +from pathlib import Path +from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints + +import yaml + + +DataClass = NewType("DataClass", Any) +DataClassType = NewType("DataClassType", Any) + + +# From https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse +def string_to_bool(v): + if isinstance(v, bool): + return v + if v.lower() in ("yes", "true", "t", "y", "1"): + return True + elif v.lower() in ("no", "false", "f", "n", "0"): + return False + else: + raise ArgumentTypeError( + f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." + ) + + +def make_choice_type_function(choices: list) -> Callable[[str], Any]: + """ + Creates a mapping function from each choices string representation to the actual value. Used to support multiple + value types for a single argument. + + Args: + choices (list): List of choices. + + Returns: + Callable[[str], Any]: Mapping function from string representation to actual value for each choice. + """ + str_to_choice = {str(choice): choice for choice in choices} + return lambda arg: str_to_choice.get(arg, arg) + + +def HfArg( + *, + aliases: Union[str, List[str]] = None, + help: str = None, + default: Any = dataclasses.MISSING, + default_factory: Callable[[], Any] = dataclasses.MISSING, + metadata: dict = None, + **kwargs, +) -> dataclasses.Field: + """Argument helper enabling a concise syntax to create dataclass fields for parsing with `HfArgumentParser`. + + Example comparing the use of `HfArg` and `dataclasses.field`: + ``` + @dataclass + class Args: + regular_arg: str = dataclasses.field(default="Huggingface", metadata={"aliases": ["--example", "-e"], "help": "This syntax could be better!"}) + hf_arg: str = HfArg(default="Huggingface", aliases=["--example", "-e"], help="What a nice syntax!") + ``` + + Args: + aliases (Union[str, List[str]], optional): + Single string or list of strings of aliases to pass on to argparse, e.g. `aliases=["--example", "-e"]`. + Defaults to None. + help (str, optional): Help string to pass on to argparse that can be displayed with --help. Defaults to None. + default (Any, optional): + Default value for the argument. If not default or default_factory is specified, the argument is required. + Defaults to dataclasses.MISSING. + default_factory (Callable[[], Any], optional): + The default_factory is a 0-argument function called to initialize a field's value. It is useful to provide + default values for mutable types, e.g. lists: `default_factory=list`. Mutually exclusive with `default=`. + Defaults to dataclasses.MISSING. + metadata (dict, optional): Further metadata to pass on to `dataclasses.field`. Defaults to None. + + Returns: + Field: A `dataclasses.Field` with the desired properties. 
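# [Editor's note] Minimal sketch, not part of the patched file, showing HfArg
# fields being consumed by HfArgumentParser (defined below). The dataclass,
# field names, and CLI values are hypothetical.
from dataclasses import dataclass
from transformers.hf_argparser import HfArg, HfArgumentParser

@dataclass
class TrainingConfig:
    learning_rate: float = HfArg(default=3e-4, aliases=["-lr"], help="Optimizer learning rate.")
    output_dir: str = HfArg(default="out", help="Where checkpoints are written.")

parser = HfArgumentParser(TrainingConfig)
(cfg,) = parser.parse_args_into_dataclasses(["-lr", "1e-4"])
assert cfg.learning_rate == 1e-4 and cfg.output_dir == "out"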
+ """ + if metadata is None: + # Important, don't use as default param in function signature because dict is mutable and shared across function calls + metadata = {} + if aliases is not None: + metadata["aliases"] = aliases + if help is not None: + metadata["help"] = help + + return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs) + + +class HfArgumentParser(ArgumentParser): + """ + This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments. + + The class is designed to play well with the native argparse. In particular, you can add more (non-dataclass backed) + arguments to the parser after initialization and you'll get the output back after parsing as an additional + namespace. Optional: To create sub argument groups use the `_argument_group_name` attribute in the dataclass. + """ + + dataclass_types: Iterable[DataClassType] + + def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs): + """ + Args: + dataclass_types: + Dataclass type, or list of dataclass types for which we will "fill" instances with the parsed args. + kwargs (`Dict[str, Any]`, *optional*): + Passed to `argparse.ArgumentParser()` in the regular way. + """ + # To make the default appear when using --help + if "formatter_class" not in kwargs: + kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter + super().__init__(**kwargs) + if dataclasses.is_dataclass(dataclass_types): + dataclass_types = [dataclass_types] + self.dataclass_types = list(dataclass_types) + for dtype in self.dataclass_types: + self._add_dataclass_arguments(dtype) + + @staticmethod + def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field): + field_name = f"--{field.name}" + kwargs = field.metadata.copy() + # field.metadata is not used at all by Data Classes, + # it is provided as a third-party extension mechanism. + if isinstance(field.type, str): + raise RuntimeError( + "Unresolved type detected, which should have been done with the help of " + "`typing.get_type_hints` method by default" + ) + + aliases = kwargs.pop("aliases", []) + if isinstance(aliases, str): + aliases = [aliases] + + origin_type = getattr(field.type, "__origin__", field.type) + if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)): + if str not in field.type.__args__ and ( + len(field.type.__args__) != 2 or type(None) not in field.type.__args__ + ): + raise ValueError( + "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because" + " the argument parser only supports one type per argument." + f" Problem encountered in field '{field.name}'." 
+ ) + if type(None) not in field.type.__args__: + # filter `str` in Union + field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] + origin_type = getattr(field.type, "__origin__", field.type) + elif bool not in field.type.__args__: + # filter `NoneType` in Union (except for `Union[bool, NoneType]`) + field.type = ( + field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1] + ) + origin_type = getattr(field.type, "__origin__", field.type) + + # A variable to store kwargs for a boolean field, if needed + # so that we can init a `no_*` complement argument (see below) + bool_kwargs = {} + if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)): + if origin_type is Literal: + kwargs["choices"] = field.type.__args__ + else: + kwargs["choices"] = [x.value for x in field.type] + + kwargs["type"] = make_choice_type_function(kwargs["choices"]) + + if field.default is not dataclasses.MISSING: + kwargs["default"] = field.default + else: + kwargs["required"] = True + elif field.type is bool or field.type == Optional[bool]: + # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. + # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument + bool_kwargs = copy(kwargs) + + # Hack because type=bool in argparse does not behave as we want. + kwargs["type"] = string_to_bool + if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): + # Default value is False if we have no default when of type bool. + default = False if field.default is dataclasses.MISSING else field.default + # This is the value that will get picked if we don't include --field_name in any way + kwargs["default"] = default + # This tells argparse we accept 0 or 1 value after --field_name + kwargs["nargs"] = "?" + # This is the value that will get picked if we do --field_name (without value) + kwargs["const"] = True + elif isclass(origin_type) and issubclass(origin_type, list): + kwargs["type"] = field.type.__args__[0] + kwargs["nargs"] = "+" + if field.default_factory is not dataclasses.MISSING: + kwargs["default"] = field.default_factory() + elif field.default is dataclasses.MISSING: + kwargs["required"] = True + else: + kwargs["type"] = field.type + if field.default is not dataclasses.MISSING: + kwargs["default"] = field.default + elif field.default_factory is not dataclasses.MISSING: + kwargs["default"] = field.default_factory() + else: + kwargs["required"] = True + parser.add_argument(field_name, *aliases, **kwargs) + + # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. + # Order is important for arguments with the same destination! + # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down + # here and we do not need those changes/additional keys. + if field.default is True and (field.type is bool or field.type == Optional[bool]): + bool_kwargs["default"] = False + parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs) + + def _add_dataclass_arguments(self, dtype: DataClassType): + if hasattr(dtype, "_argument_group_name"): + parser = self.add_argument_group(dtype._argument_group_name) + else: + parser = self + + try: + type_hints: Dict[str, type] = get_type_hints(dtype) + except NameError: + raise RuntimeError( + f"Type resolution failed for {dtype}. 
Try declaring the class in global scope or " + "removing line of `from __future__ import annotations` which opts in Postponed " + "Evaluation of Annotations (PEP 563)" + ) + except TypeError as ex: + # Remove this block when we drop Python 3.9 support + if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex): + python_version = ".".join(map(str, sys.version_info[:3])) + raise RuntimeError( + f"Type resolution failed for {dtype} on Python {python_version}. Try removing " + "line of `from __future__ import annotations` which opts in union types as " + "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To " + "support Python versions that lower than 3.10, you need to use " + "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of " + "`X | None`." + ) from ex + raise + + for field in dataclasses.fields(dtype): + if not field.init: + continue + field.type = type_hints[field.name] + self._parse_dataclass_field(parser, field) + + def parse_args_into_dataclasses( + self, + args=None, + return_remaining_strings=False, + look_for_args_file=True, + args_filename=None, + args_file_flag=None, + ) -> Tuple[DataClass, ...]: + """ + Parse command-line args into instances of the specified dataclass types. + + This relies on argparse's `ArgumentParser.parse_known_args`. See the doc at: + docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args + + Args: + args: + List of strings to parse. The default is taken from sys.argv. (same as argparse.ArgumentParser) + return_remaining_strings: + If true, also return a list of remaining argument strings. + look_for_args_file: + If true, will look for a ".args" file with the same base name as the entry point script for this + process, and will append its potential content to the command line args. + args_filename: + If not None, will uses this file instead of the ".args" file specified in the previous argument. + args_file_flag: + If not None, will look for a file in the command-line args specified with this flag. The flag can be + specified multiple times and precedence is determined by the order (last one wins). + + Returns: + Tuple consisting of: + + - the dataclass instances in the same order as they were passed to the initializer.abspath + - if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser + after initialization. + - The potential list of remaining argument strings. 
(same as argparse.ArgumentParser.parse_known_args) + """ + + if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)): + args_files = [] + + if args_filename: + args_files.append(Path(args_filename)) + elif look_for_args_file and len(sys.argv): + args_files.append(Path(sys.argv[0]).with_suffix(".args")) + + # args files specified via command line flag should overwrite default args files so we add them last + if args_file_flag: + # Create special parser just to extract the args_file_flag values + args_file_parser = ArgumentParser() + args_file_parser.add_argument(args_file_flag, type=str, action="append") + + # Use only remaining args for further parsing (remove the args_file_flag) + cfg, args = args_file_parser.parse_known_args(args=args) + cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None) + + if cmd_args_file_paths: + args_files.extend([Path(p) for p in cmd_args_file_paths]) + + file_args = [] + for args_file in args_files: + if args_file.exists(): + file_args += args_file.read_text().split() + + # in case of duplicate arguments the last one has precedence + # args specified via the command line should overwrite args from files, so we add them last + args = file_args + args if args is not None else file_args + sys.argv[1:] + namespace, remaining_args = self.parse_known_args(args=args) + outputs = [] + for dtype in self.dataclass_types: + keys = {f.name for f in dataclasses.fields(dtype) if f.init} + inputs = {k: v for k, v in vars(namespace).items() if k in keys} + for k in keys: + delattr(namespace, k) + obj = dtype(**inputs) + outputs.append(obj) + if len(namespace.__dict__) > 0: + # additional namespace. + outputs.append(namespace) + if return_remaining_strings: + return (*outputs, remaining_args) + else: + if remaining_args: + raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}") + + return (*outputs,) + + def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]: + """ + Alternative helper method that does not use `argparse` at all, instead uses a dict and populating the dataclass + types. + + Args: + args (`dict`): + dict containing config values + allow_extra_keys (`bool`, *optional*, defaults to `False`): + Defaults to False. If False, will raise an exception if the dict contains keys that are not parsed. + + Returns: + Tuple consisting of: + + - the dataclass instances in the same order as they were passed to the initializer. + """ + unused_keys = set(args.keys()) + outputs = [] + for dtype in self.dataclass_types: + keys = {f.name for f in dataclasses.fields(dtype) if f.init} + inputs = {k: v for k, v in args.items() if k in keys} + unused_keys.difference_update(inputs.keys()) + obj = dtype(**inputs) + outputs.append(obj) + if not allow_extra_keys and unused_keys: + raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}") + return tuple(outputs) + + def parse_json_file( + self, json_file: Union[str, os.PathLike], allow_extra_keys: bool = False + ) -> Tuple[DataClass, ...]: + """ + Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the + dataclass types. + + Args: + json_file (`str` or `os.PathLike`): + File name of the json file to parse + allow_extra_keys (`bool`, *optional*, defaults to `False`): + Defaults to False. If False, will raise an exception if the json file contains keys that are not + parsed. 
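# [Editor's note] Sketch, not part of the patched file: parse_dict fills the
# dataclasses directly from a plain dict, bypassing argparse. Names and values
# are hypothetical.
from dataclasses import dataclass
from transformers.hf_argparser import HfArgumentParser

@dataclass
class RunConfig:
    seed: int = 42
    fp16: bool = False

parser = HfArgumentParser(RunConfig)
(run_cfg,) = parser.parse_dict({"seed": 7, "fp16": True})
assert run_cfg.seed == 7 and run_cfg.fp16 is True
# With allow_extra_keys=False (the default), unknown keys raise a ValueError:
# parser.parse_dict({"seed": 7, "typo_key": 1})  -> ValueError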
+ + Returns: + Tuple consisting of: + + - the dataclass instances in the same order as they were passed to the initializer. + """ + with open(Path(json_file), encoding="utf-8") as open_json_file: + data = json.loads(open_json_file.read()) + outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys) + return tuple(outputs) + + def parse_yaml_file( + self, yaml_file: Union[str, os.PathLike], allow_extra_keys: bool = False + ) -> Tuple[DataClass, ...]: + """ + Alternative helper method that does not use `argparse` at all, instead loading a yaml file and populating the + dataclass types. + + Args: + yaml_file (`str` or `os.PathLike`): + File name of the yaml file to parse + allow_extra_keys (`bool`, *optional*, defaults to `False`): + Defaults to False. If False, will raise an exception if the json file contains keys that are not + parsed. + + Returns: + Tuple consisting of: + + - the dataclass instances in the same order as they were passed to the initializer. + """ + outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys) + return tuple(outputs) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/hyperparameter_search.py b/llmeval-env/lib/python3.10/site-packages/transformers/hyperparameter_search.py new file mode 100644 index 0000000000000000000000000000000000000000..c14165165ca1f92fb28e27b718c8bd81e1bc3a93 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/hyperparameter_search.py @@ -0,0 +1,141 @@ +# coding=utf-8 +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .integrations import ( + is_optuna_available, + is_ray_tune_available, + is_sigopt_available, + is_wandb_available, + run_hp_search_optuna, + run_hp_search_ray, + run_hp_search_sigopt, + run_hp_search_wandb, +) +from .trainer_utils import ( + HPSearchBackend, + default_hp_space_optuna, + default_hp_space_ray, + default_hp_space_sigopt, + default_hp_space_wandb, +) +from .utils import logging + + +logger = logging.get_logger(__name__) + + +class HyperParamSearchBackendBase: + name: str + pip_package: str = None + + @staticmethod + def is_available(): + raise NotImplementedError + + def run(self, trainer, n_trials: int, direction: str, **kwargs): + raise NotImplementedError + + def default_hp_space(self, trial): + raise NotImplementedError + + def ensure_available(self): + if not self.is_available(): + raise RuntimeError( + f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." 
+ ) + + @classmethod + def pip_install(cls): + return f"`pip install {cls.pip_package or cls.name}`" + + +class OptunaBackend(HyperParamSearchBackendBase): + name = "optuna" + + @staticmethod + def is_available(): + return is_optuna_available() + + def run(self, trainer, n_trials: int, direction: str, **kwargs): + return run_hp_search_optuna(trainer, n_trials, direction, **kwargs) + + def default_hp_space(self, trial): + return default_hp_space_optuna(trial) + + +class RayTuneBackend(HyperParamSearchBackendBase): + name = "ray" + pip_package = "'ray[tune]'" + + @staticmethod + def is_available(): + return is_ray_tune_available() + + def run(self, trainer, n_trials: int, direction: str, **kwargs): + return run_hp_search_ray(trainer, n_trials, direction, **kwargs) + + def default_hp_space(self, trial): + return default_hp_space_ray(trial) + + +class SigOptBackend(HyperParamSearchBackendBase): + name = "sigopt" + + @staticmethod + def is_available(): + return is_sigopt_available() + + def run(self, trainer, n_trials: int, direction: str, **kwargs): + return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs) + + def default_hp_space(self, trial): + return default_hp_space_sigopt(trial) + + +class WandbBackend(HyperParamSearchBackendBase): + name = "wandb" + + @staticmethod + def is_available(): + return is_wandb_available() + + def run(self, trainer, n_trials: int, direction: str, **kwargs): + return run_hp_search_wandb(trainer, n_trials, direction, **kwargs) + + def default_hp_space(self, trial): + return default_hp_space_wandb(trial) + + +ALL_HYPERPARAMETER_SEARCH_BACKENDS = { + HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] +} + + +def default_hp_search_backend() -> str: + available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] + if len(available_backends) > 0: + name = available_backends[0].name + if len(available_backends) > 1: + logger.info( + f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default." + ) + return name + raise RuntimeError( + "No hyperparameter search backend available.\n" + + "\n".join( + f" - To install {backend.name} run {backend.pip_install()}" + for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() + ) + ) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/image_processing_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/image_processing_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..70f1a339de706afe66c2ddebbb755571b51dbe76 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/image_processing_utils.py @@ -0,0 +1,793 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
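# [Editor's note] Sketch, not part of the patched file, of how the backend
# registry above is queried. Which backend is returned depends on what is
# installed in the environment.
from transformers.hyperparameter_search import (
    ALL_HYPERPARAMETER_SEARCH_BACKENDS,
    default_hp_search_backend,
)
from transformers.trainer_utils import HPSearchBackend

name = default_hp_search_backend()  # e.g. "optuna"; raises RuntimeError if none installed
backend_cls = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(name)]
backend_cls().ensure_available()    # no-op here, since is_available() was already True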
+ +import copy +import json +import os +import warnings +from io import BytesIO +from typing import Any, Dict, Iterable, List, Optional, Tuple, Union + +import numpy as np +import requests + +from .dynamic_module_utils import custom_object_save +from .feature_extraction_utils import BatchFeature as BaseBatchFeature +from .image_transforms import center_crop, normalize, rescale +from .image_utils import ChannelDimension +from .utils import ( + IMAGE_PROCESSOR_NAME, + PushToHubMixin, + add_model_info_to_auto_map, + cached_file, + copy_func, + download_url, + is_offline_mode, + is_remote_url, + is_vision_available, + logging, +) + + +if is_vision_available(): + from PIL import Image + +logger = logging.get_logger(__name__) + + +# TODO: Move BatchFeature to be imported by both image_processing_utils and image_processing_utils +# We override the class string here, but logic is the same. +class BatchFeature(BaseBatchFeature): + r""" + Holds the output of the image processor specific `__call__` methods. + + This class is derived from a python dictionary and can be used as a dictionary. + + Args: + data (`dict`): + Dictionary of lists/arrays/tensors returned by the __call__ method ('pixel_values', etc.). + tensor_type (`Union[None, str, TensorType]`, *optional*): + You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at + initialization. + """ + + +# TODO: (Amy) - factor out the common parts of this and the feature extractor +class ImageProcessingMixin(PushToHubMixin): + """ + This is an image processor mixin used to provide saving/loading functionality for sequential and image feature + extractors. + """ + + _auto_class = None + + def __init__(self, **kwargs): + """Set elements of `kwargs` as attributes.""" + # This key was saved while we still used `XXXFeatureExtractor` for image processing. Now we use + # `XXXImageProcessor`, this attribute and its value are misleading. + kwargs.pop("feature_extractor_type", None) + # Pop "processor_class" as it should be saved as private attribute + self._processor_class = kwargs.pop("processor_class", None) + # Additional attributes without default values + for key, value in kwargs.items(): + try: + setattr(self, key, value) + except AttributeError as err: + logger.error(f"Can't set {key} with value {value} for {self}") + raise err + + def _set_processor_class(self, processor_class: str): + """Sets processor class as an attribute.""" + self._processor_class = processor_class + + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path: Union[str, os.PathLike], + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + local_files_only: bool = False, + token: Optional[Union[str, bool]] = None, + revision: str = "main", + **kwargs, + ): + r""" + Instantiate a type of [`~image_processing_utils.ImageProcessingMixin`] from an image processor. + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained image_processor hosted inside a model repo on + huggingface.co. + - a path to a *directory* containing a image processor file saved using the + [`~image_processing_utils.ImageProcessingMixin.save_pretrained`] method, e.g., + `./my_model_directory/`. + - a path or url to a saved image processor JSON *file*, e.g., + `./my_model_directory/preprocessor_config.json`. 
+ cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model image processor should be cached if the + standard cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the image processor files and override the cached versions if + they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file + exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use + the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + + + + + To test a pull request you made on the Hub, you can pass `revision="refs/pr/". + + + + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + If `False`, then this function returns just the final image processor object. If `True`, then this + functions returns a `Tuple(image_processor, unused_kwargs)` where *unused_kwargs* is a dictionary + consisting of the key/value pairs whose keys are not image processor attributes: i.e., the part of + `kwargs` which has not been used to update `image_processor` and is otherwise ignored. + subfolder (`str`, *optional*, defaults to `""`): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can + specify the folder name here. + kwargs (`Dict[str, Any]`, *optional*): + The values in kwargs of any keys which are image processor attributes will be used to override the + loaded values. Behavior concerning key/value pairs whose keys are *not* image processor attributes is + controlled by the `return_unused_kwargs` keyword parameter. + + Returns: + A image processor of type [`~image_processing_utils.ImageProcessingMixin`]. + + Examples: + + ```python + # We can't instantiate directly the base class *ImageProcessingMixin* so let's show the examples on a + # derived class: *CLIPImageProcessor* + image_processor = CLIPImageProcessor.from_pretrained( + "openai/clip-vit-base-patch32" + ) # Download image_processing_config from huggingface.co and cache. + image_processor = CLIPImageProcessor.from_pretrained( + "./test/saved_model/" + ) # E.g. 
image processor (or model) was saved using *save_pretrained('./test/saved_model/')* + image_processor = CLIPImageProcessor.from_pretrained("./test/saved_model/preprocessor_config.json") + image_processor = CLIPImageProcessor.from_pretrained( + "openai/clip-vit-base-patch32", do_normalize=False, foo=False + ) + assert image_processor.do_normalize is False + image_processor, unused_kwargs = CLIPImageProcessor.from_pretrained( + "openai/clip-vit-base-patch32", do_normalize=False, foo=False, return_unused_kwargs=True + ) + assert image_processor.do_normalize is False + assert unused_kwargs == {"foo": False} + ```""" + kwargs["cache_dir"] = cache_dir + kwargs["force_download"] = force_download + kwargs["local_files_only"] = local_files_only + kwargs["revision"] = revision + + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + token = use_auth_token + + if token is not None: + kwargs["token"] = token + + image_processor_dict, kwargs = cls.get_image_processor_dict(pretrained_model_name_or_path, **kwargs) + + return cls.from_dict(image_processor_dict, **kwargs) + + def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): + """ + Save an image processor object to the directory `save_directory`, so that it can be re-loaded using the + [`~image_processing_utils.ImageProcessingMixin.from_pretrained`] class method. + + Args: + save_directory (`str` or `os.PathLike`): + Directory where the image processor JSON file will be saved (will be created if it does not exist). + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. + """ + use_auth_token = kwargs.pop("use_auth_token", None) + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + kwargs["token"] = use_auth_token + + if os.path.isfile(save_directory): + raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") + + os.makedirs(save_directory, exist_ok=True) + + if push_to_hub: + commit_message = kwargs.pop("commit_message", None) + repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) + repo_id = self._create_repo(repo_id, **kwargs) + files_timestamps = self._get_files_timestamps(save_directory) + + # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be + # loaded from the Hub. 
+ if self._auto_class is not None: + custom_object_save(self, save_directory, config=self) + + # If we save using the predefined names, we can load using `from_pretrained` + output_image_processor_file = os.path.join(save_directory, IMAGE_PROCESSOR_NAME) + + self.to_json_file(output_image_processor_file) + logger.info(f"Image processor saved in {output_image_processor_file}") + + if push_to_hub: + self._upload_modified_files( + save_directory, + repo_id, + files_timestamps, + commit_message=commit_message, + token=kwargs.get("token"), + ) + + return [output_image_processor_file] + + @classmethod + def get_image_processor_dict( + cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs + ) -> Tuple[Dict[str, Any], Dict[str, Any]]: + """ + From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a + image processor of type [`~image_processor_utils.ImageProcessingMixin`] using `from_dict`. + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`): + The identifier of the pre-trained checkpoint from which we want the dictionary of parameters. + subfolder (`str`, *optional*, defaults to `""`): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can + specify the folder name here. + + Returns: + `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the image processor object. + """ + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + use_auth_token = kwargs.pop("use_auth_token", None) + local_files_only = kwargs.pop("local_files_only", False) + revision = kwargs.pop("revision", None) + subfolder = kwargs.pop("subfolder", "") + + from_pipeline = kwargs.pop("_from_pipeline", None) + from_auto_class = kwargs.pop("_from_auto", False) + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
+ ) + token = use_auth_token + + user_agent = {"file_type": "image processor", "from_auto_class": from_auto_class} + if from_pipeline is not None: + user_agent["using_pipeline"] = from_pipeline + + if is_offline_mode() and not local_files_only: + logger.info("Offline mode: forcing local_files_only=True") + local_files_only = True + + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + is_local = os.path.isdir(pretrained_model_name_or_path) + if os.path.isdir(pretrained_model_name_or_path): + image_processor_file = os.path.join(pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME) + if os.path.isfile(pretrained_model_name_or_path): + resolved_image_processor_file = pretrained_model_name_or_path + is_local = True + elif is_remote_url(pretrained_model_name_or_path): + image_processor_file = pretrained_model_name_or_path + resolved_image_processor_file = download_url(pretrained_model_name_or_path) + else: + image_processor_file = IMAGE_PROCESSOR_NAME + try: + # Load from local folder or from cache or download from model Hub and cache + resolved_image_processor_file = cached_file( + pretrained_model_name_or_path, + image_processor_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + revision=revision, + subfolder=subfolder, + ) + except EnvironmentError: + # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to + # the original exception. + raise + except Exception: + # For any other exception, we throw a generic error. + raise EnvironmentError( + f"Can't load image processor for '{pretrained_model_name_or_path}'. If you were trying to load" + " it from 'https://huggingface.co/models', make sure you don't have a local directory with the" + f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" + f" directory containing a {IMAGE_PROCESSOR_NAME} file" + ) + + try: + # Load image_processor dict + with open(resolved_image_processor_file, "r", encoding="utf-8") as reader: + text = reader.read() + image_processor_dict = json.loads(text) + + except json.JSONDecodeError: + raise EnvironmentError( + f"It looks like the config file at '{resolved_image_processor_file}' is not a valid JSON file." + ) + + if is_local: + logger.info(f"loading configuration file {resolved_image_processor_file}") + else: + logger.info( + f"loading configuration file {image_processor_file} from cache at {resolved_image_processor_file}" + ) + + if "auto_map" in image_processor_dict and not is_local: + image_processor_dict["auto_map"] = add_model_info_to_auto_map( + image_processor_dict["auto_map"], pretrained_model_name_or_path + ) + + return image_processor_dict, kwargs + + @classmethod + def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): + """ + Instantiates a type of [`~image_processing_utils.ImageProcessingMixin`] from a Python dictionary of parameters. + + Args: + image_processor_dict (`Dict[str, Any]`): + Dictionary that will be used to instantiate the image processor object. Such a dictionary can be + retrieved from a pretrained checkpoint by leveraging the + [`~image_processing_utils.ImageProcessingMixin.to_dict`] method. + kwargs (`Dict[str, Any]`): + Additional parameters from which to initialize the image processor object. + + Returns: + [`~image_processing_utils.ImageProcessingMixin`]: The image processor object instantiated from those + parameters. 
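# [Editor's note] Sketch, not part of the patched file. CLIPImageProcessor is
# used only as a concrete ImageProcessingMixin subclass; any image processor
# class round-trips through to_dict/from_dict the same way.
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
as_dict = processor.to_dict()  # JSON-serializable parameters
rebuilt, unused = CLIPImageProcessor.from_dict(
    as_dict, do_normalize=False, return_unused_kwargs=True
)
assert rebuilt.do_normalize is False and unused == {}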
+ """ + image_processor_dict = image_processor_dict.copy() + return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) + + # The `size` parameter is a dict and was previously an int or tuple in feature extractors. + # We set `size` here directly to the `image_processor_dict` so that it is converted to the appropriate + # dict within the image processor and isn't overwritten if `size` is passed in as a kwarg. + if "size" in kwargs and "size" in image_processor_dict: + image_processor_dict["size"] = kwargs.pop("size") + if "crop_size" in kwargs and "crop_size" in image_processor_dict: + image_processor_dict["crop_size"] = kwargs.pop("crop_size") + + image_processor = cls(**image_processor_dict) + + # Update image_processor with kwargs if needed + to_remove = [] + for key, value in kwargs.items(): + if hasattr(image_processor, key): + setattr(image_processor, key, value) + to_remove.append(key) + for key in to_remove: + kwargs.pop(key, None) + + logger.info(f"Image processor {image_processor}") + if return_unused_kwargs: + return image_processor, kwargs + else: + return image_processor + + def to_dict(self) -> Dict[str, Any]: + """ + Serializes this instance to a Python dictionary. + + Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this image processor instance. + """ + output = copy.deepcopy(self.__dict__) + output["image_processor_type"] = self.__class__.__name__ + + return output + + @classmethod + def from_json_file(cls, json_file: Union[str, os.PathLike]): + """ + Instantiates a image processor of type [`~image_processing_utils.ImageProcessingMixin`] from the path to a JSON + file of parameters. + + Args: + json_file (`str` or `os.PathLike`): + Path to the JSON file containing the parameters. + + Returns: + A image processor of type [`~image_processing_utils.ImageProcessingMixin`]: The image_processor object + instantiated from that JSON file. + """ + with open(json_file, "r", encoding="utf-8") as reader: + text = reader.read() + image_processor_dict = json.loads(text) + return cls(**image_processor_dict) + + def to_json_string(self) -> str: + """ + Serializes this instance to a JSON string. + + Returns: + `str`: String containing all the attributes that make up this feature_extractor instance in JSON format. + """ + dictionary = self.to_dict() + + for key, value in dictionary.items(): + if isinstance(value, np.ndarray): + dictionary[key] = value.tolist() + + # make sure private name "_processor_class" is correctly + # saved as "processor_class" + _processor_class = dictionary.pop("_processor_class", None) + if _processor_class is not None: + dictionary["processor_class"] = _processor_class + + return json.dumps(dictionary, indent=2, sort_keys=True) + "\n" + + def to_json_file(self, json_file_path: Union[str, os.PathLike]): + """ + Save this instance to a JSON file. + + Args: + json_file_path (`str` or `os.PathLike`): + Path to the JSON file in which this image_processor instance's parameters will be saved. + """ + with open(json_file_path, "w", encoding="utf-8") as writer: + writer.write(self.to_json_string()) + + def __repr__(self): + return f"{self.__class__.__name__} {self.to_json_string()}" + + @classmethod + def register_for_auto_class(cls, auto_class="AutoImageProcessor"): + """ + Register this class with a given auto class. This should only be used for custom image processors as the ones + in the library are already mapped with `AutoImageProcessor `. + + + + This API is experimental and may have some slight breaking changes in the next releases. 
+ + + + Args: + auto_class (`str` or `type`, *optional*, defaults to `"AutoImageProcessor "`): + The auto class to register this new image processor with. + """ + if not isinstance(auto_class, str): + auto_class = auto_class.__name__ + + import transformers.models.auto as auto_module + + if not hasattr(auto_module, auto_class): + raise ValueError(f"{auto_class} is not a valid auto class.") + + cls._auto_class = auto_class + + def fetch_images(self, image_url_or_urls: Union[str, List[str]]): + """ + Convert a single or a list of urls into the corresponding `PIL.Image` objects. + + If a single url is passed, the return value will be a single object. If a list is passed a list of objects is + returned. + """ + headers = { + "User-Agent": ( + "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0" + " Safari/537.36" + ) + } + if isinstance(image_url_or_urls, list): + return [self.fetch_images(x) for x in image_url_or_urls] + elif isinstance(image_url_or_urls, str): + response = requests.get(image_url_or_urls, stream=True, headers=headers) + response.raise_for_status() + return Image.open(BytesIO(response.content)) + else: + raise ValueError(f"only a single or a list of entries is supported but got type={type(image_url_or_urls)}") + + +class BaseImageProcessor(ImageProcessingMixin): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def __call__(self, images, **kwargs) -> BatchFeature: + """Preprocess an image or a batch of images.""" + return self.preprocess(images, **kwargs) + + def preprocess(self, images, **kwargs) -> BatchFeature: + raise NotImplementedError("Each image processor must implement its own preprocess method") + + def rescale( + self, + image: np.ndarray, + scale: float, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Rescale an image by a scale factor. image = image * scale. + + Args: + image (`np.ndarray`): + Image to rescale. + scale (`float`): + The scaling factor to rescale pixel values by. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + + Returns: + `np.ndarray`: The rescaled image. + """ + return rescale(image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs) + + def normalize( + self, + image: np.ndarray, + mean: Union[float, Iterable[float]], + std: Union[float, Iterable[float]], + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Normalize an image. image = (image - image_mean) / image_std. + + Args: + image (`np.ndarray`): + Image to normalize. 
+ mean (`float` or `Iterable[float]`): + Image mean to use for normalization. + std (`float` or `Iterable[float]`): + Image standard deviation to use for normalization. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + + Returns: + `np.ndarray`: The normalized image. + """ + return normalize( + image, mean=mean, std=std, data_format=data_format, input_data_format=input_data_format, **kwargs + ) + + def center_crop( + self, + image: np.ndarray, + size: Dict[str, int], + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along + any edge, the image is padded with 0's and then center cropped. + + Args: + image (`np.ndarray`): + Image to center crop. + size (`Dict[str, int]`): + Size of the output image. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + """ + size = get_size_dict(size) + if "height" not in size or "width" not in size: + raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}") + return center_crop( + image, + size=(size["height"], size["width"]), + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + +VALID_SIZE_DICT_KEYS = ({"height", "width"}, {"shortest_edge"}, {"shortest_edge", "longest_edge"}, {"longest_edge"}) + + +def is_valid_size_dict(size_dict): + if not isinstance(size_dict, dict): + return False + + size_dict_keys = set(size_dict.keys()) + for allowed_keys in VALID_SIZE_DICT_KEYS: + if size_dict_keys == allowed_keys: + return True + return False + + +def convert_to_size_dict( + size, max_size: Optional[int] = None, default_to_square: bool = True, height_width_order: bool = True +): + # By default, if size is an int we assume it represents a tuple of (size, size). 
+ if isinstance(size, int) and default_to_square: + if max_size is not None: + raise ValueError("Cannot specify both size as an int, with default_to_square=True and max_size") + return {"height": size, "width": size} + # In other configs, if size is an int and default_to_square is False, size represents the length of + # the shortest edge after resizing. + elif isinstance(size, int) and not default_to_square: + size_dict = {"shortest_edge": size} + if max_size is not None: + size_dict["longest_edge"] = max_size + return size_dict + # Otherwise, if size is a tuple it's either (height, width) or (width, height) + elif isinstance(size, (tuple, list)) and height_width_order: + return {"height": size[0], "width": size[1]} + elif isinstance(size, (tuple, list)) and not height_width_order: + return {"height": size[1], "width": size[0]} + elif size is None and max_size is not None: + if default_to_square: + raise ValueError("Cannot specify both default_to_square=True and max_size") + return {"longest_edge": max_size} + + raise ValueError(f"Could not convert size input to size dict: {size}") + + +def get_size_dict( + size: Union[int, Iterable[int], Dict[str, int]] = None, + max_size: Optional[int] = None, + height_width_order: bool = True, + default_to_square: bool = True, + param_name="size", +) -> dict: + """ + Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards + compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height, + width) or (width, height) format. + + - If `size` is tuple, it is converted to `{"height": size[0], "width": size[1]}` or `{"height": size[1], "width": + size[0]}` if `height_width_order` is `False`. + - If `size` is an int, and `default_to_square` is `True`, it is converted to `{"height": size, "width": size}`. + - If `size` is an int and `default_to_square` is False, it is converted to `{"shortest_edge": size}`. If `max_size` + is set, it is added to the dict as `{"longest_edge": max_size}`. + + Args: + size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*): + The `size` parameter to be cast into a size dictionary. + max_size (`Optional[int]`, *optional*): + The `max_size` parameter to be cast into a size dictionary. + height_width_order (`bool`, *optional*, defaults to `True`): + If `size` is a tuple, whether it's in (height, width) or (width, height) order. + default_to_square (`bool`, *optional*, defaults to `True`): + If `size` is an int, whether to default to a square image or not. + """ + if not isinstance(size, dict): + size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order) + logger.info( + f"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}." + f" Converted to {size_dict}.", + ) + else: + size_dict = size + + if not is_valid_size_dict(size_dict): + raise ValueError( + f"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}" + ) + return size_dict + + +def select_best_resolution(original_size: tuple, possible_resolutions: list) -> tuple: + """ + Selects the best resolution from a list of possible resolutions based on the original size. + + This is done by calculating the effective and wasted resolution for each possible resolution. + + The best fit resolution is the one that maximizes the effective resolution and minimizes the wasted resolution. 
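A small worked example of this selection rule (illustrative values; both candidates keep the full 224 * 224 effective resolution, so the one wasting fewer pixels wins):

```py
select_best_resolution((224, 224), [(672, 336), (336, 336)])
# -> (336, 336)
```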
+ + Args: + original_size (tuple): + The original size of the image in the format (height, width). + possible_resolutions (list): + A list of possible resolutions in the format [(height1, width1), (height2, width2), ...]. + + Returns: + tuple: The best fit resolution in the format (height, width). + """ + original_height, original_width = original_size + best_fit = None + max_effective_resolution = 0 + min_wasted_resolution = float("inf") + + for height, width in possible_resolutions: + scale = min(width / original_width, height / original_height) + downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale) + effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height) + wasted_resolution = (width * height) - effective_resolution + + if effective_resolution > max_effective_resolution or ( + effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution + ): + max_effective_resolution = effective_resolution + min_wasted_resolution = wasted_resolution + best_fit = (height, width) + + return best_fit + + +ImageProcessingMixin.push_to_hub = copy_func(ImageProcessingMixin.push_to_hub) +if ImageProcessingMixin.push_to_hub.__doc__ is not None: + ImageProcessingMixin.push_to_hub.__doc__ = ImageProcessingMixin.push_to_hub.__doc__.format( + object="image processor", object_class="AutoImageProcessor", object_files="image processor file" + ) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/image_transforms.py b/llmeval-env/lib/python3.10/site-packages/transformers/image_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..016fae4405e973549bd18ad1e8c0fe4591444b8f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/image_transforms.py @@ -0,0 +1,803 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from typing import Iterable, List, Optional, Tuple, Union + +import numpy as np + +from .image_utils import ( + ChannelDimension, + ImageInput, + get_channel_dimension_axis, + get_image_size, + infer_channel_dimension_format, +) +from .utils import ExplicitEnum, TensorType, is_jax_tensor, is_tf_tensor, is_torch_tensor +from .utils.import_utils import ( + is_flax_available, + is_tf_available, + is_torch_available, + is_vision_available, + requires_backends, +) + + +if is_vision_available(): + import PIL + + from .image_utils import PILImageResampling + +if is_torch_available(): + import torch + +if is_tf_available(): + import tensorflow as tf + +if is_flax_available(): + import jax.numpy as jnp + + +def to_channel_dimension_format( + image: np.ndarray, + channel_dim: Union[ChannelDimension, str], + input_channel_dim: Optional[Union[ChannelDimension, str]] = None, +) -> np.ndarray: + """ + Converts `image` to the channel dimension format specified by `channel_dim`. + + Args: + image (`numpy.ndarray`): + The image to have its channel dimension set. 
+ channel_dim (`ChannelDimension`): + The channel dimension format to use. + input_channel_dim (`ChannelDimension`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred from the input image. + + Returns: + `np.ndarray`: The image with the channel dimension set to `channel_dim`. + """ + if not isinstance(image, np.ndarray): + raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}") + + if input_channel_dim is None: + input_channel_dim = infer_channel_dimension_format(image) + + target_channel_dim = ChannelDimension(channel_dim) + if input_channel_dim == target_channel_dim: + return image + + if target_channel_dim == ChannelDimension.FIRST: + image = image.transpose((2, 0, 1)) + elif target_channel_dim == ChannelDimension.LAST: + image = image.transpose((1, 2, 0)) + else: + raise ValueError("Unsupported channel dimension format: {}".format(channel_dim)) + + return image + + +def rescale( + image: np.ndarray, + scale: float, + data_format: Optional[ChannelDimension] = None, + dtype: np.dtype = np.float32, + input_data_format: Optional[Union[str, ChannelDimension]] = None, +) -> np.ndarray: + """ + Rescales `image` by `scale`. + + Args: + image (`np.ndarray`): + The image to rescale. + scale (`float`): + The scale to use for rescaling the image. + data_format (`ChannelDimension`, *optional*): + The channel dimension format of the image. If not provided, it will be the same as the input image. + dtype (`np.dtype`, *optional*, defaults to `np.float32`): + The dtype of the output image. Defaults to `np.float32`. Used for backwards compatibility with feature + extractors. + input_data_format (`ChannelDimension`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred from the input image. + + Returns: + `np.ndarray`: The rescaled image. + """ + if not isinstance(image, np.ndarray): + raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}") + + rescaled_image = image * scale + if data_format is not None: + rescaled_image = to_channel_dimension_format(rescaled_image, data_format, input_data_format) + + rescaled_image = rescaled_image.astype(dtype) + + return rescaled_image + + +def _rescale_for_pil_conversion(image): + """ + Detects whether or not the image needs to be rescaled before being converted to a PIL image. + + The assumption is that if the image is of type `np.float` and all values are between 0 and 1, it needs to be + rescaled. + """ + if image.dtype == np.uint8: + do_rescale = False + elif np.allclose(image, image.astype(int)): + if np.all(0 <= image) and np.all(image <= 255): + do_rescale = False + else: + raise ValueError( + "The image to be converted to a PIL image contains values outside the range [0, 255], " + f"got [{image.min()}, {image.max()}] which cannot be converted to uint8." + ) + elif np.all(0 <= image) and np.all(image <= 1): + do_rescale = True + else: + raise ValueError( + "The image to be converted to a PIL image contains values outside the range [0, 1], " + f"got [{image.min()}, {image.max()}] which cannot be converted to uint8." + ) + return do_rescale + + +def to_pil_image( + image: Union[np.ndarray, "PIL.Image.Image", "torch.Tensor", "tf.Tensor", "jnp.ndarray"], + do_rescale: Optional[bool] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, +) -> "PIL.Image.Image": + """ + Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if + needed. 
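A minimal sketch of the two helpers defined above, `to_channel_dimension_format` and `rescale`, assuming a channels-first `uint8` array:

```py
img = np.zeros((3, 224, 224), dtype=np.uint8)                    # channels-first

to_channel_dimension_format(img, ChannelDimension.LAST).shape    # (224, 224, 3)
rescale(img, scale=1 / 255).dtype                                 # float32 by default
```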
+ + Args: + image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor` or `tf.Tensor`): + The image to convert to the `PIL.Image` format. + do_rescale (`bool`, *optional*): + Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default + to `True` if the image type is a floating type and casting to `int` would result in a loss of precision, + and `False` otherwise. + input_data_format (`ChannelDimension`, *optional*): + The channel dimension format of the input image. If unset, will use the inferred format from the input. + + Returns: + `PIL.Image.Image`: The converted image. + """ + requires_backends(to_pil_image, ["vision"]) + + if isinstance(image, PIL.Image.Image): + return image + + # Convert all tensors to numpy arrays before converting to PIL image + if is_torch_tensor(image) or is_tf_tensor(image): + image = image.numpy() + elif is_jax_tensor(image): + image = np.array(image) + elif not isinstance(image, np.ndarray): + raise ValueError("Input image type not supported: {}".format(type(image))) + + # If the channel has been moved to first dim, we put it back at the end. + image = to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) + + # If there is a single channel, we squeeze it, as otherwise PIL can't handle it. + image = np.squeeze(image, axis=-1) if image.shape[-1] == 1 else image + + # PIL.Image can only store uint8 values so we rescale the image to be between 0 and 255 if needed. + do_rescale = _rescale_for_pil_conversion(image) if do_rescale is None else do_rescale + + if do_rescale: + image = rescale(image, 255) + + image = image.astype(np.uint8) + return PIL.Image.fromarray(image) + + +# Logic adapted from torchvision resizing logic: https://github.com/pytorch/vision/blob/511924c1ced4ce0461197e5caa64ce5b9e558aab/torchvision/transforms/functional.py#L366 +def get_resize_output_image_size( + input_image: np.ndarray, + size: Union[int, Tuple[int, int], List[int], Tuple[int]], + default_to_square: bool = True, + max_size: Optional[int] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, +) -> tuple: + """ + Find the target (height, width) dimension of the output image after resizing given the input image and the desired + size. + + Args: + input_image (`np.ndarray`): + The image to resize. + size (`int` or `Tuple[int, int]` or List[int] or Tuple[int]): + The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to + this. + + If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If + `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this + number. i.e, if height > width, then image will be rescaled to (size * height / width, size). + default_to_square (`bool`, *optional*, defaults to `True`): + How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square + (`size`,`size`). If set to `False`, will replicate + [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize) + with support for resizing only the smallest edge and providing an optional `max_size`. + max_size (`int`, *optional*): + The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater + than `max_size` after being resized according to `size`, then the image is resized again so that the longer + edge is equal to `max_size`. 
As a result, `size` might be overruled, i.e the smaller edge may be shorter + than `size`. Only used if `default_to_square` is `False`. + input_data_format (`ChannelDimension`, *optional*): + The channel dimension format of the input image. If unset, will use the inferred format from the input. + + Returns: + `tuple`: The target (height, width) dimension of the output image after resizing. + """ + if isinstance(size, (tuple, list)): + if len(size) == 2: + return tuple(size) + elif len(size) == 1: + # Perform same logic as if size was an int + size = size[0] + else: + raise ValueError("size must have 1 or 2 elements if it is a list or tuple") + + if default_to_square: + return (size, size) + + height, width = get_image_size(input_image, input_data_format) + short, long = (width, height) if width <= height else (height, width) + requested_new_short = size + + new_short, new_long = requested_new_short, int(requested_new_short * long / short) + + if max_size is not None: + if max_size <= requested_new_short: + raise ValueError( + f"max_size = {max_size} must be strictly greater than the requested " + f"size for the smaller edge size = {size}" + ) + if new_long > max_size: + new_short, new_long = int(max_size * new_short / new_long), max_size + + return (new_long, new_short) if width <= height else (new_short, new_long) + + +def resize( + image: np.ndarray, + size: Tuple[int, int], + resample: "PILImageResampling" = None, + reducing_gap: Optional[int] = None, + data_format: Optional[ChannelDimension] = None, + return_numpy: bool = True, + input_data_format: Optional[Union[str, ChannelDimension]] = None, +) -> np.ndarray: + """ + Resizes `image` to `(height, width)` specified by `size` using the PIL library. + + Args: + image (`np.ndarray`): + The image to resize. + size (`Tuple[int, int]`): + The size to use for resizing the image. + resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`): + The filter to user for resampling. + reducing_gap (`int`, *optional*): + Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to + the fair resampling. See corresponding Pillow documentation for more details. + data_format (`ChannelDimension`, *optional*): + The channel dimension format of the output image. If unset, will use the inferred format from the input. + return_numpy (`bool`, *optional*, defaults to `True`): + Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is + returned. + input_data_format (`ChannelDimension`, *optional*): + The channel dimension format of the input image. If unset, will use the inferred format from the input. + + Returns: + `np.ndarray`: The resized image. + """ + requires_backends(resize, ["vision"]) + + resample = resample if resample is not None else PILImageResampling.BILINEAR + + if not len(size) == 2: + raise ValueError("size must have 2 elements") + + # For all transformations, we want to keep the same data format as the input image unless otherwise specified. + # The resized image from PIL will always have channels last, so find the input format first. 
+ if input_data_format is None: + input_data_format = infer_channel_dimension_format(image) + data_format = input_data_format if data_format is None else data_format + + # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use + # the pillow library to resize the image and then convert back to numpy + do_rescale = False + if not isinstance(image, PIL.Image.Image): + do_rescale = _rescale_for_pil_conversion(image) + image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format) + height, width = size + # PIL images are in the format (width, height) + resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap) + + if return_numpy: + resized_image = np.array(resized_image) + # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image + # so we need to add it back if necessary. + resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image + # The image is always in channels last format after converting from a PIL image + resized_image = to_channel_dimension_format( + resized_image, data_format, input_channel_dim=ChannelDimension.LAST + ) + # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to + # rescale it back to the original range. + resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image + return resized_image + + +def normalize( + image: np.ndarray, + mean: Union[float, Iterable[float]], + std: Union[float, Iterable[float]], + data_format: Optional[ChannelDimension] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, +) -> np.ndarray: + """ + Normalizes `image` using the mean and standard deviation specified by `mean` and `std`. + + image = (image - mean) / std + + Args: + image (`np.ndarray`): + The image to normalize. + mean (`float` or `Iterable[float]`): + The mean to use for normalization. + std (`float` or `Iterable[float]`): + The standard deviation to use for normalization. + data_format (`ChannelDimension`, *optional*): + The channel dimension format of the output image. If unset, will use the inferred format from the input. + input_data_format (`ChannelDimension`, *optional*): + The channel dimension format of the input image. If unset, will use the inferred format from the input. + """ + if not isinstance(image, np.ndarray): + raise ValueError("image must be a numpy array") + + if input_data_format is None: + input_data_format = infer_channel_dimension_format(image) + channel_axis = get_channel_dimension_axis(image, input_data_format=input_data_format) + num_channels = image.shape[channel_axis] + + # We cast to float32 to avoid errors that can occur when subtracting uint8 values. + # We preserve the original dtype if it is a float type to prevent upcasting float16. 
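+ # What follows: integer inputs are cast to float32, scalar mean/std are expanded to one value
+ # per channel, and the normalization is applied. For channels-first inputs, the transpose trick
+ # below moves the channel axis last so the per-channel mean/std broadcast correctly.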
+ if not np.issubdtype(image.dtype, np.floating): + image = image.astype(np.float32) + + if isinstance(mean, Iterable): + if len(mean) != num_channels: + raise ValueError(f"mean must have {num_channels} elements if it is an iterable, got {len(mean)}") + else: + mean = [mean] * num_channels + mean = np.array(mean, dtype=image.dtype) + + if isinstance(std, Iterable): + if len(std) != num_channels: + raise ValueError(f"std must have {num_channels} elements if it is an iterable, got {len(std)}") + else: + std = [std] * num_channels + std = np.array(std, dtype=image.dtype) + + if input_data_format == ChannelDimension.LAST: + image = (image - mean) / std + else: + image = ((image.T - mean) / std).T + + image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image + return image + + +def center_crop( + image: np.ndarray, + size: Tuple[int, int], + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + return_numpy: Optional[bool] = None, +) -> np.ndarray: + """ + Crops the `image` to the specified `size` using a center crop. Note that if the image is too small to be cropped to + the size given, it will be padded (so the returned result will always be of size `size`). + + Args: + image (`np.ndarray`): + The image to crop. + size (`Tuple[int, int]`): + The target size for the cropped image. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + If unset, will use the inferred format of the input image. + input_data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + If unset, will use the inferred format of the input image. + return_numpy (`bool`, *optional*): + Whether or not to return the cropped image as a numpy array. Used for backwards compatibility with the + previous ImageFeatureExtractionMixin method. + - Unset: will return the same type as the input image. + - `True`: will return a numpy array. + - `False`: will return a `PIL.Image.Image` object. + Returns: + `np.ndarray`: The cropped image. 
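A minimal usage sketch, assuming a channels-first array larger than the crop size:

```py
img = np.zeros((3, 300, 400), dtype=np.uint8)
center_crop(img, (224, 224)).shape   # (3, 224, 224), same channels-first format as the input
```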
+ """ + requires_backends(center_crop, ["vision"]) + + if return_numpy is not None: + warnings.warn("return_numpy is deprecated and will be removed in v.4.33", FutureWarning) + + return_numpy = True if return_numpy is None else return_numpy + + if not isinstance(image, np.ndarray): + raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}") + + if not isinstance(size, Iterable) or len(size) != 2: + raise ValueError("size must have 2 elements representing the height and width of the output image") + + if input_data_format is None: + input_data_format = infer_channel_dimension_format(image) + output_data_format = data_format if data_format is not None else input_data_format + + # We perform the crop in (C, H, W) format and then convert to the output format + image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format) + + orig_height, orig_width = get_image_size(image, ChannelDimension.FIRST) + crop_height, crop_width = size + crop_height, crop_width = int(crop_height), int(crop_width) + + # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result. + top = (orig_height - crop_height) // 2 + bottom = top + crop_height + # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result. + left = (orig_width - crop_width) // 2 + right = left + crop_width + + # Check if cropped area is within image boundaries + if top >= 0 and bottom <= orig_height and left >= 0 and right <= orig_width: + image = image[..., top:bottom, left:right] + image = to_channel_dimension_format(image, output_data_format, ChannelDimension.FIRST) + return image + + # Otherwise, we may need to pad if the image is too small. Oh joy... + new_height = max(crop_height, orig_height) + new_width = max(crop_width, orig_width) + new_shape = image.shape[:-2] + (new_height, new_width) + new_image = np.zeros_like(image, shape=new_shape) + + # If the image is too small, pad it with zeros + top_pad = (new_height - orig_height) // 2 + bottom_pad = top_pad + orig_height + left_pad = (new_width - orig_width) // 2 + right_pad = left_pad + orig_width + new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image + + top += top_pad + bottom += top_pad + left += left_pad + right += left_pad + + new_image = new_image[..., max(0, top) : min(new_height, bottom), max(0, left) : min(new_width, right)] + new_image = to_channel_dimension_format(new_image, output_data_format, ChannelDimension.FIRST) + + if not return_numpy: + new_image = to_pil_image(new_image) + + return new_image + + +def _center_to_corners_format_torch(bboxes_center: "torch.Tensor") -> "torch.Tensor": + center_x, center_y, width, height = bboxes_center.unbind(-1) + bbox_corners = torch.stack( + # top left x, top left y, bottom right x, bottom right y + [(center_x - 0.5 * width), (center_y - 0.5 * height), (center_x + 0.5 * width), (center_y + 0.5 * height)], + dim=-1, + ) + return bbox_corners + + +def _center_to_corners_format_numpy(bboxes_center: np.ndarray) -> np.ndarray: + center_x, center_y, width, height = bboxes_center.T + bboxes_corners = np.stack( + # top left x, top left y, bottom right x, bottom right y + [center_x - 0.5 * width, center_y - 0.5 * height, center_x + 0.5 * width, center_y + 0.5 * height], + axis=-1, + ) + return bboxes_corners + + +def _center_to_corners_format_tf(bboxes_center: "tf.Tensor") -> "tf.Tensor": + center_x, center_y, width, height = tf.unstack(bboxes_center, axis=-1) + bboxes_corners = tf.stack( + # top left x, top left y, bottom right x, bottom right y + 
[center_x - 0.5 * width, center_y - 0.5 * height, center_x + 0.5 * width, center_y + 0.5 * height], + axis=-1, + ) + return bboxes_corners + + +# 2 functions below inspired by https://github.com/facebookresearch/detr/blob/master/util/box_ops.py +def center_to_corners_format(bboxes_center: TensorType) -> TensorType: + """ + Converts bounding boxes from center format to corners format. + + center format: contains the coordinate for the center of the box and its width, height dimensions + (center_x, center_y, width, height) + corners format: contains the coodinates for the top-left and bottom-right corners of the box + (top_left_x, top_left_y, bottom_right_x, bottom_right_y) + """ + # Function is used during model forward pass, so we use the input framework if possible, without + # converting to numpy + if is_torch_tensor(bboxes_center): + return _center_to_corners_format_torch(bboxes_center) + elif isinstance(bboxes_center, np.ndarray): + return _center_to_corners_format_numpy(bboxes_center) + elif is_tf_tensor(bboxes_center): + return _center_to_corners_format_tf(bboxes_center) + + raise ValueError(f"Unsupported input type {type(bboxes_center)}") + + +def _corners_to_center_format_torch(bboxes_corners: "torch.Tensor") -> "torch.Tensor": + top_left_x, top_left_y, bottom_right_x, bottom_right_y = bboxes_corners.unbind(-1) + b = [ + (top_left_x + bottom_right_x) / 2, # center x + (top_left_y + bottom_right_y) / 2, # center y + (bottom_right_x - top_left_x), # width + (bottom_right_y - top_left_y), # height + ] + return torch.stack(b, dim=-1) + + +def _corners_to_center_format_numpy(bboxes_corners: np.ndarray) -> np.ndarray: + top_left_x, top_left_y, bottom_right_x, bottom_right_y = bboxes_corners.T + bboxes_center = np.stack( + [ + (top_left_x + bottom_right_x) / 2, # center x + (top_left_y + bottom_right_y) / 2, # center y + (bottom_right_x - top_left_x), # width + (bottom_right_y - top_left_y), # height + ], + axis=-1, + ) + return bboxes_center + + +def _corners_to_center_format_tf(bboxes_corners: "tf.Tensor") -> "tf.Tensor": + top_left_x, top_left_y, bottom_right_x, bottom_right_y = tf.unstack(bboxes_corners, axis=-1) + bboxes_center = tf.stack( + [ + (top_left_x + bottom_right_x) / 2, # center x + (top_left_y + bottom_right_y) / 2, # center y + (bottom_right_x - top_left_x), # width + (bottom_right_y - top_left_y), # height + ], + axis=-1, + ) + return bboxes_center + + +def corners_to_center_format(bboxes_corners: TensorType) -> TensorType: + """ + Converts bounding boxes from corners format to center format. + + corners format: contains the coordinates for the top-left and bottom-right corners of the box + (top_left_x, top_left_y, bottom_right_x, bottom_right_y) + center format: contains the coordinate for the center of the box and its the width, height dimensions + (center_x, center_y, width, height) + """ + # Inverse function accepts different input types so implemented here too + if is_torch_tensor(bboxes_corners): + return _corners_to_center_format_torch(bboxes_corners) + elif isinstance(bboxes_corners, np.ndarray): + return _corners_to_center_format_numpy(bboxes_corners) + elif is_tf_tensor(bboxes_corners): + return _corners_to_center_format_tf(bboxes_corners) + + raise ValueError(f"Unsupported input type {type(bboxes_corners)}") + + +# 2 functions below copied from https://github.com/cocodataset/panopticapi/blob/master/panopticapi/utils.py +# Copyright (c) 2018, Alexander Kirillov +# All rights reserved. +def rgb_to_id(color): + """ + Converts RGB color to unique ID. 
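For instance (illustrative values), the ID is a base-256 encoding of the RGB triple:

```py
rgb_to_id([21, 1, 0])   # 21 + 256 * 1 + 256**2 * 0 == 277
id_to_rgb(277)          # [21, 1, 0]
```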
+ """ + if isinstance(color, np.ndarray) and len(color.shape) == 3: + if color.dtype == np.uint8: + color = color.astype(np.int32) + return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2] + return int(color[0] + 256 * color[1] + 256 * 256 * color[2]) + + +def id_to_rgb(id_map): + """ + Converts unique ID to RGB color. + """ + if isinstance(id_map, np.ndarray): + id_map_copy = id_map.copy() + rgb_shape = tuple(list(id_map.shape) + [3]) + rgb_map = np.zeros(rgb_shape, dtype=np.uint8) + for i in range(3): + rgb_map[..., i] = id_map_copy % 256 + id_map_copy //= 256 + return rgb_map + color = [] + for _ in range(3): + color.append(id_map % 256) + id_map //= 256 + return color + + +class PaddingMode(ExplicitEnum): + """ + Enum class for the different padding modes to use when padding images. + """ + + CONSTANT = "constant" + REFLECT = "reflect" + REPLICATE = "replicate" + SYMMETRIC = "symmetric" + + +def pad( + image: np.ndarray, + padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]], + mode: PaddingMode = PaddingMode.CONSTANT, + constant_values: Union[float, Iterable[float]] = 0.0, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, +) -> np.ndarray: + """ + Pads the `image` with the specified (height, width) `padding` and `mode`. + + Args: + image (`np.ndarray`): + The image to pad. + padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`): + Padding to apply to the edges of the height, width axes. Can be one of three formats: + - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis. + - `((before, after),)` yields same before and after pad for height and width. + - `(pad,)` or int is a shortcut for before = after = pad width for all axes. + mode (`PaddingMode`): + The padding mode to use. Can be one of: + - `"constant"`: pads with a constant value. + - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the + vector along each axis. + - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis. + - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array. + constant_values (`float` or `Iterable[float]`, *optional*): + The value to use for the padding if `mode` is `"constant"`. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + If unset, will use same as the input image. + input_data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + If unset, will use the inferred format of the input image. + + Returns: + `np.ndarray`: The padded image. + + """ + if input_data_format is None: + input_data_format = infer_channel_dimension_format(image) + + def _expand_for_data_format(values): + """ + Convert values to be in the format expected by np.pad based on the data format. 
+ """ + if isinstance(values, (int, float)): + values = ((values, values), (values, values)) + elif isinstance(values, tuple) and len(values) == 1: + values = ((values[0], values[0]), (values[0], values[0])) + elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int): + values = (values, values) + elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple): + values = values + else: + raise ValueError(f"Unsupported format: {values}") + + # add 0 for channel dimension + values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0)) + + # Add additional padding if there's a batch dimension + values = (0, *values) if image.ndim == 4 else values + return values + + padding = _expand_for_data_format(padding) + + if mode == PaddingMode.CONSTANT: + constant_values = _expand_for_data_format(constant_values) + image = np.pad(image, padding, mode="constant", constant_values=constant_values) + elif mode == PaddingMode.REFLECT: + image = np.pad(image, padding, mode="reflect") + elif mode == PaddingMode.REPLICATE: + image = np.pad(image, padding, mode="edge") + elif mode == PaddingMode.SYMMETRIC: + image = np.pad(image, padding, mode="symmetric") + else: + raise ValueError(f"Invalid padding mode: {mode}") + + image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image + return image + + +# TODO (Amy): Accept 1/3/4 channel numpy array as input and return np.array as default +def convert_to_rgb(image: ImageInput) -> ImageInput: + """ + Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image + as is. + Args: + image (Image): + The image to convert. + """ + requires_backends(convert_to_rgb, ["vision"]) + + if not isinstance(image, PIL.Image.Image): + return image + + if image.mode == "RGB": + return image + + image = image.convert("RGB") + return image + + +def flip_channel_order( + image: np.ndarray, + data_format: Optional[ChannelDimension] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, +) -> np.ndarray: + """ + Flips the channel order of the image. + + If the image is in RGB format, it will be converted to BGR and vice versa. + + Args: + image (`np.ndarray`): + The image to flip. + data_format (`ChannelDimension`, *optional*): + The channel dimension format for the output image. Can be one of: + - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `ChannelDimension.LAST`: image in (height, width, num_channels) format. + If unset, will use same as the input image. + input_data_format (`ChannelDimension`, *optional*): + The channel dimension format for the input image. Can be one of: + - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `ChannelDimension.LAST`: image in (height, width, num_channels) format. + If unset, will use the inferred format of the input image. + """ + input_data_format = infer_channel_dimension_format(image) if input_data_format is None else input_data_format + + if input_data_format == ChannelDimension.LAST: + image = image[..., ::-1] + elif input_data_format == ChannelDimension.FIRST: + image = image[::-1, ...] 
+ else: + raise ValueError(f"Unsupported channel dimension: {input_data_format}") + + if data_format is not None: + image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) + return image diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/keras_callbacks.py b/llmeval-env/lib/python3.10/site-packages/transformers/keras_callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..b6e832729a1eeb482d1193753cc2c07ad1f16c2e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/keras_callbacks.py @@ -0,0 +1,413 @@ +import logging +import os +from pathlib import Path +from time import sleep +from typing import Callable, List, Optional, Union + +import numpy as np +import tensorflow as tf +from huggingface_hub import Repository, create_repo +from packaging.version import parse + +from . import IntervalStrategy, PreTrainedTokenizerBase +from .modelcard import TrainingSummary +from .modeling_tf_utils import keras + + +logger = logging.getLogger(__name__) + + +class KerasMetricCallback(keras.callbacks.Callback): + """ + Callback to compute metrics at the end of every epoch. Unlike normal Keras metrics, these do not need to be + compilable by TF. It is particularly useful for common NLP metrics like BLEU and ROUGE that require string + operations or generation loops that cannot be compiled. Predictions (or generations) will be computed on the + `eval_dataset` before being passed to the `metric_fn` in `np.ndarray` format. The `metric_fn` should compute + metrics and return a dict mapping metric names to metric values. + + We provide an example of a suitable metric_fn that computes ROUGE scores for a summarization model below. Note that + this example skips some post-processing for readability and simplicity, and should probably not be used as-is! + + ```py + from datasets import load_metric + + rouge_metric = load_metric("rouge") + + + def rouge_fn(predictions, labels): + decoded_predictions = tokenizer.batch_decode(predictions, skip_special_tokens=True) + decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) + result = rouge_metric.compute(predictions=decoded_predictions, references=decoded_labels) + return {key: value.mid.fmeasure * 100 for key, value in result.items()} + ``` + + The above function will return a dict containing values which will be logged like any other Keras metric: + + ``` + {'rouge1': 37.4199, 'rouge2': 13.9768, 'rougeL': 34.361, 'rougeLsum': 35.0781 + ``` + + Args: + metric_fn (`Callable`): + Metric function provided by the user. It will be called with two arguments - `predictions` and `labels`. + These contain the model's outputs and matching labels from the dataset. It should return a dict mapping + metric names to numerical values. + eval_dataset (`tf.data.Dataset` or `dict` or `tuple` or `np.ndarray` or `tf.Tensor`): + Validation data to be used to generate predictions for the `metric_fn`. + output_cols (`List[str], *optional*): + A list of columns to be retained from the model output as the predictions. Defaults to all. + label_cols ('`List[str]`, *optional*'): + A list of columns to be retained from the input dataset as the labels. Will be autodetected if this is not + supplied. + batch_size (`int`, *optional*): + Batch size. Only used when the data is not a pre-batched `tf.data.Dataset`. + predict_with_generate (`bool`, *optional*, defaults to `False`): + Whether we should use `model.generate()` to get outputs for the model. 
+ use_xla_generation (`bool`, *optional*, defaults to `False`): + If we're generating, whether to compile model generation with XLA. This can massively increase the speed of + generation (up to 100X speedup) but will require a new XLA compilation for each input shape. When using XLA + generation, it's a good idea to pad your inputs to the same size, or to use the `pad_to_multiple_of` + argument in your `tokenizer` or `DataCollator`, which will reduce the number of unique input shapes and + save a lot of compilation time. This option has no effect is `predict_with_generate` is `False`. + generate_kwargs (`dict`, *optional*): + Keyword arguments to pass to `model.generate()` when generating. Has no effect if `predict_with_generate` + is `False`. + + """ + + def __init__( + self, + metric_fn: Callable, + eval_dataset: Union[tf.data.Dataset, np.ndarray, tf.Tensor, tuple, dict], + output_cols: Optional[List[str]] = None, + label_cols: Optional[List[str]] = None, + batch_size: Optional[int] = None, + predict_with_generate: bool = False, + use_xla_generation: bool = False, + generate_kwargs: Optional[dict] = None, + ): + super().__init__() + self.metric_fn = metric_fn + self.batch_size = batch_size + if not isinstance(eval_dataset, tf.data.Dataset): + if batch_size is None: + raise ValueError( + "When passing data to KerasMetricCallback that is not a pre-batched tf.data.Dataset " + "the batch_size argument must be set." + ) + # Wrap a tf.data.Dataset around it + eval_dataset = tf.data.Dataset.from_tensor_slices(eval_dataset).batch(batch_size, drop_remainder=False) + self.eval_dataset = eval_dataset + self.predict_with_generate = predict_with_generate + self.output_cols = output_cols + + # This next block attempts to parse out which elements of the dataset should be appended to the labels list + # that is passed to the metric_fn + if isinstance(eval_dataset.element_spec, tuple) and len(eval_dataset.element_spec) == 2: + input_spec, label_spec = eval_dataset.element_spec + else: + input_spec = eval_dataset.element_spec + label_spec = None + if label_cols is not None: + for label in label_cols: + if label not in input_spec: + raise ValueError(f"Label {label} is in label_cols but could not be found in the dataset inputs!") + self.label_cols = label_cols + self.use_keras_label = False + elif label_spec is not None: + # If the dataset inputs are split into a 2-tuple of inputs and labels, + # assume the second element is the labels + self.label_cols = None + self.use_keras_label = True + elif "labels" in input_spec: + self.label_cols = ["labels"] + self.use_keras_label = False + logging.warning("No label_cols specified for KerasMetricCallback, assuming you want the 'labels' key.") + elif "start_positions" in input_spec and "end_positions" in input_spec: + self.label_cols = ["start_positions", "end_positions"] + self.use_keras_label = False + logging.warning( + "No label_cols specified for KerasMetricCallback, assuming you want the " + "start_positions and end_positions keys." 
+ ) + else: + raise ValueError("Could not autodetect label_cols for KerasMetricCallback, please specify them!") + if parse(tf.__version__) < parse("2.7"): + logging.warning("TF versions less than 2.7 may encounter issues with KerasMetricCallback!") + + self.use_xla_generation = use_xla_generation + self.generate_kwargs = {} if generate_kwargs is None else generate_kwargs + + self.generation_function = None + + @staticmethod + def _concatenate_batches(batches, padding_index=-100): + # If all batches are unidimensional or same length, do a simple concatenation + if batches[0].ndim == 1 or all(batch.shape[1] == batches[0].shape[1] for batch in batches): + return np.concatenate(batches, axis=0) + + # Welp, they're not the same length. Let's do some padding + max_len = max([batch.shape[1] for batch in batches]) + num_samples = sum([batch.shape[0] for batch in batches]) + output = np.full_like( + batches[0], fill_value=padding_index, shape=[num_samples, max_len] + list(batches[0].shape[2:]) + ) + # i keeps track of which part of the concatenated array we're writing the next batch to + i = 0 + for batch in batches: + output[i : i + len(batch), : batch.shape[1]] = batch + i += len(batch) + return output + + def _postprocess_predictions_or_labels(self, inputs): + if isinstance(inputs[0], dict): + outputs = {} + for key in inputs[0].keys(): + outputs[key] = self._concatenate_batches([batch[key] for batch in inputs]) + # If it's a dict with only one key, just return the array + if len(outputs) == 1: + outputs = list(outputs.values())[0] + elif isinstance(inputs[0], list) or isinstance(inputs[0], tuple): + outputs = [] + for input_list in zip(*inputs): + outputs.append(self._concatenate_batches(input_list)) + if len(outputs) == 1: + outputs = outputs[0] # If it's a list with only one element, just return the array + elif isinstance(inputs[0], np.ndarray): + outputs = self._concatenate_batches(inputs) + elif isinstance(inputs[0], tf.Tensor): + outputs = self._concatenate_batches([tensor.numpy() for tensor in inputs]) + else: + raise TypeError(f"Couldn't handle batch of type {type(inputs[0])}!") + return outputs + + def on_epoch_end(self, epoch, logs=None): + if hasattr(self.model, "config"): + ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", []) + else: + ignore_keys = [] + + main_input_name = None + if self.predict_with_generate: + # This dense conditional recognizes the case where we have an encoder-decoder model, but + # avoids getting tangled up when we just have a model with a layer called 'encoder' + if hasattr(self.model, "encoder") and hasattr(self.model.encoder, "main_input_name"): + main_input_name = self.model.encoder.main_input_name + else: + main_input_name = getattr(self.model, "main_input_name", "input_ids") + + if self.use_xla_generation and self.generation_function is None: + + def generation_function(inputs, attention_mask): + return self.model.generate(inputs, attention_mask=attention_mask, **self.generate_kwargs) + + self.generation_function = tf.function(generation_function, jit_compile=True) + + prediction_list = [] + label_list = [] + + # The whole predict/generate loop is handled inside this method + for batch in self.eval_dataset: + if isinstance(batch, tuple): + batch, labels = batch + else: + labels = None + if self.predict_with_generate: + if isinstance(batch, dict): + generation_inputs = batch[main_input_name] + attention_mask = batch.get("attention_mask", None) + else: + generation_inputs = batch + attention_mask = None + if self.use_xla_generation: 
+ predictions = self.generation_function(generation_inputs, attention_mask=attention_mask) + else: + predictions = self.model.generate( + generation_inputs, attention_mask=attention_mask, **self.generate_kwargs + ) + else: + predictions = self.model.predict_on_batch(batch) + if isinstance(predictions, dict): + # This converts any dict-subclass to a regular dict + # Keras REALLY doesn't like it when we pass around a BatchEncoding or other derived class + predictions = dict(predictions) + if self.output_cols is not None: + predictions = {key: predictions[key] for key in self.output_cols} + else: + predictions = { + key: val for key, val in predictions.items() if key not in ignore_keys + ["loss"] + } + prediction_list.append(predictions) + if not self.use_keras_label: + labels = {key: batch[key].numpy() for key in self.label_cols} + elif isinstance(labels, dict): + labels = {key: array.numpy() for key, array in labels.items()} + elif isinstance(labels, list) or isinstance(labels, tuple): + labels = [array.numpy() for array in labels] + elif isinstance(labels, tf.Tensor): + labels = labels.numpy() + else: + raise TypeError(f"Confused by labels of type {type(labels)}") + label_list.append(labels) + + all_preds = self._postprocess_predictions_or_labels(prediction_list) + all_labels = self._postprocess_predictions_or_labels(label_list) + + metric_output = self.metric_fn((all_preds, all_labels)) + if not isinstance(metric_output, dict): + raise TypeError( + f"metric_fn should return a dict mapping metric names to values but instead returned {metric_output}" + ) + # This is the critical bit - Keras passes a dict containing the loss and standard metric values for this epoch + # in the logs argument. Ordinarily, this is so the callback can read them, but in this case we write a bunch of + # new keys in there, which will then get read by the History callback and treated like any other metric value. + # I promise that I have it in writing from Chollet that this is okay. + logs.update(metric_output) + + +class PushToHubCallback(keras.callbacks.Callback): + """ + Callback that will save and push the model to the Hub regularly. By default, it pushes once per epoch, but this can + be changed with the `save_strategy` argument. Pushed models can be accessed like any other model on the hub, such + as with the `from_pretrained` method. + + ```py + from transformers.keras_callbacks import PushToHubCallback + + push_to_hub_callback = PushToHubCallback( + output_dir="./model_save", + tokenizer=tokenizer, + hub_model_id="gpt5-7xlarge", + ) + + model.fit(train_dataset, callbacks=[push_to_hub_callback]) + ``` + + Args: + output_dir (`str`): + The output directory where the model predictions and checkpoints will be written and synced with the + repository on the Hub. + save_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"epoch"`): + The checkpoint save strategy to adopt during training. Possible values are: + + - `"no"`: Save is done at the end of training. + - `"epoch"`: Save is done at the end of each epoch. + - `"steps"`: Save is done every `save_steps` + save_steps (`int`, *optional*): + The number of steps between saves when using the "steps" `save_strategy`. + tokenizer (`PreTrainedTokenizerBase`, *optional*): + The tokenizer used by the model. If supplied, will be uploaded to the repo alongside the weights. + hub_model_id (`str`, *optional*): + The name of the repository to keep in sync with the local `output_dir`. 
It can be a simple model ID in + which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, + for instance `"user_name/model"`, which allows you to push to an organization you are a member of with + `"organization_name/model"`. + + Will default to the name of `output_dir`. + hub_token (`str`, *optional*): + The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with + `huggingface-cli login`. + checkpoint (`bool`, *optional*, defaults to `False`): + Whether to save full training checkpoints (including epoch and optimizer state) to allow training to be + resumed. Only usable when `save_strategy` is `"epoch"`. + """ + + def __init__( + self, + output_dir: Union[str, Path], + save_strategy: Union[str, IntervalStrategy] = "epoch", + save_steps: Optional[int] = None, + tokenizer: Optional[PreTrainedTokenizerBase] = None, + hub_model_id: Optional[str] = None, + hub_token: Optional[str] = None, + checkpoint: bool = False, + **model_card_args, + ): + super().__init__() + if checkpoint and save_strategy != "epoch": + raise ValueError("Cannot save checkpoints when save_strategy is not 'epoch'!") + if isinstance(save_strategy, str): + save_strategy = IntervalStrategy(save_strategy.lower()) + self.save_strategy = save_strategy + if self.save_strategy == IntervalStrategy.STEPS and (not isinstance(save_steps, int) or save_steps <= 0): + raise ValueError("Please supply a positive integer argument for save_steps when save_strategy == 'steps'!") + self.save_steps = save_steps + output_dir = Path(output_dir) + + # Create repo and retrieve repo_id + if hub_model_id is None: + hub_model_id = output_dir.absolute().name + self.hub_model_id = create_repo(repo_id=hub_model_id, exist_ok=True, token=hub_token).repo_id + + self.output_dir = output_dir + self.repo = Repository(str(self.output_dir), clone_from=self.hub_model_id, token=hub_token) + + self.tokenizer = tokenizer + self.last_job = None + self.checkpoint = checkpoint + self.training_history = None + self.model_card_args = model_card_args + + def on_train_begin(self, logs=None): + # Although we can access model.history, we have no guarantees that the History callback will fire before this + # one, so we keep track of it here too + self.training_history = [] + + def on_train_batch_end(self, batch, logs=None): + if self.save_strategy == IntervalStrategy.STEPS and (batch + 1) % self.save_steps == 0: + if self.last_job is not None and not self.last_job.is_done: + return # The last upload is still running, don't start another + self.model.save_pretrained(self.output_dir) + if self.tokenizer is not None: + self.tokenizer.save_pretrained(self.output_dir) + _, self.last_job = self.repo.push_to_hub( + commit_message=f"Training in progress steps {batch}", blocking=False + ) + + def on_epoch_end(self, epoch, logs=None): + logs = logs.copy() # Don't accidentally write things that Keras will read later + if "epoch" not in logs: + logs["epoch"] = epoch + self.training_history.append(logs) + if self.save_strategy == IntervalStrategy.EPOCH: + if self.last_job is not None and not self.last_job.is_done: + return # The last upload is still running, don't start another + self.model.save_pretrained(self.output_dir) + if self.tokenizer is not None: + self.tokenizer.save_pretrained(self.output_dir) + if self.checkpoint: + checkpoint_dir = os.path.join(self.output_dir, "checkpoint") + self.model._save_checkpoint(checkpoint_dir, epoch) + train_summary = TrainingSummary.from_keras( + 
model=self.model, + model_name=self.hub_model_id, + keras_history=self.training_history, + **self.model_card_args, + ) + model_card = train_summary.to_model_card() + with (self.output_dir / "README.md").open("w") as f: + f.write(model_card) + _, self.last_job = self.repo.push_to_hub( + commit_message=f"Training in progress epoch {epoch}", blocking=False + ) + + def on_train_end(self, logs=None): + # Makes sure the latest version of the model is uploaded + if self.last_job is not None and not self.last_job.is_done: + logging.info("Pushing the last epoch to the Hub, this may take a while...") + while not self.last_job.is_done: + sleep(1) + else: + self.model.save_pretrained(self.output_dir) + if self.tokenizer is not None: + self.tokenizer.save_pretrained(self.output_dir) + train_summary = TrainingSummary.from_keras( + model=self.model, + model_name=self.hub_model_id, + keras_history=self.training_history, + **self.model_card_args, + ) + model_card = train_summary.to_model_card() + with (self.output_dir / "README.md").open("w") as f: + f.write(model_card) + self.repo.push_to_hub(commit_message="End of training", blocking=True) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/modeling_flax_pytorch_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/modeling_flax_pytorch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..aceb462d12a8d09e875bc0a80daeac80e17c930b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/modeling_flax_pytorch_utils.py @@ -0,0 +1,497 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch - Flax general utilities.""" + + +import os +from pickle import UnpicklingError +from typing import Dict, Tuple + +import jax +import jax.numpy as jnp +import numpy as np +from flax.serialization import from_bytes +from flax.traverse_util import flatten_dict, unflatten_dict + +import transformers + +from . 
import is_safetensors_available, is_torch_available +from .utils import logging + + +if is_torch_available(): + import torch + +if is_safetensors_available(): + from safetensors import safe_open + from safetensors.flax import load_file as safe_load_file + + +logger = logging.get_logger(__name__) + + +##################### +# PyTorch => Flax # +##################### + + +def load_pytorch_checkpoint_in_flax_state_dict( + flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False +): + """Load pytorch checkpoints in a flax model""" + + if not is_sharded: + pt_path = os.path.abspath(pytorch_checkpoint_path) + logger.info(f"Loading PyTorch weights from {pt_path}") + + if pt_path.endswith(".safetensors"): + pt_state_dict = {} + with safe_open(pt_path, framework="flax") as f: + for k in f.keys(): + pt_state_dict[k] = f.get_tensor(k) + else: + try: + import torch # noqa: F401 + + from .pytorch_utils import is_torch_greater_or_equal_than_1_13 # noqa: F401 + except (ImportError, ModuleNotFoundError): + logger.error( + "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see" + " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" + " instructions." + ) + raise + + weights_only_kwarg = {"weights_only": True} if is_torch_greater_or_equal_than_1_13 else {} + pt_state_dict = torch.load(pt_path, map_location="cpu", **weights_only_kwarg) + logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.") + + flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model) + else: + # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files + flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model) + return flax_state_dict + + +def rename_key_and_reshape_tensor( + pt_tuple_key: Tuple[str], + pt_tensor: np.ndarray, + random_flax_state_dict: Dict[str, jnp.ndarray], + model_prefix: str, +) -> (Tuple[str], np.ndarray): + """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary""" + + def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool: + """Checks if `key` of `(prefix,) + key` is in random_flax_state_dict""" + return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0 + + # layer norm + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) + if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key): + return renamed_pt_tuple_key, pt_tensor + + # batch norm layer mean + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",) + if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key): + return renamed_pt_tuple_key, pt_tensor + + # batch norm layer var + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",) + if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key): + return renamed_pt_tuple_key, pt_tensor + + # embedding + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",) + if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key): + return renamed_pt_tuple_key, pt_tensor + + # conv layer + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",) + if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key): + pt_tensor = pt_tensor.transpose(2, 3, 1, 0) + return renamed_pt_tuple_key, pt_tensor + + # linear layer + renamed_pt_tuple_key = pt_tuple_key[:-1] 
+ ("kernel",) + if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key): + pt_tensor = pt_tensor.T + return renamed_pt_tuple_key, pt_tensor + + # old PyTorch layer norm weight + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",) + if pt_tuple_key[-1] == "gamma": + return renamed_pt_tuple_key, pt_tensor + + # old PyTorch layer norm bias + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",) + if pt_tuple_key[-1] == "beta": + return renamed_pt_tuple_key, pt_tensor + + # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 + name = None + if pt_tuple_key[-3::2] == ("parametrizations", "original0"): + name = pt_tuple_key[-2] + "_g" + elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): + name = pt_tuple_key[-2] + "_v" + if name is not None: + renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,) + return renamed_pt_tuple_key, pt_tensor + + return pt_tuple_key, pt_tensor + + +def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model): + # convert pytorch tensor to numpy + from_bin = is_torch_available() and isinstance(next(iter(pt_state_dict.values())), torch.Tensor) + bfloat16 = torch.bfloat16 if from_bin else "bfloat16" + + weight_dtypes = {k: v.dtype for k, v in pt_state_dict.items()} + + if from_bin: + for k, v in pt_state_dict.items(): + # numpy currently does not support bfloat16, need to go over float32 in this case to not lose precision + if v.dtype == bfloat16: + v = v.float() + pt_state_dict[k] = v.numpy() + + model_prefix = flax_model.base_model_prefix + + # use params dict if the model contains batch norm layers + if "params" in flax_model.params: + flax_model_params = flax_model.params["params"] + else: + flax_model_params = flax_model.params + random_flax_state_dict = flatten_dict(flax_model_params) + + # add batch_stats keys,values to dict + if "batch_stats" in flax_model.params: + flax_batch_stats = flatten_dict(flax_model.params["batch_stats"]) + random_flax_state_dict.update(flax_batch_stats) + + flax_state_dict = {} + + load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and ( + model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()} + ) + load_base_model_into_model_with_head = (model_prefix in flax_model_params) and ( + model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()} + ) + + # Need to change some parameters name to match Flax names + for pt_key, pt_tensor in pt_state_dict.items(): + pt_tuple_key = tuple(pt_key.split(".")) + is_bfloat_16 = weight_dtypes[pt_key] == bfloat16 + + # remove base model prefix if necessary + has_base_model_prefix = pt_tuple_key[0] == model_prefix + if load_model_with_head_into_base_model and has_base_model_prefix: + pt_tuple_key = pt_tuple_key[1:] + + # Correctly rename weight parameters + flax_key, flax_tensor = rename_key_and_reshape_tensor( + pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix + ) + + # add model prefix if necessary + require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict + if load_base_model_into_model_with_head and require_base_model_prefix: + flax_key = (model_prefix,) + flax_key + + if flax_key in random_flax_state_dict: + if flax_tensor.shape != random_flax_state_dict[flax_key].shape: + raise ValueError( + f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " + f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." 
+ ) + + # add batch stats if the model contains batchnorm layers + if "batch_stats" in flax_model.params: + if "mean" in flax_key[-1] or "var" in flax_key[-1]: + flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor) + continue + # remove num_batches_tracked key + if "num_batches_tracked" in flax_key[-1]: + flax_state_dict.pop(flax_key, None) + continue + + # also add unexpected weight so that warning is thrown + flax_state_dict[("params",) + flax_key] = ( + jnp.asarray(flax_tensor) if not is_bfloat_16 else jnp.asarray(flax_tensor, dtype=jnp.bfloat16) + ) + else: + # also add unexpected weight so that warning is thrown + flax_state_dict[flax_key] = ( + jnp.asarray(flax_tensor) if not is_bfloat_16 else jnp.asarray(flax_tensor, dtype=jnp.bfloat16) + ) + + return unflatten_dict(flax_state_dict) + + +############################ +# Sharded Pytorch => Flax # +############################ + + +def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model): + import torch + + from .pytorch_utils import is_torch_greater_or_equal_than_1_13 + + # Load the index + flax_state_dict = {} + for shard_file in shard_filenames: + # load using msgpack utils + weights_only_kwarg = {"weights_only": True} if is_torch_greater_or_equal_than_1_13 else {} + pt_state_dict = torch.load(shard_file, **weights_only_kwarg) + weight_dtypes = {k: v.dtype for k, v in pt_state_dict.items()} + pt_state_dict = { + k: v.numpy() if v.dtype != torch.bfloat16 else v.float().numpy() for k, v in pt_state_dict.items() + } + + model_prefix = flax_model.base_model_prefix + + # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict + if "batch_stats" in flax_model.params: + flax_model_params = flax_model.params["params"] + + random_flax_state_dict = flatten_dict(flax_model_params) + random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"])) + else: + flax_model_params = flax_model.params + random_flax_state_dict = flatten_dict(flax_model_params) + + load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and ( + model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()} + ) + load_base_model_into_model_with_head = (model_prefix in flax_model_params) and ( + model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()} + ) + # Need to change some parameters name to match Flax names + for pt_key, pt_tensor in pt_state_dict.items(): + pt_tuple_key = tuple(pt_key.split(".")) + is_bfloat_16 = weight_dtypes[pt_key] == torch.bfloat16 + + # remove base model prefix if necessary + has_base_model_prefix = pt_tuple_key[0] == model_prefix + if load_model_with_head_into_base_model and has_base_model_prefix: + pt_tuple_key = pt_tuple_key[1:] + + # Correctly rename weight parameters + flax_key, flax_tensor = rename_key_and_reshape_tensor( + pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix + ) + # add model prefix if necessary + require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict + if load_base_model_into_model_with_head and require_base_model_prefix: + flax_key = (model_prefix,) + flax_key + + if flax_key in random_flax_state_dict: + if flax_tensor.shape != random_flax_state_dict[flax_key].shape: + raise ValueError( + f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " + f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." 
+ )
+
+ # add batch stats if the model contains batchnorm layers
+ if "batch_stats" in flax_model.params:
+ if "mean" in flax_key[-1]:
+ flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
+ continue
+ if "var" in flax_key[-1]:
+ flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
+ continue
+ # remove num_batches_tracked key
+ if "num_batches_tracked" in flax_key[-1]:
+ flax_state_dict.pop(flax_key, None)
+ continue
+
+ # also add unexpected weight so that warning is thrown
+ flax_state_dict[("params",) + flax_key] = (
+ jnp.asarray(flax_tensor) if not is_bfloat_16 else jnp.asarray(flax_tensor, dtype=jnp.bfloat16)
+ )
+
+ else:
+ # also add unexpected weight so that warning is thrown
+ flax_state_dict[flax_key] = (
+ jnp.asarray(flax_tensor) if not is_bfloat_16 else jnp.asarray(flax_tensor, dtype=jnp.bfloat16)
+ )
+ return unflatten_dict(flax_state_dict)
+
+
+#####################
+# Flax => PyTorch #
+#####################
+
+
+def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
+ """Load Flax checkpoints in a PyTorch model"""
+ flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
+ logger.info(f"Loading Flax weights from {flax_checkpoint_path}")
+
+ # import correct flax class
+ flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)
+
+ # load flax weight dict
+ if flax_checkpoint_path.endswith(".safetensors"):
+ flax_state_dict = safe_load_file(flax_checkpoint_path)
+ flax_state_dict = unflatten_dict(flax_state_dict, sep=".")
+ else:
+ with open(flax_checkpoint_path, "rb") as state_f:
+ try:
+ flax_state_dict = from_bytes(flax_cls, state_f.read())
+ except UnpicklingError:
+ raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to a Flax deserializable object.")
+
+ return load_flax_weights_in_pytorch_model(model, flax_state_dict)
+
+
+def load_flax_weights_in_pytorch_model(pt_model, flax_state):
+ """Load Flax checkpoints in a PyTorch model"""
+
+ try:
+ import torch # noqa: F401
+ except (ImportError, ModuleNotFoundError):
+ logger.error(
+ "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
+ " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
+ " instructions."
+ )
+ raise
+
+ # check if we have bf16 weights
+ is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
+ if any(is_type_bf16):
+ # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
+ # and bf16 is not fully supported in PT yet.
+ logger.warning(
+ "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
+ "before loading them into the PyTorch model."
+ ) + flax_state = jax.tree_util.tree_map( + lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state + ) + + flax_state_dict = flatten_dict(flax_state) + pt_model_dict = pt_model.state_dict() + + load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and ( + pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()} + ) + load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and ( + pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()} + ) + + # keep track of unexpected & missing keys + unexpected_keys = [] + missing_keys = set(pt_model_dict.keys()) + + for flax_key_tuple, flax_tensor in flax_state_dict.items(): + has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix + require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict + + # adapt flax_key to prepare for loading from/to base model only + if load_model_with_head_into_base_model and has_base_model_prefix: + flax_key_tuple = flax_key_tuple[1:] + elif load_base_model_into_model_with_head and require_base_model_prefix: + flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple + + # rename flax weights to PyTorch format + if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict: + # conv layer + flax_key_tuple = flax_key_tuple[:-1] + ("weight",) + flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1)) + elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict: + # linear layer + flax_key_tuple = flax_key_tuple[:-1] + ("weight",) + flax_tensor = flax_tensor.T + elif flax_key_tuple[-1] in ["scale", "embedding"]: + flax_key_tuple = flax_key_tuple[:-1] + ("weight",) + + # adding batch stats from flax batch norm to pt + elif "mean" in flax_key_tuple[-1]: + flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",) + elif "var" in flax_key_tuple[-1]: + flax_key_tuple = flax_key_tuple[:-1] + ("running_var",) + + if "batch_stats" in flax_state: + flax_key = ".".join(flax_key_tuple[1:]) # Remove the params/batch_stats header + else: + flax_key = ".".join(flax_key_tuple) + + # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. + special_pt_names = {} + # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 + for key in pt_model_dict: + key_components = key.split(".") + name = None + if key_components[-3::2] == ["parametrizations", "original0"]: + name = key_components[-2] + "_g" + elif key_components[-3::2] == ["parametrizations", "original1"]: + name = key_components[-2] + "_v" + if name is not None: + key_components = key_components[:-3] + [name] + key_to_check = ".".join(key_components) + special_pt_names[key_to_check] = key + + if flax_key in special_pt_names: + flax_key = special_pt_names[flax_key] + + if flax_key in pt_model_dict: + if flax_tensor.shape != pt_model_dict[flax_key].shape: + raise ValueError( + f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected " + f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." 
+ ) + else: + # add weight to pytorch dict + flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor + pt_model_dict[flax_key] = torch.from_numpy(flax_tensor) + # remove from missing keys + missing_keys.remove(flax_key) + else: + # weight is not expected by PyTorch model + unexpected_keys.append(flax_key) + + pt_model.load_state_dict(pt_model_dict) + + # re-transform missing_keys to list + missing_keys = list(missing_keys) + + if len(unexpected_keys) > 0: + logger.warning( + "Some weights of the Flax model were not used when initializing the PyTorch model" + f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" + f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture" + " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This" + f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect" + " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a" + " FlaxBertForSequenceClassification model)." + ) + else: + logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n") + if len(missing_keys) > 0: + logger.warning( + f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly" + f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to" + " use it for predictions and inference." + ) + else: + logger.warning( + f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n" + "If your task is similar to the task the model of the checkpoint was trained on, " + f"you can already use {pt_model.__class__.__name__} for predictions without further training." + ) + + return pt_model diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/modeling_outputs.py b/llmeval-env/lib/python3.10/site-packages/transformers/modeling_outputs.py new file mode 100644 index 0000000000000000000000000000000000000000..7328e05186f2deddebb54f76d64427475de849a6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/modeling_outputs.py @@ -0,0 +1,1753 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch + +from .utils import ModelOutput + + +@dataclass +class BaseModelOutput(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. 
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithNoAttention(ModelOutput): + """ + Base class for model's outputs, with potential hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithPooling(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Last layer hidden-state of the first token of the sequence (classification token) after further processing + through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns + the classification token after processing through a linear layer and a tanh activation function. The linear + layer weights are trained from the next sentence prediction (classification) objective during pretraining. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. 
+ + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: torch.FloatTensor = None + pooler_output: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithPoolingAndNoAttention(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Last layer hidden-state after a pooling operation on the spatial dimensions. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + """ + + last_hidden_state: torch.FloatTensor = None + pooler_output: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithPast(ModelOutput): + """ + Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. 
+ """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithCrossAttentions(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Last layer hidden-state of the first token of the sequence (classification token) after further processing + through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns + the classification token after processing through a linear layer and a tanh activation function. The linear + layer weights are trained from the next sentence prediction (classification) objective during pretraining. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. 
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. + """ + + last_hidden_state: torch.FloatTensor = None + pooler_output: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithPastAndCrossAttentions(ModelOutput): + """ + Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. 
+ + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class MoECausalLMOutputWithPast(ModelOutput): + """ + Base class for causal language model (or autoregressive) outputs as well as Mixture of Expert's router hidden + states terms, to train a MoE model. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) + + Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + z_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): + z_loss for the sparse modules. + aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): + aux_loss for the sparse modules. 
+ router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Router logits of the encoder model, useful to compute the auxiliary loss and the z_loss for the sparse + modules. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + z_loss: torch.FloatTensor = None + aux_loss: torch.FloatTensor = None + router_logits: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class MoEModelOutput(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + router_probs (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Raw router probabilities that are computed by MoE routers, these terms are used to compute the auxiliary + loss and the z_loss for Mixture of Experts models. + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + router_probs: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class MoeModelOutputWithPast(ModelOutput): + """ + Base class for model's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. 
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
+ input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
+
+ Raw router logits (post-softmax) that are computed by MoE routers, these terms are used to compute the auxiliary
+ loss for Mixture of Experts models.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ router_logits: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class MoeCausalLMOutputWithPast(ModelOutput):
+ """
+ Base class for causal language model (or autoregressive) with mixture of experts outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+
+ aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):
+ aux_loss for the sparse modules.
+
+ router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
+
+ Raw router logits (post-softmax) that are computed by MoE routers, these terms are used to compute the auxiliary
+ loss for Mixture of Experts models.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + aux_loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + router_logits: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class MoEModelOutputWithPastAndCrossAttentions(ModelOutput): + """ + Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding) as well as + Mixture of Expert's router hidden states terms, to train a MoE model. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. 
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + router_probs (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Raw router probabilities that are computed by MoE routers, these terms are used to compute the auxiliary + loss and the z_loss for Mixture of Experts models. + """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + router_probs: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class Seq2SeqModelOutput(ModelOutput): + """ + Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential + decoding. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. 
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class Seq2SeqMoEModelOutput(ModelOutput): + """ + Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential + decoding. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + decoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + encoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Router logits of the encoder model, useful to compute the auxiliary loss and the z_loss for the sparse + modules. 
+ """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class CausalLMOutput(ModelOutput): + """ + Base class for causal language model (or autoregressive) outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class CausalLMOutputWithPast(ModelOutput): + """ + Base class for causal language model (or autoregressive) outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) + + Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. 
+ + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class CausalLMOutputWithCrossAttentions(ModelOutput): + """ + Base class for causal language model (or autoregressive) outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Cross attentions weights after the attention softmax, used to compute the weighted average in the + cross-attention heads. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `torch.FloatTensor` tuples of length `config.n_layers`, with each tuple containing the cached key, + value states of the self-attention and the cross-attention layers if model is used in encoder-decoder + setting. Only relevant if `config.is_decoder = True`. + + Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. 
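+
+ Example (a minimal sketch; it assumes the publicly hosted `gpt2` checkpoint can be downloaded, GPT-2-style
+ decoder-only models being one family that returns this output class). It shows how `past_key_values` from one
+ forward pass can be fed back to avoid re-encoding the prefix at the next decoding step, and that the output can
+ be read by attribute or by key:
+
+ >>> import torch
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
+ >>> model = AutoModelForCausalLM.from_pretrained("gpt2")
+
+ >>> inputs = tokenizer("Hello, my dog is", return_tensors="pt")
+ >>> outputs = model(**inputs, use_cache=True)
+
+ >>> outputs.logits.shape == outputs["logits"].shape  # attribute and key access are equivalent
+ True
+ >>> next_token = outputs.logits[:, -1, :].argmax(dim=-1, keepdim=True)
+
+ >>> # reuse the cached key/value states instead of re-running the whole prefix
+ >>> next_outputs = model(input_ids=next_token, past_key_values=outputs.past_key_values, use_cache=True)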
+ """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class SequenceClassifierOutputWithPast(ModelOutput): + """ + Base class for outputs of sentence classification models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) + + Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class MaskedLMOutput(ModelOutput): + """ + Base class for masked language models outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Masked language modeling (MLM) loss. + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. 
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class Seq2SeqLMOutput(ModelOutput): + """ + Base class for sequence-to-sequence language models outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss. + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. 
+ + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class Seq2SeqMoEOutput(ModelOutput): + """ + Base class for sequence-to-sequence language models outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss. + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + decoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models. 
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + encoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. + + Router logits of the encoder model, useful to compute the auxiliary loss and z_loss for Mixture of Experts + models. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + encoder_z_loss: torch.FloatTensor = None + decoder_z_loss: torch.FloatTensor = None + encoder_aux_loss: torch.FloatTensor = None + decoder_aux_loss: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class NextSentencePredictorOutput(ModelOutput): + """ + Base class for outputs of models predicting if two sentences are consecutive or not. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `next_sentence_label` is provided): + Next sequence prediction (classification) loss. + logits (`torch.FloatTensor` of shape `(batch_size, 2)`): + Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation + before SoftMax). 
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class SequenceClassifierOutput(ModelOutput): + """ + Base class for outputs of sentence classification models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class Seq2SeqSequenceClassifierOutput(ModelOutput): + """ + Base class for outputs of sequence-to-sequence sentence classification models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `label` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. 
+ + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class MultipleChoiceModelOutput(ModelOutput): + """ + Base class for outputs of multiple choice models. + + Args: + loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): + Classification loss. + logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): + *num_choices* is the second dimension of the input tensors. 
(see *input_ids* above). + + Classification scores (before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class TokenClassifierOutput(ModelOutput): + """ + Base class for outputs of token classification models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : + Classification loss. + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): + Classification scores (before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class QuestionAnsweringModelOutput(ModelOutput): + """ + Base class for outputs of question answering models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. + start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Span-start scores (before SoftMax). + end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Span-end scores (before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. 
+ + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + start_logits: torch.FloatTensor = None + end_logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class Seq2SeqQuestionAnsweringModelOutput(ModelOutput): + """ + Base class for outputs of sequence-to-sequence question answering models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. + start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Span-start scores (before SoftMax). + end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Span-end scores (before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. 
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + start_logits: torch.FloatTensor = None + end_logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class SemanticSegmenterOutput(ModelOutput): + """ + Base class for outputs of semantic segmentation models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`): + Classification scores for each pixel. + + + + The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is + to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the + original image size as post-processing. You should always check your logits shape and resize as needed. + + + + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, patch_size, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class ImageClassifierOutput(ModelOutput): + """ + Base class for outputs of image classification models. 
+ + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states + (also called feature maps) of the model at the output of each stage. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class ImageClassifierOutputWithNoAttention(ModelOutput): + """ + Base class for outputs of image classification models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also + called feature maps) of the model at the output of each stage. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class DepthEstimatorOutput(ModelOutput): + """ + Base class for outputs of depth estimation models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + predicted_depth (`torch.FloatTensor` of shape `(batch_size, height, width)`): + Predicted depth for each pixel. + + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, + sequence_length)`. 
+ + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + predicted_depth: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class ImageSuperResolutionOutput(ModelOutput): + """ + Base class for outputs of image super resolution models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Reconstruction loss. + reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Reconstructed images, possibly upscaled. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states + (also called feature maps) of the model at the output of each stage. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + reconstruction: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class Wav2Vec2BaseModelOutput(ModelOutput): + """ + Base class for models that have been trained with the Wav2Vec2 loss objective. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + extract_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, conv_dim[-1])`): + Sequence of extracted feature vectors of the last convolutional layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: torch.FloatTensor = None + extract_features: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class XVectorOutput(ModelOutput): + """ + Output type of [`Wav2Vec2ForXVector`]. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification loss. 
+ logits (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`): + Classification hidden states before AMSoftmax. + embeddings (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`): + Utterance embeddings used for vector similarity-based retrieval. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + embeddings: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BackboneOutput(ModelOutput): + """ + Base class for outputs of backbones. + + Args: + feature_maps (`tuple(torch.FloatTensor)` of shape `(batch_size, num_channels, height, width)`): + Feature maps of the stages. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, num_channels, height, width)`, + depending on the backbone. + + Hidden-states of the model at the output of each stage plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Only applicable if the backbone uses attention. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + feature_maps: Tuple[torch.FloatTensor] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class BaseModelOutputWithPoolingAndProjection(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Last layer hidden-state of the first token of the sequence (classification token) after further processing + through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns + the classification token after processing through a linear layer and a tanh activation function. The linear + layer weights are trained from the next sentence prediction (classification) objective during pretraining. 
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + projection_state (`tuple(torch.FloatTensor)`, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` of shape `(batch_size,config.project_dim)`. + + Text embeddings before the projection layer, used to mimic the last hidden state of the teacher encoder. + """ + + last_hidden_state: torch.FloatTensor = None + pooler_output: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + projection_state: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class Seq2SeqSpectrogramOutput(ModelOutput): + """ + Base class for sequence-to-sequence spectrogram outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Spectrogram generation loss. + spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`): + The predicted spectrogram. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. 
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + spectrogram: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class Seq2SeqTSModelOutput(ModelOutput): + """ + Base class for time series model's encoder outputs that also contains pre-computed hidden states that can speed up + sequential decoding. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): + Shift values of each time series' context window which is used to give the model inputs of the same + magnitude and then used to shift back to the original magnitude. + scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): + Scaling values of each time series' context window which is used to give the model inputs of the same + magnitude and then used to rescale back to the original magnitude. + static_features (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*): + Static features of each time series' in a batch which are copied to the covariates at inference time. 
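+
+ Example (an illustrative sketch with dummy tensors; the shapes follow the `loc`/`scale` description above).
+ Values produced in the normalized space can be shifted and rescaled back to the original magnitude of each
+ series:
+
+ >>> import torch
+
+ >>> batch_size, prediction_length, input_size = 4, 24, 1
+ >>> normalized_values = torch.randn(batch_size, prediction_length, input_size)
+ >>> loc = torch.zeros(batch_size, input_size)    # per-series shift
+ >>> scale = torch.ones(batch_size, input_size)   # per-series scale
+
+ >>> # unsqueeze the time dimension so loc/scale broadcast over prediction_length
+ >>> original_values = normalized_values * scale.unsqueeze(1) + loc.unsqueeze(1)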
+ """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + loc: Optional[torch.FloatTensor] = None + scale: Optional[torch.FloatTensor] = None + static_features: Optional[torch.FloatTensor] = None + + +@dataclass +class Seq2SeqTSPredictionOutput(ModelOutput): + """ + Base class for time series model's decoder outputs that also contain the loss as well as the parameters of the + chosen distribution. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when a `future_values` is provided): + Distributional loss. + params (`torch.FloatTensor` of shape `(batch_size, num_samples, num_params)`): + Parameters of the chosen distribution. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. 
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): + Shift values of each time series' context window which is used to give the model inputs of the same + magnitude and then used to shift back to the original magnitude. + scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): + Scaling values of each time series' context window which is used to give the model inputs of the same + magnitude and then used to rescale back to the original magnitude. + static_features (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*): + Static features of each time series' in a batch which are copied to the covariates at inference time. + """ + + loss: Optional[torch.FloatTensor] = None + params: Optional[Tuple[torch.FloatTensor]] = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + loc: Optional[torch.FloatTensor] = None + scale: Optional[torch.FloatTensor] = None + static_features: Optional[torch.FloatTensor] = None + + +@dataclass +class SampleTSPredictionOutput(ModelOutput): + """ + Base class for time series model's predictions outputs that contains the sampled values from the chosen + distribution. + + Args: + sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, prediction_length)` or `(batch_size, num_samples, prediction_length, input_size)`): + Sampled values from the chosen distribution. + """ + + sequences: torch.FloatTensor = None + + +@dataclass +class MaskedImageModelingOutput(ModelOutput): + """ + Base class for outputs of masked image completion / in-painting models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided): + Reconstruction loss. + reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Reconstructed / completed images. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or + when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. 
Hidden-states + (also called feature maps) of the model at the output of each stage. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when + `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + reconstruction: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + @property + def logits(self): + warnings.warn( + "logits attribute is deprecated and will be removed in version 5 of Transformers." + " Please use the reconstruction attribute to retrieve the final output instead.", + FutureWarning, + ) + return self.reconstruction diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/modeling_tf_pytorch_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/modeling_tf_pytorch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..163178929f98a4b96eef4d28174d75e6a64a406c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/modeling_tf_pytorch_utils.py @@ -0,0 +1,676 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch - TF 2.0 general utilities.""" + + +import os +import re + +import numpy + +from .utils import ( + ExplicitEnum, + expand_dims, + is_numpy_array, + is_safetensors_available, + is_torch_tensor, + logging, + reshape, + squeeze, + tensor_size, +) +from .utils import transpose as transpose_func + + +if is_safetensors_available(): + from safetensors import safe_open + + +logger = logging.get_logger(__name__) + + +class TransposeType(ExplicitEnum): + """ + Possible ... + """ + + NO = "no" + SIMPLE = "simple" + CONV1D = "conv1d" + CONV2D = "conv2d" + + +def convert_tf_weight_name_to_pt_weight_name( + tf_name, start_prefix_to_remove="", tf_weight_shape=None, name_scope=None +): + """ + Convert a TF 2.0 model variable name in a pytorch model weight name. + + Conventions for TF2.0 scopes -> PyTorch attribute names conversions: + + - '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch) + - '_._' is replaced by a new level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList) + + return tuple with: + + - pytorch model weight name + - transpose: `TransposeType` member indicating whether and how TF2.0 and PyTorch weights matrices should be + transposed with regards to each other + """ + if name_scope is not None: + if not tf_name.startswith(name_scope) and "final_logits_bias" not in tf_name: + raise ValueError( + f"Weight name {tf_name} does not start with name_scope {name_scope}. 
This is an internal error " + "in Transformers, so (unless you were doing something really evil) please open an issue to report it!" + ) + tf_name = tf_name[len(name_scope) :] + tf_name = tf_name.lstrip("/") + tf_name = tf_name.replace(":0", "") # device ids + tf_name = re.sub( + r"/[^/]*___([^/]*)/", r"/\1/", tf_name + ) # '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch) + tf_name = tf_name.replace( + "_._", "/" + ) # '_._' is replaced by a level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList) + tf_name = re.sub(r"//+", "/", tf_name) # Remove empty levels at the end + tf_name = tf_name.split("/") # Convert from TF2.0 '/' separators to PyTorch '.' separators + # Some weights have a single name without "/" such as final_logits_bias in BART + if len(tf_name) > 1: + tf_name = tf_name[1:] # Remove level zero + + tf_weight_shape = list(tf_weight_shape) + + # When should we transpose the weights + if tf_name[-1] == "kernel" and tf_weight_shape is not None and len(tf_weight_shape) == 4: + transpose = TransposeType.CONV2D + elif tf_name[-1] == "kernel" and tf_weight_shape is not None and len(tf_weight_shape) == 3: + transpose = TransposeType.CONV1D + elif bool( + tf_name[-1] in ["kernel", "pointwise_kernel", "depthwise_kernel"] + or "emb_projs" in tf_name + or "out_projs" in tf_name + ): + transpose = TransposeType.SIMPLE + else: + transpose = TransposeType.NO + + # Convert standard TF2.0 names in PyTorch names + if tf_name[-1] == "kernel" or tf_name[-1] == "embeddings" or tf_name[-1] == "gamma": + tf_name[-1] = "weight" + if tf_name[-1] == "beta": + tf_name[-1] = "bias" + + # The SeparableConv1D TF layer contains two weights that are translated to PyTorch Conv1D here + if tf_name[-1] == "pointwise_kernel" or tf_name[-1] == "depthwise_kernel": + tf_name[-1] = tf_name[-1].replace("_kernel", ".weight") + + # Remove prefix if needed + tf_name = ".".join(tf_name) + if start_prefix_to_remove: + tf_name = tf_name.replace(start_prefix_to_remove, "", 1) + + return tf_name, transpose + + +def apply_transpose(transpose: TransposeType, weight, match_shape=None, pt_to_tf=True): + """ + Apply a transpose to some weight then tries to reshape the weight to the same shape as a given shape, all in a + framework agnostic way. 
+ """ + if transpose is TransposeType.CONV2D: + # Conv2D weight: + # PT: (num_out_channel, num_in_channel, kernel[0], kernel[1]) + # -> TF: (kernel[0], kernel[1], num_in_channel, num_out_channel) + axes = (2, 3, 1, 0) if pt_to_tf else (3, 2, 0, 1) + weight = transpose_func(weight, axes=axes) + elif transpose is TransposeType.CONV1D: + # Conv1D weight: + # PT: (num_out_channel, num_in_channel, kernel) + # -> TF: (kernel, num_in_channel, num_out_channel) + weight = transpose_func(weight, axes=(2, 1, 0)) + elif transpose is TransposeType.SIMPLE: + weight = transpose_func(weight) + + if match_shape is None: + return weight + + if len(match_shape) < len(weight.shape): + weight = squeeze(weight) + elif len(match_shape) > len(weight.shape): + weight = expand_dims(weight, axis=0) + + if list(match_shape) != list(weight.shape): + try: + weight = reshape(weight, match_shape) + except AssertionError as e: + e.args += (match_shape, match_shape) + raise e + + return weight + + +##################### +# PyTorch => TF 2.0 # +##################### + + +def load_pytorch_checkpoint_in_tf2_model( + tf_model, + pytorch_checkpoint_path, + tf_inputs=None, + allow_missing_keys=False, + output_loading_info=False, + _prefix=None, + tf_to_pt_weight_rename=None, +): + """Load pytorch checkpoints in a TF 2.0 model""" + try: + import tensorflow as tf # noqa: F401 + import torch # noqa: F401 + from safetensors.torch import load_file as safe_load_file # noqa: F401 + + from .pytorch_utils import is_torch_greater_or_equal_than_1_13 # noqa: F401 + except ImportError: + logger.error( + "Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see " + "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." + ) + raise + + # Treats a single file as a collection of shards with 1 shard. 
+ if isinstance(pytorch_checkpoint_path, str): + pytorch_checkpoint_path = [pytorch_checkpoint_path] + + # Loads all shards into a single state dictionary + pt_state_dict = {} + for path in pytorch_checkpoint_path: + pt_path = os.path.abspath(path) + logger.info(f"Loading PyTorch weights from {pt_path}") + if pt_path.endswith(".safetensors"): + state_dict = safe_load_file(pt_path) + else: + weights_only_kwarg = {"weights_only": True} if is_torch_greater_or_equal_than_1_13 else {} + state_dict = torch.load(pt_path, map_location="cpu", **weights_only_kwarg) + + pt_state_dict.update(state_dict) + + logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters") + + return load_pytorch_weights_in_tf2_model( + tf_model, + pt_state_dict, + tf_inputs=tf_inputs, + allow_missing_keys=allow_missing_keys, + output_loading_info=output_loading_info, + _prefix=_prefix, + tf_to_pt_weight_rename=tf_to_pt_weight_rename, + ) + + +def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False): + """Load pytorch checkpoints in a TF 2.0 model""" + pt_state_dict = pt_model.state_dict() + + return load_pytorch_weights_in_tf2_model( + tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys + ) + + +def load_pytorch_weights_in_tf2_model( + tf_model, + pt_state_dict, + tf_inputs=None, + allow_missing_keys=False, + output_loading_info=False, + _prefix=None, + tf_to_pt_weight_rename=None, +): + """Load pytorch state_dict in a TF 2.0 model.""" + try: + import tensorflow as tf # noqa: F401 + import torch # noqa: F401 + except ImportError: + logger.error( + "Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see " + "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." + ) + raise + + # Numpy doesn't understand bfloat16, so upcast to a dtype that doesn't lose precision + pt_state_dict = { + k: v.numpy() if v.dtype != torch.bfloat16 else v.float().numpy() for k, v in pt_state_dict.items() + } + return load_pytorch_state_dict_in_tf2_model( + tf_model, + pt_state_dict, + tf_inputs=tf_inputs, + allow_missing_keys=allow_missing_keys, + output_loading_info=output_loading_info, + _prefix=_prefix, + tf_to_pt_weight_rename=tf_to_pt_weight_rename, + ) + + +def _log_key_warnings(missing_keys, unexpected_keys, mismatched_keys, class_name): + if len(unexpected_keys) > 0: + logger.warning( + "Some weights of the PyTorch model were not used when initializing the TF 2.0 model" + f" {class_name}: {unexpected_keys}\n- This IS expected if you are initializing" + f" {class_name} from a PyTorch model trained on another task or with another architecture" + " (e.g. initializing a TFBertForSequenceClassification model from a BertForPreTraining model).\n- This IS" + f" NOT expected if you are initializing {class_name} from a PyTorch model that you expect" + " to be exactly identical (e.g. initializing a TFBertForSequenceClassification model from a" + " BertForSequenceClassification model)." + ) + else: + logger.warning(f"All PyTorch model weights were used when initializing {class_name}.\n") + if len(missing_keys) > 0: + logger.warning( + f"Some weights or buffers of the TF 2.0 model {class_name} were not initialized from the" + f" PyTorch model and are newly initialized: {missing_keys}\nYou should probably TRAIN this model on a" + " down-stream task to be able to use it for predictions and inference." 
+ ) + else: + logger.warning( + f"All the weights of {class_name} were initialized from the PyTorch model.\n" + "If your task is similar to the task the model of the checkpoint was trained on, " + f"you can already use {class_name} for predictions without further training." + ) + + if len(mismatched_keys) > 0: + mismatched_warning = "\n".join( + [ + f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" + for key, shape1, shape2 in mismatched_keys + ] + ) + logger.warning( + f"Some weights of {class_name} were not initialized from the model checkpoint" + f" are newly initialized because the shapes did not" + f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able" + " to use it for predictions and inference." + ) + + +def load_pytorch_state_dict_in_tf2_model( + tf_model, + pt_state_dict, + tf_inputs=None, + allow_missing_keys=False, + output_loading_info=False, + _prefix=None, + tf_to_pt_weight_rename=None, + ignore_mismatched_sizes=False, + skip_logger_warnings=False, +): + """Load a pytorch state_dict in a TF 2.0 model. pt_state_dict can be either an actual dict or a lazy-loading + safetensors archive created with the safe_open() function.""" + import tensorflow as tf + + if tf_inputs is None: + tf_inputs = tf_model.dummy_inputs + + if _prefix is None: + _prefix = "" + if tf_inputs: + with tf.name_scope(_prefix): + tf_model(tf_inputs, training=False) # Make sure model is built + # Convert old format to new format if needed from a PyTorch state_dict + tf_keys_to_pt_keys = {} + for key in pt_state_dict.keys(): + new_key = None + if "gamma" in key: + new_key = key.replace("gamma", "weight") + if "beta" in key: + new_key = key.replace("beta", "bias") + if "running_var" in key: + new_key = key.replace("running_var", "moving_variance") + if "running_mean" in key: + new_key = key.replace("running_mean", "moving_mean") + + # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 + key_components = key.split(".") + name = None + if key_components[-3::2] == ["parametrizations", "original0"]: + name = key_components[-2] + "_g" + elif key_components[-3::2] == ["parametrizations", "original1"]: + name = key_components[-2] + "_v" + if name is not None: + key_components = key_components[:-3] + [name] + new_key = ".".join(key_components) + + if new_key is None: + new_key = key + tf_keys_to_pt_keys[new_key] = key + + # Matt: All TF models store the actual model stem in a MainLayer class, including the base model. + # In PT, the derived models (with heads) use the base model class as the stem instead, + # and there is no MainLayer class. This means that TF base classes have one + # extra layer in their weight names, corresponding to the MainLayer class. This code block compensates for that. + start_prefix_to_remove = "" + if not any(s.startswith(tf_model.base_model_prefix) for s in tf_keys_to_pt_keys.keys()): + start_prefix_to_remove = tf_model.base_model_prefix + "." 
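To make the key remapping above concrete, here is a small self-contained sketch with made-up layer names; it mirrors only the gamma/beta and batch-norm renames, not the weight-norm parametrization handling.

# Hypothetical PyTorch state_dict keys and the TF-side names they are matched under.
pt_keys = ["encoder.norm.gamma", "encoder.norm.beta", "bn.running_mean", "bn.running_var"]
renames = [("gamma", "weight"), ("beta", "bias"), ("running_mean", "moving_mean"), ("running_var", "moving_variance")]

tf_keys_to_pt_keys = {}
for key in pt_keys:
    new_key = key
    for old, new in renames:
        if old in key:
            new_key = key.replace(old, new)
    tf_keys_to_pt_keys[new_key] = key

print(tf_keys_to_pt_keys)
# {'encoder.norm.weight': 'encoder.norm.gamma', 'encoder.norm.bias': 'encoder.norm.beta',
#  'bn.moving_mean': 'bn.running_mean', 'bn.moving_variance': 'bn.running_var'}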
+ + symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights + tf_loaded_numel = 0 + all_pytorch_weights = set(tf_keys_to_pt_keys.keys()) + missing_keys = [] + mismatched_keys = [] + is_safetensor_archive = hasattr(pt_state_dict, "get_tensor") + for symbolic_weight in symbolic_weights: + sw_name = symbolic_weight.name + name, transpose = convert_tf_weight_name_to_pt_weight_name( + sw_name, + start_prefix_to_remove=start_prefix_to_remove, + tf_weight_shape=symbolic_weight.shape, + name_scope=_prefix, + ) + if tf_to_pt_weight_rename is not None: + aliases = tf_to_pt_weight_rename(name) # Is a tuple to account for possible name aliasing + for alias in aliases: # The aliases are in priority order, take the first one that matches + if alias in tf_keys_to_pt_keys: + name = alias + break + else: + # If none of the aliases match, just use the first one (it'll be reported as missing) + name = aliases[0] + + # Find associated numpy array in pytorch model state dict + if name not in tf_keys_to_pt_keys: + if allow_missing_keys: + missing_keys.append(name) + continue + elif tf_model._keys_to_ignore_on_load_missing is not None: + # authorized missing keys don't have to be loaded + if any(re.search(pat, name) is not None for pat in tf_model._keys_to_ignore_on_load_missing): + continue + raise AttributeError(f"{name} not found in PyTorch model") + state_dict_name = tf_keys_to_pt_keys[name] + if is_safetensor_archive: + array = pt_state_dict.get_tensor(state_dict_name) + else: + array = pt_state_dict[state_dict_name] + try: + array = apply_transpose(transpose, array, symbolic_weight.shape) + except tf.errors.InvalidArgumentError as e: + if not ignore_mismatched_sizes: + error_msg = str(e) + error_msg += ( + "\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method." 
+ ) + raise tf.errors.InvalidArgumentError(error_msg) + else: + mismatched_keys.append((name, array.shape, symbolic_weight.shape)) + continue + + tf_loaded_numel += tensor_size(array) + + symbolic_weight.assign(tf.cast(array, symbolic_weight.dtype)) + del array # Immediately free memory to keep peak usage as low as possible + all_pytorch_weights.discard(name) + + logger.info(f"Loaded {tf_loaded_numel:,} parameters in the TF 2.0 model.") + + unexpected_keys = list(all_pytorch_weights) + + if tf_model._keys_to_ignore_on_load_missing is not None: + for pat in tf_model._keys_to_ignore_on_load_missing: + missing_keys = [k for k in missing_keys if re.search(pat, k) is None] + if tf_model._keys_to_ignore_on_load_unexpected is not None: + for pat in tf_model._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + if not skip_logger_warnings: + _log_key_warnings(missing_keys, unexpected_keys, mismatched_keys, class_name=tf_model.__class__.__name__) + + if output_loading_info: + loading_info = { + "missing_keys": missing_keys, + "unexpected_keys": unexpected_keys, + "mismatched_keys": mismatched_keys, + } + return tf_model, loading_info + + return tf_model + + +def load_sharded_pytorch_safetensors_in_tf2_model( + tf_model, + safetensors_shards, + tf_inputs=None, + allow_missing_keys=False, + output_loading_info=False, + _prefix=None, + tf_to_pt_weight_rename=None, + ignore_mismatched_sizes=False, +): + all_loading_infos = [] + for shard in safetensors_shards: + with safe_open(shard, framework="tf") as safetensors_archive: + tf_model, loading_info = load_pytorch_state_dict_in_tf2_model( + tf_model, + safetensors_archive, + tf_inputs=tf_inputs, + allow_missing_keys=allow_missing_keys, + output_loading_info=True, + _prefix=_prefix, + tf_to_pt_weight_rename=tf_to_pt_weight_rename, + ignore_mismatched_sizes=ignore_mismatched_sizes, + skip_logger_warnings=True, # We will emit merged warnings at the end + ) + all_loading_infos.append(loading_info) + # Now we just need to merge the loading info + # Keys are missing only if they're missing in *every* shard + missing_keys = sorted(set.intersection(*[set(info["missing_keys"]) for info in all_loading_infos])) + # Keys are unexpected/mismatched if they're unexpected/mismatched in *any* shard + unexpected_keys = sum([info["unexpected_keys"] for info in all_loading_infos], []) + mismatched_keys = sum([info["mismatched_keys"] for info in all_loading_infos], []) + + _log_key_warnings(missing_keys, unexpected_keys, mismatched_keys, class_name=tf_model.__class__.__name__) + + if output_loading_info: + loading_info = { + "missing_keys": missing_keys, + "unexpected_keys": unexpected_keys, + "mismatched_keys": mismatched_keys, + } + return tf_model, loading_info + + return tf_model + + +##################### +# TF 2.0 => PyTorch # +##################### + + +def load_tf2_checkpoint_in_pytorch_model( + pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False, output_loading_info=False +): + """ + Load TF 2.0 HDF5 checkpoint in a PyTorch model We use HDF5 to easily do transfer learning (see + https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357). + """ + try: + import tensorflow as tf # noqa: F401 + import torch # noqa: F401 + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. 
Please see " + "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." + ) + raise + + import transformers + + from .modeling_tf_utils import load_tf_weights + + logger.info(f"Loading TensorFlow weights from {tf_checkpoint_path}") + + # Instantiate and load the associated TF 2.0 model + tf_model_class_name = "TF" + pt_model.__class__.__name__ # Add "TF" at the beginning + tf_model_class = getattr(transformers, tf_model_class_name) + tf_model = tf_model_class(pt_model.config) + + if tf_inputs is None: + tf_inputs = tf_model.dummy_inputs + + if tf_inputs is not None: + tf_model(tf_inputs, training=False) # Make sure model is built + + load_tf_weights(tf_model, tf_checkpoint_path) + + return load_tf2_model_in_pytorch_model( + pt_model, tf_model, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info + ) + + +def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False, output_loading_info=False): + """Load TF 2.0 model in a pytorch model""" + weights = tf_model.weights + + return load_tf2_weights_in_pytorch_model( + pt_model, weights, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info + ) + + +def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False, output_loading_info=False): + """Load TF2.0 symbolic weights in a PyTorch model""" + try: + import tensorflow as tf # noqa: F401 + import torch # noqa: F401 + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see " + "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." + ) + raise + + tf_state_dict = {tf_weight.name: tf_weight.numpy() for tf_weight in tf_weights} + return load_tf2_state_dict_in_pytorch_model( + pt_model, tf_state_dict, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info + ) + + +def load_tf2_state_dict_in_pytorch_model(pt_model, tf_state_dict, allow_missing_keys=False, output_loading_info=False): + import torch + + new_pt_params_dict = {} + current_pt_params_dict = dict(pt_model.named_parameters()) + + # Make sure we are able to load PyTorch base models as well as derived models (with heads) + # TF models always have a prefix, some of PyTorch models (base ones) don't + start_prefix_to_remove = "" + if not any(s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()): + start_prefix_to_remove = pt_model.base_model_prefix + "." 
+ + # Build a map from potential PyTorch weight names to TF 2.0 Variables + tf_weights_map = {} + for name, tf_weight in tf_state_dict.items(): + pt_name, transpose = convert_tf_weight_name_to_pt_weight_name( + name, start_prefix_to_remove=start_prefix_to_remove, tf_weight_shape=tf_weight.shape + ) + tf_weights_map[pt_name] = (tf_weight, transpose) + + all_tf_weights = set(tf_weights_map.keys()) + loaded_pt_weights_data_ptr = {} + missing_keys_pt = [] + for pt_weight_name, pt_weight in current_pt_params_dict.items(): + # Handle PyTorch shared weight ()not duplicated in TF 2.0 + if pt_weight.data_ptr() in loaded_pt_weights_data_ptr: + new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()] + continue + + pt_weight_name_to_check = pt_weight_name + # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 + key_components = pt_weight_name.split(".") + name = None + if key_components[-3::2] == ["parametrizations", "original0"]: + name = key_components[-2] + "_g" + elif key_components[-3::2] == ["parametrizations", "original1"]: + name = key_components[-2] + "_v" + if name is not None: + key_components = key_components[:-3] + [name] + pt_weight_name_to_check = ".".join(key_components) + + # Find associated numpy array in pytorch model state dict + if pt_weight_name_to_check not in tf_weights_map: + if allow_missing_keys: + missing_keys_pt.append(pt_weight_name) + continue + + raise AttributeError(f"{pt_weight_name} not found in TF 2.0 model") + + array, transpose = tf_weights_map[pt_weight_name_to_check] + + array = apply_transpose(transpose, array, pt_weight.shape, pt_to_tf=False) + + if numpy.isscalar(array): + array = numpy.array(array) + if not is_torch_tensor(array) and not is_numpy_array(array): + array = array.numpy() + if is_numpy_array(array): + # Convert to torch tensor + array = torch.from_numpy(array) + + new_pt_params_dict[pt_weight_name] = array + loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = array + all_tf_weights.discard(pt_weight_name) + + missing_keys, unexpected_keys = pt_model.load_state_dict(new_pt_params_dict, strict=False) + missing_keys += missing_keys_pt + + # Some models may have keys that are not in the state by design, removing them before needlessly warning + # the user. + if pt_model._keys_to_ignore_on_load_missing is not None: + for pat in pt_model._keys_to_ignore_on_load_missing: + missing_keys = [k for k in missing_keys if re.search(pat, k) is None] + + if pt_model._keys_to_ignore_on_load_unexpected is not None: + for pat in pt_model._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + + if len(unexpected_keys) > 0: + logger.warning( + "Some weights of the TF 2.0 model were not used when initializing the PyTorch model" + f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" + f" {pt_model.__class__.__name__} from a TF 2.0 model trained on another task or with another architecture" + " (e.g. initializing a BertForSequenceClassification model from a TFBertForPreTraining model).\n- This IS" + f" NOT expected if you are initializing {pt_model.__class__.__name__} from a TF 2.0 model that you expect" + " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a" + " TFBertForSequenceClassification model)." 
+ ) + else: + logger.warning(f"All TF 2.0 model weights were used when initializing {pt_model.__class__.__name__}.\n") + if len(missing_keys) > 0: + logger.warning( + f"Some weights of {pt_model.__class__.__name__} were not initialized from the TF 2.0 model and are newly" + f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to" + " use it for predictions and inference." + ) + else: + logger.warning( + f"All the weights of {pt_model.__class__.__name__} were initialized from the TF 2.0 model.\n" + "If your task is similar to the task the model of the checkpoint was trained on, " + f"you can already use {pt_model.__class__.__name__} for predictions without further training." + ) + + logger.info(f"Weights or buffers not loaded from TF 2.0 model: {all_tf_weights}") + + if output_loading_info: + loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys} + return pt_model, loading_info + + return pt_model diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/modeling_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/modeling_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e4fcd2ebc11e6eec8b291200de7fe8540913b2ad --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/modeling_utils.py @@ -0,0 +1,4849 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import collections +import copy +import functools +import gc +import importlib.metadata +import inspect +import itertools +import json +import os +import re +import shutil +import tempfile +import warnings +from contextlib import contextmanager +from dataclasses import dataclass +from functools import partial, wraps +from threading import Thread +from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union +from zipfile import is_zipfile + +import torch +from packaging import version +from torch import Tensor, nn +from torch.nn import CrossEntropyLoss, Identity +from torch.utils.checkpoint import checkpoint + +from .activations import get_activation +from .configuration_utils import PretrainedConfig +from .dynamic_module_utils import custom_object_save +from .generation import GenerationConfig, GenerationMixin +from .integrations import PeftAdapterMixin, deepspeed_config, is_deepspeed_zero3_enabled +from .pytorch_utils import ( # noqa: F401 + Conv1D, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + id_tensor_storage, + is_torch_greater_or_equal_than_1_13, + prune_conv1d_layer, + prune_layer, + prune_linear_layer, +) +from .quantizers import AutoHfQuantizer, HfQuantizer +from .quantizers.quantizers_utils import get_module_from_name +from .safetensors_conversion import auto_conversion +from .utils import ( + ADAPTER_SAFE_WEIGHTS_NAME, + ADAPTER_WEIGHTS_NAME, + CONFIG_NAME, + DUMMY_INPUTS, + FLAX_WEIGHTS_NAME, + SAFE_WEIGHTS_INDEX_NAME, + SAFE_WEIGHTS_NAME, + TF2_WEIGHTS_NAME, + TF_WEIGHTS_NAME, + WEIGHTS_INDEX_NAME, + WEIGHTS_NAME, + ContextManagers, + ModelOutput, + PushToHubMixin, + cached_file, + copy_func, + download_url, + extract_commit_hash, + has_file, + is_accelerate_available, + is_bitsandbytes_available, + is_flash_attn_2_available, + is_offline_mode, + is_optimum_available, + is_peft_available, + is_remote_url, + is_safetensors_available, + is_torch_sdpa_available, + is_torch_xla_available, + logging, + replace_return_docstrings, + strtobool, +) +from .utils.hub import convert_file_size_to_int, create_and_tag_model_card, get_checkpoint_shard_files +from .utils.import_utils import ( + ENV_VARS_TRUE_VALUES, + is_sagemaker_mp_enabled, + is_torch_fx_proxy, + is_torchdynamo_compiling, +) +from .utils.quantization_config import BitsAndBytesConfig, QuantizationMethod + + +XLA_USE_BF16 = os.environ.get("XLA_USE_BF16", "0").upper() +XLA_DOWNCAST_BF16 = os.environ.get("XLA_DOWNCAST_BF16", "0").upper() + +if is_accelerate_available(): + from accelerate import dispatch_model, infer_auto_device_map, init_empty_weights + from accelerate.hooks import add_hook_to_module + from accelerate.utils import ( + check_tied_parameters_on_same_device, + find_tied_parameters, + get_balanced_memory, + get_max_memory, + load_offloaded_weights, + offload_weight, + save_offload_index, + set_module_tensor_to_device, + ) + +if is_safetensors_available(): + from safetensors import safe_open + from safetensors.torch import load_file as safe_load_file + from safetensors.torch import save_file as safe_save_file + +logger = logging.get_logger(__name__) + + +_init_weights = True + + +def is_fsdp_enabled(): + return ( + torch.distributed.is_available() + and torch.distributed.is_initialized() + and strtobool(os.environ.get("ACCELERATE_USE_FSDP", "False")) == 1 + and strtobool(os.environ.get("FSDP_CPU_RAM_EFFICIENT_LOADING", "False")) == 1 + ) + + +def is_local_dist_rank_0(): + return ( + torch.distributed.is_available() + and torch.distributed.is_initialized() + and 
int(os.environ.get("LOCAL_RANK", -1)) == 0 + ) + + +if is_sagemaker_mp_enabled(): + import smdistributed.modelparallel.torch as smp + from smdistributed.modelparallel import __version__ as SMP_VERSION + + IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10") +else: + IS_SAGEMAKER_MP_POST_1_10 = False + +if is_peft_available(): + from .utils import find_adapter_config_file + +TORCH_INIT_FUNCTIONS = { + "uniform_": nn.init.uniform_, + "normal_": nn.init.normal_, + "trunc_normal_": nn.init.trunc_normal_, + "constant_": nn.init.constant_, + "xavier_uniform_": nn.init.xavier_uniform_, + "xavier_normal_": nn.init.xavier_normal_, + "kaiming_uniform_": nn.init.kaiming_uniform_, + "kaiming_normal_": nn.init.kaiming_normal_, + "uniform": nn.init.uniform, + "normal": nn.init.normal, + "xavier_uniform": nn.init.xavier_uniform, + "xavier_normal": nn.init.xavier_normal, + "kaiming_uniform": nn.init.kaiming_uniform, + "kaiming_normal": nn.init.kaiming_normal, +} + + +@contextmanager +def no_init_weights(_enable=True): + """ + Context manager to globally disable weight initialization to speed up loading large models. + + TODO(Patrick): Delete safety argument `_enable=True` at next major version. . + """ + global _init_weights + old_init_weights = _init_weights + + if _enable: + _init_weights = False + + def _skip_init(*args, **kwargs): + pass + + # # Save the original initialization functions + for name, init_func in TORCH_INIT_FUNCTIONS.items(): + setattr(torch.nn.init, name, _skip_init) + try: + yield + finally: + _init_weights = old_init_weights + if _enable: + # # Restore the original initialization functions + for name, init_func in TORCH_INIT_FUNCTIONS.items(): + setattr(torch.nn.init, name, init_func) + + +def get_parameter_device(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): + try: + return next(parameter.parameters()).device + except StopIteration: + # For nn.DataParallel compatibility in PyTorch 1.5 + + def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: + tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] + return tuples + + gen = parameter._named_members(get_members_fn=find_tensor_attributes) + first_tuple = next(gen) + return first_tuple[1].device + + +def get_first_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): + """ + Returns the first parameter dtype (can be non-floating) or asserts if none were found. + """ + try: + return next(parameter.parameters()).dtype + except StopIteration: + # For nn.DataParallel compatibility in PyTorch > 1.5 + + def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: + tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] + return tuples + + gen = parameter._named_members(get_members_fn=find_tensor_attributes) + first_tuple = next(gen) + return first_tuple[1].dtype + + +def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): + """ + Returns the first found floating dtype in parameters if there is one, otherwise returns the last dtype it found. 
+ """ + last_dtype = None + for t in parameter.parameters(): + last_dtype = t.dtype + if t.is_floating_point(): + # Adding fix for https://github.com/pytorch/xla/issues/4152 + # Fixes issue where the model code passes a value that is out of range for XLA_USE_BF16=1 + # and XLA_DOWNCAST_BF16=1 so the conversion would cast it to -inf + # NOTE: `is_torch_xla_available()` is checked last as it induces a graph break in torch dynamo + if XLA_USE_BF16 in ENV_VARS_TRUE_VALUES and is_torch_xla_available(): + return torch.bfloat16 + if XLA_DOWNCAST_BF16 in ENV_VARS_TRUE_VALUES and is_torch_xla_available(): + if t.dtype == torch.float: + return torch.bfloat16 + if t.dtype == torch.double: + return torch.float32 + return t.dtype + + if last_dtype is not None: + # if no floating dtype was found return whatever the first dtype is + return last_dtype + + # For nn.DataParallel compatibility in PyTorch > 1.5 + def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: + tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] + return tuples + + gen = parameter._named_members(get_members_fn=find_tensor_attributes) + last_tuple = None + for tuple in gen: + last_tuple = tuple + if tuple[1].is_floating_point(): + return tuple[1].dtype + + if last_tuple is not None: + # fallback to the last dtype + return last_tuple[1].dtype + + # fallback to buffer dtype + for t in parameter.buffers(): + last_dtype = t.dtype + if t.is_floating_point(): + return t.dtype + return last_dtype + + +def get_state_dict_float_dtype(state_dict): + """ + Returns the first found floating dtype in `state_dict` or asserts if none were found. + """ + for t in state_dict.values(): + if t.is_floating_point(): + return t.dtype + + raise ValueError("couldn't find any floating point dtypes in state_dict") + + +def get_state_dict_dtype(state_dict): + """ + Returns the first found floating dtype in `state_dict` if there is one, otherwise returns the first dtype. + """ + for t in state_dict.values(): + if t.is_floating_point(): + return t.dtype + + # if no floating dtype was found return whatever the first dtype is + else: + return next(state_dict.values()).dtype + + +def dtype_byte_size(dtype): + """ + Returns the size (in bytes) occupied by one parameter of type `dtype`. + + Example: + + ```py + >>> dtype_byte_size(torch.float32) + 4 + ``` + """ + if dtype == torch.bool: + return 1 / 8 + bit_search = re.search(r"[^\d](\d+)$", str(dtype)) + if bit_search is None: + raise ValueError(f"`dtype` is not a valid dtype: {dtype}.") + bit_size = int(bit_search.groups()[0]) + return bit_size // 8 + + +def shard_checkpoint( + state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB", weights_name: str = WEIGHTS_NAME +): + """ + Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a + given size. + + The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no + optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the + limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], + [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. + + + + If one of the model's weight is bigger than `max_shard_size`, it will end up in its own sub-checkpoint which will + have a size greater than `max_shard_size`. 
+ + + + Args: + state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save. + max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): + The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit + (like `"5MB"`). + weights_name (`str`, *optional*, defaults to `"pytorch_model.bin"`): + The name of the model save file. + """ + max_shard_size = convert_file_size_to_int(max_shard_size) + + sharded_state_dicts = [{}] + last_block_size = 0 + total_size = 0 + storage_id_to_block = {} + + for key, weight in state_dict.items(): + # when bnb serialization is used the weights in the state dict can be strings + # check: https://github.com/huggingface/transformers/pull/24416 for more details + if isinstance(weight, str): + continue + else: + storage_id = id_tensor_storage(weight) + + # If a `weight` shares the same underlying storage as another tensor, we put `weight` in the same `block` + if storage_id in storage_id_to_block: + block_id = storage_id_to_block[storage_id] + sharded_state_dicts[block_id][key] = weight + continue + + weight_size = weight.numel() * dtype_byte_size(weight.dtype) + + # If this weight is going to tip up over the maximal size, we split, but only if we have put at least one + # weight in the current shard. + if last_block_size + weight_size > max_shard_size and len(sharded_state_dicts[-1]) > 0: + sharded_state_dicts.append({}) + last_block_size = 0 + + sharded_state_dicts[-1][key] = weight + last_block_size += weight_size + total_size += weight_size + storage_id_to_block[storage_id] = len(sharded_state_dicts) - 1 + + # If we only have one shard, we return it + if len(sharded_state_dicts) == 1: + return {weights_name: sharded_state_dicts[0]}, None + + # Otherwise, let's build the index + weight_map = {} + shards = {} + for idx, shard in enumerate(sharded_state_dicts): + shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin") + shard_file = shard_file.replace( + ".safetensors", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors" + ) + shards[shard_file] = shard + for key in shard.keys(): + weight_map[key] = shard_file + + # Add the metadata + metadata = {"total_size": total_size} + index = {"metadata": metadata, "weight_map": weight_map} + return shards, index + + +def load_sharded_checkpoint(model, folder, strict=True, prefer_safe=True): + """ + This is the same as + [`torch.nn.Module.load_state_dict`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict) + but for a sharded checkpoint. + + This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being + loaded in the model. + + Args: + model (`torch.nn.Module`): The model in which to load the checkpoint. + folder (`str` or `os.PathLike`): A path to a folder containing the sharded checkpoint. + strict (`bool`, *optional`, defaults to `True`): + Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint. + prefer_safe (`bool`, *optional*, defaults to `False`) + If both safetensors and PyTorch save files are present in checkpoint and `prefer_safe` is True, the + safetensors files will be loaded. Otherwise, PyTorch files are always loaded when possible. 
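To make the greedy behaviour described in the shard_checkpoint docstring above concrete, a small self-contained sketch reproducing its 6/6/2/6/2/2 GB example (the sizes are illustrative numbers, not real tensors):

# Greedy grouping: start a new shard only when adding the next weight would exceed the limit.
sizes, max_shard = [6, 6, 2, 6, 2, 2], 10  # sizes in GB
shards, current = [[]], 0
for size in sizes:
    if current + size > max_shard and shards[-1]:
        shards.append([])
        current = 0
    shards[-1].append(size)
    current += size
print(shards)  # [[6], [6, 2], [6, 2, 2]]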
+ + Returns: + `NamedTuple`: A named tuple with `missing_keys` and `unexpected_keys` fields + - `missing_keys` is a list of str containing the missing keys + - `unexpected_keys` is a list of str containing the unexpected keys + """ + # Load the index + index_file = os.path.join(folder, WEIGHTS_INDEX_NAME) + safe_index_file = os.path.join(folder, SAFE_WEIGHTS_INDEX_NAME) + + index_present = os.path.isfile(index_file) + safe_index_present = os.path.isfile(safe_index_file) + + if not index_present and not (safe_index_present and is_safetensors_available()): + filenames = ( + (WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_INDEX_NAME) if is_safetensors_available() else (WEIGHTS_INDEX_NAME,) + ) + raise ValueError(f"Can't find a checkpoint index ({' or '.join(filenames)}) in {folder}.") + + load_safe = False + if safe_index_present: + if prefer_safe: + if is_safetensors_available(): + load_safe = True # load safe due to preference + else: + logger.warning( + f"Cannot load sharded checkpoint at {folder} safely since safetensors is not installed!" + ) + elif not index_present: + load_safe = True # load safe since we have no other choice + + load_index = safe_index_file if load_safe else index_file + + with open(load_index, "r", encoding="utf-8") as f: + index = json.load(f) + + shard_files = list(set(index["weight_map"].values())) + + # If strict=True, error before loading any of the state dicts. + loaded_keys = index["weight_map"].keys() + model_keys = model.state_dict().keys() + missing_keys = [key for key in model_keys if key not in loaded_keys] + unexpected_keys = [key for key in loaded_keys if key not in model_keys] + if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0): + error_message = f"Error(s) in loading state_dict for {model.__class__.__name__}" + if len(missing_keys) > 0: + str_missing_keys = ",".join([f'"{k}"' for k in missing_keys]) + error_message += f"\nMissing key(s): {str_missing_keys}." + if len(unexpected_keys) > 0: + str_unexpected_keys = ",".join([f'"{k}"' for k in unexpected_keys]) + error_message += f"\nMissing key(s): {str_unexpected_keys}." + raise RuntimeError(error_message) + + weights_only_kwarg = {"weights_only": True} if is_torch_greater_or_equal_than_1_13 else {} + loader = safe_load_file if load_safe else partial(torch.load, map_location="cpu", **weights_only_kwarg) + + for shard_file in shard_files: + state_dict = loader(os.path.join(folder, shard_file)) + model.load_state_dict(state_dict, strict=False) + + # Make sure memory is freed before we load the next state dict. + del state_dict + gc.collect() + + # Return the same thing as PyTorch load_state_dict function. + return torch.nn.modules.module._IncompatibleKeys(missing_keys, unexpected_keys) + + +def load_state_dict(checkpoint_file: Union[str, os.PathLike], is_quantized: bool = False): + """ + Reads a PyTorch checkpoint file, returning properly formatted errors if they arise. + """ + if checkpoint_file.endswith(".safetensors") and is_safetensors_available(): + # Check format of the archive + with safe_open(checkpoint_file, framework="pt") as f: + metadata = f.metadata() + if metadata.get("format") not in ["pt", "tf", "flax", "mlx"]: + raise OSError( + f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure " + "you save your model with the `save_pretrained` method." 
+ ) + return safe_load_file(checkpoint_file) + try: + if ( + (is_deepspeed_zero3_enabled() and torch.distributed.is_initialized() and torch.distributed.get_rank() > 0) + or (is_fsdp_enabled() and not is_local_dist_rank_0()) + ) and not is_quantized: + map_location = "meta" + else: + map_location = "cpu" + extra_args = {} + # mmap can only be used with files serialized with zipfile-based format. + if ( + isinstance(checkpoint_file, str) + and map_location != "meta" + and version.parse(torch.__version__) >= version.parse("2.1.0") + and is_zipfile(checkpoint_file) + ): + extra_args = {"mmap": True} + weights_only_kwarg = {"weights_only": True} if is_torch_greater_or_equal_than_1_13 else {} + return torch.load( + checkpoint_file, + map_location=map_location, + **weights_only_kwarg, + **extra_args, + ) + except Exception as e: + try: + with open(checkpoint_file) as f: + if f.read(7) == "version": + raise OSError( + "You seem to have cloned a repository without having git-lfs installed. Please install " + "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder " + "you cloned." + ) + else: + raise ValueError( + f"Unable to locate the file {checkpoint_file} which is necessary to load this pretrained " + "model. Make sure you have saved the model properly." + ) from e + except (UnicodeDecodeError, ValueError): + raise OSError( + f"Unable to load weights from pytorch checkpoint file for '{checkpoint_file}' " + f"at '{checkpoint_file}'. " + "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True." + ) + + +def set_initialized_submodules(model, state_dict_keys): + """ + Sets the `_is_hf_initialized` flag in all submodules of a given model when all its weights are in the loaded state + dict. + """ + not_initialized_submodules = {} + for module_name, module in model.named_modules(): + loaded_keys = {k.replace(f"{module_name}.", "") for k in state_dict_keys if k.startswith(f"{module_name}.")} + if loaded_keys.issuperset(module.state_dict()): + module._is_hf_initialized = True + else: + not_initialized_submodules[module_name] = module + return not_initialized_submodules + + +def _end_ptr(tensor: torch.Tensor) -> int: + # extract the end of the pointer if the tensor is a slice of a bigger tensor + if tensor.nelement(): + stop = tensor.view(-1)[-1].data_ptr() + tensor.element_size() + else: + stop = tensor.data_ptr() + return stop + + +def _get_tied_weight_keys(module: nn.Module, prefix=""): + tied_weight_keys = [] + if getattr(module, "_tied_weights_keys", None) is not None: + names = [f"{prefix}.{k}" if prefix else k for k in module._tied_weights_keys] + tied_weight_keys.extend(names) + if getattr(module, "_dynamic_tied_weights_keys", None) is not None: + names = [f"{prefix}.{k}" if prefix else k for k in module._dynamic_tied_weights_keys] + tied_weight_keys.extend(names) + for name, submodule in module.named_children(): + local_prefix = f"{prefix}.{name}" if prefix else name + tied_weight_keys.extend(_get_tied_weight_keys(submodule, prefix=local_prefix)) + return tied_weight_keys + + +def _find_disjoint(tensors: List[Set[str]], state_dict: Dict[str, torch.Tensor]) -> Tuple[List[Set[str]], List[str]]: + filtered_tensors = [] + for shared in tensors: + if len(shared) < 2: + filtered_tensors.append(shared) + continue + + areas = [] + for name in shared: + tensor = state_dict[name] + areas.append((tensor.data_ptr(), _end_ptr(tensor), name)) + areas.sort() + + _, last_stop, last_name = areas[0] + filtered_tensors.append({last_name}) + for start, stop, name 
in areas[1:]: + if start >= last_stop: + filtered_tensors.append({name}) + else: + filtered_tensors[-1].add(name) + last_stop = stop + disjoint_tensors = [] + shared_tensors = [] + for tensors in filtered_tensors: + if len(tensors) == 1: + disjoint_tensors.append(tensors.pop()) + else: + shared_tensors.append(tensors) + return shared_tensors, disjoint_tensors + + +def _find_identical(tensors: List[Set[str]], state_dict: Dict[str, torch.Tensor]) -> Tuple[List[Set[str]], Set[str]]: + shared_tensors = [] + identical = [] + for shared in tensors: + if len(shared) < 2: + continue + + areas = collections.defaultdict(set) + for name in shared: + tensor = state_dict[name] + area = (tensor.device, tensor.data_ptr(), _end_ptr(tensor)) + areas[area].add(name) + if len(areas) == 1: + identical.append(shared) + else: + shared_tensors.append(shared) + return shared_tensors, identical + + +def _load_state_dict_into_model(model_to_load, state_dict, start_prefix): + # Convert old format to new format if needed from a PyTorch state_dict + old_keys = [] + new_keys = [] + for key in state_dict.keys(): + new_key = None + if "gamma" in key: + new_key = key.replace("gamma", "weight") + if "beta" in key: + new_key = key.replace("beta", "bias") + if new_key: + old_keys.append(key) + new_keys.append(new_key) + for old_key, new_key in zip(old_keys, new_keys): + state_dict[new_key] = state_dict.pop(old_key) + + # copy state_dict so _load_from_state_dict can modify it + metadata = getattr(state_dict, "_metadata", None) + state_dict = state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + error_msgs = [] + + # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants + # so we need to apply the function recursively. + def load(module: nn.Module, state_dict, prefix=""): + local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) + args = (state_dict, prefix, local_metadata, True, [], [], error_msgs) + # Parameters of module and children will start with prefix. We can exit early if there are none in this + # state_dict + if len([key for key in state_dict if key.startswith(prefix)]) > 0: + if is_deepspeed_zero3_enabled(): + import deepspeed + + # In sharded models, each shard has only part of the full state_dict, so only gather + # parameters that are in the current state_dict. + named_parameters = dict(module.named_parameters(prefix=prefix[:-1], recurse=False)) + params_to_gather = [named_parameters[k] for k in state_dict.keys() if k in named_parameters] + if len(params_to_gather) > 0: + # because zero3 puts placeholders in model params, this context + # manager gathers (unpartitions) the params of the current layer, then loads from + # the state dict and then re-partitions them again + with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=0): + if torch.distributed.get_rank() == 0: + module._load_from_state_dict(*args) + else: + module._load_from_state_dict(*args) + + for name, child in module._modules.items(): + if child is not None: + load(child, state_dict, prefix + name + ".") + + load(model_to_load, state_dict, prefix=start_prefix) + # Delete `state_dict` so it could be collected by GC earlier. Note that `state_dict` is a copy of the argument, so + # it's safe to delete it. + del state_dict + + return error_msgs + + +def find_submodule_and_param_name(model, long_key, start_prefix): + """ + A helper util to find the last sub-module and the param/buffer name. 
If `start_prefix` is supplied it'll be removed + from the start of the key + """ + + if len(start_prefix) > 0 and long_key.startswith(start_prefix): + long_key = ".".join(long_key.split(".")[1:]) + + split_key = long_key.split(".") + submodule = model + while len(split_key) > 1: + if hasattr(submodule, split_key[0]): + submodule = getattr(submodule, split_key[0]) + del split_key[0] + else: + submodule = None + break + if submodule == model: + submodule = None + return submodule, split_key[0] + + +def _move_model_to_meta(model, loaded_state_dict_keys, start_prefix): + """ + Moves `loaded_state_dict_keys` in model to meta device which frees up the memory taken by those params. + + `start_prefix` is used for models which insert their name into model keys, e.g. `bert` in + `bert.pooler.dense.weight` + + """ + + # dematerialize param storage for keys that are going to be replaced by state_dict, by + # putting those on the meta device + for k in loaded_state_dict_keys: + submodule, param_name = find_submodule_and_param_name(model, k, start_prefix) + if submodule is not None: + # selectively switch to the meta device only those params/buffers that will + # be next replaced from state_dict. This a complex way to do p.to_("meta") + # since we have no in-place to_ for tensors. + new_val = getattr(submodule, param_name) + if isinstance(new_val, torch.nn.Parameter): + # isinstance returns False for Params on meta device, so switch after the check + new_val = torch.nn.Parameter(new_val.to("meta")) + else: + new_val = new_val.to("meta") + setattr(submodule, param_name, new_val) + + +def _load_state_dict_into_meta_model( + model, + state_dict, + loaded_state_dict_keys, # left for now but could be removed, see below + start_prefix, + expected_keys, + device_map=None, + offload_folder=None, + offload_index=None, + state_dict_folder=None, + state_dict_index=None, + dtype=None, + hf_quantizer=None, + is_safetensors=False, + keep_in_fp32_modules=None, + unexpected_keys=None, # passing `unexpected` for cleanup from quantization items +): + """ + This is somewhat similar to `_load_state_dict_into_model`, but deals with a model that has some or all of its + params on a `meta` device. It replaces the model params with the data from the `state_dict`, while moving the + params back to the normal device, but only for `loaded_state_dict_keys`. + + `start_prefix` is used for models which insert their name into model keys, e.g. `bert` in + `bert.pooler.dense.weight` + + """ + + # XXX: remaining features to implement to be fully compatible with _load_state_dict_into_model + # - deepspeed zero 3 support + # - need to copy metadata if any - see _load_state_dict_into_model + # - handling error_msgs - mimicking the error handling in module._load_from_state_dict() + # - Is there a situation where some keys aren't in `loaded_state_dict_keys` and in which case + # they won't get loaded. + + error_msgs = [] + + old_keys = [] + new_keys = [] + is_quantized = hf_quantizer is not None + for key in state_dict.keys(): + new_key = None + if "gamma" in key: + new_key = key.replace("gamma", "weight") + if "beta" in key: + new_key = key.replace("beta", "bias") + if new_key: + old_keys.append(key) + new_keys.append(new_key) + for old_key, new_key in zip(old_keys, new_keys): + state_dict[new_key] = state_dict.pop(old_key) + + for param_name, param in state_dict.items(): + # First part of the test is always true as load_state_dict_keys always contains state_dict keys. 
+ if param_name not in loaded_state_dict_keys or param_name not in expected_keys: + continue + + if param_name.startswith(start_prefix): + param_name = param_name[len(start_prefix) :] + + module_name = param_name + set_module_kwargs = {} + + # We convert floating dtypes to the `dtype` passed. We want to keep the buffers/params + # in int/uint/bool and not cast them. + if dtype is not None and torch.is_floating_point(param): + if ( + keep_in_fp32_modules is not None + and any( + module_to_keep_in_fp32 in param_name.split(".") for module_to_keep_in_fp32 in keep_in_fp32_modules + ) + and dtype == torch.float16 + ): + param = param.to(torch.float32) + + # For backward compatibility with older versions of `accelerate` + # TODO: @sgugger replace this check with version check at the next `accelerate` release + if "dtype" in list(inspect.signature(set_module_tensor_to_device).parameters): + set_module_kwargs["dtype"] = torch.float32 + else: + param = param.to(dtype) + + # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model, and which + # uses `param.copy_(input_param)` that preserves the contiguity of the parameter in the model. + # Reference: https://github.com/pytorch/pytorch/blob/db79ceb110f6646523019a59bbd7b838f43d4a86/torch/nn/modules/module.py#L2040C29-L2040C29 + old_param = model + splits = param_name.split(".") + for split in splits: + old_param = getattr(old_param, split) + if old_param is None: + break + + if old_param is not None: + if dtype is None: + param = param.to(old_param.dtype) + + if old_param.is_contiguous(): + param = param.contiguous() + + set_module_kwargs["value"] = param + + if device_map is None: + param_device = "cpu" + else: + # find next higher level module that is defined in device_map: + # bert.lm_head.weight -> bert.lm_head -> bert -> '' + while len(module_name) > 0 and module_name not in device_map: + module_name = ".".join(module_name.split(".")[:-1]) + if module_name == "" and "" not in device_map: + # TODO: group all errors and raise at the end. + raise ValueError(f"{param_name} doesn't have any device set.") + param_device = device_map[module_name] + + if param_device == "disk": + if not is_safetensors: + offload_index = offload_weight(param, param_name, offload_folder, offload_index) + elif param_device == "cpu" and state_dict_index is not None: + state_dict_index = offload_weight(param, param_name, state_dict_folder, state_dict_index) + elif ( + not is_quantized + or (not hf_quantizer.requires_parameters_quantization) + or ( + not hf_quantizer.check_quantized_param( + model, param, param_name, state_dict, param_device=param_device, device_map=device_map + ) + ) + ): + # For backward compatibility with older versions of `accelerate` and for non-quantized params + set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs) + else: + hf_quantizer.create_quantized_param(model, param, param_name, param_device, state_dict, unexpected_keys) + # For quantized modules with FSDP/DeepSpeed Stage 3, we need to quantize the parameter on the GPU + # and then cast it to CPU to avoid excessive memory usage on each GPU + # in comparison to the sharded model across GPUs. 
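+                # (the branch below re-wraps the freshly quantized parameter with its data moved to CPU,
+                #  keeping the original parameter class and its attributes)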
+ if is_fsdp_enabled() or is_deepspeed_zero3_enabled(): + module, tensor_name = get_module_from_name(model, param_name) + value = getattr(module, tensor_name) + value = type(value)(value.data.to("cpu"), **value.__dict__) + setattr(module, tensor_name, value) + # TODO: consider removing used param_parts from state_dict before return + + return error_msgs, offload_index, state_dict_index + + +def _add_variant(weights_name: str, variant: Optional[str] = None) -> str: + if variant is not None: + splits = weights_name.split(".") + splits = splits[:-1] + [variant] + splits[-1:] + weights_name = ".".join(splits) + + return weights_name + + +class ModuleUtilsMixin: + """ + A few utilities for `torch.nn.Modules`, to be used as a mixin. + """ + + @staticmethod + def _hook_rss_memory_pre_forward(module, *args, **kwargs): + try: + import psutil + except ImportError: + raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") + + process = psutil.Process(os.getpid()) + mem = process.memory_info() + module.mem_rss_pre_forward = mem.rss + return None + + @staticmethod + def _hook_rss_memory_post_forward(module, *args, **kwargs): + try: + import psutil + except ImportError: + raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") + + process = psutil.Process(os.getpid()) + mem = process.memory_info() + module.mem_rss_post_forward = mem.rss + mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward + module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0) + return None + + def add_memory_hooks(self): + """ + Add a memory hook before and after each sub-module forward pass to record increase in memory consumption. + + Increase in memory consumption is stored in a `mem_rss_diff` attribute for each module and can be reset to zero + with `model.reset_memory_hooks_state()`. + """ + for module in self.modules(): + module.register_forward_pre_hook(self._hook_rss_memory_pre_forward) + module.register_forward_hook(self._hook_rss_memory_post_forward) + self.reset_memory_hooks_state() + + def reset_memory_hooks_state(self): + """ + Reset the `mem_rss_diff` attribute of each module (see [`~modeling_utils.ModuleUtilsMixin.add_memory_hooks`]). + """ + for module in self.modules(): + module.mem_rss_diff = 0 + module.mem_rss_post_forward = 0 + module.mem_rss_pre_forward = 0 + + @property + def device(self) -> torch.device: + """ + `torch.device`: The device on which the module is (assuming that all the module parameters are on the same + device). + """ + return get_parameter_device(self) + + @property + def dtype(self) -> torch.dtype: + """ + `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). + """ + return get_parameter_dtype(self) + + def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor: + """ + Invert an attention mask (e.g., switches 0. and 1.). + + Args: + encoder_attention_mask (`torch.Tensor`): An attention mask. + + Returns: + `torch.Tensor`: The inverted attention mask. + """ + if encoder_attention_mask.dim() == 3: + encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] + if encoder_attention_mask.dim() == 2: + encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] + # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition + # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow + # /transformer/transformer_layers.py#L270 + # encoder_extended_attention_mask = (encoder_extended_attention_mask == + # encoder_extended_attention_mask.transpose(-1, -2)) + encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility + encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * torch.finfo(self.dtype).min + + return encoder_extended_attention_mask + + @staticmethod + def create_extended_attention_mask_for_decoder(input_shape, attention_mask, device=None): + if device is not None: + warnings.warn( + "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning + ) + else: + device = attention_mask.device + batch_size, seq_length = input_shape + seq_ids = torch.arange(seq_length, device=device) + causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] + # in case past_key_values are used we need to add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] + causal_mask = torch.cat( + [ + torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), + causal_mask, + ], + axis=-1, + ) + + extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] + return extended_attention_mask + + def get_extended_attention_mask( + self, attention_mask: Tensor, input_shape: Tuple[int], device: torch.device = None, dtype: torch.float = None + ) -> Tensor: + """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + + Arguments: + attention_mask (`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (`Tuple[int]`): + The shape of the input to the model. + + Returns: + `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. + """ + if dtype is None: + dtype = self.dtype + + if not (attention_mask.dim() == 2 and self.config.is_decoder): + # show warning only if it won't be shown in `create_extended_attention_mask_for_decoder` + if device is not None: + warnings.warn( + "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning + ) + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
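+        # e.g. a [batch_size, seq_length] padding mask becomes [batch_size, 1, 1, seq_length] below, while a
+        # [batch_size, from_seq_length, to_seq_length] mask becomes [batch_size, 1, from_seq_length, to_seq_length]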
+ if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder: + extended_attention_mask = ModuleUtilsMixin.create_extended_attention_mask_for_decoder( + input_shape, attention_mask, device + ) + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})" + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and the dtype's smallest value for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to(dtype=dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(dtype).min + return extended_attention_mask + + def get_head_mask( + self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False + ) -> Tensor: + """ + Prepare the head mask if needed. + + Args: + head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*): + The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard). + num_hidden_layers (`int`): + The number of hidden layers in the model. + is_attention_chunked (`bool`, *optional*, defaults to `False`): + Whether or not the attentions scores are computed by chunks or not. + + Returns: + `torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with + `[None]` for each layer. + """ + if head_mask is not None: + head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) + if is_attention_chunked is True: + head_mask = head_mask.unsqueeze(-1) + else: + head_mask = [None] * num_hidden_layers + + return head_mask + + def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): + """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]""" + if head_mask.dim() == 1: + head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) + head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1) + elif head_mask.dim() == 2: + head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer + assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}" + head_mask = head_mask.to(dtype=self.dtype) # switch to float if need + fp16 compatibility + return head_mask + + def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int: + """ + Get number of (optionally, trainable or non-embeddings) parameters in the module. + + Args: + only_trainable (`bool`, *optional*, defaults to `False`): + Whether or not to return only the number of trainable parameters + + exclude_embeddings (`bool`, *optional*, defaults to `False`): + Whether or not to return only the number of non-embeddings parameters + + Returns: + `int`: The number of parameters. 
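+
+        Example (an illustrative sketch; the checkpoint name is only a placeholder):
+
+        ```python
+        from transformers import AutoModel
+
+        model = AutoModel.from_pretrained("google-bert/bert-base-cased")
+
+        # all parameters
+        model.num_parameters()
+        # trainable, non-embedding parameters only
+        model.num_parameters(only_trainable=True, exclude_embeddings=True)
+        ```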
+ """ + + if exclude_embeddings: + embedding_param_names = [ + f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding) + ] + total_parameters = [ + parameter for name, parameter in self.named_parameters() if name not in embedding_param_names + ] + else: + total_parameters = list(self.parameters()) + + total_numel = [] + is_loaded_in_4bit = getattr(self, "is_loaded_in_4bit", False) + + if is_loaded_in_4bit: + if is_bitsandbytes_available(): + import bitsandbytes as bnb + else: + raise ValueError( + "bitsandbytes is not installed but it seems that the model has been loaded in 4bit precision, something went wrong" + " make sure to install bitsandbytes with `pip install bitsandbytes`. You also need a GPU. " + ) + + for param in total_parameters: + if param.requires_grad or not only_trainable: + # For 4bit models, we need to multiply the number of parameters by 2 as half of the parameters are + # used for the 4bit quantization (uint8 tensors are stored) + if is_loaded_in_4bit and isinstance(param, bnb.nn.Params4bit): + quant_storage = self.hf_quantizer.quantization_config.bnb_4bit_quant_storage + # For compatibility with older PT version - see: https://github.com/huggingface/peft/pull/1635 + nb_params = ( + quant_storage.itemsize if hasattr(quant_storage, "itemsize") else quant_storage.element_size() + ) + total_numel.append(param.numel() * 2 * nb_params) + else: + total_numel.append(param.numel()) + + return sum(total_numel) + + def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int: + """ + Helper function to estimate the total number of tokens from the model inputs. + + Args: + inputs (`dict`): The model inputs. + + Returns: + `int`: The total number of tokens. + """ + if not hasattr(self, "warnings_issued"): + self.warnings_issued = {} + if self.main_input_name in input_dict: + return input_dict[self.main_input_name].numel() + elif "estimate_tokens" not in self.warnings_issued: + logger.warning( + "Could not estimate the number of tokens of the input, floating-point operations will not be computed" + ) + self.warnings_issued["estimate_tokens"] = True + return 0 + + def floating_point_ops( + self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True + ) -> int: + """ + Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a + batch with this transformer model. Default approximation neglects the quadratic dependency on the number of + tokens (valid if `12 * d_model << sequence_length`) as laid out in [this + paper](https://arxiv.org/pdf/2001.08361.pdf) section 2.1. Should be overridden for transformers with parameter + re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths. + + Args: + batch_size (`int`): + The batch size for the forward pass. + + sequence_length (`int`): + The number of tokens in each line of the batch. + + exclude_embeddings (`bool`, *optional*, defaults to `True`): + Whether or not to count embedding and softmax operations. + + Returns: + `int`: The number of floating-point operations. + """ + + return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings) + + +class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin, PeftAdapterMixin): + r""" + Base class for all models. 
+ + [`PreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading, + downloading and saving models as well as a few methods common to all models to: + + - resize the input embeddings, + - prune heads in the self-attention heads. + + Class attributes (overridden by derived classes): + + - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class + for this model architecture. + - **load_tf_weights** (`Callable`) -- A python *method* for loading a TensorFlow checkpoint in a PyTorch model, + taking as arguments: + + - **model** ([`PreTrainedModel`]) -- An instance of the model on which to load the TensorFlow checkpoint. + - **config** ([`PreTrainedConfig`]) -- An instance of the configuration associated to the model. + - **path** (`str`) -- A path to the TensorFlow checkpoint. + + - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived + classes of the same architecture adding modules on top of the base model. + - **is_parallelizable** (`bool`) -- A flag indicating whether this model supports model parallelization. + - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP + models, `pixel_values` for vision models and `input_values` for speech models). + """ + + config_class = None + base_model_prefix = "" + main_input_name = "input_ids" + model_tags = None + + _auto_class = None + _no_split_modules = None + _skip_keys_device_placement = None + _keep_in_fp32_modules = None + + # a list of `re` patterns of `state_dict` keys that should be removed from the list of missing + # keys we find (keys inside the model but not in the checkpoint) and avoid unnecessary warnings. + _keys_to_ignore_on_load_missing = None + # a list of `re` patterns of `state_dict` keys that should be removed from the list of + # unexpected keys we find (keys inside the checkpoint but not the model) and avoid unnecessary + # warnings. + _keys_to_ignore_on_load_unexpected = None + # a list of `state_dict` keys to ignore when saving the model (useful for keys that aren't + # trained, but which are either deterministic or tied variables) + _keys_to_ignore_on_save = None + # a list of `state_dict` keys that are potentially tied to another key in the state_dict. + _tied_weights_keys = None + + is_parallelizable = False + supports_gradient_checkpointing = False + + # Flash Attention 2 support + _supports_flash_attn_2 = False + + # SDPA support + _supports_sdpa = False + + # Has support for a `Cache` instance as `past_key_values` + _supports_cache_class = False + + @property + def dummy_inputs(self) -> Dict[str, torch.Tensor]: + """ + `Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network. + """ + return {"input_ids": torch.tensor(DUMMY_INPUTS)} + + @property + def framework(self) -> str: + """ + :str: Identifies that this is a PyTorch model. + """ + return "pt" + + def __init__(self, config: PretrainedConfig, *inputs, **kwargs): + super().__init__() + if not isinstance(config, PretrainedConfig): + raise ValueError( + f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " + "`PretrainedConfig`. 
To create a model from a pretrained model use " + f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + # Save config and origin of the pretrained weights if given in model + config = self._autoset_attn_implementation( + config, torch_dtype=torch.get_default_dtype(), check_device_map=False + ) + self.config = config + + self.name_or_path = config.name_or_path + self.warnings_issued = {} + self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None + # Overwrite the class attribute to make it an instance attribute, so models like + # `InstructBlipForConditionalGeneration` can dynamically update it without modifying the class attribute + # when a different component (e.g. language_model) is used. + self._keep_in_fp32_modules = copy.copy(self.__class__._keep_in_fp32_modules) + + def post_init(self): + """ + A method executed at the end of each Transformer model initialization, to execute code that needs the model's + modules properly initialized (such as weight initialization). + """ + self.init_weights() + self._backward_compatibility_gradient_checkpointing() + + def _backward_compatibility_gradient_checkpointing(self): + if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False): + self.gradient_checkpointing_enable() + # Remove the attribute now that is has been consumed, so it's no saved in the config. + delattr(self.config, "gradient_checkpointing") + + def add_model_tags(self, tags: Union[List[str], str]) -> None: + r""" + Add custom tags into the model that gets pushed to the Hugging Face Hub. Will + not overwrite existing tags in the model. + + Args: + tags (`Union[List[str], str]`): + The desired tags to inject in the model + + Examples: + + ```python + from transformers import AutoModel + + model = AutoModel.from_pretrained("google-bert/bert-base-cased") + + model.add_model_tags(["custom", "custom-bert"]) + + # Push the model to your namespace with the name "my-custom-bert". + model.push_to_hub("my-custom-bert") + ``` + """ + if isinstance(tags, str): + tags = [tags] + + if self.model_tags is None: + self.model_tags = [] + + for tag in tags: + if tag not in self.model_tags: + self.model_tags.append(tag) + + @classmethod + def _from_config(cls, config, **kwargs): + """ + All context managers that the model should be initialized under go here. + + Args: + torch_dtype (`torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model under this dtype. + """ + torch_dtype = kwargs.pop("torch_dtype", None) + use_flash_attention_2 = kwargs.pop("use_flash_attention_2", False) + + # override default dtype if needed + dtype_orig = None + if torch_dtype is not None: + dtype_orig = cls._set_default_torch_dtype(torch_dtype) + + config = copy.deepcopy(config) # We do not want to modify the config inplace in _from_config. 
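+        # Resolve the attention implementation ("eager", "sdpa" or "flash_attention_2") on the copied config
+        # before the model is instantiated below.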
+ config._attn_implementation = kwargs.pop("attn_implementation", None) + config = cls._autoset_attn_implementation( + config, + use_flash_attention_2=use_flash_attention_2, + check_device_map=False, + torch_dtype=torch_dtype, + ) + + if is_deepspeed_zero3_enabled(): + import deepspeed + + logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model") + # this immediately partitions the model across all gpus, to avoid the overhead in time + # and memory copying it on CPU or each GPU first + with deepspeed.zero.Init(config_dict_or_path=deepspeed_config()): + model = cls(config, **kwargs) + else: + model = cls(config, **kwargs) + + # restore default dtype if it was modified + if dtype_orig is not None: + torch.set_default_dtype(dtype_orig) + + return model + + @classmethod + def _autoset_attn_implementation( + cls, + config, + use_flash_attention_2: bool = False, + torch_dtype: Optional[torch.dtype] = None, + device_map: Optional[Union[str, Dict[str, int]]] = None, + check_device_map: bool = True, + ): + """ + Automatically checks and dispatches to a default attention implementation. In order of priority: + 1. An implementation specified in `config._attn_implementation` (due for example to the argument attn_implementation="sdpa" in from_pretrained). + 2. DEPRECATED: if use_flash_attention_2 is set to `True` and `flash_attn` is available, flash attention. (`LlamaFlashAttention` for example) + 3. SDPA implementation, if available and supported by the model type. (`LlamaSdpaAttention` for example) + 4. The default model's implementation otherwise (`LlamaAttention` for example) . + """ + # Here we use config._attn_implementation_internal to check whether the attention implementation was explicitely set by the user. + # The property `PretrainedConfig._attn_implementation` is never `None`, for backward compatibility (always fall back on "eager"). + # The `hasattr` here is used as some Transformers tests for some reason do not call PretrainedConfig __init__ (e.g. test_no_super_init_config_and_model) + requested_attn_implementation = None + if hasattr(config, "_attn_implementation_internal") and config._attn_implementation_internal is not None: + if config._attn_implementation != "flash_attention_2" and use_flash_attention_2: + raise ValueError( + f'Both attn_implementation="{config._attn_implementation}" and `use_flash_attention_2=True` were used when loading the model, which are not compatible.' + ' We recommend to just use `attn_implementation="flash_attention_2"` when loading the model.' + ) + + if config._attn_implementation not in ["eager", "sdpa", "flash_attention_2"]: + message = f'Specified `attn_implementation="{config._attn_implementation}"` is not supported. The only possible arguments are `attn_implementation="eager"` (manual attention implementation)' + if cls._supports_flash_attn_2: + message += ', `"attn_implementation=flash_attention_2"` (implementation using flash attention 2)' + if cls._supports_sdpa: + message += ', `"attn_implementation=sdpa"` (implementation using torch.nn.functional.scaled_dot_product_attention)' + raise ValueError(message + ".") + + # If a config is passed with a preset attn_implementation, we skip the automatic dispatch and use the user-provided config, with hard checks that the requested attention implementation is available. 
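+            # e.g. loading with `attn_implementation="sdpa"` lands here: only the hard availability checks for
+            # the requested implementation are run further below, no automatic fallback is attempted.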
+ requested_attn_implementation = config._attn_implementation_internal + + if use_flash_attention_2: + logger.warning_once( + 'The model was loaded with use_flash_attention_2=True, which is deprecated and may be removed in a future release. Please use `attn_implementation="flash_attention_2"` instead.' + ) + config._attn_implementation = "flash_attention_2" + + if config._attn_implementation == "flash_attention_2": + cls._check_and_enable_flash_attn_2( + config, + torch_dtype=torch_dtype, + device_map=device_map, + hard_check_only=False, + check_device_map=check_device_map, + ) + elif requested_attn_implementation in [None, "sdpa"] and not is_torch_xla_available(): + # use_flash_attention_2 takes priority over SDPA, hence SDPA treated in this elif. + config = cls._check_and_enable_sdpa( + config, + hard_check_only=False if requested_attn_implementation is None else True, + ) + else: + config._attn_implementation = "eager" + + return config + + @classmethod + def _set_default_torch_dtype(cls, dtype: torch.dtype) -> torch.dtype: + """ + Change the default dtype and return the previous one. This is needed when wanting to instantiate the model + under specific dtype. + + Args: + dtype (`torch.dtype`): + a floating dtype to set to. + + Returns: + `torch.dtype`: the original `dtype` that can be used to restore `torch.set_default_dtype(dtype)` if it was + modified. If it wasn't, returns `None`. + + Note `set_default_dtype` currently only works with floating-point types and asserts if for example, + `torch.int64` is passed. So if a non-float `dtype` is passed this functions will throw an exception. + """ + if not dtype.is_floating_point: + raise ValueError( + f"Can't instantiate {cls.__name__} model under dtype={dtype} since it is not a floating point dtype" + ) + + logger.info(f"Instantiating {cls.__name__} model under default dtype {dtype}.") + dtype_orig = torch.get_default_dtype() + torch.set_default_dtype(dtype) + return dtype_orig + + @property + def base_model(self) -> nn.Module: + """ + `torch.nn.Module`: The main body of the model. + """ + return getattr(self, self.base_model_prefix, self) + + @classmethod + def can_generate(cls) -> bool: + """ + Returns whether this model can generate sequences with `.generate()`. + + Returns: + `bool`: Whether this model can generate sequences with `.generate()`. + """ + # Detects whether `prepare_inputs_for_generation` has been overwritten, which is a requirement for generation. + # Alternativelly, the model can also have a custom `generate` function. + if "GenerationMixin" in str(cls.prepare_inputs_for_generation) and "GenerationMixin" in str(cls.generate): + return False + return True + + @classmethod + def _check_and_enable_flash_attn_2( + cls, + config, + torch_dtype: Optional[torch.dtype] = None, + device_map: Optional[Union[str, Dict[str, int]]] = None, + check_device_map: bool = True, + hard_check_only: bool = False, + ) -> PretrainedConfig: + """ + Checks the availability of Flash Attention 2 and compatibility with the current model. + + If all checks pass and `hard_check_only` is False, the method will set the config attribute `attn_implementation` to "flash_attention_2" so that the model can initialize the correct attention module. + """ + if not cls._supports_flash_attn_2: + raise ValueError( + f"{cls.__name__} does not support Flash Attention 2.0 yet. 
Please request to add support where" + f" the model is hosted, on its model hub page: https://huggingface.co/{config._name_or_path}/discussions/new" + " or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new" + ) + + if not is_flash_attn_2_available(): + preface = "FlashAttention2 has been toggled on, but it cannot be used due to the following error:" + install_message = "Please refer to the documentation of https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-2 to install Flash Attention 2." + + if importlib.util.find_spec("flash_attn") is None: + raise ImportError(f"{preface} the package flash_attn seems to be not installed. {install_message}") + + flash_attention_version = version.parse(importlib.metadata.version("flash_attn")) + if torch.version.cuda: + if flash_attention_version < version.parse("2.1.0"): + raise ImportError( + f"{preface} you need flash_attn package version to be greater or equal than 2.1.0. Detected version {flash_attention_version}. {install_message}" + ) + else: + raise ImportError(f"{preface} Flash Attention 2 is not available. {install_message}") + elif torch.version.hip: + if flash_attention_version < version.parse("2.0.4"): + raise ImportError( + f"{preface} you need flash_attn package version to be greater or equal than 2.0.4. Make sure to have that version installed - detected version {flash_attention_version}. {install_message}" + ) + else: + raise ImportError(f"{preface} Flash Attention 2 is not available. {install_message}") + + _is_bettertransformer = getattr(cls, "use_bettertransformer", False) + + if _is_bettertransformer: + raise ValueError( + "Flash Attention 2 and BetterTransformer API are not compatible. Please make sure to disable BetterTransformers by doing model.reverse_bettertransformer()" + ) + + if torch_dtype is None: + logger.warning_once( + "You are attempting to use Flash Attention 2.0 without specifying a torch dtype. This might lead to unexpected behaviour" + ) + elif torch_dtype is not None and torch_dtype not in [torch.float16, torch.bfloat16]: + logger.warning_once( + "Flash Attention 2.0 only supports torch.float16 and torch.bfloat16 dtypes, but" + f" the current dype in {cls.__name__} is {torch_dtype}. You should run training or inference using Automatic Mixed-Precision via the `with torch.autocast(device_type='torch_device'):` decorator," + ' or load the model with the `torch_dtype` argument. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="flash_attention_2", torch_dtype=torch.float16)`' + ) + + # The check `torch.empty(0).device.type != "cuda"` is needed as the model may be initialized after `torch.set_default_device` has been called, + # or the model may be initialized under the context manager `with torch.device("cuda"):`. + if check_device_map and device_map is None and torch.empty(0).device.type != "cuda": + if torch.cuda.is_available(): + logger.warning_once( + "You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU" + " after initializing it on CPU with `model.to('cuda')`." + ) + else: + raise ValueError( + "You are attempting to use Flash Attention 2.0 with a model not initialized on GPU and with no GPU available. " + "This is not supported yet. Please make sure to have access to a GPU and either initialise the model on a GPU by passing a device_map " + "or initialising the model on CPU and then moving it to GPU." 
+ ) + elif ( + check_device_map + and device_map is not None + and isinstance(device_map, dict) + and ("cpu" in device_map.values() or "disk" in device_map.values()) + ): + raise ValueError( + "You are attempting to use Flash Attention 2.0 with a model dispatched on CPU or disk. This is not supported. Please make sure to " + "initialise the model on a GPU by passing a device_map that contains only GPU devices as keys." + ) + if not hard_check_only: + config._attn_implementation = "flash_attention_2" + return config + + @classmethod + def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False) -> PretrainedConfig: + """ + Checks the availability of SDPA for a given model. + + If all checks pass and `hard_check_only` is False, the method will set the config attribute `_attn_implementation` to "flash_attention_2" so that the model can initialize the correct attention module. + """ + if hard_check_only: + if not cls._supports_sdpa: + raise ValueError( + f"{cls.__name__} does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet." + " Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe" + ' this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation="eager"` meanwhile. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="eager")`' + ) + if not is_torch_sdpa_available(): + raise ImportError( + "PyTorch SDPA requirements in Transformers are not met. Please install torch>=2.1.1." + ) + + if not is_torch_sdpa_available() or not cls._supports_sdpa: + return config + + _is_bettertransformer = getattr(cls, "use_bettertransformer", False) + if _is_bettertransformer: + return config + + if not hard_check_only: + config._attn_implementation = "sdpa" + return config + + def enable_input_require_grads(self): + """ + Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping + the model weights fixed. + """ + + def make_inputs_require_grads(module, input, output): + output.requires_grad_(True) + + self._require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads) + + def disable_input_require_grads(self): + """ + Removes the `_require_grads_hook`. + """ + self._require_grads_hook.remove() + + def get_input_embeddings(self) -> nn.Module: + """ + Returns the model's input embeddings. + + Returns: + `nn.Module`: A torch module mapping vocabulary to hidden states. + """ + base_model = getattr(self, self.base_model_prefix, self) + if base_model is not self: + return base_model.get_input_embeddings() + else: + raise NotImplementedError + + def set_input_embeddings(self, value: nn.Module): + """ + Set model's input embeddings. + + Args: + value (`nn.Module`): A module mapping vocabulary to hidden states. + """ + base_model = getattr(self, self.base_model_prefix, self) + if base_model is not self: + base_model.set_input_embeddings(value) + else: + raise NotImplementedError + + def get_output_embeddings(self) -> nn.Module: + """ + Returns the model's output embeddings. + + Returns: + `nn.Module`: A torch module mapping hidden states to vocabulary. + """ + return None # Overwrite for models with output embeddings + + def _init_weights(self, module): + """ + Initialize the weights. 
This method should be overridden by derived class and is + the only initialization method that will be called when loading a checkpoint + using `from_pretrained`. Any attempt to initialize outside of this function + will be useless as the torch.nn.init function are all replaced with skip. + """ + pass + + def _initialize_weights(self, module): + """ + Initialize the weights if they are not already initialized. + """ + if getattr(module, "_is_hf_initialized", False): + return + self._init_weights(module) + module._is_hf_initialized = True + + def tie_weights(self): + """ + Tie the weights between the input embeddings and the output embeddings. + + If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the + weights instead. + """ + if getattr(self.config, "tie_word_embeddings", True): + output_embeddings = self.get_output_embeddings() + if output_embeddings is not None: + self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) + + if getattr(self.config, "is_encoder_decoder", False) and getattr(self.config, "tie_encoder_decoder", False): + if hasattr(self, self.base_model_prefix): + self = getattr(self, self.base_model_prefix) + tied_weights = self._tie_encoder_decoder_weights( + self.encoder, self.decoder, self.base_model_prefix, "encoder" + ) + # Setting a dynamic variable instead of `_tied_weights_keys` because it's a class + # attributed not an instance member, therefore modifying it will modify the entire class + # Leading to issues on subsequent calls by different tests or subsequent calls. + self._dynamic_tied_weights_keys = tied_weights + + for module in self.modules(): + if hasattr(module, "_tie_weights"): + module._tie_weights() + + @staticmethod + def _tie_encoder_decoder_weights( + encoder: nn.Module, decoder: nn.Module, base_model_prefix: str, base_encoder_name: str + ): + uninitialized_encoder_weights: List[str] = [] + tied_weights: List[str] = [] + if decoder.__class__ != encoder.__class__: + logger.info( + f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder" + " weights are correctly initialized." 
+ ) + + def tie_encoder_to_decoder_recursively( + decoder_pointer: nn.Module, + encoder_pointer: nn.Module, + module_name: str, + base_encoder_name: str, + uninitialized_encoder_weights: List[str], + depth=0, + total_decoder_name="", + total_encoder_name="", + ): + assert isinstance(decoder_pointer, nn.Module) and isinstance( + encoder_pointer, nn.Module + ), f"{decoder_pointer} and {encoder_pointer} have to be of type nn.Module" + if hasattr(decoder_pointer, "weight"): + assert hasattr(encoder_pointer, "weight") + encoder_pointer.weight = decoder_pointer.weight + tied_weights.append(f"{base_encoder_name}{total_encoder_name}.weight") + if hasattr(decoder_pointer, "bias"): + assert hasattr(encoder_pointer, "bias") + tied_weights.append(f"{base_encoder_name}{total_encoder_name}.bias") + encoder_pointer.bias = decoder_pointer.bias + return + + encoder_modules = encoder_pointer._modules + decoder_modules = decoder_pointer._modules + if len(decoder_modules) > 0: + assert ( + len(encoder_modules) > 0 + ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}" + + all_encoder_weights = {module_name + "/" + sub_name for sub_name in encoder_modules.keys()} + encoder_layer_pos = 0 + for name, module in decoder_modules.items(): + if name.isdigit(): + encoder_name = str(int(name) + encoder_layer_pos) + decoder_name = name + if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len( + encoder_modules + ) != len(decoder_modules): + # this can happen if the name corresponds to the position in a list module list of layers + # in this case the decoder has added a cross-attention that the encoder does not have + # thus skip this step and subtract one layer pos from encoder + encoder_layer_pos -= 1 + continue + elif name not in encoder_modules: + continue + elif depth > 500: + raise ValueError( + "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is" + " a circular dependency between two or more `nn.Modules` of your model." 
+ ) + else: + decoder_name = encoder_name = name + tie_encoder_to_decoder_recursively( + decoder_modules[decoder_name], + encoder_modules[encoder_name], + module_name + "/" + name, + base_encoder_name, + uninitialized_encoder_weights, + depth=depth + 1, + total_encoder_name=f"{total_encoder_name}.{encoder_name}", + total_decoder_name=f"{total_decoder_name}.{decoder_name}", + ) + all_encoder_weights.remove(module_name + "/" + encoder_name) + + uninitialized_encoder_weights += list(all_encoder_weights) + + # tie weights recursively + tie_encoder_to_decoder_recursively( + decoder, encoder, base_model_prefix, base_encoder_name, uninitialized_encoder_weights + ) + + if len(uninitialized_encoder_weights) > 0: + logger.warning( + f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}" + ) + return tied_weights + + def _tie_or_clone_weights(self, output_embeddings, input_embeddings): + """Tie or clone module weights depending of whether we are using TorchScript or not""" + if self.config.torchscript: + output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone()) + else: + output_embeddings.weight = input_embeddings.weight + + if getattr(output_embeddings, "bias", None) is not None: + output_embeddings.bias.data = nn.functional.pad( + output_embeddings.bias.data, + ( + 0, + output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0], + ), + "constant", + 0, + ) + if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"): + output_embeddings.out_features = input_embeddings.num_embeddings + + def _get_no_split_modules(self, device_map: str): + """ + Get the modules of the model that should not be spit when using device_map. We iterate through the modules to + get the underlying `_no_split_modules`. + + Args: + device_map (`str`): + The device map value. Options are ["auto", "balanced", "balanced_low_0", "sequential"] + + Returns: + `List[str]`: List of modules that should not be split + """ + _no_split_modules = set() + modules_to_check = [self] + while len(modules_to_check) > 0: + module = modules_to_check.pop(-1) + # if the module does not appear in _no_split_modules, we also check the children + if module.__class__.__name__ not in _no_split_modules: + if isinstance(module, PreTrainedModel): + if module._no_split_modules is None: + raise ValueError( + f"{module.__class__.__name__} does not support `device_map='{device_map}'`. To implement support, the model " + "class needs to implement the `_no_split_modules` attribute." + ) + else: + _no_split_modules = _no_split_modules | set(module._no_split_modules) + modules_to_check += list(module.children()) + return list(_no_split_modules) + + def resize_token_embeddings( + self, new_num_tokens: Optional[int] = None, pad_to_multiple_of: Optional[int] = None + ) -> nn.Embedding: + """ + Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. + + Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. + + Arguments: + new_num_tokens (`int`, *optional*): + The new number of tokens in the embedding matrix. Increasing the size will add newly initialized + vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just + returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything. 
+ pad_to_multiple_of (`int`, *optional*): + If set will pad the embedding matrix to a multiple of the provided value.If `new_num_tokens` is set to + `None` will just pad the embedding to a multiple of `pad_to_multiple_of`. + + This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability + `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more + details about this, or help on choosing the correct value for resizing, refer to this guide: + https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc + + Return: + `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model. + """ + model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of) + if new_num_tokens is None and pad_to_multiple_of is None: + return model_embeds + + # Update base model and current model config + self.config.vocab_size = model_embeds.weight.shape[0] + self.vocab_size = model_embeds.weight.shape[0] + + # Tie weights again if needed + self.tie_weights() + + return model_embeds + + def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None): + old_embeddings = self.get_input_embeddings() + new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of) + if hasattr(old_embeddings, "_hf_hook"): + hook = old_embeddings._hf_hook + add_hook_to_module(new_embeddings, hook) + old_embeddings_requires_grad = old_embeddings.weight.requires_grad + new_embeddings.requires_grad_(old_embeddings_requires_grad) + self.set_input_embeddings(new_embeddings) + is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None + + # Update new_num_tokens with the actual size of new_embeddings + if pad_to_multiple_of is not None: + if is_deepspeed_zero3_enabled() and not is_quantized: + import deepspeed + + with deepspeed.zero.GatheredParameters(new_embeddings.weight, modifier_rank=None): + new_num_tokens = new_embeddings.weight.shape[0] + else: + new_num_tokens = new_embeddings.weight.shape[0] + + # if word embeddings are not tied, make sure that lm head is resized as well + if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings: + old_lm_head = self.get_output_embeddings() + if isinstance(old_lm_head, torch.nn.Embedding): + new_lm_head = self._get_resized_embeddings(old_lm_head, new_num_tokens) + else: + new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens) + if hasattr(old_lm_head, "_hf_hook"): + hook = old_lm_head._hf_hook + add_hook_to_module(new_lm_head, hook) + old_lm_head_requires_grad = old_lm_head.weight.requires_grad + new_lm_head.requires_grad_(old_lm_head_requires_grad) + self.set_output_embeddings(new_lm_head) + + return self.get_input_embeddings() + + def _get_resized_embeddings( + self, + old_embeddings: nn.Embedding, + new_num_tokens: Optional[int] = None, + pad_to_multiple_of: Optional[int] = None, + ) -> nn.Embedding: + """ + Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly + initialized vectors at the end. Reducing the size will remove vectors from the end + + Args: + old_embeddings (`torch.nn.Embedding`): + Old embeddings to be resized. + new_num_tokens (`int`, *optional*): + New number of tokens in the embedding matrix. + + Increasing the size will add newly initialized vectors at the end. Reducing the size will remove + vectors from the end. 
If not provided or `None`, just returns a pointer to the input tokens + `torch.nn.Embedding` module of the model without doing anything. + pad_to_multiple_of (`int`, *optional*): + If set will pad the embedding matrix to a multiple of the provided value. If `new_num_tokens` is set to + `None` will just pad the embedding to a multiple of `pad_to_multiple_of`. + + This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability + `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more + details about this, or help on choosing the correct value for resizing, refer to this guide: + https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc + + + Return: + `torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if + `new_num_tokens` is `None` + """ + + if pad_to_multiple_of is not None: + if not isinstance(pad_to_multiple_of, int): + raise ValueError( + f"Asking to pad the embedding matrix to a multiple of `{pad_to_multiple_of}`, which is not and integer. Please make sure to pass an integer" + ) + if new_num_tokens is None: + new_num_tokens = old_embeddings.weight.shape[0] + new_num_tokens = ((new_num_tokens + pad_to_multiple_of - 1) // pad_to_multiple_of) * pad_to_multiple_of + else: + logger.info( + "You are resizing the embedding layer without providing a `pad_to_multiple_of` parameter. This means that the new embedding" + f" dimension will be {new_num_tokens}. This might induce some performance reduction as *Tensor Cores* will not be available." + " For more details about this, or help on choosing the correct value for resizing, refer to this guide:" + " https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc" + ) + + if new_num_tokens is None: + return old_embeddings + + is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None + if is_deepspeed_zero3_enabled() and not is_quantized: + import deepspeed + + with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None): + old_num_tokens, old_embedding_dim = old_embeddings.weight.size() + else: + old_num_tokens, old_embedding_dim = old_embeddings.weight.size() + + if old_num_tokens == new_num_tokens and not is_deepspeed_zero3_enabled(): + return old_embeddings + + if not isinstance(old_embeddings, nn.Embedding): + raise TypeError( + f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}. You" + " should either use a different resize function or make sure that `old_embeddings` are an instance of" + f" {nn.Embedding}." + ) + + # Build new embeddings + + # When using DeepSpeed ZeRO-3, we shouldn't create new embeddings with DeepSpeed init + # because the shape of the new embedding layer is used across various modeling files + # as well as to update config vocab size. Shape will be 0 when using DeepSpeed init leading + # to errors when training. 
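+        # The resized matrix is allocated on the same device and dtype as the old one; rows that also exist in
+        # the old matrix are copied over below, while newly added rows keep the `_init_weights` initialization.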
+ new_embeddings = nn.Embedding( + new_num_tokens, + old_embedding_dim, + device=old_embeddings.weight.device, + dtype=old_embeddings.weight.dtype, + ) + + # initialize all new embeddings (in particular added tokens) + self._init_weights(new_embeddings) + + # Copy token embeddings from the previous weights + + # numbers of tokens to copy + n = min(old_num_tokens, new_num_tokens) + + if is_deepspeed_zero3_enabled() and not is_quantized: + import deepspeed + + params = [old_embeddings.weight, new_embeddings.weight] + with deepspeed.zero.GatheredParameters(params, modifier_rank=0): + new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :] + else: + new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :] + + return new_embeddings + + def _get_resized_lm_head( + self, old_lm_head: nn.Linear, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False + ) -> nn.Linear: + """ + Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized + vectors at the end. Reducing the size will remove vectors from the end + + Args: + old_lm_head (`torch.nn.Linear`): + Old lm head liner layer to be resized. + new_num_tokens (`int`, *optional*): + New number of tokens in the linear matrix. + + Increasing the size will add newly initialized vectors at the end. Reducing the size will remove + vectors from the end. If not provided or `None`, just returns a pointer to the input tokens + `torch.nn.Linear` module of the model without doing anything. transposed (`bool`, *optional*, defaults + to `False`): Whether `old_lm_head` is transposed or not. If True `old_lm_head.size()` is `lm_head_dim, + vocab_size` else `vocab_size, lm_head_dim`. + + Return: + `torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if `new_num_tokens` is + `None` + """ + if new_num_tokens is None: + return old_lm_head + + is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None + if is_deepspeed_zero3_enabled() and not is_quantized: + import deepspeed + + with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=None): + old_num_tokens, old_lm_head_dim = ( + old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size() + ) + else: + old_num_tokens, old_lm_head_dim = ( + old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size() + ) + + if old_num_tokens == new_num_tokens and not is_deepspeed_zero3_enabled(): + return old_lm_head + + if not isinstance(old_lm_head, nn.Linear): + raise TypeError( + f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. You" + " should either use a different resize function or make sure that `old_lm_head` are an instance of" + f" {nn.Linear}." + ) + + # Build new lm head + new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim) + has_new_lm_head_bias = old_lm_head.bias is not None + + # When using DeepSpeed ZeRO-3, we shouldn't create new embeddings with DeepSpeed init + # because the shape of the new embedding layer is used across various modeling files + # as well as to update config vocab size. Shape will be 0 when using DeepSpeed init leading + # to errors when training. 
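+        # `new_lm_head_shape` already accounts for `transposed`, so the resized head keeps the orientation of the
+        # old one; only the first `num_tokens_to_copy` rows (or columns) are copied over below.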
+ new_lm_head = nn.Linear( + *new_lm_head_shape, + bias=has_new_lm_head_bias, + device=old_lm_head.weight.device, + dtype=old_lm_head.weight.dtype, + ) + + # initialize new lm head (in particular added tokens) + self._init_weights(new_lm_head) + + num_tokens_to_copy = min(old_num_tokens, new_num_tokens) + + if is_deepspeed_zero3_enabled() and not is_quantized: + import deepspeed + + params = [old_lm_head.weight, old_lm_head.bias, new_lm_head.weight, new_lm_head.bias] + with deepspeed.zero.GatheredParameters(params, modifier_rank=0): + self._copy_lm_head_original_to_resized( + new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias + ) + else: + self._copy_lm_head_original_to_resized( + new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias + ) + + return new_lm_head + + def _copy_lm_head_original_to_resized( + self, new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias + ): + # Copy old lm head weights to new lm head + if not transposed: + new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :] + else: + new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy] + + # Copy bias weights to new lm head + if has_new_lm_head_bias: + new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy] + + def resize_position_embeddings(self, new_num_position_embeddings: int): + raise NotImplementedError( + f"`resize_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should " + f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`" + ) + + def get_position_embeddings(self) -> Union[nn.Embedding, Tuple[nn.Embedding]]: + raise NotImplementedError( + f"`get_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should " + f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`" + ) + + def init_weights(self): + """ + If needed prunes and maybe initializes weights. If using a custom `PreTrainedModel`, you need to implement any + initialization logic in `_init_weights`. + """ + # Prune heads if needed + if self.config.pruned_heads: + self.prune_heads(self.config.pruned_heads) + + if _init_weights: + # Initialize weights + self.apply(self._initialize_weights) + + # Tie weights should be skipped when not initializing all weights + # since from_pretrained(...) calls tie weights anyways + self.tie_weights() + + def prune_heads(self, heads_to_prune: Dict[int, List[int]]): + """ + Prunes heads of the base model. + + Arguments: + heads_to_prune (`Dict[int, List[int]]`): + Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads + to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on + layer 1 and heads 2 and 3 on layer 2. + """ + # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads + for layer, heads in heads_to_prune.items(): + union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads) + self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON + + self.base_model._prune_heads(heads_to_prune) + + def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None): + """ + Activates gradient checkpointing for the current model. 
+ + Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint + activations". + + We pass the `__call__` method of the modules instead of `forward` because `__call__` attaches all the hooks of + the module. https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2 + + Args: + gradient_checkpointing_kwargs (dict, *optional*): + Additional keyword arguments passed along to the `torch.utils.checkpoint.checkpoint` function. + """ + if not self.supports_gradient_checkpointing: + raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.") + + if gradient_checkpointing_kwargs is None: + gradient_checkpointing_kwargs = {"use_reentrant": True} + + gradient_checkpointing_func = functools.partial(checkpoint, **gradient_checkpointing_kwargs) + + # For old GC format (transformers < 4.35.0) for models that live on the Hub + # we will fall back to the overwritten `_set_gradient_checkpointing` method + _is_using_old_format = "value" in inspect.signature(self._set_gradient_checkpointing).parameters + + if not _is_using_old_format: + self._set_gradient_checkpointing(enable=True, gradient_checkpointing_func=gradient_checkpointing_func) + else: + self.apply(partial(self._set_gradient_checkpointing, value=True)) + logger.warning( + "You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it)." + "Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model." + ) + + if getattr(self, "_hf_peft_config_loaded", False): + # When using PEFT + gradient checkpointing + Trainer we need to make sure the input has requires_grad=True + # we do it also on PEFT: https://github.com/huggingface/peft/blob/85013987aa82aa1af3da1236b6902556ce3e483e/src/peft/peft_model.py#L334 + # When training with PEFT, only LoRA layers will have requires grad set to True, but the output of frozen layers need to propagate + # the gradients to make sure the gradient flows. + self.enable_input_require_grads() + + def _set_gradient_checkpointing(self, enable: bool = True, gradient_checkpointing_func: Callable = checkpoint): + is_gradient_checkpointing_set = False + + # Apply it on the top-level module in case the top-level modules supports it + # for example, LongT5Stack inherits from `PreTrainedModel`. + if hasattr(self, "gradient_checkpointing"): + self._gradient_checkpointing_func = gradient_checkpointing_func + self.gradient_checkpointing = enable + is_gradient_checkpointing_set = True + + for module in self.modules(): + if hasattr(module, "gradient_checkpointing"): + module._gradient_checkpointing_func = gradient_checkpointing_func + module.gradient_checkpointing = enable + is_gradient_checkpointing_set = True + + if not is_gradient_checkpointing_set: + raise ValueError( + f"{self.__class__.__name__} is not compatible with gradient checkpointing. Make sure all the architecture support it by setting a boolean attribute" + " `gradient_checkpointing` to modules of the model that uses checkpointing." + ) + + def gradient_checkpointing_disable(self): + """ + Deactivates gradient checkpointing for the current model. + + Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint + activations". 
+ """ + if self.supports_gradient_checkpointing: + # For old GC format (transformers < 4.35.0) for models that live on the Hub + # we will fall back to the overwritten `_set_gradient_checkpointing` methid + _is_using_old_format = "value" in inspect.signature(self._set_gradient_checkpointing).parameters + if not _is_using_old_format: + self._set_gradient_checkpointing(enable=False) + else: + logger.warning( + "You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it)." + "Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model." + ) + self.apply(partial(self._set_gradient_checkpointing, value=False)) + + if getattr(self, "_hf_peft_config_loaded", False): + self.disable_input_require_grads() + + @property + def is_gradient_checkpointing(self) -> bool: + """ + Whether gradient checkpointing is activated for this model or not. + + Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint + activations". + """ + return any(hasattr(m, "gradient_checkpointing") and m.gradient_checkpointing for m in self.modules()) + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + is_main_process: bool = True, + state_dict: Optional[dict] = None, + save_function: Callable = torch.save, + push_to_hub: bool = False, + max_shard_size: Union[int, str] = "5GB", + safe_serialization: bool = True, + variant: Optional[str] = None, + token: Optional[Union[str, bool]] = None, + save_peft_format: bool = True, + **kwargs, + ): + """ + Save a model and its configuration file to a directory, so that it can be re-loaded using the + [`~PreTrainedModel.from_pretrained`] class method. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to which to save. Will be created if it doesn't exist. + is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful when in distributed training like + TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on + the main process to avoid race conditions. + state_dict (nested dictionary of `torch.Tensor`): + The state dictionary of the model to save. Will default to `self.state_dict()`, but can be used to only + save parts of the model or if special precautions need to be taken when recovering the state dictionary + of a model (like when using model parallelism). + save_function (`Callable`): + The function to use to save the state dictionary. Useful on distributed training like TPUs when one + need to replace `torch.save` by another method. + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + max_shard_size (`int` or `str`, *optional*, defaults to `"5GB"`): + The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size + lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). + We default it to 5GB in order for models to be able to run easily on free-tier google colab instances + without CPU OOM issues. 
+ + + + If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard + which will be bigger than `max_shard_size`. + + + + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + variant (`str`, *optional*): + If specified, weights are saved in the format pytorch_model.<variant>.bin. + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use + the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). + save_peft_format (`bool`, *optional*, defaults to `True`): + For backward compatibility with the PEFT library, in case adapter weights are attached to the model, all + keys of the state dict of adapters need to be prepended with `base_model.model`. Advanced users can + disable this behaviour by setting `save_peft_format` to `False`. + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. + """ + use_auth_token = kwargs.pop("use_auth_token", None) + ignore_metadata_errors = kwargs.pop("ignore_metadata_errors", False) + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + token = use_auth_token + + if token is not None: + kwargs["token"] = token + + _hf_peft_config_loaded = getattr(self, "_hf_peft_config_loaded", False) + + hf_quantizer = getattr(self, "hf_quantizer", None) + quantization_serializable = ( + hf_quantizer is not None and isinstance(hf_quantizer, HfQuantizer) and hf_quantizer.is_serializable + ) + + if hf_quantizer is not None and not _hf_peft_config_loaded and not quantization_serializable: + raise ValueError( + f"The model is quantized with {hf_quantizer.quantization_config.quant_method} and is not serializable - check out the warnings from" + " the logger on the traceback to understand the reason why the quantized model is not serializable." + ) + + if "save_config" in kwargs: + warnings.warn( + "`save_config` is deprecated and will be removed in v5 of Transformers. Use `is_main_process` instead." + ) + is_main_process = kwargs.pop("save_config") + if safe_serialization and not is_safetensors_available(): + raise ImportError("`safe_serialization` requires the `safetensors` library: `pip install safetensors`.") + + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + + os.makedirs(save_directory, exist_ok=True) + + if push_to_hub: + commit_message = kwargs.pop("commit_message", None) + repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) + repo_id = self._create_repo(repo_id, **kwargs) + files_timestamps = self._get_files_timestamps(save_directory) + + # Only save the model itself if we are using distributed training + model_to_save = unwrap_model(self) + + # save the string version of dtype to the config, e.g.
convert torch.float32 => "float32" + # we currently don't use this setting automatically, but may start to use with v5 + dtype = get_parameter_dtype(model_to_save) + model_to_save.config.torch_dtype = str(dtype).split(".")[1] + + # Attach architecture to the config + model_to_save.config.architectures = [model_to_save.__class__.__name__] + + # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be + # loaded from the Hub. + if self._auto_class is not None: + custom_object_save(self, save_directory, config=self.config) + + # Save the config + if is_main_process: + if not _hf_peft_config_loaded: + model_to_save.config.save_pretrained(save_directory) + if self.can_generate(): + # generation config built from the model config + the model config holds generation kwargs -> generate + # may revert to legacy behavior if the two don't match + if ( + model_to_save.generation_config._from_model_config + and model_to_save.config._has_non_default_generation_parameters() + ): + new_generation_config = GenerationConfig.from_model_config(model_to_save.config) + if new_generation_config != model_to_save.generation_config: + logger.warning( + "Your generation config was originally created from the model config, but the model " + "config has changed since then. Unless you pass the `generation_config` argument to this " + "model's `generate` calls, they will revert to the legacy behavior where the base " + "`generate` parameterization is loaded from the model config instead. " + "To avoid this behavior and this warning, we recommend you to overwrite the generation " + "config model attribute before calling the model's `save_pretrained`, preferably also " + "removing any generation kwargs from the model config. This warning will be raised to an " + "exception in v4.41." + ) + model_to_save.generation_config.save_pretrained(save_directory) + + if _hf_peft_config_loaded: + logger.info( + "Detected adapters on the model, saving the model in the PEFT format, only adapter weights will be saved." + ) + state_dict = model_to_save.get_adapter_state_dict() + + if save_peft_format: + logger.info( + "To match the expected format of the PEFT library, all keys of the state dict of adapters will be pre-pended with `base_model.model`." + ) + peft_state_dict = {} + for key, value in state_dict.items(): + peft_state_dict[f"base_model.model.{key}"] = value + state_dict = peft_state_dict + + active_adapter = self.active_adapters() + + if len(active_adapter) > 1: + raise ValueError( + "Multiple active adapters detected, saving multiple active adapters is not supported yet. You can save adapters separately one by one " + "by iteratively calling `model.set_adapter(adapter_name)` then `model.save_pretrained(...)`" + ) + active_adapter = active_adapter[0] + + current_peft_config = self.peft_config[active_adapter] + current_peft_config.save_pretrained(save_directory) + + # Save the model + if state_dict is None: + state_dict = model_to_save.state_dict() + + # Translate state_dict from smp to hf if saving with smp >= 1.10 + if IS_SAGEMAKER_MP_POST_1_10: + for smp_to_hf, _ in smp.state.module_manager.translate_functions: + state_dict = smp_to_hf(state_dict) + + # Handle the case where some state_dict keys shouldn't be saved + if self._keys_to_ignore_on_save is not None: + for ignore_key in self._keys_to_ignore_on_save: + if ignore_key in state_dict.keys(): + del state_dict[ignore_key] + if safe_serialization: + # Safetensors does not allow tensor aliasing. 
+ # We're going to remove aliases before saving + ptrs = collections.defaultdict(list) + for name, tensor in state_dict.items(): + # Sometimes in the state_dict we have non-tensor objects. + # e.g. in bitsandbytes we have some `str` objects in the state_dict + if isinstance(tensor, torch.Tensor): + ptrs[id_tensor_storage(tensor)].append(name) + else: + # In the non-tensor case, fall back to the pointer of the object itself + ptrs[id(tensor)].append(name) + + # These are all the pointers of shared tensors. + shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} + error_names = [] + to_delete_names = set() + # Recursively descend to find tied weight keys + _tied_weights_keys = _get_tied_weight_keys(self) + for names in shared_ptrs.values(): + # Removing the keys which are declared as known duplicates on + # load. This allows to make sure the name which is kept is consistent. + if _tied_weights_keys is not None: + found = 0 + for name in sorted(names): + matches_pattern = any(re.search(pat, name) for pat in _tied_weights_keys) + if matches_pattern and name in state_dict: + found += 1 + if found < len(names): + to_delete_names.add(name) + # We are entering a place where the weights and the transformers configuration do NOT match. + shared_names, disjoint_names = _find_disjoint(shared_ptrs.values(), state_dict) + # Those are actually tensor sharing but disjoint from each other, we can safely clone them + # Reloaded won't have the same property, but it shouldn't matter in any meaningful way. + for name in disjoint_names: + state_dict[name] = state_dict[name].clone() + + # When not all duplicates have been cleaned, still remove those keys, but put a clear warning. + # If the link between tensors was done at runtime then `from_pretrained` will not get + # the key back leading to random tensor. A proper warning will be shown + # during reload (if applicable), but since the file is not necessarily compatible with + # the config, better show a proper warning. + shared_names, identical_names = _find_identical(shared_names, state_dict) + # delete tensors that have identical storage + for inames in identical_names: + known = inames.intersection(to_delete_names) + for name in known: + del state_dict[name] + unknown = inames.difference(to_delete_names) + if len(unknown) > 1: + error_names.append(unknown) + + if shared_names: + error_names.append(set(shared_names)) + + if len(error_names) > 0: + raise RuntimeError( + f"The weights trying to be saved contained shared tensors {error_names} that are mismatching the transformers base configuration. Try saving using `safe_serialization=False` or remove this tensor sharing.", + ) + + # Shard the model if it is too big. + if not _hf_peft_config_loaded: + weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME + weights_name = _add_variant(weights_name, variant) + else: + weights_name = ADAPTER_SAFE_WEIGHTS_NAME if safe_serialization else ADAPTER_WEIGHTS_NAME + + shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name=weights_name) + + # Clean the folder from a previous save + for filename in os.listdir(save_directory): + full_filename = os.path.join(save_directory, filename) + # If we have a shard file that is not going to be replaced, we delete it, but only from the main process + # in distributed settings to avoid race conditions. + weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "") + + # make sure that file to be deleted matches format of sharded file, e.g. 
pytorch_model-00001-of-00005 + filename_no_suffix = filename.replace(".bin", "").replace(".safetensors", "") + reg = re.compile(r"(.*?)-\d{5}-of-\d{5}") + + if ( + filename.startswith(weights_no_suffix) + and os.path.isfile(full_filename) + and filename not in shards.keys() + and is_main_process + and reg.fullmatch(filename_no_suffix) is not None + ): + os.remove(full_filename) + + # Save the model + for shard_file, shard in shards.items(): + if safe_serialization: + # At some point we will need to deal better with save_function (used for TPU and other distributed + # joyfulness), but for now this is enough. + safe_save_file(shard, os.path.join(save_directory, shard_file), metadata={"format": "pt"}) + else: + save_function(shard, os.path.join(save_directory, shard_file)) + + if index is None: + path_to_weights = os.path.join(save_directory, weights_name) + logger.info(f"Model weights saved in {path_to_weights}") + else: + save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME + save_index_file = os.path.join(save_directory, _add_variant(save_index_file, variant)) + # Save the index as well + with open(save_index_file, "w", encoding="utf-8") as f: + content = json.dumps(index, indent=2, sort_keys=True) + "\n" + f.write(content) + logger.info( + f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be " + f"split in {len(shards)} checkpoint shards. You can find where each parameter has been saved in the " + f"index located at {save_index_file}." + ) + + if push_to_hub: + # Eventually create an empty model card + model_card = create_and_tag_model_card( + repo_id, self.model_tags, token=token, ignore_metadata_errors=ignore_metadata_errors + ) + + # Update model card if needed: + model_card.save(os.path.join(save_directory, "README.md")) + + self._upload_modified_files( + save_directory, + repo_id, + files_timestamps, + commit_message=commit_message, + token=token, + ) + + @wraps(PushToHubMixin.push_to_hub) + def push_to_hub(self, *args, **kwargs): + tags = self.model_tags if self.model_tags is not None else [] + + tags_kwargs = kwargs.get("tags", []) + if isinstance(tags_kwargs, str): + tags_kwargs = [tags_kwargs] + + for tag in tags_kwargs: + if tag not in tags: + tags.append(tag) + + if tags: + kwargs["tags"] = tags + return super().push_to_hub(*args, **kwargs) + + def get_memory_footprint(self, return_buffers=True): + r""" + Get the memory footprint of a model. This will return the memory footprint of the current model in bytes. + Useful to benchmark the memory footprint of the current model and design some tests. Solution inspired by the + PyTorch discussions: https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2 + + Arguments: + return_buffers (`bool`, *optional*, defaults to `True`): + Whether to return the size of the buffer tensors in the computation of the memory footprint. Buffers + are tensors that do not require gradients and are not registered as parameters. E.g. mean and std in batch + norm layers.
Please see: https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2 + """ + mem = sum([param.nelement() * param.element_size() for param in self.parameters()]) + if return_buffers: + mem_bufs = sum([buf.nelement() * buf.element_size() for buf in self.buffers()]) + mem = mem + mem_bufs + return mem + + @wraps(torch.nn.Module.cuda) + def cuda(self, *args, **kwargs): + # Checks if the model has been loaded in 8-bit + if getattr(self, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES: + raise ValueError( + "Calling `cuda()` is not supported for `4-bit` or `8-bit` quantized models. Please use the model as it is, since the" + " model has already been set to the correct devices and cast to the correct `dtype`." + ) + else: + return super().cuda(*args, **kwargs) + + @wraps(torch.nn.Module.to) + def to(self, *args, **kwargs): + # Checks if the model has been loaded in 8-bit + if getattr(self, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES: + raise ValueError( + "`.to` is not supported for `4-bit` or `8-bit` bitsandbytes models. Please use the model as it is, since the" + " model has already been set to the correct devices and cast to the correct `dtype`." + ) + elif getattr(self, "quantization_method", None) == QuantizationMethod.GPTQ: + # For GPTQ models, we prevent users from casting the model to another dtype to restrict unwanted behaviours. + # the correct API should be to load the model with the desired dtype directly through `from_pretrained`. + dtype_present_in_args = False + + if "dtype" not in kwargs: + for arg in args: + if isinstance(arg, torch.dtype): + dtype_present_in_args = True + break + else: + dtype_present_in_args = True + + if dtype_present_in_args: + raise ValueError( + "You cannot cast a GPTQ model to a new `dtype`. Make sure to load the model using `from_pretrained` with the desired" + " `dtype` by passing the correct `torch_dtype` argument." + ) + return super().to(*args, **kwargs) + + def half(self, *args): + # Checks if the model is quantized + if getattr(self, "is_quantized", False): + raise ValueError( + "`.half()` is not supported for quantized models. Please use the model as it is, since the" + " model has already been cast to the correct `dtype`." + ) + else: + return super().half(*args) + + def float(self, *args): + # Checks if the model is quantized + if getattr(self, "is_quantized", False): + raise ValueError( + "`.float()` is not supported for quantized models. Please use the model as it is, since the" + " model has already been cast to the correct `dtype`." + ) + else: + return super().float(*args) + + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], + *model_args, + config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None, + cache_dir: Optional[Union[str, os.PathLike]] = None, + ignore_mismatched_sizes: bool = False, + force_download: bool = False, + local_files_only: bool = False, + token: Optional[Union[str, bool]] = None, + revision: str = "main", + use_safetensors: bool = None, + **kwargs, + ): + r""" + Instantiate a pretrained pytorch model from a pre-trained model configuration. + + The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train + the model, you should first set it back in training mode with `model.train()`. + + The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come + pretrained with the rest of the model.
It is up to you to train those weights with a downstream fine-tuning + task. + + The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those + weights are discarded. + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. + - A path to a *directory* containing model weights saved using + [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In + this case, `from_tf` should be set to `True` and a configuration object should be provided as + `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a + PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. + - A path or url to a model folder containing a *flax checkpoint file* in *.msgpack* format (e.g, + `./flax_model/` containing `flax_model.msgpack`). In this case, `from_flax` should be set to + `True`. + - `None` if you are both providing the configuration and state dictionary (resp. with keyword + arguments `config` and `state_dict`). + model_args (sequence of positional arguments, *optional*): + All remaining positional arguments will be passed to the underlying model's `__init__` method. + config (`Union[PretrainedConfig, str, os.PathLike]`, *optional*): + Can be either: + + - an instance of a class derived from [`PretrainedConfig`], + - a string or path valid as input to [`~PretrainedConfig.from_pretrained`]. + + Configuration for the model to use instead of an automatically loaded configuration. Configuration can + be automatically loaded when: + + - The model is a model provided by the library (loaded with the *model id* string of a pretrained + model). + - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the + save directory. + - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a + configuration JSON file named *config.json* is found in the directory. + state_dict (`Dict[str, torch.Tensor]`, *optional*): + A state dictionary to use instead of a state dictionary loaded from saved weights file. + + This option can be used if you want to create a model from a pretrained configuration but load your own + weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and + [`~PreTrainedModel.from_pretrained`] is not a simpler option. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the + standard cache should not be used. + from_tf (`bool`, *optional*, defaults to `False`): + Load the model weights from a TensorFlow checkpoint save file (see docstring of + `pretrained_model_name_or_path` argument). + from_flax (`bool`, *optional*, defaults to `False`): + Load the model weights from a Flax checkpoint save file (see docstring of + `pretrained_model_name_or_path` argument). + ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): + Whether or not to raise an error if some of the weights from the checkpoint do not have the same size + as the weights of the model (if for instance, you are instantiating a model with 10 labels from a + checkpoint with 3 labels). 
+ force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files. Will attempt to resume the download if such a + file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only(`bool`, *optional*, defaults to `False`): + Whether or not to only look at local files (i.e., do not try to download the model). + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use + the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + + + + To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`. + + + + mirror (`str`, *optional*): + Mirror source to accelerate downloads in China. If you are from China and have an accessibility + problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. + Please refer to the mirror site for more information. + _fast_init(`bool`, *optional*, defaults to `True`): + Whether or not to disable fast initialization. + + + + One should only disable *_fast_init* to ensure backwards compatibility with `transformers.__version__ < + 4.6.0` for seeded model initialization. This argument will be removed at the next major version. See + [pull request 11471](https://github.com/huggingface/transformers/pull/11471) for more information. + + + attn_implementation (`str`, *optional*): + The attention implementation to use in the model (if relevant). Can be any of `"eager"` (manual implementation of the attention), `"sdpa"` (using [`F.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html)), or `"flash_attention_2"` (using [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention)). By default, if available, SDPA will be used for torch>=2.1.1. The default is otherwise the manual `"eager"` implementation. + + > Parameters for big model inference + + low_cpu_mem_usage(`bool`, *optional*): + Tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + This is an experimental feature and is subject to change at any moment. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model under a specific `dtype`. The different options + are: + + 1. `torch.float16` or `torch.bfloat16` or `torch.float`: load in a specified + `dtype`, ignoring the model's `config.torch_dtype` if one exists. If not specified + - the model will get loaded in `torch.float` (fp32). + + 2. `"auto"` - A `torch_dtype` entry in the `config.json` file of the model will be + attempted to be used.
If this entry isn't found then next check the `dtype` of the first weight in + the checkpoint that's of a floating point type and use that as `dtype`. This will load the model + using the `dtype` it was saved in at the end of the training. It can't be used as an indicator of how + the model was trained, since it could have been trained in one of the half-precision dtypes but saved in fp32. + + + + For some models the `dtype` they were trained in is unknown - you may try to check the model's paper or + reach out to the authors and ask them to add this information to the model's card and to insert the + `torch_dtype` entry in `config.json` on the hub. + + + + device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*): + A map that specifies where each submodule should go. It doesn't need to be refined to each + parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the + same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank + like `1`) on which the model will be allocated, the device map will map the entire model to this + device. Passing `device_map = 0` means put the whole model on GPU 0. + + To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary mapping device identifiers to maximum memory. Will default to the maximum memory available for each + GPU and the available CPU RAM if unset. + offload_folder (`str` or `os.PathLike`, *optional*): + If the `device_map` contains any value `"disk"`, the folder where we will offload weights. + offload_state_dict (`bool`, *optional*): + If `True`, will temporarily offload the CPU state dict to the hard drive to avoid getting out of CPU + RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to + `True` when there is some disk offload. + offload_buffers (`bool`, *optional*): + Whether or not to offload the buffers with the model parameters. + quantization_config (`Union[QuantizationConfigMixin,Dict]`, *optional*): + A dictionary of configuration parameters or a QuantizationConfigMixin object for quantization (e.g., + bitsandbytes, gptq). There may be other quantization-related kwargs, including `load_in_4bit` and + `load_in_8bit`, which are parsed by QuantizationConfigParser. Supported only for bitsandbytes + quantizations and not preferred. Consider inserting all such arguments into quantization_config + instead. + subfolder (`str`, *optional*, defaults to `""`): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can + specify the folder name here. + variant (`str`, *optional*): + If specified, load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is + ignored when using `from_tf` or `from_flax`. + use_safetensors (`bool`, *optional*, defaults to `None`): + Whether or not to use `safetensors` checkpoints. Defaults to `None`. If not specified and `safetensors` + is not installed, it will be set to `False`. + + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g., + `output_attentions=True`).
Behaves differently depending on whether a `config` is provided or + automatically loaded: + + - If a configuration is provided with `config`, `**kwargs` will be directly passed to the + underlying model's `__init__` method (we assume all relevant updates to the configuration have + already been done) + - If a configuration is not provided, `kwargs` will be first passed to the configuration class + initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that + corresponds to a configuration attribute will be used to override said attribute with the + supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute + will be passed to the underlying model's `__init__` function. + + + + Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to + use this method in a firewalled environment. + + + + Examples: + + ```python + >>> from transformers import BertConfig, BertModel + + >>> # Download model and configuration from huggingface.co and cache. + >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased") + >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). + >>> model = BertModel.from_pretrained("./test/saved_model/") + >>> # Update configuration during loading. + >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased", output_attentions=True) + >>> assert model.config.output_attentions == True + >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable). + >>> config = BertConfig.from_json_file("./tf_model/my_tf_model_config.json") + >>> model = BertModel.from_pretrained("./tf_model/my_tf_checkpoint.ckpt.index", from_tf=True, config=config) + >>> # Loading from a Flax checkpoint file instead of a PyTorch model (slower) + >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased", from_flax=True) + ``` + + * `low_cpu_mem_usage` algorithm: + + This is an experimental function that loads the model using ~1x model size CPU memory + + Here is how it works: + + 1. save which state_dict keys we have + 2. drop state_dict before the model is created, since the latter takes 1x model size CPU memory + 3. after the model has been instantiated switch to the meta device all params/buffers that + are going to be replaced from the loaded state_dict + 4. load state_dict 2nd time + 5. 
replace the params/buffers from the state_dict + + Currently, it can't handle deepspeed ZeRO stage 3 and ignores loading errors + + """ + state_dict = kwargs.pop("state_dict", None) + from_tf = kwargs.pop("from_tf", False) + from_flax = kwargs.pop("from_flax", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + output_loading_info = kwargs.pop("output_loading_info", False) + use_auth_token = kwargs.pop("use_auth_token", None) + trust_remote_code = kwargs.pop("trust_remote_code", None) + _ = kwargs.pop("mirror", None) + from_pipeline = kwargs.pop("_from_pipeline", None) + from_auto_class = kwargs.pop("_from_auto", False) + _fast_init = kwargs.pop("_fast_init", True) + torch_dtype = kwargs.pop("torch_dtype", None) + low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", None) + device_map = kwargs.pop("device_map", None) + max_memory = kwargs.pop("max_memory", None) + offload_folder = kwargs.pop("offload_folder", None) + offload_state_dict = kwargs.pop("offload_state_dict", False) + offload_buffers = kwargs.pop("offload_buffers", False) + load_in_8bit = kwargs.pop("load_in_8bit", False) + load_in_4bit = kwargs.pop("load_in_4bit", False) + quantization_config = kwargs.pop("quantization_config", None) + subfolder = kwargs.pop("subfolder", "") + commit_hash = kwargs.pop("_commit_hash", None) + variant = kwargs.pop("variant", None) + adapter_kwargs = kwargs.pop("adapter_kwargs", {}) + adapter_name = kwargs.pop("adapter_name", "default") + use_flash_attention_2 = kwargs.pop("use_flash_attention_2", False) + + if is_fsdp_enabled(): + low_cpu_mem_usage = True + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + token = use_auth_token + + if token is not None and adapter_kwargs is not None and "token" not in adapter_kwargs: + adapter_kwargs["token"] = token + + if use_safetensors is None and not is_safetensors_available(): + use_safetensors = False + if trust_remote_code is True: + logger.warning( + "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is" + " ignored." 
+ ) + + if commit_hash is None: + if not isinstance(config, PretrainedConfig): + # We make a call to the config file first (which may be absent) to get the commit hash as soon as possible + resolved_config_file = cached_file( + pretrained_model_name_or_path, + CONFIG_NAME, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + _raise_exceptions_for_gated_repo=False, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + ) + commit_hash = extract_commit_hash(resolved_config_file, commit_hash) + else: + commit_hash = getattr(config, "_commit_hash", None) + + if is_peft_available(): + _adapter_model_path = adapter_kwargs.pop("_adapter_model_path", None) + + if _adapter_model_path is None: + _adapter_model_path = find_adapter_config_file( + pretrained_model_name_or_path, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + _commit_hash=commit_hash, + **adapter_kwargs, + ) + if _adapter_model_path is not None and os.path.isfile(_adapter_model_path): + with open(_adapter_model_path, "r", encoding="utf-8") as f: + _adapter_model_path = pretrained_model_name_or_path + pretrained_model_name_or_path = json.load(f)["base_model_name_or_path"] + else: + _adapter_model_path = None + + # change device_map into a map if we passed an int, a str or a torch.device + if isinstance(device_map, torch.device): + device_map = {"": device_map} + elif isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: + try: + device_map = {"": torch.device(device_map)} + except RuntimeError: + raise ValueError( + "When passing device_map as a string, the value needs to be a device name (e.g. cpu, cuda:0) or " + f"'auto', 'balanced', 'balanced_low_0', 'sequential' but found {device_map}." + ) + elif isinstance(device_map, int): + if device_map < 0: + raise ValueError( + "You can't pass device_map as a negative int. If you want to put the model on the cpu, pass device_map = 'cpu' " + ) + else: + device_map = {"": device_map} + + if device_map is not None: + if low_cpu_mem_usage is None: + low_cpu_mem_usage = True + elif not low_cpu_mem_usage: + raise ValueError("Passing along a `device_map` requires `low_cpu_mem_usage=True`") + + if low_cpu_mem_usage: + if is_deepspeed_zero3_enabled(): + raise ValueError( + "DeepSpeed Zero-3 is not compatible with `low_cpu_mem_usage=True` or with passing a `device_map`." + ) + elif not is_accelerate_available(): + raise ImportError( + "Using `low_cpu_mem_usage=True` or a `device_map` requires Accelerate: `pip install accelerate`" + ) + + # handling bnb config from kwargs, remove after `load_in_{4/8}bit` deprecation. + if load_in_4bit or load_in_8bit: + if quantization_config is not None: + raise ValueError( + "You can't pass `load_in_4bit`or `load_in_8bit` as a kwarg when passing " + "`quantization_config` argument at the same time." 
+ ) + + # preparing BitsAndBytesConfig from kwargs + config_dict = {k: v for k, v in kwargs.items() if k in inspect.signature(BitsAndBytesConfig).parameters} + config_dict = {**config_dict, "load_in_4bit": load_in_4bit, "load_in_8bit": load_in_8bit} + quantization_config, kwargs = BitsAndBytesConfig.from_dict( + config_dict=config_dict, return_unused_kwargs=True, **kwargs + ) + logger.warning( + "The `load_in_4bit` and `load_in_8bit` arguments are deprecated and will be removed in the future versions. " + "Please, pass a `BitsAndBytesConfig` object in `quantization_config` argument instead." + ) + + from_pt = not (from_tf | from_flax) + + user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class} + if from_pipeline is not None: + user_agent["using_pipeline"] = from_pipeline + + if is_offline_mode() and not local_files_only: + logger.info("Offline mode: forcing local_files_only=True") + local_files_only = True + + # Load config if we don't provide a configuration + if not isinstance(config, PretrainedConfig): + config_path = config if config is not None else pretrained_model_name_or_path + config, model_kwargs = cls.config_class.from_pretrained( + config_path, + cache_dir=cache_dir, + return_unused_kwargs=True, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + _from_auto=from_auto_class, + _from_pipeline=from_pipeline, + **kwargs, + ) + else: + # In case one passes a config to `from_pretrained` + "attn_implementation" + # override the `_attn_implementation` attribute to `attn_implementation` of the kwargs + # Please see: https://github.com/huggingface/transformers/issues/28038 + + # Overwrite `config._attn_implementation` by the one from the kwargs --> in auto-factory + # we pop attn_implementation from the kwargs but this handles the case where users + # passes manually the config to `from_pretrained`. + config = copy.deepcopy(config) + + kwarg_attn_imp = kwargs.pop("attn_implementation", None) + if kwarg_attn_imp is not None and config._attn_implementation != kwarg_attn_imp: + config._attn_implementation = kwarg_attn_imp + model_kwargs = kwargs + + pre_quantized = getattr(config, "quantization_config", None) is not None + if pre_quantized or quantization_config is not None: + if pre_quantized: + config.quantization_config = AutoHfQuantizer.merge_quantization_configs( + config.quantization_config, quantization_config + ) + else: + config.quantization_config = quantization_config + hf_quantizer = AutoHfQuantizer.from_config(config.quantization_config, pre_quantized=pre_quantized) + else: + hf_quantizer = None + + if hf_quantizer is not None: + hf_quantizer.validate_environment( + torch_dtype=torch_dtype, from_tf=from_tf, from_flax=from_flax, device_map=device_map + ) + torch_dtype = hf_quantizer.update_torch_dtype(torch_dtype) + device_map = hf_quantizer.update_device_map(device_map) + + # Force-set to `True` for more mem efficiency + if low_cpu_mem_usage is None: + low_cpu_mem_usage = True + logger.warning("`low_cpu_mem_usage` was None, now set to True since model is quantized.") + is_quantized = hf_quantizer is not None + + # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the + # index of the files. 
+ is_sharded = False + sharded_metadata = None + # Load model + loading_info = None + + # Keep in fp32 modules + keep_in_fp32_modules = None + use_keep_in_fp32_modules = False + + if pretrained_model_name_or_path is not None: + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + is_local = os.path.isdir(pretrained_model_name_or_path) + if is_local: + if from_tf and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index") + ): + # Load from a TF 1.0 checkpoint in priority if from_tf + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index") + elif from_tf and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME) + ): + # Load from a TF 2.0 checkpoint in priority if from_tf + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME) + elif from_flax and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) + ): + # Load from a Flax checkpoint in priority if from_flax + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) + elif use_safetensors is not False and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant)) + ): + # Load from a safetensors checkpoint + archive_file = os.path.join( + pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant) + ) + elif use_safetensors is not False and os.path.isfile( + os.path.join( + pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant) + ) + ): + # Load from a sharded safetensors checkpoint + archive_file = os.path.join( + pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant) + ) + is_sharded = True + elif os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant)) + ): + # Load from a PyTorch checkpoint + archive_file = os.path.join( + pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant) + ) + elif os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant)) + ): + # Load from a sharded PyTorch checkpoint + archive_file = os.path.join( + pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant) + ) + is_sharded = True + # At this stage we don't have a weight file so we will raise an error. + elif os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index") + ) or os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME)): + raise EnvironmentError( + f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory" + f" {pretrained_model_name_or_path} but there is a file for TensorFlow weights. Use" + " `from_tf=True` to load this model from those weights." + ) + elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)): + raise EnvironmentError( + f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory" + f" {pretrained_model_name_or_path} but there is a file for Flax weights. Use `from_flax=True`" + " to load this model from those weights." + ) + elif use_safetensors: + raise EnvironmentError( + f"Error no file named {_add_variant(SAFE_WEIGHTS_NAME, variant)} found in directory" + f" {pretrained_model_name_or_path}." 
+ ) + else: + raise EnvironmentError( + f"Error no file named {_add_variant(WEIGHTS_NAME, variant)}, {TF2_WEIGHTS_NAME}," + f" {TF_WEIGHTS_NAME + '.index'} or {FLAX_WEIGHTS_NAME} found in directory" + f" {pretrained_model_name_or_path}." + ) + elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)): + archive_file = pretrained_model_name_or_path + is_local = True + elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path + ".index")): + if not from_tf: + raise ValueError( + f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set " + "from_tf to True to load from this checkpoint." + ) + archive_file = os.path.join(subfolder, pretrained_model_name_or_path + ".index") + is_local = True + elif is_remote_url(pretrained_model_name_or_path): + filename = pretrained_model_name_or_path + resolved_archive_file = download_url(pretrained_model_name_or_path) + else: + # set correct filename + if from_tf: + filename = TF2_WEIGHTS_NAME + elif from_flax: + filename = FLAX_WEIGHTS_NAME + elif use_safetensors is not False: + filename = _add_variant(SAFE_WEIGHTS_NAME, variant) + else: + filename = _add_variant(WEIGHTS_NAME, variant) + + try: + # Load from URL or cache if already cached + cached_file_kwargs = { + "cache_dir": cache_dir, + "force_download": force_download, + "proxies": proxies, + "resume_download": resume_download, + "local_files_only": local_files_only, + "token": token, + "user_agent": user_agent, + "revision": revision, + "subfolder": subfolder, + "_raise_exceptions_for_gated_repo": False, + "_raise_exceptions_for_missing_entries": False, + "_commit_hash": commit_hash, + } + resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) + + # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None + # result when internet is up, the repo and revision exist, but the file does not. + if resolved_archive_file is None and filename == _add_variant(SAFE_WEIGHTS_NAME, variant): + # Maybe the checkpoint is sharded, we try to grab the index name in this case. + resolved_archive_file = cached_file( + pretrained_model_name_or_path, + _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant), + **cached_file_kwargs, + ) + if resolved_archive_file is not None: + is_sharded = True + elif use_safetensors: + if revision == "main": + resolved_archive_file, revision, is_sharded = auto_conversion( + pretrained_model_name_or_path, **cached_file_kwargs + ) + cached_file_kwargs["revision"] = revision + if resolved_archive_file is None: + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named" + f" {_add_variant(SAFE_WEIGHTS_NAME, variant)} or {_add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)} " + "and thus cannot be loaded with `safetensors`. Please make sure that the model has " + "been saved with `safe_serialization=True` or do not set `use_safetensors=True`." + ) + else: + # This repo has no safetensors file of any kind, we switch to PyTorch. + filename = _add_variant(WEIGHTS_NAME, variant) + resolved_archive_file = cached_file( + pretrained_model_name_or_path, filename, **cached_file_kwargs + ) + if resolved_archive_file is None and filename == _add_variant(WEIGHTS_NAME, variant): + # Maybe the checkpoint is sharded, we try to grab the index name in this case. 
+ resolved_archive_file = cached_file( + pretrained_model_name_or_path, + _add_variant(WEIGHTS_INDEX_NAME, variant), + **cached_file_kwargs, + ) + if resolved_archive_file is not None: + is_sharded = True + + if resolved_archive_file is not None: + if filename in [WEIGHTS_NAME, WEIGHTS_INDEX_NAME]: + # If the PyTorch file was found, check if there is a safetensors file on the repository + # If there is no safetensors file on the repositories, start an auto conversion + safe_weights_name = SAFE_WEIGHTS_INDEX_NAME if is_sharded else SAFE_WEIGHTS_NAME + has_file_kwargs = { + "revision": revision, + "proxies": proxies, + "token": token, + } + cached_file_kwargs = { + "cache_dir": cache_dir, + "force_download": force_download, + "resume_download": resume_download, + "local_files_only": local_files_only, + "user_agent": user_agent, + "subfolder": subfolder, + "_raise_exceptions_for_gated_repo": False, + "_raise_exceptions_for_missing_entries": False, + "_commit_hash": commit_hash, + **has_file_kwargs, + } + if not has_file(pretrained_model_name_or_path, safe_weights_name, **has_file_kwargs): + Thread( + target=auto_conversion, + args=(pretrained_model_name_or_path,), + kwargs={"ignore_errors_during_conversion": True, **cached_file_kwargs}, + name="Thread-autoconversion", + ).start() + else: + # Otherwise, no PyTorch file was found, maybe there is a TF or Flax model file. + # We try those to give a helpful error message. + has_file_kwargs = { + "revision": revision, + "proxies": proxies, + "token": token, + } + if has_file(pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **has_file_kwargs): + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named" + f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file for TensorFlow weights." + " Use `from_tf=True` to load this model from those weights." + ) + elif has_file(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME, **has_file_kwargs): + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named" + f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file for Flax weights. Use" + " `from_flax=True` to load this model from those weights." + ) + elif variant is not None and has_file( + pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs + ): + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named" + f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file without the variant" + f" {variant}. Use `variant=None` to load this model from those weights." + ) + else: + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named" + f" {_add_variant(WEIGHTS_NAME, variant)}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or" + f" {FLAX_WEIGHTS_NAME}." + ) + except EnvironmentError: + # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted + # to the original exception. + raise + except Exception as e: + # For any other exception, we throw a generic error. + raise EnvironmentError( + f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it" + " from 'https://huggingface.co/models', make sure you don't have a local directory with the" + f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" + f" directory containing a file named {_add_variant(WEIGHTS_NAME, variant)}," + f" {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or {FLAX_WEIGHTS_NAME}." 
+ ) from e + + if is_local: + logger.info(f"loading weights file {archive_file}") + resolved_archive_file = archive_file + else: + logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}") + else: + resolved_archive_file = None + + # We'll need to download and cache each checkpoint shard if the checkpoint is sharded. + if is_sharded: + # resolved_archive_file becomes a list of files that point to the different checkpoint shards in this case. + resolved_archive_file, sharded_metadata = get_checkpoint_shard_files( + pretrained_model_name_or_path, + resolved_archive_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + revision=revision, + subfolder=subfolder, + _commit_hash=commit_hash, + ) + + if ( + is_safetensors_available() + and isinstance(resolved_archive_file, str) + and resolved_archive_file.endswith(".safetensors") + ): + with safe_open(resolved_archive_file, framework="pt") as f: + metadata = f.metadata() + + if metadata.get("format") == "pt": + pass + elif metadata.get("format") == "tf": + from_tf = True + logger.info("A TensorFlow safetensors file is being loaded in a PyTorch model.") + elif metadata.get("format") == "flax": + from_flax = True + logger.info("A Flax safetensors file is being loaded in a PyTorch model.") + elif metadata.get("format") == "mlx": + # This is an mlx file, we assume weights are compatible with pt + pass + else: + raise ValueError( + f"Incompatible safetensors file. File metadata is not ['pt', 'tf', 'flax', 'mlx'] but {metadata.get('format')}" + ) + + from_pt = not (from_tf | from_flax) + + # load pt weights early so that we know which dtype to init the model under + if from_pt: + if not is_sharded and state_dict is None: + # Time to load the checkpoint + state_dict = load_state_dict(resolved_archive_file) + + # set dtype to instantiate the model under: + # 1. If torch_dtype is not None, we use that dtype + # 2.
If torch_dtype is "auto", we auto-detect dtype from the loaded state_dict, by checking its first + # weights entry that is of a floating type - we assume all floating dtype weights are of the same dtype + # we also may have config.torch_dtype available, but we won't rely on it till v5 + dtype_orig = None + + if torch_dtype is not None: + if isinstance(torch_dtype, str): + if torch_dtype == "auto": + if hasattr(config, "torch_dtype") and config.torch_dtype is not None: + torch_dtype = config.torch_dtype + logger.info(f"Will use torch_dtype={torch_dtype} as defined in model's config object") + else: + if is_sharded and "dtype" in sharded_metadata: + torch_dtype = sharded_metadata["dtype"] + elif not is_sharded: + torch_dtype = get_state_dict_dtype(state_dict) + else: + one_state_dict = load_state_dict(resolved_archive_file[0]) + torch_dtype = get_state_dict_dtype(one_state_dict) + del one_state_dict # free CPU memory + logger.info( + "Since the `torch_dtype` attribute can't be found in model's config object, " + "will use torch_dtype={torch_dtype} as derived from model's weights" + ) + else: + raise ValueError( + f'`torch_dtype` can be either `torch.dtype` or `"auto"`, but received {torch_dtype}' + ) + dtype_orig = cls._set_default_torch_dtype(torch_dtype) + + # Check if `_keep_in_fp32_modules` is not None + use_keep_in_fp32_modules = (cls._keep_in_fp32_modules is not None) and ( + (torch_dtype == torch.float16) or hasattr(hf_quantizer, "use_keep_in_fp32_modules") + ) + + if is_sharded: + loaded_state_dict_keys = sharded_metadata["all_checkpoint_keys"] + else: + loaded_state_dict_keys = list(state_dict.keys()) + if low_cpu_mem_usage or (use_keep_in_fp32_modules and is_accelerate_available()): + # In case some weights need to be kept in float32 and accelerate is not installed, + # we later on want to take the path where state_dict is not None, that is the one + # that do not require accelerate. + state_dict = None + + config.name_or_path = pretrained_model_name_or_path + + # Instantiate model. + init_contexts = [no_init_weights(_enable=_fast_init)] + + if is_deepspeed_zero3_enabled() and not is_quantized: + import deepspeed + + logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model") + init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config())] + init_contexts + elif low_cpu_mem_usage: + init_contexts.append(init_empty_weights()) + + config = copy.deepcopy(config) # We do not want to modify the config inplace in from_pretrained. 
+ config = cls._autoset_attn_implementation( + config, use_flash_attention_2=use_flash_attention_2, torch_dtype=torch_dtype, device_map=device_map + ) + + with ContextManagers(init_contexts): + # Let's make sure we don't run the init function of buffer modules + model = cls(config, *model_args, **model_kwargs) + + # make sure we use the model's config since the __init__ call might have copied it + config = model.config + + # Check first if we are `from_pt` + if use_keep_in_fp32_modules: + if is_accelerate_available() and not is_deepspeed_zero3_enabled(): + low_cpu_mem_usage = True + keep_in_fp32_modules = model._keep_in_fp32_modules + else: + keep_in_fp32_modules = [] + + if hf_quantizer is not None: + hf_quantizer.preprocess_model( + model=model, device_map=device_map, keep_in_fp32_modules=keep_in_fp32_modules + ) + + # We store the original dtype for quantized models as we cannot easily retrieve it + # once the weights have been quantized + # Note that once you have loaded a quantized model, you can't change its dtype so this will + # remain a single source of truth + config._pre_quantization_dtype = torch_dtype + + if isinstance(device_map, str): + special_dtypes = {} + + if hf_quantizer is not None: + special_dtypes.update(hf_quantizer.get_special_dtypes_update(model, torch_dtype)) + + special_dtypes.update( + { + name: torch.float32 + for name, _ in model.named_parameters() + if any(m in name for m in keep_in_fp32_modules) + } + ) + + target_dtype = torch_dtype + + if hf_quantizer is not None: + target_dtype = hf_quantizer.adjust_target_dtype(target_dtype) + + no_split_modules = model._get_no_split_modules(device_map) + if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: + raise ValueError( + "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " + "'sequential'." + ) + + device_map_kwargs = {"no_split_module_classes": no_split_modules} + if "special_dtypes" in inspect.signature(infer_auto_device_map).parameters: + device_map_kwargs["special_dtypes"] = special_dtypes + elif len(special_dtypes) > 0: + logger.warning( + "This model has some weights that should be kept in higher precision, you need to upgrade " + "`accelerate` to properly deal with them (`pip install --upgrade accelerate`)." + ) + if device_map != "sequential": + max_memory = get_balanced_memory( + model, + dtype=target_dtype, + low_zero=(device_map == "balanced_low_0"), + max_memory=max_memory, + **device_map_kwargs, + ) + else: + max_memory = get_max_memory(max_memory) + if hf_quantizer is not None: + max_memory = hf_quantizer.adjust_max_memory(max_memory) + device_map_kwargs["max_memory"] = max_memory + + # Make sure tied weights are tied before creating the device map. 
+ model.tie_weights() + device_map = infer_auto_device_map(model, dtype=target_dtype, **device_map_kwargs) + + if hf_quantizer is not None: + hf_quantizer.validate_environment(device_map=device_map) + + elif device_map is not None: + model.tie_weights() + tied_params = find_tied_parameters(model) + # check if we don't have tied param in different devices + check_tied_parameters_on_same_device(tied_params, device_map) + + if from_tf: + if resolved_archive_file.endswith(".index"): + # Load from a TensorFlow 1.X checkpoint - provided by original authors + model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index' + else: + # Load from our TensorFlow 2.0 checkpoints + try: + from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model + + model, loading_info = load_tf2_checkpoint_in_pytorch_model( + model, resolved_archive_file, allow_missing_keys=True, output_loading_info=True + ) + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed." + " Please see https://pytorch.org/ and https://www.tensorflow.org/install/ for installation" + " instructions." + ) + raise + elif from_flax: + try: + from .modeling_flax_pytorch_utils import load_flax_checkpoint_in_pytorch_model + + model = load_flax_checkpoint_in_pytorch_model(model, resolved_archive_file) + except ImportError: + logger.error( + "Loading a Flax model in PyTorch, requires both PyTorch and Flax to be installed. Please see" + " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for" + " installation instructions." + ) + raise + elif from_pt: + # restore default dtype + if dtype_orig is not None: + torch.set_default_dtype(dtype_orig) + ( + model, + missing_keys, + unexpected_keys, + mismatched_keys, + offload_index, + error_msgs, + ) = cls._load_pretrained_model( + model, + state_dict, + loaded_state_dict_keys, # XXX: rename? + resolved_archive_file, + pretrained_model_name_or_path, + ignore_mismatched_sizes=ignore_mismatched_sizes, + sharded_metadata=sharded_metadata, + _fast_init=_fast_init, + low_cpu_mem_usage=low_cpu_mem_usage, + device_map=device_map, + offload_folder=offload_folder, + offload_state_dict=offload_state_dict, + dtype=torch_dtype, + hf_quantizer=hf_quantizer, + keep_in_fp32_modules=keep_in_fp32_modules, + ) + + # make sure token embedding weights are still tied if needed + model.tie_weights() + + # Set model in evaluation mode to deactivate DropOut modules by default + model.eval() + + # If it is a model with generation capabilities, attempt to load the generation config + if model.can_generate() and pretrained_model_name_or_path is not None: + try: + model.generation_config = GenerationConfig.from_pretrained( + pretrained_model_name_or_path, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + _from_auto=from_auto_class, + _from_pipeline=from_pipeline, + **kwargs, + ) + except OSError: + logger.info( + "Generation config file not found, using a generation config created from the model config." 
+ ) + pass + + # Dispatch model with hooks on all devices if necessary + if device_map is not None: + device_map_kwargs = { + "device_map": device_map, + "offload_dir": offload_folder, + "offload_index": offload_index, + "offload_buffers": offload_buffers, + } + if "skip_keys" in inspect.signature(dispatch_model).parameters: + device_map_kwargs["skip_keys"] = model._skip_keys_device_placement + if not is_fsdp_enabled() and not is_deepspeed_zero3_enabled(): + dispatch_model(model, **device_map_kwargs) + + if hf_quantizer is not None: + hf_quantizer.postprocess_model(model) + model.hf_quantizer = hf_quantizer + + if _adapter_model_path is not None: + model.load_adapter( + _adapter_model_path, + adapter_name=adapter_name, + token=token, + adapter_kwargs=adapter_kwargs, + ) + + if output_loading_info: + if loading_info is None: + loading_info = { + "missing_keys": missing_keys, + "unexpected_keys": unexpected_keys, + "mismatched_keys": mismatched_keys, + "error_msgs": error_msgs, + } + return model, loading_info + + return model + + @classmethod + def _load_pretrained_model( + cls, + model, + state_dict, + loaded_keys, + resolved_archive_file, + pretrained_model_name_or_path, + ignore_mismatched_sizes=False, + sharded_metadata=None, + _fast_init=True, + low_cpu_mem_usage=False, + device_map=None, + offload_folder=None, + offload_state_dict=None, + dtype=None, + hf_quantizer=None, + keep_in_fp32_modules=None, + ): + is_safetensors = False + is_quantized = hf_quantizer is not None + + if device_map is not None and "disk" in device_map.values(): + archive_file = ( + resolved_archive_file[0] if isinstance(resolved_archive_file, (list, tuple)) else resolved_archive_file + ) + is_safetensors = archive_file.endswith(".safetensors") + if offload_folder is None and not is_safetensors: + raise ValueError( + "The current `device_map` had weights offloaded to the disk. Please provide an `offload_folder`" + " for them. Alternatively, make sure you have `safetensors` installed if the model you are using" + " offers the weights in this format." + ) + if offload_folder is not None: + os.makedirs(offload_folder, exist_ok=True) + if offload_state_dict is None: + offload_state_dict = True + + is_sharded_safetensors = is_safetensors and sharded_metadata is not None + + # tie the model weights before retrieving the state_dict + model.tie_weights() + + # Retrieve missing & unexpected_keys + model_state_dict = model.state_dict() + expected_keys = list(model_state_dict.keys()) + prefix = model.base_model_prefix + + def _fix_key(key): + if "beta" in key: + return key.replace("beta", "bias") + if "gamma" in key: + return key.replace("gamma", "weight") + return key + + original_loaded_keys = loaded_keys + loaded_keys = [_fix_key(key) for key in loaded_keys] + + if len(prefix) > 0: + has_prefix_module = any(s.startswith(prefix) for s in loaded_keys) + expects_prefix_module = any(s.startswith(prefix) for s in expected_keys) + else: + has_prefix_module = False + expects_prefix_module = False + + # key re-naming operations are never done on the keys + # that are loaded, but always on the keys of the newly initialized model + remove_prefix_from_model = not has_prefix_module and expects_prefix_module + add_prefix_to_model = has_prefix_module and not expects_prefix_module + + if remove_prefix_from_model: + _prefix = f"{prefix}." 
+ expected_keys_not_prefixed = [s for s in expected_keys if not s.startswith(_prefix)] + expected_keys = [s[len(_prefix) :] if s.startswith(_prefix) else s for s in expected_keys] + elif add_prefix_to_model: + expected_keys = [".".join([prefix, s]) for s in expected_keys] + + missing_keys = sorted(set(expected_keys) - set(loaded_keys)) + unexpected_keys = set(loaded_keys) - set(expected_keys) + # Remove nonpersistent buffers from unexpected keys: they are not in the state dict but will be in the model + # buffers + model_buffers = {n for n, _ in model.named_buffers()} + if remove_prefix_from_model: + model_buffers = {key[len(_prefix) :] if key.startswith(_prefix) else key for key in model_buffers} + elif add_prefix_to_model: + model_buffers = {".".join([prefix, key]) for key in model_buffers} + unexpected_keys = sorted(unexpected_keys - model_buffers) + + model.tie_weights() + if device_map is None and not is_fsdp_enabled() and not is_deepspeed_zero3_enabled(): + ptrs = collections.defaultdict(list) + for name, tensor in model.state_dict().items(): + id_tensor = id_tensor_storage(tensor) + ptrs[id_tensor].append(name) + + # These are all the pointers of shared tensors. + tied_params = [names for _, names in ptrs.items() if len(names) > 1] + else: + # id function doesn't work for meta tensor so we need this function + tied_params = find_tied_parameters(model) + + for group in tied_params: + if remove_prefix_from_model: + group = [key[len(_prefix) :] if key.startswith(_prefix) else key for key in group] + elif add_prefix_to_model: + group = [".".join([prefix, key]) for key in group] + missing_in_group = [k for k in missing_keys if k in group] + if len(missing_in_group) > 0 and len(missing_in_group) < len(group): + missing_keys = [k for k in missing_keys if k not in missing_in_group] + + # Some models may have keys that are not in the state by design, removing them before needlessly warning + # the user. + if cls._keys_to_ignore_on_load_missing is not None: + for pat in cls._keys_to_ignore_on_load_missing: + missing_keys = [k for k in missing_keys if re.search(pat, k) is None] + + if cls._keys_to_ignore_on_load_unexpected is not None: + for pat in cls._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + + if hf_quantizer is not None: + missing_keys = hf_quantizer.update_missing_keys(model, missing_keys, prefix) + + # retrieve weights on meta device and put them back on CPU. 
+ # This is not ideal in terms of memory, but if we don't do that not, we can't initialize them in the next step + if low_cpu_mem_usage: + for key in missing_keys: + if key in list(model_state_dict.keys()): + key = key + elif f"{prefix}.{key}" in list(model_state_dict.keys()): + key = f"{prefix}.{key}" + elif key.startswith(prefix) and ".".join(key.split(".")[1:]) in list(model_state_dict.keys()): + key = ".".join(key.split(".")[1:]) + param = model_state_dict[key] + + # upcast in fp32 if any + target_dtype = dtype + if ( + keep_in_fp32_modules is not None + and dtype == torch.float16 + and any( + module_to_keep_in_fp32 in key.split(".") for module_to_keep_in_fp32 in keep_in_fp32_modules + ) + ): + target_dtype = torch.float32 + + if param.device == torch.device("meta"): + value = torch.empty(*param.size(), dtype=target_dtype) + if ( + not is_quantized + or getattr(hf_quantizer, "requires_parameters_quantization", False) + or not hf_quantizer.check_quantized_param( + model, param_value=value, param_name=key, state_dict={} + ) + ): + set_module_tensor_to_device(model, key, "cpu", value) + else: + hf_quantizer.create_quantized_param(model, value, key, "cpu", state_dict, unexpected_keys) + + # retrieve uninitialized modules and initialize before maybe overriding that with the pretrained weights. + if _fast_init: + if not ignore_mismatched_sizes: + if remove_prefix_from_model: + _loaded_keys = [f"{prefix}.{k}" for k in loaded_keys] + elif add_prefix_to_model: + _loaded_keys = [k[len(prefix) + 1 :] for k in loaded_keys] + else: + _loaded_keys = loaded_keys + not_initialized_submodules = set_initialized_submodules(model, _loaded_keys) + # If we're about to tie the output embeds to the input embeds we don't need to init them + if hasattr(model.config, "tie_word_embeddings") and model.config.tie_word_embeddings: + output_embeddings = model.get_output_embeddings() + if output_embeddings is not None: + # Still need to initialize if there is a bias term since biases are not tied. + if not hasattr(output_embeddings, "bias") or output_embeddings.bias is None: + output_embeddings._is_hf_initialized = True + else: + not_initialized_submodules = dict(model.named_modules()) + # This will only initialize submodules that are not marked as initialized by the line above. + if is_deepspeed_zero3_enabled() and not is_quantized: + import deepspeed + + not_initialized_parameters = list( + set( + itertools.chain.from_iterable( + submodule.parameters(recurse=False) for submodule in not_initialized_submodules.values() + ) + ) + ) + with deepspeed.zero.GatheredParameters(not_initialized_parameters, modifier_rank=0): + model.apply(model._initialize_weights) + else: + model.apply(model._initialize_weights) + + # Set some modules to fp32 if any + if keep_in_fp32_modules is not None: + for name, param in model.named_parameters(): + if any(module_to_keep_in_fp32 in name.split(".") for module_to_keep_in_fp32 in keep_in_fp32_modules): + # param = param.to(torch.float32) does not work here as only in the local scope. + param.data = param.data.to(torch.float32) + + # Make sure we are able to load base models as well as derived models (with heads) + start_prefix = "" + model_to_load = model + if len(cls.base_model_prefix) > 0 and not hasattr(model, cls.base_model_prefix) and has_prefix_module: + start_prefix = cls.base_model_prefix + "." 
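The prefix bookkeeping above (`remove_prefix_from_model` / `add_prefix_to_model`) never renames the checkpoint keys; it rewrites the expected keys of the freshly initialized model so both sides line up. A rough plain-Python sketch of that reconciliation, under the assumption of a single `base_model_prefix` level; the names here are illustrative, not the library code:

    def reconcile_expected_keys(expected_keys, loaded_keys, prefix):
        has_prefix_module = any(k.startswith(prefix) for k in loaded_keys)
        expects_prefix_module = any(k.startswith(prefix) for k in expected_keys)
        if expects_prefix_module and not has_prefix_module:
            # Base-model checkpoint loaded into a model with a head: strip the prefix.
            expected_keys = [
                k[len(prefix) + 1:] if k.startswith(f"{prefix}.") else k for k in expected_keys
            ]
        elif has_prefix_module and not expects_prefix_module:
            # Checkpoint with a head loaded into a bare base model: add the prefix.
            expected_keys = [f"{prefix}.{k}" for k in expected_keys]
        return expected_keys

    print(reconcile_expected_keys(
        ["bert.embeddings.weight", "classifier.weight"], ["embeddings.weight"], "bert"
    ))
    # ['embeddings.weight', 'classifier.weight']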
+ if len(cls.base_model_prefix) > 0 and hasattr(model, cls.base_model_prefix) and not has_prefix_module: + model_to_load = getattr(model, cls.base_model_prefix) + base_model_expected_keys = list(model_to_load.state_dict().keys()) + if any(key in expected_keys_not_prefixed and key not in base_model_expected_keys for key in loaded_keys): + raise ValueError( + "The state dictionary of the model you are trying to load is corrupted. Are you sure it was " + "properly saved?" + ) + if device_map is not None: + device_map = {k.replace(f"{cls.base_model_prefix}.", ""): v for k, v in device_map.items()} + + def _find_mismatched_keys( + state_dict, + model_state_dict, + loaded_keys, + add_prefix_to_model, + remove_prefix_from_model, + ignore_mismatched_sizes, + ): + mismatched_keys = [] + if ignore_mismatched_sizes: + for checkpoint_key in loaded_keys: + # If the checkpoint is sharded, we may not have the key here. + if checkpoint_key not in state_dict: + continue + model_key = checkpoint_key + if remove_prefix_from_model: + # The model key starts with `prefix` but `checkpoint_key` doesn't so we add it. + model_key = f"{prefix}.{checkpoint_key}" + elif add_prefix_to_model: + # The model key doesn't start with `prefix` but `checkpoint_key` does so we remove it. + model_key = ".".join(checkpoint_key.split(".")[1:]) + + if ( + model_key in model_state_dict + and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape + ): + if ( + state_dict[checkpoint_key].shape[-1] == 1 + and state_dict[checkpoint_key].numel() * 2 == model_state_dict[model_key].numel() + ): + # This skips size mismatches for 4-bit weights. Two 4-bit values share an 8-bit container, causing size differences. + # Without matching with module type or paramter type it seems like a practical way to detect valid 4bit weights. + pass + else: + mismatched_keys.append( + (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape) + ) + del state_dict[checkpoint_key] + return mismatched_keys + + if resolved_archive_file is not None: + folder = os.path.sep.join(resolved_archive_file[0].split(os.path.sep)[:-1]) + else: + folder = None + if device_map is not None and is_safetensors: + param_device_map = expand_device_map(device_map, original_loaded_keys, start_prefix) + str_dtype = str(dtype).replace("torch.", "") if dtype is not None else "float32" + if sharded_metadata is None: + archive_file = ( + resolved_archive_file[0] + if isinstance(resolved_archive_file, (list, tuple)) + else resolved_archive_file + ) + weight_map = {p: archive_file for p in original_loaded_keys} + else: + weight_map = {p: os.path.join(folder, f) for p, f in sharded_metadata["weight_map"].items()} + offload_index = { + p[len(start_prefix) :]: {"safetensors_file": f, "weight_name": p, "dtype": str_dtype} + for p, f in weight_map.items() + if p.startswith(start_prefix) and param_device_map[p[len(start_prefix) :]] == "disk" + } + + if state_dict is not None: + # Whole checkpoint + mismatched_keys = _find_mismatched_keys( + state_dict, + model_state_dict, + original_loaded_keys, + add_prefix_to_model, + remove_prefix_from_model, + ignore_mismatched_sizes, + ) + error_msgs = _load_state_dict_into_model(model_to_load, state_dict, start_prefix) + offload_index = None + else: + # Sharded checkpoint or whole but low_cpu_mem_usage==True + + # This should always be a list but, just to be sure. 
+ if not isinstance(resolved_archive_file, list): + resolved_archive_file = [resolved_archive_file] + + error_msgs = [] + mismatched_keys = [] + if not is_safetensors: + offload_index = {} if device_map is not None and "disk" in device_map.values() else None + if offload_state_dict: + state_dict_folder = tempfile.mkdtemp() + state_dict_index = {} + else: + state_dict_folder = None + state_dict_index = None + + if is_sharded_safetensors: + disk_only_shard_files = get_disk_only_shard_files( + device_map, sharded_metadata=sharded_metadata, start_prefix=start_prefix + ) + disk_only_shard_files = [os.path.join(folder, f) for f in disk_only_shard_files] + else: + disk_only_shard_files = [] + + if len(resolved_archive_file) > 1: + resolved_archive_file = logging.tqdm(resolved_archive_file, desc="Loading checkpoint shards") + for shard_file in resolved_archive_file: + # Skip the load for shards that only contain disk-offloaded weights when using safetensors for the offload. + if shard_file in disk_only_shard_files: + continue + state_dict = load_state_dict(shard_file, is_quantized=is_quantized) + + # Mistmatched keys contains tuples key/shape1/shape2 of weights in the checkpoint that have a shape not + # matching the weights in the model. + mismatched_keys += _find_mismatched_keys( + state_dict, + model_state_dict, + original_loaded_keys, + add_prefix_to_model, + remove_prefix_from_model, + ignore_mismatched_sizes, + ) + if low_cpu_mem_usage: + if is_fsdp_enabled() and not is_local_dist_rank_0() and not is_quantized: + for key, param in model_to_load.state_dict().items(): + if param.device == torch.device("meta"): + set_module_tensor_to_device( + model_to_load, key, "cpu", torch.empty(*param.size(), dtype=dtype) + ) + else: + new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model( + model_to_load, + state_dict, + loaded_keys, + start_prefix, + expected_keys, + device_map=device_map, + offload_folder=offload_folder, + offload_index=offload_index, + state_dict_folder=state_dict_folder, + state_dict_index=state_dict_index, + dtype=dtype, + hf_quantizer=hf_quantizer, + is_safetensors=is_safetensors, + keep_in_fp32_modules=keep_in_fp32_modules, + unexpected_keys=unexpected_keys, + ) + error_msgs += new_error_msgs + else: + error_msgs += _load_state_dict_into_model(model_to_load, state_dict, start_prefix) + + # force memory release + del state_dict + gc.collect() + + if offload_index is not None and len(offload_index) > 0: + if model != model_to_load: + # We need to add the prefix of the base model + prefix = cls.base_model_prefix + if not is_safetensors: + for weight_name in offload_index: + shutil.move( + os.path.join(offload_folder, f"{weight_name}.dat"), + os.path.join(offload_folder, f"{prefix}.{weight_name}.dat"), + ) + offload_index = {f"{prefix}.{key}": value for key, value in offload_index.items()} + if not is_safetensors: + save_offload_index(offload_index, offload_folder) + offload_index = None + + if offload_state_dict: + # Load back temporarily offloaded state dict + load_offloaded_weights(model_to_load, state_dict_index, state_dict_folder) + shutil.rmtree(state_dict_folder) + + if len(error_msgs) > 0: + error_msg = "\n\t".join(error_msgs) + if "size mismatch" in error_msg: + error_msg += ( + "\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method." 
+ ) + raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}") + + if len(unexpected_keys) > 0: + archs = [] if model.config.architectures is None else model.config.architectures + warner = logger.warning if model.__class__.__name__ in archs else logger.info + warner( + f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" + f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" + f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" + " with another architecture (e.g. initializing a BertForSequenceClassification model from a" + " BertForPreTraining model).\n- This IS NOT expected if you are initializing" + f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical" + " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." + ) + else: + logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") + if len(missing_keys) > 0: + logger.warning( + f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" + f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" + " TRAIN this model on a down-stream task to be able to use it for predictions and inference." + ) + elif len(mismatched_keys) == 0: + logger.info( + f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" + f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" + f" was trained on, you can already use {model.__class__.__name__} for predictions without further" + " training." + ) + if len(mismatched_keys) > 0: + mismatched_warning = "\n".join( + [ + f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" + for key, shape1, shape2 in mismatched_keys + ] + ) + logger.warning( + f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" + f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not" + f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able" + " to use it for predictions and inference." + ) + + return model, missing_keys, unexpected_keys, mismatched_keys, offload_index, error_msgs + + def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False): + module_keys = {".".join(key.split(".")[:-1]) for key in names} + + # torch.nn.ParameterList is a special case where two parameter keywords + # are appended to the module name, *e.g.* bert.special_embeddings.0 + module_keys = module_keys.union( + {".".join(key.split(".")[:-2]) for key in names if len(key) > 0 and key[-1].isdigit()} + ) + + retrieved_modules = [] + # retrieve all modules that has at least one missing weight name + for name, module in self.named_modules(): + if remove_prefix: + _prefix = f"{self.base_model_prefix}." 
+ name = name[len(_prefix) :] if name.startswith(_prefix) else name + elif add_prefix: + name = ".".join([self.base_model_prefix, name]) if len(name) > 0 else self.base_model_prefix + + if name in module_keys: + retrieved_modules.append(module) + + return retrieved_modules + + @staticmethod + def _load_pretrained_model_low_mem( + model, loaded_state_dict_keys, resolved_archive_file, start_prefix="", hf_quantizer=None + ): + """ + This is an experimental function that loads the model using ~1.x model size CPU memory + + Before you call it do: + + 1. save which state_dict keys are available + 2. drop state_dict before model is created, since the latter takes 1x model size memory + + Here then we continue: + + 3. switch to the meta device all params/buffers that are going to be replaced from the loaded state_dict + 4. load state_dict 2nd time + 5. replace the params/buffers from the state_dict + + Currently, it doesn't handle missing_keys, unexpected_keys, mismatched_keys. It can't handle deepspeed. To + handle bitsandbytes, needs non-empty hf_quantizer argument. + """ + + _move_model_to_meta(model, loaded_state_dict_keys, start_prefix) + state_dict = load_state_dict(resolved_archive_file) + expected_keys = loaded_state_dict_keys # plug for missing expected_keys. TODO: replace with proper keys + error_msgs = _load_state_dict_into_meta_model( + model, + state_dict, + loaded_state_dict_keys, + start_prefix, + expected_keys=expected_keys, + hf_quantizer=hf_quantizer, + ) + return error_msgs + + @classmethod + def register_for_auto_class(cls, auto_class="AutoModel"): + """ + Register this class with a given auto class. This should only be used for custom models as the ones in the + library are already mapped with an auto class. + + + + This API is experimental and may have some slight breaking changes in the next releases. + + + + Args: + auto_class (`str` or `type`, *optional*, defaults to `"AutoModel"`): + The auto class to register this new model with. + """ + if not isinstance(auto_class, str): + auto_class = auto_class.__name__ + + import transformers.models.auto as auto_module + + if not hasattr(auto_module, auto_class): + raise ValueError(f"{auto_class} is not a valid auto class.") + + cls._auto_class = auto_class + + def to_bettertransformer(self) -> "PreTrainedModel": + """ + Converts the model to use [PyTorch's native attention + implementation](https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html), integrated to + Transformers through [Optimum library](https://huggingface.co/docs/optimum/bettertransformer/overview). Only a + subset of all Transformers models are supported. + + PyTorch's attention fastpath allows to speed up inference through kernel fusions and the use of [nested + tensors](https://pytorch.org/docs/stable/nested.html). Detailed benchmarks can be found in [this blog + post](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2). + + Returns: + [`PreTrainedModel`]: The model converted to BetterTransformer. + """ + if not is_optimum_available(): + raise ImportError("The package `optimum` is required to use Better Transformer.") + + from optimum.version import __version__ as optimum_version + + if version.parse(optimum_version) < version.parse("1.7.0"): + raise ImportError( + f"Please install optimum>=1.7.0 to use Better Transformer. The version {optimum_version} was found." 
+ ) + + from optimum.bettertransformer import BetterTransformer + + return BetterTransformer.transform(self) + + def reverse_bettertransformer(self): + """ + Reverts the transformation from [`~PreTrainedModel.to_bettertransformer`] so that the original modeling is + used, for example in order to save the model. + + Returns: + [`PreTrainedModel`]: The model converted back to the original modeling. + """ + if not is_optimum_available(): + raise ImportError("The package `optimum` is required to use Better Transformer.") + + from optimum.version import __version__ as optimum_version + + if version.parse(optimum_version) < version.parse("1.7.0"): + raise ImportError( + f"Please install optimum>=1.7.0 to use Better Transformer. The version {optimum_version} was found." + ) + + from optimum.bettertransformer import BetterTransformer + + return BetterTransformer.reverse(self) + + def warn_if_padding_and_no_attention_mask(self, input_ids, attention_mask): + """ + Shows a one-time warning if the input_ids appear to contain padding and no attention mask was given. + """ + + # Skip the check during tracing. + if is_torch_fx_proxy(input_ids) or torch.jit.is_tracing() or is_torchdynamo_compiling(): + return + + if (attention_mask is not None) or (self.config.pad_token_id is None): + return + + # Check only the first and last input IDs to reduce overhead. + if self.config.pad_token_id in input_ids[:, [-1, 0]]: + warn_string = ( + "We strongly recommend passing in an `attention_mask` since your input_ids may be padded. See " + "https://huggingface.co/docs/transformers/troubleshooting" + "#incorrect-output-when-padding-tokens-arent-masked." + ) + + # If the pad token is equal to either BOS, EOS, or SEP, we do not know whether the user should use an + # attention_mask or not. In this case, we should still show a warning because this is a rare case. + if ( + (self.config.bos_token_id is not None and self.config.bos_token_id == self.config.pad_token_id) + or (self.config.eos_token_id is not None and self.config.eos_token_id == self.config.pad_token_id) + or (self.config.sep_token_id is not None and self.config.sep_token_id == self.config.pad_token_id) + ): + warn_string += ( + f"\nYou may ignore this warning if your `pad_token_id` ({self.config.pad_token_id}) is identical " + f"to the `bos_token_id` ({self.config.bos_token_id}), `eos_token_id` ({self.config.eos_token_id}), " + f"or the `sep_token_id` ({self.config.sep_token_id}), and your input is not padded." + ) + + logger.warning_once(warn_string) + + @property + def _is_quantized_training_enabled(self): + warnings.warn( + "`_is_quantized_training_enabled` is going to be deprecated in transformers 4.39.0. Please use `model.hf_quantizer.is_trainable` instead", + FutureWarning, + ) + + if not hasattr(self, "hf_quantizer"): + return False + + return self.hf_quantizer.is_trainable + + +PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub) +if PreTrainedModel.push_to_hub.__doc__ is not None: + PreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format( + object="model", object_class="AutoModel", object_files="model file" + ) + + +class PoolerStartLogits(nn.Module): + """ + Compute SQuAD start logits from sequence hidden states. + + Args: + config ([`PretrainedConfig`]): + The config used by the model, will be used to grab the `hidden_size` of the model. 
+ """ + + def __init__(self, config: PretrainedConfig): + super().__init__() + self.dense = nn.Linear(config.hidden_size, 1) + + def forward( + self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None + ) -> torch.FloatTensor: + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): + The final hidden states of the model. + p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): + Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token + should be masked. + + Returns: + `torch.FloatTensor`: The start logits for SQuAD. + """ + x = self.dense(hidden_states).squeeze(-1) + + if p_mask is not None: + if get_parameter_dtype(self) == torch.float16: + x = x * (1 - p_mask) - 65500 * p_mask + else: + x = x * (1 - p_mask) - 1e30 * p_mask + + return x + + +class PoolerEndLogits(nn.Module): + """ + Compute SQuAD end logits from sequence hidden states. + + Args: + config ([`PretrainedConfig`]): + The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps` + to use. + """ + + def __init__(self, config: PretrainedConfig): + super().__init__() + self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) + self.activation = nn.Tanh() + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dense_1 = nn.Linear(config.hidden_size, 1) + + def forward( + self, + hidden_states: torch.FloatTensor, + start_states: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + p_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): + The final hidden states of the model. + start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*): + The hidden states of the first tokens for the labeled span. + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + The position of the first token for the labeled span. + p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): + Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token + should be masked. + + + + One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides + `start_states`. + + + + Returns: + `torch.FloatTensor`: The end logits for SQuAD. + """ + assert ( + start_states is not None or start_positions is not None + ), "One of start_states, start_positions should be not None" + if start_positions is not None: + slen, hsz = hidden_states.shape[-2:] + start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) + start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz) + start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz) + + x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1)) + x = self.activation(x) + x = self.LayerNorm(x) + x = self.dense_1(x).squeeze(-1) + + if p_mask is not None: + if get_parameter_dtype(self) == torch.float16: + x = x * (1 - p_mask) - 65500 * p_mask + else: + x = x * (1 - p_mask) - 1e30 * p_mask + + return x + + +class PoolerAnswerClass(nn.Module): + """ + Compute SQuAD 2.0 answer class from classification and start tokens hidden states. 
+ + Args: + config ([`PretrainedConfig`]): + The config used by the model, will be used to grab the `hidden_size` of the model. + """ + + def __init__(self, config): + super().__init__() + self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) + self.activation = nn.Tanh() + self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False) + + def forward( + self, + hidden_states: torch.FloatTensor, + start_states: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + cls_index: Optional[torch.LongTensor] = None, + ) -> torch.FloatTensor: + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): + The final hidden states of the model. + start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*): + The hidden states of the first tokens for the labeled span. + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + The position of the first token for the labeled span. + cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Position of the CLS token for each sentence in the batch. If `None`, takes the last token. + + + + One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides + `start_states`. + + + + Returns: + `torch.FloatTensor`: The SQuAD 2.0 answer class. + """ + # No dependency on end_feature so that we can obtain one single `cls_logits` for each sample. + hsz = hidden_states.shape[-1] + assert ( + start_states is not None or start_positions is not None + ), "One of start_states, start_positions should be not None" + if start_positions is not None: + start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) + start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz) + + if cls_index is not None: + cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) + cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz) + else: + cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz) + + x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1)) + x = self.activation(x) + x = self.dense_1(x).squeeze(-1) + + return x + + +@dataclass +class SquadHeadOutput(ModelOutput): + """ + Base class for outputs of question answering models using a [`~modeling_utils.SQuADHead`]. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided): + Classification loss as the sum of start token, end token (and is_impossible if provided) classification + losses. + start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): + Log probabilities for the top config.start_n_top start token possibilities (beam-search). + start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): + Indices for the top config.start_n_top start token possibilities (beam-search). + end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): + Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities + (beam-search). 
+ end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): + Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). + cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided): + Log probabilities for the `is_impossible` label of the answers. + + """ + + loss: Optional[torch.FloatTensor] = None + start_top_log_probs: Optional[torch.FloatTensor] = None + start_top_index: Optional[torch.LongTensor] = None + end_top_log_probs: Optional[torch.FloatTensor] = None + end_top_index: Optional[torch.LongTensor] = None + cls_logits: Optional[torch.FloatTensor] = None + + +class SQuADHead(nn.Module): + r""" + A SQuAD head inspired by XLNet. + + Args: + config ([`PretrainedConfig`]): + The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps` + to use. + """ + + def __init__(self, config): + super().__init__() + self.start_n_top = config.start_n_top + self.end_n_top = config.end_n_top + + self.start_logits = PoolerStartLogits(config) + self.end_logits = PoolerEndLogits(config) + self.answer_class = PoolerAnswerClass(config) + + @replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig) + def forward( + self, + hidden_states: torch.FloatTensor, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + cls_index: Optional[torch.LongTensor] = None, + is_impossible: Optional[torch.LongTensor] = None, + p_mask: Optional[torch.FloatTensor] = None, + return_dict: bool = False, + ) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]: + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): + Final hidden states of the model on the sequence tokens. + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Positions of the first token for the labeled span. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Positions of the last token for the labeled span. + cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Position of the CLS token for each sentence in the batch. If `None`, takes the last token. + is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Whether the question has a possible answer in the paragraph or not. + p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): + Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token + should be masked. + return_dict (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+ + Returns: + """ + start_logits = self.start_logits(hidden_states, p_mask=p_mask) + + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, let's remove the dimension added by batch splitting + for x in (start_positions, end_positions, cls_index, is_impossible): + if x is not None and x.dim() > 1: + x.squeeze_(-1) + + # during training, compute the end logits based on the ground truth of the start position + end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) + + loss_fct = CrossEntropyLoss() + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if cls_index is not None and is_impossible is not None: + # Predict answerability from the representation of CLS and START + cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) + loss_fct_cls = nn.BCEWithLogitsLoss() + cls_loss = loss_fct_cls(cls_logits, is_impossible) + + # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss + total_loss += cls_loss * 0.5 + + return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,) + + else: + # during inference, compute the end logits based on beam search + bsz, slen, hsz = hidden_states.size() + start_log_probs = nn.functional.softmax(start_logits, dim=-1) # shape (bsz, slen) + + start_top_log_probs, start_top_index = torch.topk( + start_log_probs, self.start_n_top, dim=-1 + ) # shape (bsz, start_n_top) + start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) + start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) + start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) + + hidden_states_expanded = hidden_states.unsqueeze(2).expand_as( + start_states + ) # shape (bsz, slen, start_n_top, hsz) + p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None + end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) + end_log_probs = nn.functional.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) + + end_top_log_probs, end_top_index = torch.topk( + end_log_probs, self.end_n_top, dim=1 + ) # shape (bsz, end_n_top, start_n_top) + end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) + end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) + + start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs) + cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index) + + if not return_dict: + return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + else: + return SquadHeadOutput( + start_top_log_probs=start_top_log_probs, + start_top_index=start_top_index, + end_top_log_probs=end_top_log_probs, + end_top_index=end_top_index, + cls_logits=cls_logits, + ) + + +class SequenceSummary(nn.Module): + r""" + Compute a single vector summary of a sequence hidden states. + + Args: + config ([`PretrainedConfig`]): + The config used by the model. Relevant arguments in the config class of the model are (refer to the actual + config class of your model for the default values it uses): + + - **summary_type** (`str`) -- The method to use to make this summary. 
Accepted values are: + + - `"last"` -- Take the last token hidden state (like XLNet) + - `"first"` -- Take the first token hidden state (like Bert) + - `"mean"` -- Take the mean of all tokens hidden states + - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) + - `"attn"` -- Not implemented now, use multi-head attention + + - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. + - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes + (otherwise to `config.hidden_size`). + - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, + another string or `None` will add no activation. + - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. + - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation. + """ + + def __init__(self, config: PretrainedConfig): + super().__init__() + + self.summary_type = getattr(config, "summary_type", "last") + if self.summary_type == "attn": + # We should use a standard multi-head attention module with absolute positional embedding for that. + # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 + # We can probably just use the multi-head attention module of PyTorch >=1.1.0 + raise NotImplementedError + + self.summary = Identity() + if hasattr(config, "summary_use_proj") and config.summary_use_proj: + if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: + num_classes = config.num_labels + else: + num_classes = config.hidden_size + self.summary = nn.Linear(config.hidden_size, num_classes) + + activation_string = getattr(config, "summary_activation", None) + self.activation: Callable = get_activation(activation_string) if activation_string else Identity() + + self.first_dropout = Identity() + if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0: + self.first_dropout = nn.Dropout(config.summary_first_dropout) + + self.last_dropout = Identity() + if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: + self.last_dropout = nn.Dropout(config.summary_last_dropout) + + def forward( + self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None + ) -> torch.FloatTensor: + """ + Compute a single vector summary of a sequence hidden states. + + Args: + hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`): + The hidden states of the last layer. + cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*): + Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token. + + Returns: + `torch.FloatTensor`: The summary of the sequence hidden states. 
+ """ + if self.summary_type == "last": + output = hidden_states[:, -1] + elif self.summary_type == "first": + output = hidden_states[:, 0] + elif self.summary_type == "mean": + output = hidden_states.mean(dim=1) + elif self.summary_type == "cls_index": + if cls_index is None: + cls_index = torch.full_like( + hidden_states[..., :1, :], + hidden_states.shape[-2] - 1, + dtype=torch.long, + ) + else: + cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) + cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) + # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states + output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) + elif self.summary_type == "attn": + raise NotImplementedError + + output = self.first_dropout(output) + output = self.summary(output) + output = self.activation(output) + output = self.last_dropout(output) + + return output + + +def unwrap_model(model: nn.Module) -> nn.Module: + """ + Recursively unwraps a model from potential containers (as used in distributed training). + + Args: + model (`torch.nn.Module`): The model to unwrap. + """ + # since there could be multiple levels of wrapping, unwrap recursively + if hasattr(model, "module"): + return unwrap_model(model.module) + else: + return model + + +def expand_device_map(device_map, param_names, start_prefix): + """ + Expand a device map to return the correspondance parameter name to device. + """ + new_device_map = {} + param_names = [p[len(start_prefix) :] for p in param_names if p.startswith(start_prefix)] + for module, device in device_map.items(): + new_device_map.update( + {p: device for p in param_names if p == module or p.startswith(f"{module}.") or module == ""} + ) + return new_device_map + + +def get_disk_only_shard_files(device_map, sharded_metadata, start_prefix): + """ + Returns the list of shard files containing only weights offloaded to disk. + """ + + weight_map = { + p[len(start_prefix) :]: v for p, v in sharded_metadata["weight_map"].items() if p.startswith(start_prefix) + } + files_content = collections.defaultdict(list) + for weight_name, filename in weight_map.items(): + while len(weight_name) > 0 and weight_name not in device_map: + weight_name = ".".join(weight_name.split(".")[:-1]) + files_content[filename].append(device_map[weight_name]) + + return [fname for fname, devices in files_content.items() if set(devices) == {"disk"}] diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/optimization.py b/llmeval-env/lib/python3.10/site-packages/transformers/optimization.py new file mode 100644 index 0000000000000000000000000000000000000000..3727784fba9eee6467fe4d1232fe09621af17b18 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/optimization.py @@ -0,0 +1,888 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""PyTorch optimization for BERT model.""" + +import math +import warnings +from functools import partial +from typing import Callable, Iterable, Optional, Tuple, Union + +import torch +from torch import nn +from torch.optim import Optimizer +from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau + +from .trainer_pt_utils import LayerWiseDummyOptimizer, LayerWiseDummyScheduler +from .trainer_utils import SchedulerType +from .utils import logging +from .utils.versions import require_version + + +logger = logging.get_logger(__name__) + + +def _get_constant_lambda(_=None): + return 1 + + +def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1): + """ + Create a schedule with a constant learning rate, using the learning rate set in optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + return LambdaLR(optimizer, _get_constant_lambda, last_epoch=last_epoch) + + +def get_reduce_on_plateau_schedule(optimizer: Optimizer, **kwargs): + """ + Create a schedule with a constant learning rate that decreases when a metric has stopped improving. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + kwargs (`dict`, *optional*): + Extra parameters to be passed to the scheduler. See `torch.optim.lr_scheduler.ReduceLROnPlateau` + for possible parameters. + + Return: + `torch.optim.lr_scheduler.ReduceLROnPlateau` with the appropriate schedule. + """ + + return ReduceLROnPlateau(optimizer, **kwargs) + + +def _get_constant_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1.0, num_warmup_steps)) + return 1.0 + + +def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1): + """ + Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate + increases linearly between 0 and the initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + lr_lambda = partial(_get_constant_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps) + return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) + + +def _get_linear_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))) + + +def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): + """ + Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after + a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. 
+ + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + lr_lambda = partial( + _get_linear_schedule_with_warmup_lr_lambda, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + ) + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +def _get_cosine_schedule_with_warmup_lr_lambda( + current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: float +): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) + return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) + + +def get_cosine_schedule_with_warmup( + optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1 +): + """ + Create a schedule with a learning rate that decreases following the values of the cosine function between the + initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the + initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + num_cycles (`float`, *optional*, defaults to 0.5): + The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 + following a half-cosine). + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + lr_lambda = partial( + _get_cosine_schedule_with_warmup_lr_lambda, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + num_cycles=num_cycles, + ) + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +def _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda( + current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: int +): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) + if progress >= 1.0: + return 0.0 + return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0)))) + + +def get_cosine_with_hard_restarts_schedule_with_warmup( + optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1 +): + """ + Create a schedule with a learning rate that decreases following the values of the cosine function between the + initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases + linearly between 0 and the initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. 
+ num_training_steps (`int`): + The total number of training steps. + num_cycles (`int`, *optional*, defaults to 1): + The number of hard restarts to use. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + lr_lambda = partial( + _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + num_cycles=num_cycles, + ) + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +def _get_polynomial_decay_schedule_with_warmup_lr_lambda( + current_step: int, + *, + num_warmup_steps: int, + num_training_steps: int, + lr_end: float, + power: float, + lr_init: int, +): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + elif current_step > num_training_steps: + return lr_end / lr_init # as LambdaLR multiplies by lr_init + else: + lr_range = lr_init - lr_end + decay_steps = num_training_steps - num_warmup_steps + pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps + decay = lr_range * pct_remaining**power + lr_end + return decay / lr_init # as LambdaLR multiplies by lr_init + + +def get_polynomial_decay_schedule_with_warmup( + optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1 +): + """ + Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the + optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the + initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + lr_end (`float`, *optional*, defaults to 1e-7): + The end LR. + power (`float`, *optional*, defaults to 1.0): + Power factor. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT + implementation at + https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37 + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
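For a concrete feel of the multiplier this schedule produces, here is an illustrative calculation (the learning rates and step counts are arbitrary, and the helper simply mirrors the lambda defined above with `power=1.0`):

```python
# Illustrative numbers: lr_init=1e-4, lr_end=1e-7, 100 warmup steps, 1000 total steps.
lr_init, lr_end, power = 1e-4, 1e-7, 1.0
num_warmup_steps, num_training_steps = 100, 1000

def multiplier(step):
    # Mirrors _get_polynomial_decay_schedule_with_warmup_lr_lambda above.
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    if step > num_training_steps:
        return lr_end / lr_init
    pct_remaining = 1 - (step - num_warmup_steps) / (num_training_steps - num_warmup_steps)
    return ((lr_init - lr_end) * pct_remaining**power + lr_end) / lr_init

print(multiplier(50))    # 0.5     (halfway through warmup)
print(multiplier(550))   # ~0.5005 (halfway through the decay)
print(multiplier(1000))  # 0.001 == lr_end / lr_init
```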
+ + """ + + lr_init = optimizer.defaults["lr"] + if not (lr_init > lr_end): + raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})") + + lr_lambda = partial( + _get_polynomial_decay_schedule_with_warmup_lr_lambda, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + lr_end=lr_end, + power=power, + lr_init=lr_init, + ) + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +def _get_inverse_sqrt_schedule_lr_lambda(current_step: int, *, num_warmup_steps: int, timescale: int = None): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + shift = timescale - num_warmup_steps + decay = 1.0 / math.sqrt((current_step + shift) / timescale) + return decay + + +def get_inverse_sqrt_schedule( + optimizer: Optimizer, num_warmup_steps: int, timescale: int = None, last_epoch: int = -1 +): + """ + Create a schedule with an inverse square-root learning rate, from the initial lr set in the optimizer, after a + warmup period which increases lr linearly from 0 to the initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + timescale (`int`, *optional*, defaults to `num_warmup_steps`): + Time scale. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + # Note: this implementation is adapted from + # https://github.com/google-research/big_vision/blob/f071ce68852d56099437004fd70057597a95f6ef/big_vision/utils.py#L930 + + if timescale is None: + timescale = num_warmup_steps or 10_000 + + lr_lambda = partial(_get_inverse_sqrt_schedule_lr_lambda, num_warmup_steps=num_warmup_steps, timescale=timescale) + return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) + + +def _get_cosine_schedule_with_warmup_lr_lambda( + current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: float, min_lr_rate: float = 0.0 +): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) + factor = 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)) + factor = factor * (1 - min_lr_rate) + min_lr_rate + return max(0, factor) + + +def get_cosine_with_min_lr_schedule_with_warmup( + optimizer: Optimizer, + num_warmup_steps: int, + num_training_steps: int, + num_cycles: float = 0.5, + last_epoch: int = -1, + min_lr: float = None, + min_lr_rate: float = None, +): + """ + Create a schedule with a learning rate that decreases following the values of the cosine function between the + initial lr set in the optimizer to min_lr, after a warmup period during which it increases linearly between 0 and the + initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + num_cycles (`float`, *optional*, defaults to 0.5): + The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 + following a half-cosine). + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. 
+ min_lr (`float`, *optional*): + The minimum learning rate to reach after the cosine schedule. + min_lr_rate (`float`, *optional*): + The minimum learning rate as a ratio of the initial learning rate. If set, `min_lr` should not be set. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + if min_lr is not None and min_lr_rate is not None: + raise ValueError("Only one of min_lr or min_lr_rate should be set") + elif min_lr is not None: + min_lr_rate = min_lr / optimizer.defaults["lr"] + elif min_lr_rate is None: + raise ValueError("One of min_lr or min_lr_rate should be set through the `lr_scheduler_kwargs`") + + lr_lambda = partial( + _get_cosine_schedule_with_warmup_lr_lambda, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + num_cycles=num_cycles, + min_lr_rate=min_lr_rate, + ) + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +TYPE_TO_SCHEDULER_FUNCTION = { + SchedulerType.LINEAR: get_linear_schedule_with_warmup, + SchedulerType.COSINE: get_cosine_schedule_with_warmup, + SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, + SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, + SchedulerType.CONSTANT: get_constant_schedule, + SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, + SchedulerType.INVERSE_SQRT: get_inverse_sqrt_schedule, + SchedulerType.REDUCE_ON_PLATEAU: get_reduce_on_plateau_schedule, + SchedulerType.COSINE_WITH_MIN_LR: get_cosine_with_min_lr_schedule_with_warmup, +} + + +def get_scheduler( + name: Union[str, SchedulerType], + optimizer: Optimizer, + num_warmup_steps: Optional[int] = None, + num_training_steps: Optional[int] = None, + scheduler_specific_kwargs: Optional[dict] = None, +): + """ + Unified API to get any scheduler from its name. + + Args: + name (`str` or `SchedulerType`): + The name of the scheduler to use. + optimizer (`torch.optim.Optimizer`): + The optimizer that will be used during training. + num_warmup_steps (`int`, *optional*): + The number of warmup steps to do. This is not required by all schedulers (hence the argument being + optional), the function will raise an error if it's unset and the scheduler type requires it. + num_training_steps (`int``, *optional*): + The number of training steps to do. This is not required by all schedulers (hence the argument being + optional), the function will raise an error if it's unset and the scheduler type requires it. + scheduler_specific_kwargs (`dict`, *optional*): + Extra parameters for schedulers such as cosine with restarts. Mismatched scheduler types and scheduler + parameters will cause the scheduler function to raise a TypeError. 
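A minimal sketch of this unified entry point (the optimizer, learning rate, and step counts are placeholders):

```python
import torch
from transformers import get_scheduler

model = torch.nn.Linear(10, 2)  # stand-in for a real model
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

scheduler = get_scheduler(
    "cosine",                 # or SchedulerType.COSINE
    optimizer=optimizer,
    num_warmup_steps=100,
    num_training_steps=1000,
)
```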
+ """ + name = SchedulerType(name) + schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] + + # If a `LayerWiseDummyOptimizer` is passed we extract the optimizer dict and + # recursively call `get_scheduler` to get the proper schedulers on each parameter + if optimizer is not None and isinstance(optimizer, LayerWiseDummyOptimizer): + optimizer_dict = optimizer.optimizer_dict + scheduler_dict = {} + + for param in optimizer_dict.keys(): + scheduler_dict[param] = get_scheduler( + name, + optimizer=optimizer_dict[param], + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + ) + + def scheduler_hook(param): + # Since the optimizer hook has been already attached we only need to + # attach the scheduler hook + if param.grad is not None: + scheduler_dict[param].step() + + for param in optimizer_dict.keys(): + if param.requires_grad: + param.register_post_accumulate_grad_hook(scheduler_hook) + + return LayerWiseDummyScheduler() + + if name == SchedulerType.CONSTANT: + return schedule_func(optimizer) + + if scheduler_specific_kwargs is None: + scheduler_specific_kwargs = {} + + if name == SchedulerType.REDUCE_ON_PLATEAU: + return schedule_func(optimizer, **scheduler_specific_kwargs) + + # All other schedulers require `num_warmup_steps` + if num_warmup_steps is None: + raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.") + + if name == SchedulerType.CONSTANT_WITH_WARMUP: + return schedule_func(optimizer, num_warmup_steps=num_warmup_steps) + + if name == SchedulerType.INVERSE_SQRT: + return schedule_func(optimizer, num_warmup_steps=num_warmup_steps) + + # All other schedulers require `num_training_steps` + if num_training_steps is None: + raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.") + + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + **scheduler_specific_kwargs, + ) + + +class AdamW(Optimizer): + """ + Implements Adam algorithm with weight decay fix as introduced in [Decoupled Weight Decay + Regularization](https://arxiv.org/abs/1711.05101). + + Parameters: + params (`Iterable[nn.parameter.Parameter]`): + Iterable of parameters to optimize or dictionaries defining parameter groups. + lr (`float`, *optional*, defaults to 0.001): + The learning rate to use. + betas (`Tuple[float,float]`, *optional*, defaults to `(0.9, 0.999)`): + Adam's betas parameters (b1, b2). + eps (`float`, *optional*, defaults to 1e-06): + Adam's epsilon for numerical stability. + weight_decay (`float`, *optional*, defaults to 0.0): + Decoupled weight decay to apply. + correct_bias (`bool`, *optional*, defaults to `True`): + Whether or not to correct bias in Adam (for instance, in Bert TF repository they use `False`). + no_deprecation_warning (`bool`, *optional*, defaults to `False`): + A flag used to disable the deprecation warning (set to `True` to disable the warning). + """ + + def __init__( + self, + params: Iterable[nn.parameter.Parameter], + lr: float = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-6, + weight_decay: float = 0.0, + correct_bias: bool = True, + no_deprecation_warning: bool = False, + ): + if not no_deprecation_warning: + warnings.warn( + "This implementation of AdamW is deprecated and will be removed in a future version. 
Use the PyTorch" + " implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this" + " warning", + FutureWarning, + ) + require_version("torch>=1.5.0") # add_ with alpha + if lr < 0.0: + raise ValueError(f"Invalid learning rate: {lr} - should be >= 0.0") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter: {betas[0]} - should be in [0.0, 1.0)") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter: {betas[1]} - should be in [0.0, 1.0)") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps} - should be >= 0.0") + defaults = {"lr": lr, "betas": betas, "eps": eps, "weight_decay": weight_decay, "correct_bias": correct_bias} + super().__init__(params, defaults) + + @torch.no_grad() + def step(self, closure: Callable = None): + """ + Performs a single optimization step. + + Arguments: + closure (`Callable`, *optional*): A closure that reevaluates the model and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead") + + state = self.state[p] + + # State initialization + if len(state) == 0: + state["step"] = 0 + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] + beta1, beta2 = group["betas"] + + state["step"] += 1 + + # Decay the first and second moment running average coefficient + # In-place operations to update the averages at the same time + exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1)) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2) + denom = exp_avg_sq.sqrt().add_(group["eps"]) + + step_size = group["lr"] + if group["correct_bias"]: # No bias correction for Bert + bias_correction1 = 1.0 - beta1 ** state["step"] + bias_correction2 = 1.0 - beta2 ** state["step"] + step_size = step_size * math.sqrt(bias_correction2) / bias_correction1 + + p.addcdiv_(exp_avg, denom, value=-step_size) + + # Just adding the square of the weights to the loss function is *not* + # the correct way of using L2 regularization/weight decay with Adam, + # since that will interact with the m and v parameters in strange ways. + # + # Instead we want to decay the weights in a manner that doesn't interact + # with the m/v parameters. This is equivalent to adding the square + # of the weights to the loss with plain (non-momentum) SGD. + # Add weight decay at the end (fixed version) + if group["weight_decay"] > 0.0: + p.add_(p, alpha=(-group["lr"] * group["weight_decay"])) + + return loss + + +class Adafactor(Optimizer): + """ + AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code: + https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py + + Paper: *Adafactor: Adaptive Learning Rates with Sublinear Memory Cost* https://arxiv.org/abs/1804.04235 Note that + this optimizer internally adjusts the learning rate depending on the `scale_parameter`, `relative_step` and + `warmup_init` options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and + `relative_step=False`. 
+ + Arguments: + params (`Iterable[nn.parameter.Parameter]`): + Iterable of parameters to optimize or dictionaries defining parameter groups. + lr (`float`, *optional*): + The external learning rate. + eps (`Tuple[float, float]`, *optional*, defaults to `(1e-30, 0.001)`): + Regularization constants for square gradient and parameter scale respectively + clip_threshold (`float`, *optional*, defaults to 1.0): + Threshold of root mean square of final gradient update + decay_rate (`float`, *optional*, defaults to -0.8): + Coefficient used to compute running averages of square + beta1 (`float`, *optional*): + Coefficient used for computing running averages of gradient + weight_decay (`float`, *optional*, defaults to 0.0): + Weight decay (L2 penalty) + scale_parameter (`bool`, *optional*, defaults to `True`): + If True, learning rate is scaled by root mean square + relative_step (`bool`, *optional*, defaults to `True`): + If True, time-dependent learning rate is computed instead of external learning rate + warmup_init (`bool`, *optional*, defaults to `False`): + Time-dependent learning rate computation depends on whether warm-up initialization is being used + + This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested. + + Recommended T5 finetuning settings (https://discuss.huggingface.co/t/t5-finetuning-tips/684/3): + + - Training without LR warmup or clip_threshold is not recommended. + + - use scheduled LR warm-up to fixed LR + - use clip_threshold=1.0 (https://arxiv.org/abs/1804.04235) + - Disable relative updates + - Use scale_parameter=False + - Additional optimizer operations like gradient clipping should not be used alongside Adafactor + + Example: + + ```python + Adafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3) + ``` + + Others reported the following combination to work well: + + ```python + Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None) + ``` + + When using `lr=None` with [`Trainer`] you will most likely need to use [`~optimization.AdafactorSchedule`] + scheduler as following: + + ```python + from transformers.optimization import Adafactor, AdafactorSchedule + + optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None) + lr_scheduler = AdafactorSchedule(optimizer) + trainer = Trainer(..., optimizers=(optimizer, lr_scheduler)) + ``` + + Usage: + + ```python + # replace AdamW with Adafactor + optimizer = Adafactor( + model.parameters(), + lr=1e-3, + eps=(1e-30, 1e-3), + clip_threshold=1.0, + decay_rate=-0.8, + beta1=None, + weight_decay=0.0, + relative_step=False, + scale_parameter=False, + warmup_init=False, + ) + ```""" + + def __init__( + self, + params, + lr=None, + eps=(1e-30, 1e-3), + clip_threshold=1.0, + decay_rate=-0.8, + beta1=None, + weight_decay=0.0, + scale_parameter=True, + relative_step=True, + warmup_init=False, + ): + require_version("torch>=1.5.0") # add_ with alpha + if lr is not None and relative_step: + raise ValueError("Cannot combine manual `lr` and `relative_step=True` options") + if warmup_init and not relative_step: + raise ValueError("`warmup_init=True` requires `relative_step=True`") + + defaults = { + "lr": lr, + "eps": eps, + "clip_threshold": clip_threshold, + "decay_rate": decay_rate, + "beta1": beta1, + "weight_decay": weight_decay, + "scale_parameter": scale_parameter, + "relative_step": relative_step, + "warmup_init": warmup_init, + } + 
super().__init__(params, defaults) + + @staticmethod + def _get_lr(param_group, param_state): + rel_step_sz = param_group["lr"] + if param_group["relative_step"]: + min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2 + rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"])) + param_scale = 1.0 + if param_group["scale_parameter"]: + param_scale = max(param_group["eps"][1], param_state["RMS"]) + return param_scale * rel_step_sz + + @staticmethod + def _get_options(param_group, param_shape): + factored = len(param_shape) >= 2 + use_first_moment = param_group["beta1"] is not None + return factored, use_first_moment + + @staticmethod + def _rms(tensor): + return tensor.norm(2) / (tensor.numel() ** 0.5) + + @staticmethod + def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col): + # copy from fairseq's adafactor implementation: + # https://github.com/huggingface/transformers/blob/8395f14de6068012787d83989c3627c3df6a252b/src/transformers/optimization.py#L505 + r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) + c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() + return torch.mul(r_factor, c_factor) + + @torch.no_grad() + def step(self, closure=None): + """ + Performs a single optimization step + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError("Adafactor does not support sparse gradients.") + + state = self.state[p] + grad_shape = grad.shape + + factored, use_first_moment = self._get_options(group, grad_shape) + # State Initialization + if len(state) == 0: + state["step"] = 0 + + if use_first_moment: + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like(grad) + if factored: + state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad) + state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad) + else: + state["exp_avg_sq"] = torch.zeros_like(grad) + + state["RMS"] = 0 + else: + if use_first_moment: + state["exp_avg"] = state["exp_avg"].to(grad) + if factored: + state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad) + state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad) + else: + state["exp_avg_sq"] = state["exp_avg_sq"].to(grad) + + p_data_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_data_fp32 = p_data_fp32.float() + + state["step"] += 1 + state["RMS"] = self._rms(p_data_fp32) + lr = self._get_lr(group, state) + + beta2t = 1.0 - math.pow(state["step"], group["decay_rate"]) + update = (grad**2) + group["eps"][0] + if factored: + exp_avg_sq_row = state["exp_avg_sq_row"] + exp_avg_sq_col = state["exp_avg_sq_col"] + + exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=(1.0 - beta2t)) + exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=(1.0 - beta2t)) + + # Approximation of exponential moving average of square of gradient + update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update.mul_(grad) + else: + exp_avg_sq = state["exp_avg_sq"] + + exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t)) + update = exp_avg_sq.rsqrt().mul_(grad) + + update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0)) + update.mul_(lr) + + if use_first_moment: + exp_avg 
= state["exp_avg"] + exp_avg.mul_(group["beta1"]).add_(update, alpha=(1 - group["beta1"])) + update = exp_avg + + if group["weight_decay"] != 0: + p_data_fp32.add_(p_data_fp32, alpha=(-group["weight_decay"] * lr)) + + p_data_fp32.add_(-update) + + if p.dtype in {torch.float16, torch.bfloat16}: + p.copy_(p_data_fp32) + + return loss + + +class AdafactorSchedule(LambdaLR): + """ + Since [`~optimization.Adafactor`] performs its own scheduling, if the training loop relies on a scheduler (e.g., + for logging), this class creates a proxy object that retrieves the current lr values from the optimizer. + + It returns `initial_lr` during startup and the actual `lr` during stepping. + """ + + def __init__(self, optimizer, initial_lr=0.0): + def lr_lambda(_): + return initial_lr + + for group in optimizer.param_groups: + group["initial_lr"] = initial_lr + super().__init__(optimizer, lr_lambda) + for group in optimizer.param_groups: + del group["initial_lr"] + + def get_lr(self): + opt = self.optimizer + lrs = [ + opt._get_lr(group, opt.state[group["params"][0]]) + for group in opt.param_groups + if group["params"][0].grad is not None + ] + if len(lrs) == 0: + lrs = self.base_lrs # if called before stepping + return lrs + + +def get_adafactor_schedule(optimizer, initial_lr=0.0): + """ + Get a proxy schedule for [`~optimization.Adafactor`] + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + initial_lr (`float`, *optional*, defaults to 0.0): + Initial lr + + Return: + [`~optimization.Adafactor`] proxy schedule object. + + + """ + return AdafactorSchedule(optimizer, initial_lr) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/optimization_tf.py b/llmeval-env/lib/python3.10/site-packages/transformers/optimization_tf.py new file mode 100644 index 0000000000000000000000000000000000000000..25023430ed303f32dd91268a4304c4903b80007e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/optimization_tf.py @@ -0,0 +1,380 @@ +# Copyright 2019 The TensorFlow Authors, The Hugging Face Team. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions and classes related to optimization (weight updates).""" + + +import re +from typing import Callable, List, Optional, Union + +import tensorflow as tf + + +try: + from tf_keras.optimizers.legacy import Adam +except (ImportError, ModuleNotFoundError): + from tensorflow.keras.optimizers.legacy import Adam + +from .modeling_tf_utils import keras + + +# This block because Keras loves randomly moving things to different places - this changed somewhere between 2.10 - 2.15 +if hasattr(keras.optimizers.schedules, "learning_rate_schedule"): + schedules = keras.optimizers.schedules.learning_rate_schedule +else: + schedules = keras.optimizers.schedules + + +class WarmUp(schedules.LearningRateSchedule): + """ + Applies a warmup schedule on a given learning rate decay schedule. 
+ + Args: + initial_learning_rate (`float`): + The initial learning rate for the schedule after the warmup (so this will be the learning rate at the end + of the warmup). + decay_schedule_fn (`Callable`): + The schedule function to apply after the warmup for the rest of training. + warmup_steps (`int`): + The number of steps for the warmup part of training. + power (`float`, *optional*, defaults to 1.0): + The power to use for the polynomial warmup (defaults is a linear warmup). + name (`str`, *optional*): + Optional name prefix for the returned tensors during the schedule. + """ + + def __init__( + self, + initial_learning_rate: float, + decay_schedule_fn: Callable, + warmup_steps: int, + power: float = 1.0, + name: str = None, + ): + super().__init__() + self.initial_learning_rate = initial_learning_rate + self.warmup_steps = warmup_steps + self.power = power + self.decay_schedule_fn = decay_schedule_fn + self.name = name + + def __call__(self, step): + with tf.name_scope(self.name or "WarmUp") as name: + # Implements polynomial warmup. i.e., if global_step < warmup_steps, the + # learning rate will be `global_step/num_warmup_steps * init_lr`. + global_step_float = tf.cast(step, tf.float32) + warmup_steps_float = tf.cast(self.warmup_steps, tf.float32) + warmup_percent_done = global_step_float / warmup_steps_float + warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power) + return tf.cond( + global_step_float < warmup_steps_float, + lambda: warmup_learning_rate, + lambda: self.decay_schedule_fn(step - self.warmup_steps), + name=name, + ) + + def get_config(self): + return { + "initial_learning_rate": self.initial_learning_rate, + "decay_schedule_fn": self.decay_schedule_fn, + "warmup_steps": self.warmup_steps, + "power": self.power, + "name": self.name, + } + + +def create_optimizer( + init_lr: float, + num_train_steps: int, + num_warmup_steps: int, + min_lr_ratio: float = 0.0, + adam_beta1: float = 0.9, + adam_beta2: float = 0.999, + adam_epsilon: float = 1e-8, + adam_clipnorm: Optional[float] = None, + adam_global_clipnorm: Optional[float] = None, + weight_decay_rate: float = 0.0, + power: float = 1.0, + include_in_weight_decay: Optional[List[str]] = None, +): + """ + Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay. + + Args: + init_lr (`float`): + The desired learning rate at the end of the warmup phase. + num_train_steps (`int`): + The total number of training steps. + num_warmup_steps (`int`): + The number of warmup steps. + min_lr_ratio (`float`, *optional*, defaults to 0): + The final learning rate at the end of the linear decay will be `init_lr * min_lr_ratio`. + adam_beta1 (`float`, *optional*, defaults to 0.9): + The beta1 to use in Adam. + adam_beta2 (`float`, *optional*, defaults to 0.999): + The beta2 to use in Adam. + adam_epsilon (`float`, *optional*, defaults to 1e-8): + The epsilon to use in Adam. + adam_clipnorm (`float`, *optional*, defaults to `None`): + If not `None`, clip the gradient norm for each weight tensor to this value. + adam_global_clipnorm (`float`, *optional*, defaults to `None`) + If not `None`, clip gradient norm to this value. When using this argument, the norm is computed over all + weight tensors, as if they were concatenated into a single vector. + weight_decay_rate (`float`, *optional*, defaults to 0): + The weight decay to use. + power (`float`, *optional*, defaults to 1.0): + The power to use for PolynomialDecay. 
+ include_in_weight_decay (`List[str]`, *optional*): + List of the parameter names (or re patterns) to apply weight decay to. If none is passed, weight decay is + applied to all parameters except bias and layer norm parameters. + """ + # Implements linear decay of the learning rate. + lr_schedule = schedules.PolynomialDecay( + initial_learning_rate=init_lr, + decay_steps=num_train_steps - num_warmup_steps, + end_learning_rate=init_lr * min_lr_ratio, + power=power, + ) + if num_warmup_steps: + lr_schedule = WarmUp( + initial_learning_rate=init_lr, + decay_schedule_fn=lr_schedule, + warmup_steps=num_warmup_steps, + ) + if weight_decay_rate > 0.0: + optimizer = AdamWeightDecay( + learning_rate=lr_schedule, + weight_decay_rate=weight_decay_rate, + beta_1=adam_beta1, + beta_2=adam_beta2, + epsilon=adam_epsilon, + clipnorm=adam_clipnorm, + global_clipnorm=adam_global_clipnorm, + exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], + include_in_weight_decay=include_in_weight_decay, + ) + else: + optimizer = keras.optimizers.Adam( + learning_rate=lr_schedule, + beta_1=adam_beta1, + beta_2=adam_beta2, + epsilon=adam_epsilon, + clipnorm=adam_clipnorm, + global_clipnorm=adam_global_clipnorm, + ) + # We return the optimizer and the LR scheduler in order to better track the + # evolution of the LR independently of the optimizer. + return optimizer, lr_schedule + + +class AdamWeightDecay(Adam): + """ + Adam enables L2 weight decay and clip_by_global_norm on gradients. Just adding the square of the weights to the + loss function is *not* the correct way of using L2 regularization/weight decay with Adam, since that will interact + with the m and v parameters in strange ways as shown in [Decoupled Weight Decay + Regularization](https://arxiv.org/abs/1711.05101). + + Instead we want to decay the weights in a manner that doesn't interact with the m/v parameters. This is equivalent + to adding the square of the weights to the loss with plain (non-momentum) SGD. + + Args: + learning_rate (`Union[float, LearningRateSchedule]`, *optional*, defaults to 0.001): + The learning rate to use or a schedule. + beta_1 (`float`, *optional*, defaults to 0.9): + The beta1 parameter in Adam, which is the exponential decay rate for the 1st momentum estimates. + beta_2 (`float`, *optional*, defaults to 0.999): + The beta2 parameter in Adam, which is the exponential decay rate for the 2nd momentum estimates. + epsilon (`float`, *optional*, defaults to 1e-07): + The epsilon parameter in Adam, which is a small constant for numerical stability. + amsgrad (`bool`, *optional*, defaults to `False`): + Whether to apply AMSGrad variant of this algorithm or not, see [On the Convergence of Adam and + Beyond](https://arxiv.org/abs/1904.09237). + weight_decay_rate (`float`, *optional*, defaults to 0.0): + The weight decay to apply. + include_in_weight_decay (`List[str]`, *optional*): + List of the parameter names (or re patterns) to apply weight decay to. If none is passed, weight decay is + applied to all parameters by default (unless they are in `exclude_from_weight_decay`). + exclude_from_weight_decay (`List[str]`, *optional*): + List of the parameter names (or re patterns) to exclude from applying weight decay to. If a + `include_in_weight_decay` is passed, the names in it will supersede this list. + name (`str`, *optional*, defaults to `"AdamWeightDecay"`): + Optional name for the operations created when applying gradients. + kwargs (`Dict[str, Any]`, *optional*): + Keyword arguments. 
Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by + norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time + inverse decay of learning rate. `lr` is included for backward compatibility, recommended to use + `learning_rate` instead. + """ + + def __init__( + self, + learning_rate: Union[float, schedules.LearningRateSchedule] = 0.001, + beta_1: float = 0.9, + beta_2: float = 0.999, + epsilon: float = 1e-7, + amsgrad: bool = False, + weight_decay_rate: float = 0.0, + include_in_weight_decay: Optional[List[str]] = None, + exclude_from_weight_decay: Optional[List[str]] = None, + name: str = "AdamWeightDecay", + **kwargs, + ): + super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs) + self.weight_decay_rate = weight_decay_rate + self._include_in_weight_decay = include_in_weight_decay + self._exclude_from_weight_decay = exclude_from_weight_decay + + @classmethod + def from_config(cls, config): + """Creates an optimizer from its config with WarmUp custom object.""" + custom_objects = {"WarmUp": WarmUp} + return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects) + + def _prepare_local(self, var_device, var_dtype, apply_state): + super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state) + apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant( + self.weight_decay_rate, name="adam_weight_decay_rate" + ) + + def _decay_weights_op(self, var, learning_rate, apply_state): + do_decay = self._do_use_weight_decay(var.name) + if do_decay: + return var.assign_sub( + learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], + use_locking=self._use_locking, + ) + return tf.no_op() + + def apply_gradients(self, grads_and_vars, name=None, **kwargs): + grads, tvars = list(zip(*grads_and_vars)) + return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs) + + def _get_lr(self, var_device, var_dtype, apply_state): + """Retrieves the learning rate with the given state.""" + if apply_state is None: + return self._decayed_lr_t[var_dtype], {} + + apply_state = apply_state or {} + coefficients = apply_state.get((var_device, var_dtype)) + if coefficients is None: + coefficients = self._fallback_apply_state(var_device, var_dtype) + apply_state[(var_device, var_dtype)] = coefficients + + return coefficients["lr_t"], {"apply_state": apply_state} + + def _resource_apply_dense(self, grad, var, apply_state=None): + lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state) + decay = self._decay_weights_op(var, lr_t, apply_state) + with tf.control_dependencies([decay]): + return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs) + + def _resource_apply_sparse(self, grad, var, indices, apply_state=None): + lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state) + decay = self._decay_weights_op(var, lr_t, apply_state) + with tf.control_dependencies([decay]): + return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs) + + def get_config(self): + config = super().get_config() + config.update({"weight_decay_rate": self.weight_decay_rate}) + return config + + def _do_use_weight_decay(self, param_name): + """Whether to use L2 weight decay for `param_name`.""" + if self.weight_decay_rate == 0: + return False + + if self._include_in_weight_decay: + for r in self._include_in_weight_decay: + if re.search(r, 
param_name) is not None: + return True + + if self._exclude_from_weight_decay: + for r in self._exclude_from_weight_decay: + if re.search(r, param_name) is not None: + return False + return True + + +# Extracted from https://github.com/OpenNMT/OpenNMT-tf/blob/master/opennmt/optimizers/utils.py +class GradientAccumulator: + """ + Gradient accumulation utility. When used with a distribution strategy, the accumulator should be called in a + replica context. Gradients will be accumulated locally on each replica and without synchronization. Users should + then call `.gradients`, scale the gradients if required, and pass the result to `apply_gradients`. + """ + + # We use the ON_READ synchronization policy so that no synchronization is + # performed on assignment. To get the value, we call .value() which returns the + # value on the current replica without synchronization. + + def __init__(self): + """Initializes the accumulator.""" + self._gradients = [] + self._accum_steps = None + + @property + def step(self): + """Number of accumulated steps.""" + if self._accum_steps is None: + self._accum_steps = tf.Variable( + tf.constant(0, dtype=tf.int64), + trainable=False, + synchronization=tf.VariableSynchronization.ON_READ, + aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, + ) + + return self._accum_steps.value() + + @property + def gradients(self): + """The accumulated gradients on the current replica.""" + if not self._gradients: + raise ValueError("The accumulator should be called first to initialize the gradients") + return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] + + def __call__(self, gradients): + """Accumulates `gradients` on the current replica.""" + if not self._gradients: + _ = self.step # Create the step variable. + self._gradients.extend( + [ + tf.Variable( + tf.zeros_like(gradient), + trainable=False, + synchronization=tf.VariableSynchronization.ON_READ, + aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, + ) + if gradient is not None + else gradient + for gradient in gradients + ] + ) + if len(gradients) != len(self._gradients): + raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}") + + for accum_gradient, gradient in zip(self._gradients, gradients): + if accum_gradient is not None and gradient is not None: + accum_gradient.assign_add(gradient) + + self._accum_steps.assign_add(1) + + def reset(self): + """Resets the accumulated gradients on the current replica.""" + if not self._gradients: + return + self._accum_steps.assign(0) + for gradient in self._gradients: + if gradient is not None: + gradient.assign(tf.zeros_like(gradient)) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/testing_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/testing_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8297cb981ef1fbb1a4d152cea348ed51b2ca811c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/testing_utils.py @@ -0,0 +1,2451 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import contextlib +import doctest +import functools +import importlib +import inspect +import logging +import multiprocessing +import os +import re +import shlex +import shutil +import subprocess +import sys +import tempfile +import time +import unittest +from collections import defaultdict +from collections.abc import Mapping +from functools import wraps +from io import StringIO +from pathlib import Path +from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union +from unittest import mock +from unittest.mock import patch + +import urllib3 + +from transformers import logging as transformers_logging + +from .integrations import ( + is_clearml_available, + is_optuna_available, + is_ray_available, + is_sigopt_available, + is_tensorboard_available, + is_wandb_available, +) +from .integrations.deepspeed import is_deepspeed_available +from .utils import ( + ACCELERATE_MIN_VERSION, + is_accelerate_available, + is_apex_available, + is_aqlm_available, + is_auto_awq_available, + is_auto_gptq_available, + is_av_available, + is_bitsandbytes_available, + is_bs4_available, + is_cv2_available, + is_cython_available, + is_decord_available, + is_detectron2_available, + is_essentia_available, + is_faiss_available, + is_flash_attn_2_available, + is_flax_available, + is_fsdp_available, + is_ftfy_available, + is_g2p_en_available, + is_galore_torch_available, + is_ipex_available, + is_jieba_available, + is_jinja_available, + is_jumanpp_available, + is_keras_nlp_available, + is_levenshtein_available, + is_librosa_available, + is_natten_available, + is_nltk_available, + is_onnx_available, + is_optimum_available, + is_pandas_available, + is_peft_available, + is_phonemizer_available, + is_pretty_midi_available, + is_pyctcdecode_available, + is_pytesseract_available, + is_pytest_available, + is_pytorch_quantization_available, + is_quanto_available, + is_rjieba_available, + is_sacremoses_available, + is_safetensors_available, + is_scipy_available, + is_sentencepiece_available, + is_seqio_available, + is_soundfile_availble, + is_spacy_available, + is_sudachi_available, + is_sudachi_projection_available, + is_tensorflow_probability_available, + is_tensorflow_text_available, + is_tf2onnx_available, + is_tf_available, + is_timm_available, + is_tokenizers_available, + is_torch_available, + is_torch_bf16_available_on_device, + is_torch_bf16_cpu_available, + is_torch_bf16_gpu_available, + is_torch_fp16_available_on_device, + is_torch_neuroncore_available, + is_torch_npu_available, + is_torch_sdpa_available, + is_torch_tensorrt_fx_available, + is_torch_tf32_available, + is_torch_xla_available, + is_torch_xpu_available, + is_torchaudio_available, + is_torchdynamo_available, + is_torchvision_available, + is_vision_available, + strtobool, +) + + +if is_accelerate_available(): + from accelerate.state import AcceleratorState, PartialState + + +if is_pytest_available(): + from _pytest.doctest import ( + Module, + _get_checker, + _get_continue_on_failure, + _get_runner, + _is_mocked, + _patch_unwrap_mock_aware, + get_optionflags, + ) + from _pytest.outcomes import skip + from 
_pytest.pathlib import import_path + from pytest import DoctestItem +else: + Module = object + DoctestItem = object + + +SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy" +DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown" +DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer" +# Used to test Auto{Config, Model, Tokenizer} model_type detection. + +# Used to test the hub +USER = "__DUMMY_TRANSFORMERS_USER__" +ENDPOINT_STAGING = "https://hub-ci.huggingface.co" + +# Not critical, only usable on the sandboxed CI instance. +TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL" + + +def parse_flag_from_env(key, default=False): + try: + value = os.environ[key] + except KeyError: + # KEY isn't set, default to `default`. + _value = default + else: + # KEY is set, convert it to True or False. + try: + _value = strtobool(value) + except ValueError: + # More values are supported, but let's keep the message simple. + raise ValueError(f"If set, {key} must be yes or no.") + return _value + + +def parse_int_from_env(key, default=None): + try: + value = os.environ[key] + except KeyError: + _value = default + else: + try: + _value = int(value) + except ValueError: + raise ValueError(f"If set, {key} must be a int.") + return _value + + +_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) +_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True) +_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True) +_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False) +_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False) +_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None) +_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True) +_run_tool_tests = parse_flag_from_env("RUN_TOOL_TESTS", default=False) +_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False) + + +def is_pt_tf_cross_test(test_case): + """ + Decorator marking a test as a test that control interactions between PyTorch and TensorFlow. + + PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable + to a truthy value and selecting the is_pt_tf_cross_test pytest mark. + + """ + if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available(): + return unittest.skip("test is PT+TF test")(test_case) + else: + try: + import pytest # We don't need a hard dependency on pytest in the main library + except ImportError: + return test_case + else: + return pytest.mark.is_pt_tf_cross_test()(test_case) + + +def is_pt_flax_cross_test(test_case): + """ + Decorator marking a test as a test that control interactions between PyTorch and Flax + + PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment + variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark. + + """ + if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available(): + return unittest.skip("test is PT+FLAX test")(test_case) + else: + try: + import pytest # We don't need a hard dependency on pytest in the main library + except ImportError: + return test_case + else: + return pytest.mark.is_pt_flax_cross_test()(test_case) + + +def is_staging_test(test_case): + """ + Decorator marking a test as a staging test. + + Those tests will run using the staging environment of huggingface.co instead of the real model hub. 
+ """ + if not _run_staging: + return unittest.skip("test is staging test")(test_case) + else: + try: + import pytest # We don't need a hard dependency on pytest in the main library + except ImportError: + return test_case + else: + return pytest.mark.is_staging_test()(test_case) + + +def is_pipeline_test(test_case): + """ + Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be + skipped. + """ + if not _run_pipeline_tests: + return unittest.skip("test is pipeline test")(test_case) + else: + try: + import pytest # We don't need a hard dependency on pytest in the main library + except ImportError: + return test_case + else: + return pytest.mark.is_pipeline_test()(test_case) + + +def is_tool_test(test_case): + """ + Decorator marking a test as a tool test. If RUN_TOOL_TESTS is set to a falsy value, those tests will be skipped. + """ + if not _run_tool_tests: + return unittest.skip("test is a tool test")(test_case) + else: + try: + import pytest # We don't need a hard dependency on pytest in the main library + except ImportError: + return test_case + else: + return pytest.mark.is_tool_test()(test_case) + + +def slow(test_case): + """ + Decorator marking a test as slow. + + Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them. + + """ + return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case) + + +def tooslow(test_case): + """ + Decorator marking a test as too slow. + + Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as + these will not be tested by the CI. + + """ + return unittest.skip("test is too slow")(test_case) + + +def custom_tokenizers(test_case): + """ + Decorator marking a test for a custom tokenizer. + + Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS + environment variable to a truthy value to run them. + """ + return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case) + + +def require_bs4(test_case): + """ + Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed. + """ + return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case) + + +def require_galore_torch(test_case): + """ + Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed. + https://github.com/jiaweizzhao/GaLore + """ + return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case) + + +def require_cv2(test_case): + """ + Decorator marking a test that requires OpenCV. + + These tests are skipped when OpenCV isn't installed. + + """ + return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case) + + +def require_levenshtein(test_case): + """ + Decorator marking a test that requires Levenshtein. + + These tests are skipped when Levenshtein isn't installed. + + """ + return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case) + + +def require_nltk(test_case): + """ + Decorator marking a test that requires NLTK. + + These tests are skipped when NLTK isn't installed. + + """ + return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case) + + +def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION): + """ + Decorator marking a test that requires accelerate. 
These tests are skipped when accelerate isn't installed. + """ + return unittest.skipUnless( + is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}" + )(test_case) + + +def require_fsdp(test_case, min_version: str = "1.12.0"): + """ + Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed. + """ + return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")( + test_case + ) + + +def require_g2p_en(test_case): + """ + Decorator marking a test that requires g2p_en. These tests are skipped when SentencePiece isn't installed. + """ + return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case) + + +def require_safetensors(test_case): + """ + Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed. + """ + return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case) + + +def require_rjieba(test_case): + """ + Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed. + """ + return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case) + + +def require_jieba(test_case): + """ + Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed. + """ + return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case) + + +def require_jinja(test_case): + """ + Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed. + """ + return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case) + + +def require_tf2onnx(test_case): + return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case) + + +def require_onnx(test_case): + return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case) + + +def require_timm(test_case): + """ + Decorator marking a test that requires Timm. + + These tests are skipped when Timm isn't installed. + + """ + return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case) + + +def require_natten(test_case): + """ + Decorator marking a test that requires NATTEN. + + These tests are skipped when NATTEN isn't installed. + + """ + return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case) + + +def require_torch(test_case): + """ + Decorator marking a test that requires PyTorch. + + These tests are skipped when PyTorch isn't installed. + + """ + return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case) + + +def require_flash_attn(test_case): + """ + Decorator marking a test that requires Flash Attention. + + These tests are skipped when Flash Attention isn't installed. + + """ + return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case) + + +def require_torch_sdpa(test_case): + """ + Decorator marking a test that requires PyTorch's SDPA. + + These tests are skipped when requirements are not met (torch version). + """ + return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case) + + +def require_read_token(fn): + """ + A decorator that loads the HF token for tests that require to load gated models. 
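A minimal sketch of how the skip-style decorators defined above are combined in a test module (the class and test names are hypothetical):

```python
import unittest

from transformers.testing_utils import require_torch, slow


@require_torch
class ToyModelIntegrationTest(unittest.TestCase):
    @slow
    def test_large_checkpoint(self):
        # Runs only when PyTorch is installed and RUN_SLOW is set to a truthy value.
        self.assertTrue(True)
```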
+ """ + token = os.getenv("HF_HUB_READ_TOKEN") + + @wraps(fn) + def _inner(*args, **kwargs): + with patch("huggingface_hub.utils._headers.get_token", return_value=token): + return fn(*args, **kwargs) + + return _inner + + +def require_peft(test_case): + """ + Decorator marking a test that requires PEFT. + + These tests are skipped when PEFT isn't installed. + + """ + return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case) + + +def require_torchvision(test_case): + """ + Decorator marking a test that requires Torchvision. + + These tests are skipped when Torchvision isn't installed. + + """ + return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case) + + +def require_torch_or_tf(test_case): + """ + Decorator marking a test that requires PyTorch or TensorFlow. + + These tests are skipped when neither PyTorch not TensorFlow is installed. + + """ + return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")( + test_case + ) + + +def require_intel_extension_for_pytorch(test_case): + """ + Decorator marking a test that requires Intel Extension for PyTorch. + + These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch + version. + + """ + return unittest.skipUnless( + is_ipex_available(), + "test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see" + " https://github.com/intel/intel-extension-for-pytorch", + )(test_case) + + +def require_tensorflow_probability(test_case): + """ + Decorator marking a test that requires TensorFlow probability. + + These tests are skipped when TensorFlow probability isn't installed. + + """ + return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")( + test_case + ) + + +def require_torchaudio(test_case): + """ + Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed. + """ + return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case) + + +def require_tf(test_case): + """ + Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed. + """ + return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case) + + +def require_flax(test_case): + """ + Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed + """ + return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case) + + +def require_sentencepiece(test_case): + """ + Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed. + """ + return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case) + + +def require_sacremoses(test_case): + """ + Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed. + """ + return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case) + + +def require_seqio(test_case): + """ + Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed. + """ + return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case) + + +def require_scipy(test_case): + """ + Decorator marking a test that requires Scipy. These tests are skipped when SentencePiece isn't installed. 
+ """ + return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case) + + +def require_tokenizers(test_case): + """ + Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed. + """ + return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case) + + +def require_tensorflow_text(test_case): + """ + Decorator marking a test that requires tensorflow_text. These tests are skipped when tensroflow_text isn't + installed. + """ + return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case) + + +def require_keras_nlp(test_case): + """ + Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed. + """ + return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case) + + +def require_pandas(test_case): + """ + Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed. + """ + return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case) + + +def require_pytesseract(test_case): + """ + Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed. + """ + return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case) + + +def require_pytorch_quantization(test_case): + """ + Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch + Quantization Toolkit isn't installed. + """ + return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")( + test_case + ) + + +def require_vision(test_case): + """ + Decorator marking a test that requires the vision dependencies. These tests are skipped when torchaudio isn't + installed. + """ + return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case) + + +def require_ftfy(test_case): + """ + Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed. + """ + return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case) + + +def require_spacy(test_case): + """ + Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed. + """ + return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case) + + +def require_decord(test_case): + """ + Decorator marking a test that requires decord. These tests are skipped when decord isn't installed. + """ + return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case) + + +def require_torch_multi_gpu(test_case): + """ + Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without + multiple GPUs. + + To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu" + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case) + + +def require_torch_multi_accelerator(test_case): + """ + Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine + without multiple accelerators. 
To run *only* the multi_accelerator tests, assuming all test names contain + multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator" + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")( + test_case + ) + + +def require_torch_non_multi_gpu(test_case): + """ + Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch). + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case) + + +def require_torch_non_multi_accelerator(test_case): + """ + Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch). + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case) + + +def require_torch_up_to_2_gpus(test_case): + """ + Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch). + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case) + + +def require_torch_up_to_2_accelerators(test_case): + """ + Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch). + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")( + test_case + ) + + +def require_torch_xla(test_case): + """ + Decorator marking a test that requires TorchXLA (in PyTorch). + """ + return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case) + + +def require_torch_neuroncore(test_case): + """ + Decorator marking a test that requires NeuronCore (in PyTorch). + """ + return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")( + test_case + ) + + +def require_torch_npu(test_case): + """ + Decorator marking a test that requires NPU (in PyTorch). + """ + return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case) + + +def require_torch_multi_npu(test_case): + """ + Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without + multiple NPUs. + + To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu" + """ + if not is_torch_npu_available(): + return unittest.skip("test requires PyTorch NPU")(test_case) + + return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case) + + +def require_torch_xpu(test_case): + """ + Decorator marking a test that requires XPU and IPEX. + + These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch + version. + """ + return unittest.skipUnless(is_torch_xpu_available(), "test requires IPEX and an XPU device")(test_case) + + +def require_torch_multi_xpu(test_case): + """ + Decorator marking a test that requires a multi-XPU setup with IPEX and at least one XPU device. These tests are + skipped on a machine without IPEX or multiple XPUs.
+ + To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu" + """ + if not is_torch_xpu_available(): + return unittest.skip("test requires IPEX and at least one XPU device")(test_case) + + return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case) + + +if is_torch_available(): + # Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode + import torch + + if "TRANSFORMERS_TEST_BACKEND" in os.environ: + backend = os.environ["TRANSFORMERS_TEST_BACKEND"] + try: + _ = importlib.import_module(backend) + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its" + f" traceback):\n{e}" + ) from e + + if "TRANSFORMERS_TEST_DEVICE" in os.environ: + torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"] + if torch_device == "cuda" and not torch.cuda.is_available(): + raise ValueError( + f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment." + ) + if torch_device == "xpu" and not is_torch_xpu_available(): + raise ValueError( + f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment." + ) + if torch_device == "npu" and not is_torch_npu_available(): + raise ValueError( + f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment." + ) + + try: + # try creating device to see if provided device is valid + _ = torch.device(torch_device) + except RuntimeError as e: + raise RuntimeError( + f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}" + ) from e + elif torch.cuda.is_available(): + torch_device = "cuda" + elif _run_third_party_device_tests and is_torch_npu_available(): + torch_device = "npu" + elif _run_third_party_device_tests and is_torch_xpu_available(): + torch_device = "xpu" + else: + torch_device = "cpu" +else: + torch_device = None + +if is_tf_available(): + import tensorflow as tf + +if is_flax_available(): + import jax + + jax_device = jax.default_backend() +else: + jax_device = None + + +def require_torchdynamo(test_case): + """Decorator marking a test that requires TorchDynamo""" + return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case) + + +def require_torch_tensorrt_fx(test_case): + """Decorator marking a test that requires Torch-TensorRT FX""" + return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case) + + +def require_torch_gpu(test_case): + """Decorator marking a test that requires CUDA and PyTorch.""" + return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case) + + +def require_torch_accelerator(test_case): + """Decorator marking a test that requires an accessible accelerator and PyTorch.""" + return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")( + test_case + ) + + +def require_torch_fp16(test_case): + """Decorator marking a test that requires a device that supports fp16""" + return unittest.skipUnless( + is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support" + )(test_case) + + +def require_torch_bf16(test_case): + """Decorator marking a test that requires a device that supports bf16""" + return unittest.skipUnless( + 
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support" + )(test_case) + + +def require_torch_bf16_gpu(test_case): + """Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0""" + return unittest.skipUnless( + is_torch_bf16_gpu_available(), + "test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0", + )(test_case) + + +def require_torch_bf16_cpu(test_case): + """Decorator marking a test that requires torch>=1.10, using CPU.""" + return unittest.skipUnless( + is_torch_bf16_cpu_available(), + "test requires torch>=1.10, using CPU", + )(test_case) + + +def require_torch_tf32(test_case): + """Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7.""" + return unittest.skipUnless( + is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7" + )(test_case) + + +def require_detectron2(test_case): + """Decorator marking a test that requires detectron2.""" + return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case) + + +def require_faiss(test_case): + """Decorator marking a test that requires faiss.""" + return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case) + + +def require_optuna(test_case): + """ + Decorator marking a test that requires optuna. + + These tests are skipped when optuna isn't installed. + + """ + return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case) + + +def require_ray(test_case): + """ + Decorator marking a test that requires Ray/tune. + + These tests are skipped when Ray/tune isn't installed. + + """ + return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case) + + +def require_sigopt(test_case): + """ + Decorator marking a test that requires SigOpt. + + These tests are skipped when SigOpt isn't installed. + + """ + return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case) + + +def require_wandb(test_case): + """ + Decorator marking a test that requires wandb. + + These tests are skipped when wandb isn't installed. + + """ + return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case) + + +def require_clearml(test_case): + """ + Decorator marking a test requires clearml. + + These tests are skipped when clearml isn't installed. + + """ + return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case) + + +def require_soundfile(test_case): + """ + Decorator marking a test that requires soundfile + + These tests are skipped when soundfile isn't installed. 
+ + """ + return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case) + + +def require_deepspeed(test_case): + """ + Decorator marking a test that requires deepspeed + """ + return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case) + + +def require_apex(test_case): + """ + Decorator marking a test that requires apex + """ + return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case) + + +def require_aqlm(test_case): + """ + Decorator marking a test that requires aqlm + """ + return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case) + + +def require_av(test_case): + """ + Decorator marking a test that requires av + """ + return unittest.skipUnless(is_av_available(), "test requires av")(test_case) + + +def require_bitsandbytes(test_case): + """ + Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed. + """ + if is_bitsandbytes_available() and is_torch_available(): + try: + import pytest + + return pytest.mark.bitsandbytes(test_case) + except ImportError: + return test_case + else: + return unittest.skip("test requires bitsandbytes and torch")(test_case) + + +def require_optimum(test_case): + """ + Decorator for optimum dependency + """ + return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case) + + +def require_tensorboard(test_case): + """ + Decorator for `tensorboard` dependency + """ + return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard") + + +def require_auto_gptq(test_case): + """ + Decorator for auto_gptq dependency + """ + return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case) + + +def require_auto_awq(test_case): + """ + Decorator for auto_awq dependency + """ + return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case) + + +def require_quanto(test_case): + """ + Decorator for quanto dependency + """ + return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case) + + +def require_phonemizer(test_case): + """ + Decorator marking a test that requires phonemizer + """ + return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case) + + +def require_pyctcdecode(test_case): + """ + Decorator marking a test that requires pyctcdecode + """ + return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case) + + +def require_librosa(test_case): + """ + Decorator marking a test that requires librosa + """ + return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case) + + +def require_essentia(test_case): + """ + Decorator marking a test that requires essentia + """ + return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case) + + +def require_pretty_midi(test_case): + """ + Decorator marking a test that requires pretty_midi + """ + return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case) + + +def cmd_exists(cmd): + return shutil.which(cmd) is not None + + +def require_usr_bin_time(test_case): + """ + Decorator marking a test that requires `/usr/bin/time` + """ + return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case) + + +def require_sudachi(test_case): + """ + Decorator marking a test that requires sudachi + """ + return unittest.skipUnless(is_sudachi_available(), 
"test requires sudachi")(test_case) + + +def require_sudachi_projection(test_case): + """ + Decorator marking a test that requires sudachi_projection + """ + return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")( + test_case + ) + + +def require_jumanpp(test_case): + """ + Decorator marking a test that requires jumanpp + """ + return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case) + + +def require_cython(test_case): + """ + Decorator marking a test that requires jumanpp + """ + return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case) + + +def get_gpu_count(): + """ + Return the number of available gpus (regardless of whether torch, tf or jax is used) + """ + if is_torch_available(): + import torch + + return torch.cuda.device_count() + elif is_tf_available(): + import tensorflow as tf + + return len(tf.config.list_physical_devices("GPU")) + elif is_flax_available(): + import jax + + return jax.device_count() + else: + return 0 + + +def get_tests_dir(append_path=None): + """ + Args: + append_path: optional path to append to the tests dir path + + Return: + The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is + joined after the `tests` dir the former is provided. + + """ + # this function caller's __file__ + caller__file__ = inspect.stack()[1][1] + tests_dir = os.path.abspath(os.path.dirname(caller__file__)) + + while not tests_dir.endswith("tests"): + tests_dir = os.path.dirname(tests_dir) + + if append_path: + return os.path.join(tests_dir, append_path) + else: + return tests_dir + + +# +# Helper functions for dealing with testing text outputs +# The original code came from: +# https://github.com/fastai/fastai/blob/master/tests/utils/text.py + + +# When any function contains print() calls that get overwritten, like progress bars, +# a special care needs to be applied, since under pytest -s captured output (capsys +# or contextlib.redirect_stdout) contains any temporary printed strings, followed by +# \r's. This helper function ensures that the buffer will contain the same output +# with and without -s in pytest, by turning: +# foo bar\r tar mar\r final message +# into: +# final message +# it can handle a single string or a multiline buffer +def apply_print_resets(buf): + return re.sub(r"^.*\r", "", buf, 0, re.M) + + +def assert_screenout(out, what): + out_pr = apply_print_resets(out).lower() + match_str = out_pr.find(what.lower()) + assert match_str != -1, f"expecting to find {what} in output: f{out_pr}" + + +class CaptureStd: + """ + Context manager to capture: + + - stdout: replay it, clean it up and make it available via `obj.out` + - stderr: replay it and make it available via `obj.err` + + Args: + out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not. + err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not. + replay (`bool`, *optional*, defaults to `True`): Whether to replay or not. + By default each captured stream gets replayed back on context's exit, so that one can see what the test was + doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to + disable this feature. 
+ + Examples: + + ```python + # to capture stdout only with auto-replay + with CaptureStdout() as cs: + print("Secret message") + assert "message" in cs.out + + # to capture stderr only with auto-replay + import sys + + with CaptureStderr() as cs: + print("Warning: ", file=sys.stderr) + assert "Warning" in cs.err + + # to capture both streams with auto-replay + with CaptureStd() as cs: + print("Secret message") + print("Warning: ", file=sys.stderr) + assert "message" in cs.out + assert "Warning" in cs.err + + # to capture just one of the streams, and not the other, with auto-replay + with CaptureStd(err=False) as cs: + print("Secret message") + assert "message" in cs.out + # but best use the stream-specific subclasses + + # to capture without auto-replay + with CaptureStd(replay=False) as cs: + print("Secret message") + assert "message" in cs.out + ```""" + + def __init__(self, out=True, err=True, replay=True): + self.replay = replay + + if out: + self.out_buf = StringIO() + self.out = "error: CaptureStd context is unfinished yet, called too early" + else: + self.out_buf = None + self.out = "not capturing stdout" + + if err: + self.err_buf = StringIO() + self.err = "error: CaptureStd context is unfinished yet, called too early" + else: + self.err_buf = None + self.err = "not capturing stderr" + + def __enter__(self): + if self.out_buf: + self.out_old = sys.stdout + sys.stdout = self.out_buf + + if self.err_buf: + self.err_old = sys.stderr + sys.stderr = self.err_buf + + return self + + def __exit__(self, *exc): + if self.out_buf: + sys.stdout = self.out_old + captured = self.out_buf.getvalue() + if self.replay: + sys.stdout.write(captured) + self.out = apply_print_resets(captured) + + if self.err_buf: + sys.stderr = self.err_old + captured = self.err_buf.getvalue() + if self.replay: + sys.stderr.write(captured) + self.err = captured + + def __repr__(self): + msg = "" + if self.out_buf: + msg += f"stdout: {self.out}\n" + if self.err_buf: + msg += f"stderr: {self.err}\n" + return msg + + +# in tests it's the best to capture only the stream that's wanted, otherwise +# it's easy to miss things, so unless you need to capture both streams, use the +# subclasses below (less typing). Or alternatively, configure `CaptureStd` to +# disable the stream you don't need to test. + + +class CaptureStdout(CaptureStd): + """Same as CaptureStd but captures only stdout""" + + def __init__(self, replay=True): + super().__init__(err=False, replay=replay) + + +class CaptureStderr(CaptureStd): + """Same as CaptureStd but captures only stderr""" + + def __init__(self, replay=True): + super().__init__(out=False, replay=replay) + + +class CaptureLogger: + """ + Context manager to capture `logging` streams + + Args: + logger: 'logging` logger object + + Returns: + The captured output is available via `self.out` + + Example: + + ```python + >>> from transformers import logging + >>> from transformers.testing_utils import CaptureLogger + + >>> msg = "Testing 1, 2, 3" + >>> logging.set_verbosity_info() + >>> logger = logging.get_logger("transformers.models.bart.tokenization_bart") + >>> with CaptureLogger(logger) as cl: + ... 
logger.info(msg) + >>> assert cl.out, msg + "\n" + ``` + """ + + def __init__(self, logger): + self.logger = logger + self.io = StringIO() + self.sh = logging.StreamHandler(self.io) + self.out = "" + + def __enter__(self): + self.logger.addHandler(self.sh) + return self + + def __exit__(self, *exc): + self.logger.removeHandler(self.sh) + self.out = self.io.getvalue() + + def __repr__(self): + return f"captured: {self.out}\n" + + +@contextlib.contextmanager +def LoggingLevel(level): + """ + This is a context manager to temporarily change transformers modules logging level to the desired value and have it + restored to the original setting at the end of the scope. + + Example: + + ```python + with LoggingLevel(logging.INFO): + AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times + ``` + """ + orig_level = transformers_logging.get_verbosity() + try: + transformers_logging.set_verbosity(level) + yield + finally: + transformers_logging.set_verbosity(orig_level) + + +@contextlib.contextmanager +# adapted from https://stackoverflow.com/a/64789046/9201239 +def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]: + """ + Temporary add given path to `sys.path`. + + Usage : + + ```python + with ExtendSysPath("/path/to/dir"): + mymodule = importlib.import_module("mymodule") + ``` + """ + + path = os.fspath(path) + try: + sys.path.insert(0, path) + yield + finally: + sys.path.remove(path) + + +class TestCasePlus(unittest.TestCase): + """ + This class extends *unittest.TestCase* with additional features. + + Feature 1: A set of fully resolved important file and dir path accessors. + + In tests often we need to know where things are relative to the current test file, and it's not trivial since the + test could be invoked from more than one directory or could reside in sub-directories with different depths. This + class solves this problem by sorting out all the basic paths and provides easy accessors to them: + + - `pathlib` objects (all fully resolved): + + - `test_file_path` - the current test file path (=`__file__`) + - `test_file_dir` - the directory containing the current test file + - `tests_dir` - the directory of the `tests` test suite + - `examples_dir` - the directory of the `examples` test suite + - `repo_root_dir` - the directory of the repository + - `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides) + + - stringified paths---same as above but these return paths as strings, rather than `pathlib` objects: + + - `test_file_path_str` + - `test_file_dir_str` + - `tests_dir_str` + - `examples_dir_str` + - `repo_root_dir_str` + - `src_dir_str` + + Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test. + + 1. Create a unique temporary dir: + + ```python + def test_whatever(self): + tmp_dir = self.get_auto_remove_tmp_dir() + ``` + + `tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the + test. + + + 2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't + empty it after the test. + + ```python + def test_whatever(self): + tmp_dir = self.get_auto_remove_tmp_dir("./xxx") + ``` + + This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests + didn't leave any data in there. + + 3. 
You can override the first two options by directly overriding the `before` and `after` args, leading to the + following behavior: + + `before=True`: the temporary dir will always be cleared at the beginning of the test. + + `before=False`: if the temporary dir already existed, any existing files will remain there. + + `after=True`: the temporary dir will always be deleted at the end of the test. + + `after=False`: the temporary dir will always be left intact at the end of the test. + + Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are + allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem + will get nuked. i.e. please always pass paths that start with `./` + + Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested + otherwise. + + Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This + is useful for invoking external programs from the test suite - e.g. distributed training. + + + ```python + def test_whatever(self): + env = self.get_env() + ```""" + + def setUp(self): + # get_auto_remove_tmp_dir feature: + self.teardown_tmp_dirs = [] + + # figure out the resolved paths for repo_root, tests, examples, etc. + self._test_file_path = inspect.getfile(self.__class__) + path = Path(self._test_file_path).resolve() + self._test_file_dir = path.parents[0] + for up in [1, 2, 3]: + tmp_dir = path.parents[up] + if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir(): + break + if tmp_dir: + self._repo_root_dir = tmp_dir + else: + raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}") + self._tests_dir = self._repo_root_dir / "tests" + self._examples_dir = self._repo_root_dir / "examples" + self._src_dir = self._repo_root_dir / "src" + + @property + def test_file_path(self): + return self._test_file_path + + @property + def test_file_path_str(self): + return str(self._test_file_path) + + @property + def test_file_dir(self): + return self._test_file_dir + + @property + def test_file_dir_str(self): + return str(self._test_file_dir) + + @property + def tests_dir(self): + return self._tests_dir + + @property + def tests_dir_str(self): + return str(self._tests_dir) + + @property + def examples_dir(self): + return self._examples_dir + + @property + def examples_dir_str(self): + return str(self._examples_dir) + + @property + def repo_root_dir(self): + return self._repo_root_dir + + @property + def repo_root_dir_str(self): + return str(self._repo_root_dir) + + @property + def src_dir(self): + return self._src_dir + + @property + def src_dir_str(self): + return str(self._src_dir) + + def get_env(self): + """ + Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's + invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training. + + It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally + the preset `PYTHONPATH` if any (all full resolved paths). 
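+
+    A hedged sketch (the script path is made up; `execute_subprocess_async` is defined later in this module):
+
+    ```python
+    def test_run_external_script(self):
+        env = self.get_env()  # PYTHONPATH now includes ./src plus ./tests or ./examples
+        cmd = ["python", "examples/pytorch/some_example_script.py", "--help"]
+        execute_subprocess_async(cmd, env=env)
+    ```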
+ + """ + env = os.environ.copy() + paths = [self.src_dir_str] + if "/examples" in self.test_file_dir_str: + paths.append(self.examples_dir_str) + else: + paths.append(self.tests_dir_str) + paths.append(env.get("PYTHONPATH", "")) + + env["PYTHONPATH"] = ":".join(paths) + return env + + def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None): + """ + Args: + tmp_dir (`string`, *optional*): + if `None`: + + - a unique temporary path will be created + - sets `before=True` if `before` is `None` + - sets `after=True` if `after` is `None` + else: + + - `tmp_dir` will be created + - sets `before=True` if `before` is `None` + - sets `after=False` if `after` is `None` + before (`bool`, *optional*): + If `True` and the `tmp_dir` already exists, make sure to empty it right away if `False` and the + `tmp_dir` already exists, any existing files will remain there. + after (`bool`, *optional*): + If `True`, delete the `tmp_dir` at the end of the test if `False`, leave the `tmp_dir` and its contents + intact at the end of the test. + + Returns: + tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir + """ + if tmp_dir is not None: + # defining the most likely desired behavior for when a custom path is provided. + # this most likely indicates the debug mode where we want an easily locatable dir that: + # 1. gets cleared out before the test (if it already exists) + # 2. is left intact after the test + if before is None: + before = True + if after is None: + after = False + + # using provided path + path = Path(tmp_dir).resolve() + + # to avoid nuking parts of the filesystem, only relative paths are allowed + if not tmp_dir.startswith("./"): + raise ValueError( + f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`" + ) + + # ensure the dir is empty to start with + if before is True and path.exists(): + shutil.rmtree(tmp_dir, ignore_errors=True) + + path.mkdir(parents=True, exist_ok=True) + + else: + # defining the most likely desired behavior for when a unique tmp path is auto generated + # (not a debug mode), here we require a unique tmp dir that: + # 1. is empty before the test (it will be empty in this situation anyway) + # 2. gets fully removed after the test + if before is None: + before = True + if after is None: + after = True + + # using unique tmp dir (always empty, regardless of `before`) + tmp_dir = tempfile.mkdtemp() + + if after is True: + # register for deletion + self.teardown_tmp_dirs.append(tmp_dir) + + return tmp_dir + + def python_one_liner_max_rss(self, one_liner_str): + """ + Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the + program. + + Args: + one_liner_str (`string`): + a python one liner code that gets passed to `python -c` + + Returns: + max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run. 
+ + Requirements: + this helper needs `/usr/bin/time` to be installed (`apt install time`) + + Example: + + ``` + one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")' + max_rss = self.python_one_liner_max_rss(one_liner_str) + ``` + """ + + if not cmd_exists("/usr/bin/time"): + raise ValueError("/usr/bin/time is required, install with `apt install time`") + + cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'") + with CaptureStd() as cs: + execute_subprocess_async(cmd, env=self.get_env()) + # returned data is in KB so convert to bytes + max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024 + return max_rss + + def tearDown(self): + # get_auto_remove_tmp_dir feature: remove registered temp dirs + for path in self.teardown_tmp_dirs: + shutil.rmtree(path, ignore_errors=True) + self.teardown_tmp_dirs = [] + if is_accelerate_available(): + AcceleratorState._reset_state() + PartialState._reset_state() + + # delete all the env variables having `ACCELERATE` in them + for k in list(os.environ.keys()): + if "ACCELERATE" in k: + del os.environ[k] + + +def mockenv(**kwargs): + """ + this is a convenience wrapper, that allows this :: + + @mockenv(RUN_SLOW=True, USE_TF=False) def test_something(): + run_slow = os.getenv("RUN_SLOW", False) use_tf = os.getenv("USE_TF", False) + + """ + return mock.patch.dict(os.environ, kwargs) + + +# from https://stackoverflow.com/a/34333710/9201239 +@contextlib.contextmanager +def mockenv_context(*remove, **update): + """ + Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv + + The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations. + + Args: + remove: Environment variables to remove. + update: Dictionary of environment variables and values to add/update. + """ + env = os.environ + update = update or {} + remove = remove or [] + + # List of environment variables being updated or removed. + stomped = (set(update.keys()) | set(remove)) & set(env.keys()) + # Environment variables and values to restore on exit. + update_after = {k: env[k] for k in stomped} + # Environment variables and values to remove on exit. + remove_after = frozenset(k for k in update if k not in env) + + try: + env.update(update) + [env.pop(k, None) for k in remove] + yield + finally: + env.update(update_after) + [env.pop(k) for k in remove_after] + + +# --- pytest conf functions --- # + +# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once +pytest_opt_registered = {} + + +def pytest_addoption_shared(parser): + """ + This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there. + + It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest` + option. + + """ + option = "--make-reports" + if option not in pytest_opt_registered: + parser.addoption( + option, + action="store", + default=False, + help="generate report files. The value of this option is used as a prefix to report names", + ) + pytest_opt_registered[option] = 1 + + +def pytest_terminal_summary_main(tr, id): + """ + Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current + directory. The report files are prefixed with the test suite name. + + This function emulates --duration and -rA pytest arguments. 
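+
+    A hedged sketch of the `conftest.py` wiring this expects (these are standard pytest hooks; the exact project
+    `conftest.py` may differ):
+
+    ```python
+    # conftest.py
+    from transformers.testing_utils import pytest_addoption_shared, pytest_terminal_summary_main
+
+    def pytest_addoption(parser):
+        pytest_addoption_shared(parser)
+
+    def pytest_terminal_summary(terminalreporter):
+        make_reports = terminalreporter.config.getoption("--make-reports")
+        if make_reports:
+            pytest_terminal_summary_main(terminalreporter, id=make_reports)
+    ```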
+ + This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined + there. + + Args: + - tr: `terminalreporter` passed from `conftest.py` + - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is + needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other. + + NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal + changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-` + plugins and interfere. + + """ + from _pytest.config import create_terminal_writer + + if not len(id): + id = "tests" + + config = tr.config + orig_writer = config.get_terminal_writer() + orig_tbstyle = config.option.tbstyle + orig_reportchars = tr.reportchars + + dir = f"reports/{id}" + Path(dir).mkdir(parents=True, exist_ok=True) + report_files = { + k: f"{dir}/{k}.txt" + for k in [ + "durations", + "errors", + "failures_long", + "failures_short", + "failures_line", + "passes", + "stats", + "summary_short", + "warnings", + ] + } + + # custom durations report + # note: there is no need to call pytest --durations=XX to get this separate report + # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66 + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if dlist: + dlist.sort(key=lambda x: x.duration, reverse=True) + with open(report_files["durations"], "w") as f: + durations_min = 0.05 # sec + f.write("slowest durations\n") + for i, rep in enumerate(dlist): + if rep.duration < durations_min: + f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted") + break + f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n") + + def summary_failures_short(tr): + # expecting that the reports were --tb=long (default) so we chop them off here to the last frame + reports = tr.getreports("failed") + if not reports: + return + tr.write_sep("=", "FAILURES SHORT STACK") + for rep in reports: + msg = tr._getfailureheadline(rep) + tr.write_sep("_", msg, red=True, bold=True) + # chop off the optional leading extra frames, leaving only the last one + longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S) + tr._tw.line(longrepr) + # note: not printing out any rep.sections to keep the report short + + # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each + # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814 + # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g. 
+ # pytest-instafail does that) + + # report failures with line/short/long styles + config.option.tbstyle = "auto" # full tb + with open(report_files["failures_long"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_failures() + + # config.option.tbstyle = "short" # short tb + with open(report_files["failures_short"], "w") as f: + tr._tw = create_terminal_writer(config, f) + summary_failures_short(tr) + + config.option.tbstyle = "line" # one line per error + with open(report_files["failures_line"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_failures() + + with open(report_files["errors"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_errors() + + with open(report_files["warnings"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_warnings() # normal warnings + tr.summary_warnings() # final warnings + + tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary()) + + # Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it + # takes > 10 minutes (as this part doesn't generate any output on the terminal). + # (also, it seems there is no useful information in this report, and we rarely need to read it) + # with open(report_files["passes"], "w") as f: + # tr._tw = create_terminal_writer(config, f) + # tr.summary_passes() + + with open(report_files["summary_short"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.short_test_summary() + + with open(report_files["stats"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_stats() + + # restore: + tr._tw = orig_writer + tr.reportchars = orig_reportchars + config.option.tbstyle = orig_tbstyle + + +# --- distributed testing functions --- # + +# adapted from https://stackoverflow.com/a/59041913/9201239 +import asyncio # noqa + + +class _RunOutput: + def __init__(self, returncode, stdout, stderr): + self.returncode = returncode + self.stdout = stdout + self.stderr = stderr + + +async def _read_stream(stream, callback): + while True: + line = await stream.readline() + if line: + callback(line) + else: + break + + +async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput: + if echo: + print("\nRunning: ", " ".join(cmd)) + + p = await asyncio.create_subprocess_exec( + cmd[0], + *cmd[1:], + stdin=stdin, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + env=env, + ) + + # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe + # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait + # + # If it starts hanging, will need to switch to the following code. The problem is that no data + # will be seen until it's done and if it hangs for example there will be no debug info. 
+ # out, err = await p.communicate() + # return _RunOutput(p.returncode, out, err) + + out = [] + err = [] + + def tee(line, sink, pipe, label=""): + line = line.decode("utf-8").rstrip() + sink.append(line) + if not quiet: + print(label, line, file=pipe) + + # XXX: the timeout doesn't seem to make any difference here + await asyncio.wait( + [ + _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")), + _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")), + ], + timeout=timeout, + ) + return _RunOutput(await p.wait(), out, err) + + +def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput: + loop = asyncio.get_event_loop() + result = loop.run_until_complete( + _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo) + ) + + cmd_str = " ".join(cmd) + if result.returncode > 0: + stderr = "\n".join(result.stderr) + raise RuntimeError( + f"'{cmd_str}' failed with returncode {result.returncode}\n\n" + f"The combined stderr from workers follows:\n{stderr}" + ) + + # check that the subprocess actually did run and produced some output, should the test rely on + # the remote side to do the testing + if not result.stdout and not result.stderr: + raise RuntimeError(f"'{cmd_str}' produced no output.") + + return result + + +def pytest_xdist_worker_id(): + """ + Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0 + if `-n 1` or `pytest-xdist` isn't being used. + """ + worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0") + worker = re.sub(r"^gw", "", worker, 0, re.M) + return int(worker) + + +def get_torch_dist_unique_port(): + """ + Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument. + + Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same + port at once. + """ + port = 29500 + uniq_delta = pytest_xdist_worker_id() + return port + uniq_delta + + +def nested_simplify(obj, decimals=3): + """ + Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test + within tests. 
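+
+    A hedged sketch of the intended behaviour (3 decimals is the default):
+
+    ```python
+    nested_simplify({"scores": [0.123456, 0.876543], "label": "positive"})
+    # -> {"scores": [0.123, 0.877], "label": "positive"}
+    ```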
+ """ + import numpy as np + + if isinstance(obj, list): + return [nested_simplify(item, decimals) for item in obj] + if isinstance(obj, tuple): + return tuple([nested_simplify(item, decimals) for item in obj]) + elif isinstance(obj, np.ndarray): + return nested_simplify(obj.tolist()) + elif isinstance(obj, Mapping): + return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()} + elif isinstance(obj, (str, int, np.int64)): + return obj + elif obj is None: + return obj + elif is_torch_available() and isinstance(obj, torch.Tensor): + return nested_simplify(obj.tolist(), decimals) + elif is_tf_available() and tf.is_tensor(obj): + return nested_simplify(obj.numpy().tolist()) + elif isinstance(obj, float): + return round(obj, decimals) + elif isinstance(obj, (np.int32, np.float32)): + return nested_simplify(obj.item(), decimals) + else: + raise Exception(f"Not supported: {type(obj)}") + + +def check_json_file_has_correct_format(file_path): + with open(file_path, "r") as f: + lines = f.readlines() + if len(lines) == 1: + # length can only be 1 if dict is empty + assert lines[0] == "{}" + else: + # otherwise make sure json has correct format (at least 3 lines) + assert len(lines) >= 3 + # each key one line, ident should be 2, min length is 3 + assert lines[0].strip() == "{" + for line in lines[1:-1]: + left_indent = len(lines[1]) - len(lines[1].lstrip()) + assert left_indent == 2 + assert lines[-1].strip() == "}" + + +def to_2tuple(x): + if isinstance(x, collections.abc.Iterable): + return x + return (x, x) + + +# These utils relate to ensuring the right error message is received when running scripts +class SubprocessCallException(Exception): + pass + + +def run_command(command: List[str], return_stdout=False): + """ + Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture + if an error occured while running `command` + """ + try: + output = subprocess.check_output(command, stderr=subprocess.STDOUT) + if return_stdout: + if hasattr(output, "decode"): + output = output.decode("utf-8") + return output + except subprocess.CalledProcessError as e: + raise SubprocessCallException( + f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}" + ) from e + + +class RequestCounter: + """ + Helper class that will count all requests made online. + + Might not be robust if urllib3 changes its logging format but should be good enough for us. + + Usage: + ```py + with RequestCounter() as counter: + _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") + assert counter["GET"] == 0 + assert counter["HEAD"] == 1 + assert counter.total_calls == 1 + ``` + """ + + def __enter__(self): + self._counter = defaultdict(int) + self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug) + self.mock = self.patcher.start() + return self + + def __exit__(self, *args, **kwargs) -> None: + for call in self.mock.call_args_list: + log = call.args[0] % call.args[1:] + for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"): + if method in log: + self._counter[method] += 1 + break + self.patcher.stop() + + def __getitem__(self, key: str) -> int: + return self._counter[key] + + @property + def total_calls(self) -> int: + return sum(self._counter.values()) + + +def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None): + """ + To decorate flaky tests. 
They will be retried on failures. + + Args: + max_attempts (`int`, *optional*, defaults to 5): + The maximum number of attempts to retry the flaky test. + wait_before_retry (`float`, *optional*): + If provided, will wait that number of seconds before retrying the test. + description (`str`, *optional*): + A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors, + etc.) + """ + + def decorator(test_func_ref): + @functools.wraps(test_func_ref) + def wrapper(*args, **kwargs): + retry_count = 1 + + while retry_count < max_attempts: + try: + return test_func_ref(*args, **kwargs) + + except Exception as err: + print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr) + if wait_before_retry is not None: + time.sleep(wait_before_retry) + retry_count += 1 + + return test_func_ref(*args, **kwargs) + + return wrapper + + return decorator + + +def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None): + """ + To run a test in a subprocess. In particular, this can avoid (GPU) memory issue. + + Args: + test_case (`unittest.TestCase`): + The test that will run `target_func`. + target_func (`Callable`): + The function implementing the actual testing logic. + inputs (`dict`, *optional*, defaults to `None`): + The inputs that will be passed to `target_func` through an (input) queue. + timeout (`int`, *optional*, defaults to `None`): + The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env. + variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`. + """ + if timeout is None: + timeout = int(os.environ.get("PYTEST_TIMEOUT", 600)) + + start_methohd = "spawn" + ctx = multiprocessing.get_context(start_methohd) + + input_queue = ctx.Queue(1) + output_queue = ctx.JoinableQueue(1) + + # We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle. + input_queue.put(inputs, timeout=timeout) + + process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout)) + process.start() + # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents + # the test to exit properly. + try: + results = output_queue.get(timeout=timeout) + output_queue.task_done() + except Exception as e: + process.terminate() + test_case.fail(e) + process.join(timeout=timeout) + + if results["error"] is not None: + test_case.fail(f'{results["error"]}') + + +""" +The following contains utils to run the documentation tests without having to overwrite any files. + +The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is +made as a print would otherwise fail the corresonding line. + +To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules +""" + + +def preprocess_string(string, skip_cuda_tests): + """Prepare a docstring or a `.md` file to be run by doctest. + + The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of + its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and a + cuda stuff is detective (with a heuristic), this method will return an empty string so no doctest will be run for + `string`. 
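+
+    A hedged illustration (the markdown snippet below is made up):
+
+    ```python
+    doc_md = "```python\n>>> from datasets import load_dataset\n>>> ds = load_dataset('dummy')\n>>> print(len(ds))\n```"
+    processed = preprocess_string(doc_md, skip_cuda_tests=False)
+    # the `>>> ds = load_dataset('dummy')` line now ends with `# doctest: +IGNORE_RESULT`;
+    # had the block mentioned `cuda`, `to(0)` or `device=0` with skip_cuda_tests=True, "" would be returned instead
+    ```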
+ """ + codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)" + codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string) + is_cuda_found = False + for i, codeblock in enumerate(codeblocks): + if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock: + codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock) + if ( + (">>>" in codeblock or "..." in codeblock) + and re.search(r"cuda|to\(0\)|device=0", codeblock) + and skip_cuda_tests + ): + is_cuda_found = True + break + + modified_string = "" + if not is_cuda_found: + modified_string = "".join(codeblocks) + + return modified_string + + +class HfDocTestParser(doctest.DocTestParser): + """ + Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This + means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also + added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line. + + Tests involving cuda are skipped base on a naive pattern that should be updated if it is not enough. + """ + + # This regular expression is used to find doctest examples in a + # string. It defines three groups: `source` is the source code + # (including leading indentation and prompts); `indent` is the + # indentation of the first (PS1) line of the source code; and + # `want` is the expected output (including leading indentation). + # fmt: off + _EXAMPLE_RE = re.compile(r''' + # Source consists of a PS1 line followed by zero or more PS2 lines. + (?P + (?:^(?P [ ]*) >>> .*) # PS1 line + (?:\n [ ]* \.\.\. .*)*) # PS2 lines + \n? + # Want consists of any non-blank lines that do not start with PS1. + (?P (?:(?![ ]*$) # Not a blank line + (?![ ]*>>>) # Not a line starting with PS1 + # !!!!!!!!!!! HF Specific !!!!!!!!!!! + (?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line) + # !!!!!!!!!!! HF Specific !!!!!!!!!!! + (?:\n|$) # Match a new line or end of string + )*) + ''', re.MULTILINE | re.VERBOSE + ) + # fmt: on + + # !!!!!!!!!!! HF Specific !!!!!!!!!!! + skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False)) + # !!!!!!!!!!! HF Specific !!!!!!!!!!! + + def parse(self, string, name=""): + """ + Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before + calling `super().parse` + """ + string = preprocess_string(string, self.skip_cuda_tests) + return super().parse(string, name) + + +class HfDoctestModule(Module): + """ + Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering + tests. + """ + + def collect(self) -> Iterable[DoctestItem]: + class MockAwareDocTestFinder(doctest.DocTestFinder): + """A hackish doctest finder that overrides stdlib internals to fix a stdlib bug. + + https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532 + """ + + def _find_lineno(self, obj, source_lines): + """Doctest code does not take into account `@property`, this + is a hackish way to fix it. https://bugs.python.org/issue17446 + + Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be + reported upstream. 
#8796 + """ + if isinstance(obj, property): + obj = getattr(obj, "fget", obj) + + if hasattr(obj, "__wrapped__"): + # Get the main obj in case of it being wrapped + obj = inspect.unwrap(obj) + + # Type ignored because this is a private function. + return super()._find_lineno( # type:ignore[misc] + obj, + source_lines, + ) + + def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None: + if _is_mocked(obj): + return + with _patch_unwrap_mock_aware(): + # Type ignored because this is a private function. + super()._find( # type:ignore[misc] + tests, obj, name, module, source_lines, globs, seen + ) + + if self.path.name == "conftest.py": + module = self.config.pluginmanager._importconftest( + self.path, + self.config.getoption("importmode"), + rootpath=self.config.rootpath, + ) + else: + try: + module = import_path( + self.path, + root=self.config.rootpath, + mode=self.config.getoption("importmode"), + ) + except ImportError: + if self.config.getvalue("doctest_ignore_import_errors"): + skip("unable to import module %r" % self.path) + else: + raise + + # !!!!!!!!!!! HF Specific !!!!!!!!!!! + finder = MockAwareDocTestFinder(parser=HfDocTestParser()) + # !!!!!!!!!!! HF Specific !!!!!!!!!!! + optionflags = get_optionflags(self) + runner = _get_runner( + verbose=False, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + for test in finder.find(module, module.__name__): + if test.examples: # skip empty doctests and cuda + yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test) + + +def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs): + if device not in dispatch_table: + return dispatch_table["default"](*args, **kwargs) + + fn = dispatch_table[device] + + # Some device agnostic functions return values. Need to guard against `None` + # instead at user level. + if fn is None: + return None + return fn(*args, **kwargs) + + +if is_torch_available(): + # Mappings from device names to callable functions to support device agnostic + # testing. + BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed} + BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None} + BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1} + + +def backend_manual_seed(device: str, seed: int): + return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed) + + +def backend_empty_cache(device: str): + return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE) + + +def backend_device_count(device: str): + return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT) + + +if is_torch_available(): + # If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries + # into device to function mappings. + if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ: + device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"] + if not Path(device_spec_path).is_file(): + raise ValueError( + f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}" + ) + + # Try to strip extension for later import – also verifies we are importing a + # python file. + try: + import_name = device_spec_path[: device_spec_path.index(".py")] + except ValueError as e: + raise ValueError(f"Provided device spec file was not a Python file! 
Received '{device_spec_path}") from e + + device_spec_module = importlib.import_module(import_name) + + # Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early. + try: + device_name = device_spec_module.DEVICE_NAME + except AttributeError as e: + raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e + + if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name: + msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n" + msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name." + raise ValueError(msg) + + torch_device = device_name + + def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str): + try: + # Try to import the function directly + spec_fn = getattr(device_spec_module, attribute_name) + device_fn_dict[torch_device] = spec_fn + except AttributeError as e: + # If the function doesn't exist, and there is no default, throw an error + if "default" not in device_fn_dict: + raise AttributeError( + f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found." + ) from e + + # Add one entry here for each `BACKEND_*` dictionary. + update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN") + update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN") + update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN") diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tf_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/tf_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..75e302947e8066d0a29a77abf641c7409e6d2ec1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/tf_utils.py @@ -0,0 +1,267 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Optional, Union + +import numpy as np +import tensorflow as tf + +from .feature_extraction_utils import BatchFeature +from .tokenization_utils_base import BatchEncoding +from .utils import logging + + +logger = logging.get_logger(__name__) + + +def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]: + """ + Deal with dynamic shape in tensorflow cleanly. + + Args: + tensor (`tf.Tensor` or `np.ndarray`): The tensor we want the shape of. + + Returns: + `List[int]`: The shape of the tensor as a list. + """ + if isinstance(tensor, np.ndarray): + return list(tensor.shape) + + dynamic = tf.shape(tensor) + + if tensor.shape == tf.TensorShape(None): + return dynamic + + static = tensor.shape.as_list() + + return [dynamic[i] if s is None else s for i, s in enumerate(static)] + + +def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor: + """ + Stable wrapper that returns the same output as `tf.nn.softmax`, but that works reliably with XLA on CPU. 
It is + meant as a workaround for the [following issue](https://github.com/tensorflow/tensorflow/issues/55682), and will be + removed after it gets fixed. The arguments and outputs are the same as `tf.nn.softmax`, and relies on the fact that + `softmax(x) = softmax(x + c)` (see https://ogunlao.github.io/2020/04/26/you_dont_really_know_softmax.html). + + Args: + logits (`tf.Tensor`): + Must be one of the following types: half, float32, float64. + axis (`int`, *optional*): + The dimension softmax would be performed on. The default is -1 which indicates the last dimension. + name (`str`, *optional*): + A name for the operation. + + Returns: + `tf.Tensor`: + A Tensor. Has the same type and shape as logits. + """ + # TODO: When the issue linked above gets sorted, add a check on TF version here and use the original function if + # it has the fix. After we drop the support for unfixed versions, remove this function. + return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name) + + +def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1): + # This is a very simplified functional layernorm, designed to duplicate + # the functionality of PyTorch nn.functional.layer_norm when this is needed to port + # models in Transformers. + + if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int): + raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.") + + # Get mean and variance on the axis to be normalized + mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True) + + if axis != -1: + # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions + # on every dimension except axis + shape = [1] * inputs.shape.rank + shape[axis] = shape_list(inputs)[axis] + weight = tf.reshape(weight, shape) + bias = tf.reshape(bias, shape) + + # Compute layer normalization using the batch_normalization + # function. + outputs = tf.nn.batch_normalization( + inputs, + mean, + variance, + offset=bias, + scale=weight, + variance_epsilon=epsilon, + ) + return outputs + + +def flatten(input, start_dim=0, end_dim=-1): + # Replicates the behavior of torch.flatten in TF + + # If end_dim or start_dim is negative, count them from the end + if end_dim < 0: + end_dim += input.shape.rank + if start_dim < 0: + start_dim += input.shape.rank + + if start_dim == end_dim: + return input + + in_shape = tf.shape(input) + flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1]) + out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0) + return tf.reshape(input, out_shape) + + +def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor: + """ + Invert an attention mask (e.g., switches 0. and 1.). + + Args: + encoder_attention_mask (`torch.Tensor`): An attention mask. + + Returns: + `tf.Tensor`: The inverted attention mask. + """ + if not isinstance(encoder_attention_mask, tf.Tensor): + encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask) # Catches stray NumPy inputs + if encoder_attention_mask.shape.rank == 3: + encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] + if encoder_attention_mask.shape.rank == 2: + encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] + # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition + # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow + # /transformer/transformer_layers.py#L270 + # encoder_extended_attention_mask = (encoder_extended_attention_mask == + # encoder_extended_attention_mask.transpose(-1, -2)) + encoder_extended_attention_mask = ( + tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask + ) * encoder_extended_attention_mask.dtype.min + + return encoder_extended_attention_mask + + +def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None: + """ + `tf.gather`, on which TF embedding layers are based, won't check positive out of bound indices on GPU, returning + zeros instead. This function adds a check against that dangerous silent behavior. + + Args: + tensor (`tf.Tensor`): The tensor of indices to check. + embed_dim (`int`): The embedding dimension. + tensor_name (`str`, *optional*): The name of the tensor to use in the error message. + """ + tf.debugging.assert_less( + tensor, + tf.cast(embed_dim, dtype=tensor.dtype), + message=( + f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding " + f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time." + ), + ) + + +def save_attributes_to_hdf5_group(group, name, data): + """Saves attributes (data) of the specified name into the HDF5 group. + + This method deals with an inherent problem of HDF5 file which is not able to store data larger than + HDF5_OBJECT_HEADER_LIMIT bytes. + + Args: + group: A pointer to a HDF5 group. + name: A name of the attributes to save. + data: Attributes data to store. + + Raises: + RuntimeError: If any single attribute is too large to be saved. + + Copied from Keras to Transformers to avoid versioning issues. + """ + HDF5_OBJECT_HEADER_LIMIT = 64512 + # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` + # because in that case even chunking the array would not make the saving + # possible. + bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT] + + # Expecting this to never be true. + if bad_attributes: + raise RuntimeError( + "The following attributes cannot be saved to HDF5 file because " + f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} " + f"bytes: {bad_attributes}" + ) + + data_npy = np.asarray(data) + + num_chunks = 1 + chunked_data = np.array_split(data_npy, num_chunks) + + # This will never loop forever thanks to the test above. + while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data): + num_chunks += 1 + chunked_data = np.array_split(data_npy, num_chunks) + + if num_chunks > 1: + for chunk_id, chunk_data in enumerate(chunked_data): + group.attrs["%s%d" % (name, chunk_id)] = chunk_data + else: + group.attrs[name] = data + + +def load_attributes_from_hdf5_group(group, name): + """Loads attributes of the specified name from the HDF5 group. + + This method deals with an inherent problem of HDF5 file which is not able to store data larger than + HDF5_OBJECT_HEADER_LIMIT bytes. + + Args: + group: A pointer to a HDF5 group. + name: A name of the attributes to load. + + Returns: + data: Attributes data. + + Copied from Keras to Transformers to avoid versioning issues. 
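A minimal round-trip sketch of the chunking behaviour described above (illustrative only; it assumes `h5py` is available and that these helpers are importable from `transformers.tf_utils`, the file added by this hunk):

```python
# Attributes larger than HDF5_OBJECT_HEADER_LIMIT are split into name0, name1, ... chunks.
import io

import h5py
from transformers.tf_utils import load_attributes_from_hdf5_group, save_attributes_to_hdf5_group

layer_names = [f"layer_{i}".encode("utf8") for i in range(10_000)]  # ~100 kB, forces chunking

with h5py.File(io.BytesIO(), "w") as f:
    group = f.create_group("model_weights")
    save_attributes_to_hdf5_group(group, "layer_names", layer_names)
    # The data is now spread over "layer_names0", "layer_names1", ... attributes.
    restored = load_attributes_from_hdf5_group(group, "layer_names")

assert restored == [name.decode("utf8") for name in layer_names]
```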
+ """ + if name in group.attrs: + data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]] + else: + data = [] + chunk_id = 0 + while "%s%d" % (name, chunk_id) in group.attrs: + data.extend( + [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]] + ) + chunk_id += 1 + return data + + +def expand_1d(data): + """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s. + Copied from Keras to here to avoid versioning issues.""" + + def _expand_single_1d_tensor(t): + if isinstance(t, tf.Tensor) and t.shape.rank == 1: + return tf.expand_dims(t, axis=-1) + return t + + return tf.nest.map_structure(_expand_single_1d_tensor, data) + + +def convert_batch_encoding(*args, **kwargs): + # Convert HF BatchEncoding/BatchFeature objects in the inputs to dicts that Keras understands + if args and isinstance(args[0], (BatchEncoding, BatchFeature)): + args = list(args) + args[0] = dict(args[0]) + elif "x" in kwargs and isinstance(kwargs["x"], (BatchEncoding, BatchFeature)): + kwargs["x"] = dict(kwargs["x"]) + return args, kwargs diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/time_series_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/time_series_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..02eddd72cebd3562702cb1ea9439f313bc01642a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/time_series_utils.py @@ -0,0 +1,225 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Time series distributional output classes and utilities. +""" +from typing import Callable, Dict, Optional, Tuple + +import torch +from torch import nn +from torch.distributions import ( + AffineTransform, + Distribution, + Independent, + NegativeBinomial, + Normal, + StudentT, + TransformedDistribution, +) + + +class AffineTransformed(TransformedDistribution): + def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0): + self.scale = 1.0 if scale is None else scale + self.loc = 0.0 if loc is None else loc + + super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)]) + + @property + def mean(self): + """ + Returns the mean of the distribution. + """ + return self.base_dist.mean * self.scale + self.loc + + @property + def variance(self): + """ + Returns the variance of the distribution. + """ + return self.base_dist.variance * self.scale**2 + + @property + def stddev(self): + """ + Returns the standard deviation of the distribution. 
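A short sketch of the affine statistics defined above (illustrative only; the import path is taken from the diff header of this file):

```python
# mean = base.mean * scale + loc, variance = base.variance * scale**2, stddev = sqrt(variance)
import torch
from torch.distributions import Normal
from transformers.time_series_utils import AffineTransformed

base = Normal(loc=torch.tensor(0.0), scale=torch.tensor(1.0))
scaled = AffineTransformed(base, loc=torch.tensor(10.0), scale=torch.tensor(2.0))

assert torch.isclose(scaled.mean, torch.tensor(10.0))     # 0 * 2 + 10
assert torch.isclose(scaled.variance, torch.tensor(4.0))  # 1 * 2**2
assert torch.isclose(scaled.stddev, torch.tensor(2.0))    # sqrt(4)
```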
+ """ + return self.variance.sqrt() + + +class ParameterProjection(nn.Module): + def __init__( + self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs + ) -> None: + super().__init__(**kwargs) + self.args_dim = args_dim + self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()]) + self.domain_map = domain_map + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]: + params_unbounded = [proj(x) for proj in self.proj] + + return self.domain_map(*params_unbounded) + + +class LambdaLayer(nn.Module): + def __init__(self, function): + super().__init__() + self.function = function + + def forward(self, x, *args): + return self.function(x, *args) + + +class DistributionOutput: + distribution_class: type + in_features: int + args_dim: Dict[str, int] + + def __init__(self, dim: int = 1) -> None: + self.dim = dim + self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim} + + def _base_distribution(self, distr_args): + if self.dim == 1: + return self.distribution_class(*distr_args) + else: + return Independent(self.distribution_class(*distr_args), 1) + + def distribution( + self, + distr_args, + loc: Optional[torch.Tensor] = None, + scale: Optional[torch.Tensor] = None, + ) -> Distribution: + distr = self._base_distribution(distr_args) + if loc is None and scale is None: + return distr + else: + return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim) + + @property + def event_shape(self) -> Tuple: + r""" + Shape of each individual event contemplated by the distributions that this object constructs. + """ + return () if self.dim == 1 else (self.dim,) + + @property + def event_dim(self) -> int: + r""" + Number of event dimensions, i.e., length of the `event_shape` tuple, of the distributions that this object + constructs. + """ + return len(self.event_shape) + + @property + def value_in_support(self) -> float: + r""" + A float that will have a valid numeric value when computing the log-loss of the corresponding distribution. By + default 0.0. This value will be used when padding data series. + """ + return 0.0 + + def get_parameter_projection(self, in_features: int) -> nn.Module: + r""" + Return the parameter projection layer that maps the input to the appropriate parameters of the distribution. + """ + return ParameterProjection( + in_features=in_features, + args_dim=self.args_dim, + domain_map=LambdaLayer(self.domain_map), + ) + + def domain_map(self, *args: torch.Tensor): + r""" + Converts arguments to the right shape and domain. The domain depends on the type of distribution, while the + correct shape is obtained by reshaping the trailing axis in such a way that the returned tensors define a + distribution of the right event_shape. + """ + raise NotImplementedError() + + @staticmethod + def squareplus(x: torch.Tensor) -> torch.Tensor: + r""" + Helper to map inputs to the positive orthant by applying the square-plus operation. Reference: + https://twitter.com/jon_barron/status/1387167648669048833 + """ + return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0 + + +class StudentTOutput(DistributionOutput): + """ + Student-T distribution output class. 
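A usage sketch of the projection flow described above, using the `StudentTOutput` subclass defined just below (illustrative only; it assumes the module is importable as `transformers.time_series_utils`):

```python
# Hidden states -> unconstrained parameters -> domain_map -> torch Distribution.
import torch
from transformers.time_series_utils import StudentTOutput

output = StudentTOutput(dim=1)
projection = output.get_parameter_projection(in_features=8)

hidden = torch.randn(4, 8)               # e.g. the last hidden state of a decoder
df, loc, scale = projection(hidden)      # each of shape (4,), mapped into valid domains
distribution = output.distribution((df, loc, scale))

sample = distribution.sample()           # shape (4,)
log_prob = distribution.log_prob(sample)
```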
+ """ + + args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} + distribution_class: type = StudentT + + @classmethod + def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor): + scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps) + df = 2.0 + cls.squareplus(df) + return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1) + + +class NormalOutput(DistributionOutput): + """ + Normal distribution output class. + """ + + args_dim: Dict[str, int] = {"loc": 1, "scale": 1} + distribution_class: type = Normal + + @classmethod + def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor): + scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps) + return loc.squeeze(-1), scale.squeeze(-1) + + +class NegativeBinomialOutput(DistributionOutput): + """ + Negative Binomial distribution output class. + """ + + args_dim: Dict[str, int] = {"total_count": 1, "logits": 1} + distribution_class: type = NegativeBinomial + + @classmethod + def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor): + total_count = cls.squareplus(total_count) + return total_count.squeeze(-1), logits.squeeze(-1) + + def _base_distribution(self, distr_args) -> Distribution: + total_count, logits = distr_args + if self.dim == 1: + return self.distribution_class(total_count=total_count, logits=logits) + else: + return Independent(self.distribution_class(total_count=total_count, logits=logits), 1) + + # Overwrites the parent class method. We cannot scale using the affine + # transformation since negative binomial should return integers. Instead + # we scale the parameters. + def distribution( + self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None + ) -> Distribution: + total_count, logits = distr_args + + if scale is not None: + # See scaling property of Gamma. + logits += scale.log() + + return self._base_distribution((total_count, logits)) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tokenization_utils_base.py b/llmeval-env/lib/python3.10/site-packages/transformers/tokenization_utils_base.py new file mode 100644 index 0000000000000000000000000000000000000000..7d56ed204423c0b46c488073dab1eecf67b7fb98 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/tokenization_utils_base.py @@ -0,0 +1,4117 @@ +# coding=utf-8 +# Copyright 2020 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Base classes common to both the slow and the fast tokenization classes: PreTrainedTokenizerBase (host all the user +fronting encoding methods) Special token mixing (host the special tokens logic) and BatchEncoding (wrap the dictionary +of output with special method for the Fast tokenizers) +""" + +import copy +import json +import os +import re +import warnings +from collections import UserDict +from collections.abc import Mapping, Sized +from contextlib import contextmanager +from dataclasses import dataclass +from functools import lru_cache +from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union + +import numpy as np +from packaging import version + +from . import __version__ +from .dynamic_module_utils import custom_object_save +from .utils import ( + ExplicitEnum, + PaddingStrategy, + PushToHubMixin, + TensorType, + add_end_docstrings, + add_model_info_to_auto_map, + cached_file, + copy_func, + download_url, + extract_commit_hash, + is_flax_available, + is_jax_tensor, + is_mlx_available, + is_numpy_array, + is_offline_mode, + is_remote_url, + is_tf_available, + is_tf_tensor, + is_tokenizers_available, + is_torch_available, + is_torch_device, + is_torch_tensor, + logging, + requires_backends, + to_py_obj, +) + + +if TYPE_CHECKING: + if is_torch_available(): + import torch + if is_tf_available(): + import tensorflow as tf + if is_flax_available(): + import jax.numpy as jnp # noqa: F401 + from .pipelines.conversational import Conversation + + +if is_tokenizers_available(): + from tokenizers import AddedToken + from tokenizers import Encoding as EncodingFast +else: + + @dataclass(frozen=False, eq=True) + class AddedToken: + """ + AddedToken represents a token to be added to a Tokenizer An AddedToken can have special options defining the + way it should behave. + + The `normalized` will default to `not special` if it is not specified, similarly to the definition in + `tokenizers`. 
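A minimal sketch of the fallback defaults described above, using the pure-Python `AddedToken` defined in this module when `tokenizers` is not installed (the Rust-backed `tokenizers.AddedToken` follows the same convention for `normalized`):

```python
# `normalized` defaults to `not special`; `__str__` returns the raw content.
token = AddedToken("<my_new_token>", special=True)
assert token.normalized is False
assert str(token) == "<my_new_token>"

regular = AddedToken("my_word")  # special defaults to False
assert regular.normalized is True
```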
+ """ + + def __init__( + self, content: str, single_word=False, lstrip=False, rstrip=False, special=False, normalized=None + ): + self.content = content + self.single_word = single_word + self.lstrip = lstrip + self.rstrip = rstrip + self.special = special + self.normalized = normalized if normalized is not None else not special + + def __getstate__(self): + return self.__dict__ + + def __str__(self): + return self.content + + @dataclass + class EncodingFast: + """This is dummy class because without the `tokenizers` library we don't have these objects anyway""" + + pass + + +logger = logging.get_logger(__name__) + +VERY_LARGE_INTEGER = int(1e30) # This is used to set the max input length for a model with infinite size input +LARGE_INTEGER = int(1e20) # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER + +# Define type aliases and NamedTuples +TextInput = str +PreTokenizedInput = List[str] +EncodedInput = List[int] +TextInputPair = Tuple[str, str] +PreTokenizedInputPair = Tuple[List[str], List[str]] +EncodedInputPair = Tuple[List[int], List[int]] + + +# Slow tokenizers used to be saved in three separated files +SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json" +ADDED_TOKENS_FILE = "added_tokens.json" +TOKENIZER_CONFIG_FILE = "tokenizer_config.json" + +# Fast tokenizers (provided by HuggingFace tokenizer's library) can be saved in a single file +FULL_TOKENIZER_FILE = "tokenizer.json" +_re_tokenizer_file = re.compile(r"tokenizer\.(.*)\.json") + + +class TruncationStrategy(ExplicitEnum): + """ + Possible values for the `truncation` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in + an IDE. + """ + + ONLY_FIRST = "only_first" + ONLY_SECOND = "only_second" + LONGEST_FIRST = "longest_first" + DO_NOT_TRUNCATE = "do_not_truncate" + + +class CharSpan(NamedTuple): + """ + Character span in the original string. + + Args: + start (`int`): Index of the first character in the original string. + end (`int`): Index of the character following the last character in the original string. + """ + + start: int + end: int + + +class TokenSpan(NamedTuple): + """ + Token span in an encoded string (list of tokens). + + Args: + start (`int`): Index of the first token in the span. + end (`int`): Index of the token following the last token in the span. + """ + + start: int + end: int + + +class BatchEncoding(UserDict): + """ + Holds the output of the [`~tokenization_utils_base.PreTrainedTokenizerBase.__call__`], + [`~tokenization_utils_base.PreTrainedTokenizerBase.encode_plus`] and + [`~tokenization_utils_base.PreTrainedTokenizerBase.batch_encode_plus`] methods (tokens, attention_masks, etc). + + This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes + utility methods to map from word/character space to token space. + + Args: + data (`dict`, *optional*): + Dictionary of lists/arrays/tensors returned by the `__call__`/`encode_plus`/`batch_encode_plus` methods + ('input_ids', 'attention_mask', etc.). + encoding (`tokenizers.Encoding` or `Sequence[tokenizers.Encoding]`, *optional*): + If the tokenizer is a fast tokenizer which outputs additional information like mapping from word/character + space to token space the `tokenizers.Encoding` instance or list of instance (for batches) hold this + information. + tensor_type (`Union[None, str, TensorType]`, *optional*): + You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at + initialization. 
+ prepend_batch_axis (`bool`, *optional*, defaults to `False`): + Whether or not to add a batch axis when converting to tensors (see `tensor_type` above). + n_sequences (`Optional[int]`, *optional*): + You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at + initialization. + """ + + def __init__( + self, + data: Optional[Dict[str, Any]] = None, + encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None, + tensor_type: Union[None, str, TensorType] = None, + prepend_batch_axis: bool = False, + n_sequences: Optional[int] = None, + ): + super().__init__(data) + + if isinstance(encoding, EncodingFast): + encoding = [encoding] + + self._encodings = encoding + + if n_sequences is None and encoding is not None and len(encoding): + n_sequences = encoding[0].n_sequences + + self._n_sequences = n_sequences + + self.convert_to_tensors(tensor_type=tensor_type, prepend_batch_axis=prepend_batch_axis) + + @property + def n_sequences(self) -> Optional[int]: + """ + `Optional[int]`: The number of sequences used to generate each sample from the batch encoded in this + [`BatchEncoding`]. Currently can be one of `None` (unknown), `1` (a single sentence) or `2` (a pair of + sentences) + """ + return self._n_sequences + + @property + def is_fast(self) -> bool: + """ + `bool`: Indicate whether this [`BatchEncoding`] was generated from the result of a [`PreTrainedTokenizerFast`] + or not. + """ + return self._encodings is not None + + def __getitem__(self, item: Union[int, str]) -> Union[Any, EncodingFast]: + """ + If the key is a string, returns the value of the dict associated to `key` ('input_ids', 'attention_mask', + etc.). + + If the key is an integer, get the `tokenizers.Encoding` for batch item with index `key`. + + If the key is a slice, returns the value of the dict associated to `key` ('input_ids', 'attention_mask', etc.) + with the constraint of slice. + """ + if isinstance(item, str): + return self.data[item] + elif self._encodings is not None: + return self._encodings[item] + elif isinstance(item, slice): + return {key: self.data[key][item] for key in self.data.keys()} + else: + raise KeyError( + "Invalid key. Only three types of key are available: " + "(1) string, (2) integers for backend Encoding, and (3) slices for data subsetting." + ) + + def __getattr__(self, item: str): + try: + return self.data[item] + except KeyError: + raise AttributeError + + def __getstate__(self): + return {"data": self.data, "encodings": self._encodings} + + def __setstate__(self, state): + if "data" in state: + self.data = state["data"] + + if "encodings" in state: + self._encodings = state["encodings"] + + def keys(self): + return self.data.keys() + + def values(self): + return self.data.values() + + def items(self): + return self.data.items() + + # After this point: + # Extended properties and methods only available for fast (Rust-based) tokenizers + # provided by HuggingFace tokenizers library. + + @property + def encodings(self) -> Optional[List[EncodingFast]]: + """ + `Optional[List[tokenizers.Encoding]]`: The list all encodings from the tokenization process. Returns `None` if + the input was tokenized through Python (i.e., not a fast) tokenizer. + """ + return self._encodings + + def tokens(self, batch_index: int = 0) -> List[str]: + """ + Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion to + integer indices) at a given batch index (only works for the output of a fast tokenizer). 
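A quick usage sketch of the dict-style access and the fast-tokenizer alignment helpers described above (requires downloading a checkpoint; any fast tokenizer will do):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
encoding = tokenizer("Hello world")          # returns a BatchEncoding

print(encoding["input_ids"])                 # plain dict-style access
print(encoding.tokens(0))                    # e.g. ['[CLS]', 'hello', 'world', '[SEP]']
print(encoding.word_ids(0))                  # e.g. [None, 0, 1, None]
```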
+ + Args: + batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. + + Returns: + `List[str]`: The list of tokens at that index. + """ + if not self._encodings: + raise ValueError( + "tokens() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`" + " class)." + ) + return self._encodings[batch_index].tokens + + def sequence_ids(self, batch_index: int = 0) -> List[Optional[int]]: + """ + Return a list mapping the tokens to the id of their original sentences: + + - `None` for special tokens added around or between sequences, + - `0` for tokens corresponding to words in the first sequence, + - `1` for tokens corresponding to words in the second sequence when a pair of sequences was jointly + encoded. + + Args: + batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. + + Returns: + `List[Optional[int]]`: A list indicating the sequence id corresponding to each token. Special tokens added + by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding + sequence. + """ + if not self._encodings: + raise ValueError( + "sequence_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`" + " class)." + ) + return self._encodings[batch_index].sequence_ids + + def words(self, batch_index: int = 0) -> List[Optional[int]]: + """ + Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer. + + Args: + batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. + + Returns: + `List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the + tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word + (several tokens will be mapped to the same word index if they are parts of that word). + """ + if not self._encodings: + raise ValueError( + "words() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`" + " class)." + ) + warnings.warn( + "`BatchEncoding.words()` property is deprecated and should be replaced with the identical, " + "but more self-explanatory `BatchEncoding.word_ids()` property.", + FutureWarning, + ) + return self.word_ids(batch_index) + + def word_ids(self, batch_index: int = 0) -> List[Optional[int]]: + """ + Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer. + + Args: + batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. + + Returns: + `List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the + tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word + (several tokens will be mapped to the same word index if they are parts of that word). + """ + if not self._encodings: + raise ValueError( + "word_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`" + " class)." + ) + return self._encodings[batch_index].word_ids + + def token_to_sequence(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int: + """ + Get the index of the sequence represented by the given token. 
In the general use case, this method returns `0` + for a single sequence or the first sequence of a pair, and `1` for the second sequence of a pair + + Can be called as: + + - `self.token_to_sequence(token_index)` if batch size is 1 + - `self.token_to_sequence(batch_index, token_index)` if batch size is greater than 1 + + This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., + words are defined by the user). In this case it allows to easily associate encoded tokens with provided + tokenized words. + + Args: + batch_or_token_index (`int`): + Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of + the token in the sequence. + token_index (`int`, *optional*): + If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the + sequence. + + Returns: + `int`: Index of the word in the input sequence. + """ + + if not self._encodings: + raise ValueError("token_to_sequence() is not available when using Python based tokenizers") + if token_index is not None: + batch_index = batch_or_token_index + else: + batch_index = 0 + token_index = batch_or_token_index + if batch_index < 0: + batch_index = self._batch_size + batch_index + if token_index < 0: + token_index = self._seq_len + token_index + return self._encodings[batch_index].token_to_sequence(token_index) + + def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int: + """ + Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch. + + Can be called as: + + - `self.token_to_word(token_index)` if batch size is 1 + - `self.token_to_word(batch_index, token_index)` if batch size is greater than 1 + + This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., + words are defined by the user). In this case it allows to easily associate encoded tokens with provided + tokenized words. + + Args: + batch_or_token_index (`int`): + Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of + the token in the sequence. + token_index (`int`, *optional*): + If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the + sequence. + + Returns: + `int`: Index of the word in the input sequence. + """ + + if not self._encodings: + raise ValueError("token_to_word() is not available when using Python based tokenizers") + if token_index is not None: + batch_index = batch_or_token_index + else: + batch_index = 0 + token_index = batch_or_token_index + if batch_index < 0: + batch_index = self._batch_size + batch_index + if token_index < 0: + token_index = self._seq_len + token_index + return self._encodings[batch_index].token_to_word(token_index) + + def word_to_tokens( + self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0 + ) -> Optional[TokenSpan]: + """ + Get the encoded token span corresponding to a word in a sequence of the batch. + + Token spans are returned as a [`~tokenization_utils_base.TokenSpan`] with: + + - **start** -- Index of the first token. + - **end** -- Index of the token following the last token. 
+ + Can be called as: + + - `self.word_to_tokens(word_index, sequence_index: int = 0)` if batch size is 1 + - `self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)` if batch size is greater or equal to + 1 + + This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words + are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized + words. + + Args: + batch_or_word_index (`int`): + Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of + the word in the sequence. + word_index (`int`, *optional*): + If a batch index is provided in *batch_or_token_index*, this can be the index of the word in the + sequence. + sequence_index (`int`, *optional*, defaults to 0): + If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 + or 1) the provided word index belongs to. + + Returns: + ([`~tokenization_utils_base.TokenSpan`], *optional*): Span of tokens in the encoded sequence. Returns + `None` if no tokens correspond to the word. This can happen especially when the token is a special token + that has been used to format the tokenization. For example when we add a class token at the very beginning + of the tokenization. + """ + + if not self._encodings: + raise ValueError("word_to_tokens() is not available when using Python based tokenizers") + if word_index is not None: + batch_index = batch_or_word_index + else: + batch_index = 0 + word_index = batch_or_word_index + if batch_index < 0: + batch_index = self._batch_size + batch_index + if word_index < 0: + word_index = self._seq_len + word_index + span = self._encodings[batch_index].word_to_tokens(word_index, sequence_index) + return TokenSpan(*span) if span is not None else None + + def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan: + """ + Get the character span corresponding to an encoded token in a sequence of the batch. + + Character spans are returned as a [`~tokenization_utils_base.CharSpan`] with: + + - **start** -- Index of the first character in the original string associated to the token. + - **end** -- Index of the character following the last character in the original string associated to the + token. + + Can be called as: + + - `self.token_to_chars(token_index)` if batch size is 1 + - `self.token_to_chars(batch_index, token_index)` if batch size is greater or equal to 1 + + Args: + batch_or_token_index (`int`): + Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of + the token in the sequence. + token_index (`int`, *optional*): + If a batch index is provided in *batch_or_token_index*, this can be the index of the token or tokens in + the sequence. + + Returns: + [`~tokenization_utils_base.CharSpan`]: Span of characters in the original string, or None, if the token + (e.g. , ) doesn't correspond to any chars in the origin string. 
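A small sketch of the character-alignment helpers described here (fast tokenizers only; checkpoint name is just an example):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
text = "Hello world"
encoding = tokenizer(text)

span = encoding.token_to_chars(1)            # CharSpan for the first token after [CLS]
print(text[span.start:span.end])             # 'Hello'
print(encoding.char_to_token(6))             # index of the token covering 'world'
```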
+ """ + + if not self._encodings: + raise ValueError("token_to_chars() is not available when using Python based tokenizers") + if token_index is not None: + batch_index = batch_or_token_index + else: + batch_index = 0 + token_index = batch_or_token_index + span_indices = self._encodings[batch_index].token_to_chars(token_index) + + return CharSpan(*span_indices) if span_indices is not None else None + + def char_to_token( + self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0 + ) -> int: + """ + Get the index of the token in the encoded output comprising a character in the original string for a sequence + of the batch. + + Can be called as: + + - `self.char_to_token(char_index)` if batch size is 1 + - `self.char_to_token(batch_index, char_index)` if batch size is greater or equal to 1 + + This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words + are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized + words. + + Args: + batch_or_char_index (`int`): + Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of + the word in the sequence + char_index (`int`, *optional*): + If a batch index is provided in *batch_or_token_index*, this can be the index of the word in the + sequence. + sequence_index (`int`, *optional*, defaults to 0): + If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 + or 1) the provided character index belongs to. + + + Returns: + `int`: Index of the token. + """ + + if not self._encodings: + raise ValueError("char_to_token() is not available when using Python based tokenizers") + if char_index is not None: + batch_index = batch_or_char_index + else: + batch_index = 0 + char_index = batch_or_char_index + return self._encodings[batch_index].char_to_token(char_index, sequence_index) + + def word_to_chars( + self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0 + ) -> CharSpan: + """ + Get the character span in the original string corresponding to given word in a sequence of the batch. + + Character spans are returned as a CharSpan NamedTuple with: + + - start: index of the first character in the original string + - end: index of the character following the last character in the original string + + Can be called as: + + - `self.word_to_chars(word_index)` if batch size is 1 + - `self.word_to_chars(batch_index, word_index)` if batch size is greater or equal to 1 + + Args: + batch_or_word_index (`int`): + Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of + the word in the sequence + word_index (`int`, *optional*): + If a batch index is provided in *batch_or_token_index*, this can be the index of the word in the + sequence. + sequence_index (`int`, *optional*, defaults to 0): + If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 + or 1) the provided word index belongs to. + + Returns: + `CharSpan` or `List[CharSpan]`: Span(s) of the associated character or characters in the string. 
CharSpan + are NamedTuple with: + + - start: index of the first character associated to the token in the original string + - end: index of the character following the last character associated to the token in the original + string + """ + + if not self._encodings: + raise ValueError("word_to_chars() is not available when using Python based tokenizers") + if word_index is not None: + batch_index = batch_or_word_index + else: + batch_index = 0 + word_index = batch_or_word_index + return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index, sequence_index))) + + def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0) -> int: + """ + Get the word in the original string corresponding to a character in the original string of a sequence of the + batch. + + Can be called as: + + - `self.char_to_word(char_index)` if batch size is 1 + - `self.char_to_word(batch_index, char_index)` if batch size is greater than 1 + + This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words + are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized + words. + + Args: + batch_or_char_index (`int`): + Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of + the character in the original string. + char_index (`int`, *optional*): + If a batch index is provided in *batch_or_token_index*, this can be the index of the character in the + original string. + sequence_index (`int`, *optional*, defaults to 0): + If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 + or 1) the provided character index belongs to. + + + Returns: + `int` or `List[int]`: Index or indices of the associated encoded token(s). + """ + + if not self._encodings: + raise ValueError("char_to_word() is not available when using Python based tokenizers") + if char_index is not None: + batch_index = batch_or_char_index + else: + batch_index = 0 + char_index = batch_or_char_index + return self._encodings[batch_index].char_to_word(char_index, sequence_index) + + def convert_to_tensors( + self, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False + ): + """ + Convert the inner content to tensors. + + Args: + tensor_type (`str` or [`~utils.TensorType`], *optional*): + The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If + `None`, no modification is done. + prepend_batch_axis (`int`, *optional*, defaults to `False`): + Whether or not to add the batch dimension during the conversion. + """ + if tensor_type is None: + return self + + # Convert to TensorType + if not isinstance(tensor_type, TensorType): + tensor_type = TensorType(tensor_type) + + # Get a function reference for the correct framework + if tensor_type == TensorType.TENSORFLOW: + if not is_tf_available(): + raise ImportError( + "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." 
+ ) + import tensorflow as tf + + as_tensor = tf.constant + is_tensor = tf.is_tensor + elif tensor_type == TensorType.PYTORCH: + if not is_torch_available(): + raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") + import torch + + is_tensor = torch.is_tensor + + def as_tensor(value, dtype=None): + if isinstance(value, list) and isinstance(value[0], np.ndarray): + return torch.tensor(np.array(value)) + return torch.tensor(value) + + elif tensor_type == TensorType.JAX: + if not is_flax_available(): + raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.") + import jax.numpy as jnp # noqa: F811 + + as_tensor = jnp.array + is_tensor = is_jax_tensor + + elif tensor_type == TensorType.MLX: + if not is_mlx_available(): + raise ImportError("Unable to convert output to MLX tensors format, MLX is not installed.") + import mlx.core as mx + + as_tensor = mx.array + + def is_tensor(obj): + return isinstance(obj, mx.array) + else: + + def as_tensor(value, dtype=None): + if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)): + value_lens = [len(val) for val in value] + if len(set(value_lens)) > 1 and dtype is None: + # we have a ragged list so handle explicitly + value = as_tensor([np.asarray(val) for val in value], dtype=object) + return np.asarray(value, dtype=dtype) + + is_tensor = is_numpy_array + + # Do the tensor conversion in batch + for key, value in self.items(): + try: + if prepend_batch_axis: + value = [value] + + if not is_tensor(value): + tensor = as_tensor(value) + + # Removing this for now in favor of controlling the shape with `prepend_batch_axis` + # # at-least2d + # if tensor.ndim > 2: + # tensor = tensor.squeeze(0) + # elif tensor.ndim < 2: + # tensor = tensor[None, :] + + self[key] = tensor + except Exception as e: + if key == "overflowing_tokens": + raise ValueError( + "Unable to create tensor returning overflowing tokens of different lengths. " + "Please see if a fast version of this tokenizer is available to have this feature available." + ) from e + raise ValueError( + "Unable to create tensor, you should probably activate truncation and/or padding with" + " 'padding=True' 'truncation=True' to have batched tensors with the same length. Perhaps your" + f" features (`{key}` in this case) have excessive nesting (inputs type `list` where type `int` is" + " expected)." + ) from e + + return self + + def to(self, device: Union[str, "torch.device"]) -> "BatchEncoding": + """ + Send all values to device by calling `v.to(device)` (PyTorch only). + + Args: + device (`str` or `torch.device`): The device to put the tensors on. + + Returns: + [`BatchEncoding`]: The same instance after modification. + """ + requires_backends(self, ["torch"]) + + # This check catches things like APEX blindly calling "to" on all inputs to a module + # Otherwise it passes the casts down and casts the LongTensor containing the token idxs + # into a HalfTensor + if isinstance(device, str) or is_torch_device(device) or isinstance(device, int): + self.data = {k: v.to(device=device) for k, v in self.data.items()} + else: + logger.warning(f"Attempting to cast a BatchEncoding to type {str(device)}. This is not supported.") + return self + + +class SpecialTokensMixin: + """ + A mixin derived by [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] to handle specific behaviors related to + special tokens. 
In particular, this class hold the attributes which can be used to directly access these special + tokens in a model-independent manner and allow to set and update the special tokens. + + Args: + bos_token (`str` or `tokenizers.AddedToken`, *optional*): + A special token representing the beginning of a sentence. + eos_token (`str` or `tokenizers.AddedToken`, *optional*): + A special token representing the end of a sentence. + unk_token (`str` or `tokenizers.AddedToken`, *optional*): + A special token representing an out-of-vocabulary token. + sep_token (`str` or `tokenizers.AddedToken`, *optional*): + A special token separating two different sentences in the same input (used by BERT for instance). + pad_token (`str` or `tokenizers.AddedToken`, *optional*): + A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by + attention mechanisms or loss computation. + cls_token (`str` or `tokenizers.AddedToken`, *optional*): + A special token representing the class of the input (used by BERT for instance). + mask_token (`str` or `tokenizers.AddedToken`, *optional*): + A special token representing a masked token (used by masked-language modeling pretraining objectives, like + BERT). + additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*): + A tuple or a list of additional tokens, which will be marked as `special`, meaning that they will be + skipped when decoding if `skip_special_tokens` is set to `True`. + """ + + SPECIAL_TOKENS_ATTRIBUTES = [ + "bos_token", + "eos_token", + "unk_token", + "sep_token", + "pad_token", + "cls_token", + "mask_token", + "additional_special_tokens", + ] + + def __init__(self, verbose=False, **kwargs): + self._bos_token = None + self._eos_token = None + self._unk_token = None + self._sep_token = None + self._pad_token = None + self._cls_token = None + self._mask_token = None + self._pad_token_type_id = 0 + self._additional_special_tokens = [] + self.verbose = verbose + + # We directly set the hidden value to allow initialization with special tokens + # which are not yet in the vocabulary. Necessary for serialization/de-serialization + # TODO clean this up at some point (probably by switching to fast tokenizers) + + for key, value in kwargs.items(): + if value is None: + continue + if key in self.SPECIAL_TOKENS_ATTRIBUTES: + if key == "additional_special_tokens": + assert isinstance(value, (list, tuple)), f"Value {value} is not a list or tuple" + assert all( + isinstance(t, (str, AddedToken)) for t in value + ), "One of the tokens is not a string or an AddedToken" + setattr(self, key, value) + elif isinstance(value, (str, AddedToken)): + setattr(self, key, value) + else: + raise TypeError(f"Special token {key} has to be either str or AddedToken but got: {type(value)}") + + def sanitize_special_tokens(self) -> int: + """ + The `sanitize_special_tokens` is now deprecated kept for backward compatibility and will be removed in + transformers v5. + """ + logger.warning_once("The `sanitize_special_tokens` will be removed in transformers v5.") + return self.add_tokens(self.all_special_tokens_extended, special_tokens=True) + + def add_special_tokens( + self, special_tokens_dict: Dict[str, Union[str, AddedToken]], replace_additional_special_tokens=True + ) -> int: + """ + Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. 
If + special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the + current vocabulary). + + When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the + model so that its embedding matrix matches the tokenizer. + + In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method. + + Using `add_special_tokens` will ensure your special tokens can be used in several ways: + + - Special tokens can be skipped when decoding using `skip_special_tokens = True`. + - Special tokens are carefully handled by the tokenizer (they are never split), similar to `AddedTokens`. + - You can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This + makes it easy to develop model-agnostic training and fine-tuning scripts. + + When possible, special tokens are already registered for provided pretrained models (for instance + [`BertTokenizer`] `cls_token` is already registered to be :obj*'[CLS]'* and XLM's one is also registered to be + `''`). + + Args: + special_tokens_dict (dictionary *str* to *str* or `tokenizers.AddedToken`): + Keys should be in the list of predefined special attributes: [`bos_token`, `eos_token`, `unk_token`, + `sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`]. + + Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer + assign the index of the `unk_token` to them). + replace_additional_special_tokens (`bool`, *optional*,, defaults to `True`): + If `True`, the existing list of additional special tokens will be replaced by the list provided in + `special_tokens_dict`. Otherwise, `self._additional_special_tokens` is just extended. In the former + case, the tokens will NOT be removed from the tokenizer's full vocabulary - they are only being flagged + as non-special tokens. Remember, this only affects which tokens are skipped during decoding, not the + `added_tokens_encoder` and `added_tokens_decoder`. This means that the previous + `additional_special_tokens` are still added tokens, and will not be split by the model. + + Returns: + `int`: Number of tokens added to the vocabulary. + + Examples: + + ```python + # Let's see how to add a new classification token to GPT-2 + tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") + model = GPT2Model.from_pretrained("openai-community/gpt2") + + special_tokens_dict = {"cls_token": ""} + + num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) + print("We have added", num_added_toks, "tokens") + # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. + model.resize_token_embeddings(len(tokenizer)) + + assert tokenizer.cls_token == "" + ```""" + if not special_tokens_dict: + return 0 + + added_tokens = [] + for key, value in special_tokens_dict.items(): + assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token" + + if self.verbose: + logger.info(f"Assigning {value} to the {key} key of the tokenizer") + + if key == "additional_special_tokens": + assert isinstance(value, (list, tuple)) and all( + isinstance(t, (str, AddedToken)) for t in value + ), f"Tokens {value} for key {key} should all be str or AddedToken instances" + + to_add = [] + for token in value: + if isinstance(token, str): + # for legacy purpose we default to stripping. 
`test_add_tokens_tokenizer` depends on this + token = AddedToken(token, rstrip=False, lstrip=False, normalized=False, special=True) + if not replace_additional_special_tokens and str(token) in self.additional_special_tokens: + continue + to_add.append(token) + if replace_additional_special_tokens and len(to_add) > 0: + setattr(self, key, list(to_add)) + else: + self._additional_special_tokens.extend(to_add) + added_tokens += to_add + + else: + if not isinstance(value, (str, AddedToken)): + raise ValueError(f"Token {value} for key {key} should be a str or an AddedToken instance") + if isinstance(value, (str)): + # for legacy purpose we default to stripping. `False` depends on this + value = AddedToken(value, rstrip=False, lstrip=False, normalized=False, special=True) + if isinstance(value, AddedToken): + setattr(self, key, value) + if value not in added_tokens: + added_tokens.append(value) + + # if we are adding tokens that were not part of the vocab, we ought to add them + added_tokens = self.add_tokens(added_tokens, special_tokens=True) + return added_tokens + + def add_tokens( + self, new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]], special_tokens: bool = False + ) -> int: + """ + Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to + it with indices starting from length of the current vocabulary and and will be isolated before the tokenization + algorithm is applied. Added tokens and tokens from the vocabulary of the tokenization algorithm are therefore + not treated in the same way. + + Note, when adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix + of the model so that its embedding matrix matches the tokenizer. + + In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method. + + Args: + new_tokens (`str`, `tokenizers.AddedToken` or a list of *str* or `tokenizers.AddedToken`): + Tokens are only added if they are not already in the vocabulary. `tokenizers.AddedToken` wraps a string + token to let you personalize its behavior: whether this token should only match against a single word, + whether this token should strip all potential whitespaces on the left side, whether this token should + strip all potential whitespaces on the right side, etc. + special_tokens (`bool`, *optional*, defaults to `False`): + Can be used to specify if the token is a special token. This mostly change the normalization behavior + (special tokens like CLS or [MASK] are usually not lower-cased for instance). + + See details for `tokenizers.AddedToken` in HuggingFace tokenizers library. + + Returns: + `int`: Number of tokens added to the vocabulary. + + Examples: + + ```python + # Let's see how to increase the vocabulary of Bert model and tokenizer + tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased") + model = BertModel.from_pretrained("google-bert/bert-base-uncased") + + num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"]) + print("We have added", num_added_toks, "tokens") + # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. 
+ model.resize_token_embeddings(len(tokenizer)) + ```""" + if not new_tokens: + return 0 + + if not isinstance(new_tokens, (list, tuple)): + new_tokens = [new_tokens] + + return self._add_tokens(new_tokens, special_tokens=special_tokens) + + def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int: + raise NotImplementedError + + @property + def bos_token(self) -> str: + """ + `str`: Beginning of sentence token. Log an error if used while not having been set. + """ + if self._bos_token is None: + if self.verbose: + logger.error("Using bos_token, but it is not set yet.") + return None + return str(self._bos_token) + + @property + def eos_token(self) -> str: + """ + `str`: End of sentence token. Log an error if used while not having been set. + """ + if self._eos_token is None: + if self.verbose: + logger.error("Using eos_token, but it is not set yet.") + return None + return str(self._eos_token) + + @property + def unk_token(self) -> str: + """ + `str`: Unknown token. Log an error if used while not having been set. + """ + if self._unk_token is None: + if self.verbose: + logger.error("Using unk_token, but it is not set yet.") + return None + return str(self._unk_token) + + @property + def sep_token(self) -> str: + """ + `str`: Separation token, to separate context and query in an input sequence. Log an error if used while not + having been set. + """ + if self._sep_token is None: + if self.verbose: + logger.error("Using sep_token, but it is not set yet.") + return None + return str(self._sep_token) + + @property + def pad_token(self) -> str: + """ + `str`: Padding token. Log an error if used while not having been set. + """ + if self._pad_token is None: + if self.verbose: + logger.error("Using pad_token, but it is not set yet.") + return None + return str(self._pad_token) + + @property + def cls_token(self) -> str: + """ + `str`: Classification token, to extract a summary of an input sequence leveraging self-attention along the full + depth of the model. Log an error if used while not having been set. + """ + if self._cls_token is None: + if self.verbose: + logger.error("Using cls_token, but it is not set yet.") + return None + return str(self._cls_token) + + @property + def mask_token(self) -> str: + """ + `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not + having been set. + """ + if self._mask_token is None: + if self.verbose: + logger.error("Using mask_token, but it is not set yet.") + return None + return str(self._mask_token) + + @property + def additional_special_tokens(self) -> List[str]: + """ + `List[str]`: All the additional special tokens you may want to use. Log an error if used while not having been + set. 
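As a rough usage sketch of this property (the checkpoint name and the `<obs>`/`<act>` token strings are only illustrative, not part of the library):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
# Register extra tokens as additional special tokens; they will be skipped when
# decoding with skip_special_tokens=True.
tokenizer.add_special_tokens({"additional_special_tokens": ["<obs>", "<act>"]})
print(tokenizer.additional_special_tokens)      # ['<obs>', '<act>']
print(tokenizer.additional_special_tokens_ids)  # ids assigned at the end of the vocabulary
```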
+ """ + if self._additional_special_tokens is None: + if self.verbose: + logger.error("Using additional_special_tokens, but it is not set yet.") + return None + return [str(tok) for tok in self._additional_special_tokens] + + @bos_token.setter + def bos_token(self, value): + if not isinstance(value, (str, AddedToken)) and value is not None: + raise ValueError("Cannot set a non-string value as the BOS token") + self._bos_token = value + + @eos_token.setter + def eos_token(self, value): + if not isinstance(value, (str, AddedToken)) and value is not None: + raise ValueError("Cannot set a non-string value as the EOS token") + self._eos_token = value + + @unk_token.setter + def unk_token(self, value): + if not isinstance(value, (str, AddedToken)) and value is not None: + raise ValueError("Cannot set a non-string value as the UNK token") + self._unk_token = value + + @sep_token.setter + def sep_token(self, value): + if not isinstance(value, (str, AddedToken)) and value is not None: + raise ValueError("Cannot set a non-string value as the SEP token") + self._sep_token = value + + @pad_token.setter + def pad_token(self, value): + if not isinstance(value, (str, AddedToken)) and value is not None: + raise ValueError("Cannot set a non-string value as the PAD token") + self._pad_token = value + + @cls_token.setter + def cls_token(self, value): + if not isinstance(value, (str, AddedToken)) and value is not None: + raise ValueError("Cannot set a non-string value as the CLS token") + self._cls_token = value + + @mask_token.setter + def mask_token(self, value): + if not isinstance(value, (str, AddedToken)) and value is not None: + raise ValueError("Cannot set a non-string value as the MASK token") + self._mask_token = value + + @additional_special_tokens.setter + def additional_special_tokens(self, value): + self._additional_special_tokens = value if value is not None else None + + @property + def bos_token_id(self) -> Optional[int]: + """ + `Optional[int]`: Id of the beginning of sentence token in the vocabulary. Returns `None` if the token has not + been set. + """ + if self._bos_token is None: + return None + return self.convert_tokens_to_ids(self.bos_token) + + @property + def eos_token_id(self) -> Optional[int]: + """ + `Optional[int]`: Id of the end of sentence token in the vocabulary. Returns `None` if the token has not been + set. + """ + if self._eos_token is None: + return None + return self.convert_tokens_to_ids(self.eos_token) + + @property + def unk_token_id(self) -> Optional[int]: + """ + `Optional[int]`: Id of the unknown token in the vocabulary. Returns `None` if the token has not been set. + """ + if self._unk_token is None: + return None + return self.convert_tokens_to_ids(self.unk_token) + + @property + def sep_token_id(self) -> Optional[int]: + """ + `Optional[int]`: Id of the separation token in the vocabulary, to separate context and query in an input + sequence. Returns `None` if the token has not been set. + """ + if self._sep_token is None: + return None + return self.convert_tokens_to_ids(self.sep_token) + + @property + def pad_token_id(self) -> Optional[int]: + """ + `Optional[int]`: Id of the padding token in the vocabulary. Returns `None` if the token has not been set. + """ + if self._pad_token is None: + return None + return self.convert_tokens_to_ids(self.pad_token) + + @property + def pad_token_type_id(self) -> int: + """ + `int`: Id of the padding token type in the vocabulary. 
+ """ + return self._pad_token_type_id + + @property + def cls_token_id(self) -> Optional[int]: + """ + `Optional[int]`: Id of the classification token in the vocabulary, to extract a summary of an input sequence + leveraging self-attention along the full depth of the model. + + Returns `None` if the token has not been set. + """ + if self._cls_token is None: + return None + return self.convert_tokens_to_ids(self.cls_token) + + @property + def mask_token_id(self) -> Optional[int]: + """ + `Optional[int]`: Id of the mask token in the vocabulary, used when training a model with masked-language + modeling. Returns `None` if the token has not been set. + """ + if self._mask_token is None: + return None + return self.convert_tokens_to_ids(self.mask_token) + + @property + def additional_special_tokens_ids(self) -> List[int]: + """ + `List[int]`: Ids of all the additional special tokens in the vocabulary. Log an error if used while not having + been set. + """ + return self.convert_tokens_to_ids(self.additional_special_tokens) + + @bos_token_id.setter + def bos_token_id(self, value): + self._bos_token = self.convert_ids_to_tokens(value) if value is not None else None + + @eos_token_id.setter + def eos_token_id(self, value): + self._eos_token = self.convert_ids_to_tokens(value) if value is not None else None + + @unk_token_id.setter + def unk_token_id(self, value): + self._unk_token = self.convert_ids_to_tokens(value) if value is not None else None + + @sep_token_id.setter + def sep_token_id(self, value): + self._sep_token = self.convert_ids_to_tokens(value) if value is not None else None + + @pad_token_id.setter + def pad_token_id(self, value): + self._pad_token = self.convert_ids_to_tokens(value) if value is not None else None + + @cls_token_id.setter + def cls_token_id(self, value): + self._cls_token = self.convert_ids_to_tokens(value) if value is not None else None + + @mask_token_id.setter + def mask_token_id(self, value): + self._mask_token = self.convert_ids_to_tokens(value) if value is not None else None + + @additional_special_tokens_ids.setter + def additional_special_tokens_ids(self, values): + self._additional_special_tokens = [self.convert_ids_to_tokens(value) for value in values] + + @property + def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]: + """ + `Dict[str, Union[str, List[str]]]`: A dictionary mapping special token class attributes (`cls_token`, + `unk_token`, etc.) to their values (`''`, `''`, etc.). + + Convert potential tokens of `tokenizers.AddedToken` type to string. + """ + set_attr = {} + for attr in self.SPECIAL_TOKENS_ATTRIBUTES: + attr_value = getattr(self, attr) + if attr_value: + set_attr[attr] = attr_value + return set_attr + + @property + def special_tokens_map_extended(self) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]: + """ + `Dict[str, Union[str, tokenizers.AddedToken, List[Union[str, tokenizers.AddedToken]]]]`: A dictionary mapping + special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`''`, `''`, etc.). + + Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how + special tokens are tokenized. 
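A small sketch of the difference between the two views (the checkpoint name is illustrative; depending on how the tokenizer was saved, the extended value may be a plain string rather than an `AddedToken`):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
print(tokenizer.special_tokens_map["mask_token"])  # '[MASK]' as a plain string
# The extended map keeps the original object, so lstrip/rstrip/normalized flags
# survive when the stored value is an AddedToken.
print(repr(tokenizer.special_tokens_map_extended["mask_token"]))
```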
+ """ + set_attr = {} + for attr in self.SPECIAL_TOKENS_ATTRIBUTES: + attr_value = getattr(self, "_" + attr) + if attr_value: + set_attr[attr] = attr_value + return set_attr + + @property + def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]: + """ + `List[Union[str, tokenizers.AddedToken]]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.), the order has + nothing to do with the index of each token. If you want to know the correct indices, check + `self.added_tokens_encoder`. We can't create an order anymore as the keys are `AddedTokens` and not `Strings`. + + Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how + special tokens are tokenized. + """ + all_tokens = [] + seen = set() + for value in self.special_tokens_map_extended.values(): + if isinstance(value, (list, tuple)): + tokens_to_add = [token for token in value if str(token) not in seen] + else: + tokens_to_add = [value] if str(value) not in seen else [] + seen.update(map(str, tokens_to_add)) + all_tokens.extend(tokens_to_add) + return all_tokens + + @property + def all_special_tokens(self) -> List[str]: + """ + `List[str]`: A list of the unique special tokens (`'<unk>'`, `'<cls>'`, ..., etc.). + + Convert tokens of `tokenizers.AddedToken` type to string. + """ + all_toks = [str(s) for s in self.all_special_tokens_extended] + return all_toks + + @property + def all_special_ids(self) -> List[int]: + """ + `List[int]`: List the ids of the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes. + """ + all_toks = self.all_special_tokens + all_ids = self.convert_tokens_to_ids(all_toks) + return all_ids + + +ENCODE_KWARGS_DOCSTRING = r""" + add_special_tokens (`bool`, *optional*, defaults to `True`): + Whether or not to add special tokens when encoding the sequences. This will use the underlying + `PreTrainedTokenizerBase.build_inputs_with_special_tokens` function, which defines which tokens are + automatically added to the input ids. This is useful if you want to add `bos` or `eos` tokens + automatically. + padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): + Activates and controls padding. Accepts the following values: + + - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single + sequence is provided). + - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum + acceptable input length for the model if that argument is not provided. + - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different + lengths). + truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): + Activates and controls truncation. Accepts the following values: + + - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or + to the maximum acceptable input length for the model if that argument is not provided. This will + truncate token by token, removing a token from the longest sequence in the pair if a pair of + sequences (or a batch of pairs) is provided. + - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the + maximum acceptable input length for the model if that argument is not provided. This will only + truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the + maximum acceptable input length for the model if that argument is not provided. This will only + truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths + greater than the model maximum admissible input size). + max_length (`int`, *optional*): + Controls the maximum length to use by one of the truncation/padding parameters. + + If left unset or set to `None`, this will use the predefined model maximum length if a maximum length + is required by one of the truncation/padding parameters. If the model has no specific maximum input + length (like XLNet) truncation/padding to a maximum length will be deactivated. + stride (`int`, *optional*, defaults to 0): + If set to a number along with `max_length`, the overflowing tokens returned when + `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence + returned to provide some overlap between truncated and overflowing sequences. The value of this + argument defines the number of overlapping tokens. + is_split_into_words (`bool`, *optional*, defaults to `False`): + Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the + tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) + which it will tokenize. This is useful for NER or token classification. + pad_to_multiple_of (`int`, *optional*): + If set will pad the sequence to a multiple of the provided value. Requires `padding` to be activated. + This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability + `>= 7.5` (Volta). + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors instead of list of python integers. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return Numpy `np.ndarray` objects. +""" + +ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" + return_token_type_ids (`bool`, *optional*): + Whether to return token type IDs. If left to the default, will return the token type IDs according to + the specific tokenizer's default, defined by the `return_outputs` attribute. + + [What are token type IDs?](../glossary#token-type-ids) + return_attention_mask (`bool`, *optional*): + Whether to return the attention mask. If left to the default, will return the attention mask according + to the specific tokenizer's default, defined by the `return_outputs` attribute. + + [What are attention masks?](../glossary#attention-mask) + return_overflowing_tokens (`bool`, *optional*, defaults to `False`): + Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch + of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead + of returning overflowing tokens. + return_special_tokens_mask (`bool`, *optional*, defaults to `False`): + Whether or not to return special tokens mask information. + return_offsets_mapping (`bool`, *optional*, defaults to `False`): + Whether or not to return `(char_start, char_end)` for each token. + + This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using + Python's tokenizer, this method will raise `NotImplementedError`. 
+ return_length (`bool`, *optional*, defaults to `False`): + Whether or not to return the lengths of the encoded inputs. + verbose (`bool`, *optional*, defaults to `True`): + Whether or not to print more information and warnings. + **kwargs: passed to the `self.tokenize()` method + + Return: + [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: + + - **input_ids** -- List of token ids to be fed to a model. + + [What are input IDs?](../glossary#input-ids) + + - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or + if *"token_type_ids"* is in `self.model_input_names`). + + [What are token type IDs?](../glossary#token-type-ids) + + - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when + `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). + + [What are attention masks?](../glossary#attention-mask) + + - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and + `return_overflowing_tokens=True`). + - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and + `return_overflowing_tokens=True`). + - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying + regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`). + - **length** -- The length of the inputs (when `return_length=True`) +""" + + +INIT_TOKENIZER_DOCSTRING = r""" + Class attributes (overridden by derived classes) + + - **vocab_files_names** (`Dict[str, str]`) -- A dictionary with, as keys, the `__init__` keyword name of each + vocabulary file required by the model, and as associated values, the filename for saving the associated file + (string). + - **pretrained_vocab_files_map** (`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the + high-level keys being the `__init__` keyword name of each vocabulary file required by the model, the + low-level being the `short-cut-names` of the pretrained models with, as associated values, the `url` to the + associated pretrained vocabulary file. + - **model_input_names** (`List[str]`) -- A list of inputs expected in the forward pass of the model. + - **padding_side** (`str`) -- The default value for the side on which the model should have padding applied. + Should be `'right'` or `'left'`. + - **truncation_side** (`str`) -- The default value for the side on which the model should have truncation + applied. Should be `'right'` or `'left'`. + + Args: + model_max_length (`int`, *optional*): + The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is + loaded with [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`], this will be set to the + value stored for the associated model in `max_model_input_sizes` (see above). If no value is provided, will + default to VERY_LARGE_INTEGER (`int(1e30)`). + padding_side (`str`, *optional*): + The side on which the model should have padding applied. Should be selected between ['right', 'left']. + Default value is picked from the class attribute of the same name. + truncation_side (`str`, *optional*): + The side on which the model should have truncation applied. Should be selected between ['right', 'left']. + Default value is picked from the class attribute of the same name. 
+ chat_template (`str`, *optional*): + A Jinja template string that will be used to format lists of chat messages. See + https://huggingface.co/docs/transformers/chat_templating for a full description. + model_input_names (`List[str]`, *optional*): + The list of inputs accepted by the forward pass of the model (like `"token_type_ids"` or + `"attention_mask"`). Default value is picked from the class attribute of the same name. + bos_token (`str` or `tokenizers.AddedToken`, *optional*): + A special token representing the beginning of a sentence. Will be associated to `self.bos_token` and + `self.bos_token_id`. + eos_token (`str` or `tokenizers.AddedToken`, *optional*): + A special token representing the end of a sentence. Will be associated to `self.eos_token` and + `self.eos_token_id`. + unk_token (`str` or `tokenizers.AddedToken`, *optional*): + A special token representing an out-of-vocabulary token. Will be associated to `self.unk_token` and + `self.unk_token_id`. + sep_token (`str` or `tokenizers.AddedToken`, *optional*): + A special token separating two different sentences in the same input (used by BERT for instance). Will be + associated to `self.sep_token` and `self.sep_token_id`. + pad_token (`str` or `tokenizers.AddedToken`, *optional*): + A special token used to make arrays of tokens the same size for batching purposes. Will then be ignored by + attention mechanisms or loss computation. Will be associated to `self.pad_token` and `self.pad_token_id`. + cls_token (`str` or `tokenizers.AddedToken`, *optional*): + A special token representing the class of the input (used by BERT for instance). Will be associated to + `self.cls_token` and `self.cls_token_id`. + mask_token (`str` or `tokenizers.AddedToken`, *optional*): + A special token representing a masked token (used by masked-language modeling pretraining objectives, like + BERT). Will be associated to `self.mask_token` and `self.mask_token_id`. + additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*): + A tuple or a list of additional special tokens. Add them here to ensure they are skipped when decoding if + `skip_special_tokens` is set to `True`. If they are not part of the vocabulary, they will be added at the end + of the vocabulary. + clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): + Whether or not the model should clean up the spaces that were added when splitting the input text during the + tokenization process. + split_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the special tokens should be split during the tokenization process. The default behavior is + to not split special tokens. This means that if `<s>` is the `bos_token`, then `tokenizer.tokenize("<s>") = + ['<s>']`. Otherwise, if `split_special_tokens=True`, then `tokenizer.tokenize("<s>")` will give `['<', + 's', '>']`. This argument is only supported for `slow` tokenizers for the moment. +""" + + +@add_end_docstrings(INIT_TOKENIZER_DOCSTRING) +class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin): + """ + Base class for [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`]. + + Handles shared (mostly boilerplate) methods for those two classes.
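In practice this base class is not instantiated directly; a concrete subclass is loaded instead, for example (the checkpoint name is illustrative):

```python
from transformers import AutoTokenizer, PreTrainedTokenizerBase

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
# Both slow and fast tokenizers derive from PreTrainedTokenizerBase and share the
# padding, truncation and serialization behavior defined here.
assert isinstance(tokenizer, PreTrainedTokenizerBase)
print(tokenizer.padding_side, tokenizer.model_max_length)
```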
+ """ + + vocab_files_names: Dict[str, str] = {} + pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {} + _auto_class: Optional[str] = None + + # first name has to correspond to main model input name + # to make sure `tokenizer.pad(...)` works correctly + model_input_names: List[str] = ["input_ids", "token_type_ids", "attention_mask"] + padding_side: str = "right" + truncation_side: str = "right" + slow_tokenizer_class = None + + def __init__(self, **kwargs): + # inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``) + self.init_inputs = () + self.init_kwargs = copy.deepcopy(kwargs) + self.name_or_path = kwargs.pop("name_or_path", "") + self._processor_class = kwargs.pop("processor_class", None) + + # For backward compatibility we fallback to set model_max_length from max_len if provided + model_max_length = kwargs.pop("model_max_length", kwargs.pop("max_len", None)) + self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER + + # Padding and truncation side are right by default and overridden in subclasses. If specified in the kwargs, it + # is changed. + self.padding_side = kwargs.pop("padding_side", self.padding_side) + if self.padding_side not in ["right", "left"]: + raise ValueError( + f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}" + ) + + self.truncation_side = kwargs.pop("truncation_side", self.truncation_side) + if self.truncation_side not in ["right", "left"]: + raise ValueError( + f"Truncation side should be selected between 'right' and 'left', current value: {self.truncation_side}" + ) + + self.model_input_names = kwargs.pop("model_input_names", self.model_input_names) + + # By default, cleaning tokenization spaces for both fast and slow tokenizers + self.clean_up_tokenization_spaces = kwargs.pop("clean_up_tokenization_spaces", True) + + # By default, do not split special tokens for both fast and slow tokenizers + self.split_special_tokens = kwargs.pop("split_special_tokens", False) + + self.deprecation_warnings = {} # Use to store when we have already noticed a deprecation warning (avoid overlogging). + self._in_target_context_manager = False + + # Stores a Jinja template that formats chat histories into tokenizable strings + self.chat_template = kwargs.pop("chat_template", None) + if isinstance(self.chat_template, (list, tuple)): + # Chat templates are stored as lists of dicts with fixed key names, + # we reconstruct that into a single dict while loading them. + self.chat_template = {template["name"]: template["template"] for template in self.chat_template} + + super().__init__(**kwargs) + + @property + def max_len_single_sentence(self) -> int: + """ + `int`: The maximum length of a sentence that can be fed to the model. + """ + return self.model_max_length - self.num_special_tokens_to_add(pair=False) + + @property + def max_len_sentences_pair(self) -> int: + """ + `int`: The maximum combined length of a pair of sentences that can be fed to the model. + """ + return self.model_max_length - self.num_special_tokens_to_add(pair=True) + + @max_len_single_sentence.setter + def max_len_single_sentence(self, value) -> int: + # For backward compatibility, allow to try to setup 'max_len_single_sentence'. + if value == self.model_max_length - self.num_special_tokens_to_add(pair=False) and self.verbose: + if not self.deprecation_warnings.get("max_len_single_sentence", False): + logger.warning( + "Setting 'max_len_single_sentence' is now deprecated. 
This value is automatically set up." + ) + self.deprecation_warnings["max_len_single_sentence"] = True + else: + raise ValueError( + "Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up." + ) + + @max_len_sentences_pair.setter + def max_len_sentences_pair(self, value) -> int: + # For backward compatibility, allow to try to setup 'max_len_sentences_pair'. + if value == self.model_max_length - self.num_special_tokens_to_add(pair=True) and self.verbose: + if not self.deprecation_warnings.get("max_len_sentences_pair", False): + logger.warning( + "Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up." + ) + self.deprecation_warnings["max_len_sentences_pair"] = True + else: + raise ValueError("Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.") + + def _set_processor_class(self, processor_class: str): + """Sets processor class as an attribute.""" + self._processor_class = processor_class + + @property + def added_tokens_decoder(self) -> Dict[int, AddedToken]: + raise NotImplementedError() + + def __repr__(self) -> str: + added_tokens_decoder_rep = "\n\t".join([f"{k}: {v.__repr__()}," for k, v in self.added_tokens_decoder.items()]) + return ( + f"{self.__class__.__name__}(name_or_path='{self.name_or_path}'," + f" vocab_size={self.vocab_size}, model_max_length={self.model_max_length}, is_fast={self.is_fast}," + f" padding_side='{self.padding_side}', truncation_side='{self.truncation_side}'," + f" special_tokens={self.special_tokens_map}, clean_up_tokenization_spaces={self.clean_up_tokenization_spaces}), " + " added_tokens_decoder={\n\t" + added_tokens_decoder_rep + "\n}" + ) + + def __len__(self) -> int: + raise NotImplementedError() + + def get_vocab(self) -> Dict[str, int]: + """ + Returns the vocabulary as a dictionary of token to index. + + `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the + vocab. + + Returns: + `Dict[str, int]`: The vocabulary. + """ + raise NotImplementedError() + + def apply_chat_template( + self, + conversation: Union[List[Dict[str, str]], List[List[Dict[str, str]]], "Conversation"], + chat_template: Optional[str] = None, + add_generation_prompt: bool = False, + tokenize: bool = True, + padding: bool = False, + truncation: bool = False, + max_length: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_dict: bool = False, + tokenizer_kwargs: Optional[Dict[str, Any]] = None, + **kwargs, + ) -> Union[str, List[int], List[str], List[List[int]], BatchEncoding]: + """ + Converts a list of dictionaries with `"role"` and `"content"` keys to a list of token + ids. This method is intended for use with chat models, and will read the tokenizer's chat_template attribute to + determine the format and control tokens to use when converting. When chat_template is None, it will fall back + to the default_chat_template specified at the class level. + + Args: + conversation (Union[List[Dict[str, str]], List[List[Dict[str, str]]], "Conversation"]): A list of dicts + with "role" and "content" keys, representing the chat history so far. + chat_template (str, *optional*): A Jinja template to use for this conversion. If + this is not passed, the model's default chat template will be used instead. + add_generation_prompt (bool, *optional*): Whether to end the prompt with the token(s) that indicate + the start of an assistant message. 
This is useful when you want to generate a response from the model. + Note that this argument will be passed to the chat template, and so it must be supported in the + template for this argument to have any effect. + tokenize (`bool`, defaults to `True`): + Whether to tokenize the output. If `False`, the output will be a string. + padding (`bool`, defaults to `False`): + Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`. + truncation (`bool`, defaults to `False`): + Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`. + max_length (`int`, *optional*): + Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If + not specified, the tokenizer's `max_length` attribute will be used as a default. + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable + values are: + - `'tf'`: Return TensorFlow `tf.Tensor` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. + - `'jax'`: Return JAX `jnp.ndarray` objects. + return_dict (`bool`, defaults to `False`): + Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`. + tokenizer_kwargs (`Dict[str: Any]`, *optional*): Additional kwargs to pass to the tokenizer. + **kwargs: Additional kwargs to pass to the template renderer. Will be accessible by the chat template. + + Returns: + `Union[List[int], Dict]`: A list of token ids representing the tokenized chat so far, including control tokens. This + output is ready to pass to the model, either directly or via methods like `generate()`. If `return_dict` is + set, will return a dict of tokenizer outputs instead. + """ + + if return_dict and not tokenize: + raise ValueError( + "`return_dict=True` is incompatible with `tokenize=False`, because there is no dict " + "of tokenizer outputs to return." + ) + + if tokenizer_kwargs is None: + tokenizer_kwargs = {} + + # First, handle the cases when the model has a dict of multiple templates + if isinstance(self.chat_template, dict) or ( + self.chat_template is None and isinstance(self.default_chat_template, dict) + ): + template_dict = self.chat_template or self.default_chat_template + if chat_template is not None and chat_template in template_dict: + # The user can pass the name of a template to the chat template argument instead of an entire template + chat_template = template_dict[chat_template] + elif chat_template is None and "default" in template_dict: + chat_template = template_dict["default"] + elif chat_template is None: + raise ValueError( + "This model has multiple chat templates with no default specified! Please either pass a chat " + "template or the name of the template you wish to use to the `chat_template` argument. Available " + f"template names are {sorted(template_dict.keys())}." 
+ ) + elif chat_template is None: + # These are the cases when the model has a single template + # priority: `chat_template` argument > `tokenizer.chat_template` > `tokenizer.default_chat_template + if self.chat_template is not None: + chat_template = self.chat_template + else: + chat_template = self.default_chat_template + + # Compilation function uses a cache to avoid recompiling the same template + compiled_template = self._compile_jinja_template(chat_template) + + if isinstance(conversation, (list, tuple)) and ( + isinstance(conversation[0], (list, tuple)) or hasattr(conversation[0], "messages") + ): + conversations = conversation + is_batched = True + else: + conversations = [conversation] + is_batched = False + + rendered = [] + template_kwargs = {**self.special_tokens_map, **kwargs} # kwargs overwrite special tokens if both are present + for chat in conversations: + if hasattr(chat, "messages"): + # Indicates it's a Conversation object + chat = chat.messages + rendered_chat = compiled_template.render( + messages=chat, add_generation_prompt=add_generation_prompt, **template_kwargs + ) + rendered.append(rendered_chat) + + if not is_batched: + rendered = rendered[0] + + if tokenize: + out = self( + rendered, + padding=padding, + truncation=truncation, + max_length=max_length, + add_special_tokens=False, + return_tensors=return_tensors, + **tokenizer_kwargs, + ) + if return_dict: + return out + else: + return out["input_ids"] + else: + return rendered + + @lru_cache + def _compile_jinja_template(self, chat_template): + try: + import jinja2 + from jinja2.exceptions import TemplateError + from jinja2.sandbox import ImmutableSandboxedEnvironment + except ImportError: + raise ImportError("apply_chat_template requires jinja2 to be installed.") + + if version.parse(jinja2.__version__) < version.parse("3.0.0"): + raise ImportError( + "apply_chat_template requires jinja2>=3.0.0 to be installed. Your version is " f"{jinja2.__version__}." + ) + + def raise_exception(message): + raise TemplateError(message) + + jinja_env = ImmutableSandboxedEnvironment(trim_blocks=True, lstrip_blocks=True) + jinja_env.globals["raise_exception"] = raise_exception + return jinja_env.from_string(chat_template) + + @property + def default_chat_template(self): + """ + This template formats inputs in the standard ChatML format. See + https://github.com/openai/openai-python/blob/main/chatml.md + """ + logger.warning_once( + "\nNo chat template is defined for this tokenizer - using a default chat template " + "that implements the ChatML format (without BOS/EOS tokens!). If the default is not appropriate for " + "your model, please set `tokenizer.chat_template` to an appropriate template. " + "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n" + ) + return ( + "{% for message in messages %}" + "{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}" + "{% endfor %}" + "{% if add_generation_prompt %}" + "{{ '<|im_start|>assistant\n' }}" + "{% endif %}" + ) + + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path: Union[str, os.PathLike], + *init_inputs, + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + local_files_only: bool = False, + token: Optional[Union[str, bool]] = None, + revision: str = "main", + trust_remote_code=False, + **kwargs, + ): + r""" + Instantiate a [`~tokenization_utils_base.PreTrainedTokenizerBase`] (or a derived class) from a predefined + tokenizer. 
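As a quick sketch of load-time overrides (keyword arguments that are not vocabulary files are forwarded to the tokenizer's `__init__`; the checkpoint name and values below are illustrative):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "google-bert/bert-base-uncased",
    model_max_length=256,   # override the value stored in the tokenizer config
    padding_side="left",    # override the class default
)
print(tokenizer.model_max_length, tokenizer.padding_side)  # 256 left
```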
+ + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + Can be either: + + - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co. + - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved + using the [`~tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`] method, e.g., + `./my_model_directory/`. + - (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary + file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g., + `./my_model_directory/vocab.txt`. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the + standard cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download the vocabulary files and override the cached versions if they + exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files. Attempt to resume the download if such a file + exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + local_files_only (`bool`, *optional*, defaults to `False`): + Whether or not to only rely on local files and not to attempt to download any files. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + subfolder (`str`, *optional*): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for + facebook/rag-token-base), specify it here. + inputs (additional positional arguments, *optional*): + Will be passed along to the Tokenizer `__init__` method. + trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files. This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + kwargs (additional keyword arguments, *optional*): + Will be passed to the Tokenizer `__init__` method. Can be used to set special tokens like `bos_token`, + `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`, + `additional_special_tokens`. See parameters in the `__init__` for more details. + + + + Passing `token=True` is required when you want to use a private model. + + + + Examples: + + ```python + # We can't instantiate directly the base class *PreTrainedTokenizerBase* so let's show our examples on a derived class: BertTokenizer + # Download vocabulary from huggingface.co and cache. + tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") + + # Download vocabulary from huggingface.co (user-uploaded) and cache. 
+ tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-german-cased") + + # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*) + tokenizer = BertTokenizer.from_pretrained("./test/saved_model/") + + # If the tokenizer uses a single vocabulary file, you can point directly to this file + tokenizer = BertTokenizer.from_pretrained("./test/saved_model/my_vocab.txt") + + # You can link tokens to special vocabulary when instantiating + tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased", unk_token="<unk>") + # You should be sure '<unk>' is in the vocabulary when doing that. + # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead. + assert tokenizer.unk_token == "<unk>" + ```""" + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + use_auth_token = kwargs.pop("use_auth_token", None) + subfolder = kwargs.pop("subfolder", None) + from_pipeline = kwargs.pop("_from_pipeline", None) + from_auto_class = kwargs.pop("_from_auto", False) + commit_hash = kwargs.pop("_commit_hash", None) + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + token = use_auth_token + + user_agent = {"file_type": "tokenizer", "from_auto_class": from_auto_class, "is_fast": "Fast" in cls.__name__} + if from_pipeline is not None: + user_agent["using_pipeline"] = from_pipeline + + if is_offline_mode() and not local_files_only: + logger.info("Offline mode: forcing local_files_only=True") + local_files_only = True + + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + vocab_files = {} + init_configuration = {} + + is_local = os.path.isdir(pretrained_model_name_or_path) + single_file_id = None + if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): + if len(cls.vocab_files_names) > 1: + raise ValueError( + f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not " + "supported for this tokenizer. Use a model identifier or the path to a directory instead." + ) + warnings.warn( + f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated and " + "won't be possible anymore in v5. Use a model identifier or the path to a directory instead.", + FutureWarning, + ) + file_id = list(cls.vocab_files_names.keys())[0] + + vocab_files[file_id] = pretrained_model_name_or_path + single_file_id = file_id + else: + # At this point pretrained_model_name_or_path is either a directory or a model identifier name + additional_files_names = { + "added_tokens_file": ADDED_TOKENS_FILE, # kept only for legacy + "special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE, # kept only for legacy + "tokenizer_config_file": TOKENIZER_CONFIG_FILE, + # tokenizer_file used to initialize a slow from a fast. Properly copy the `addedTokens` instead of adding in random orders + "tokenizer_file": FULL_TOKENIZER_FILE, + } + vocab_files = {**cls.vocab_files_names, **additional_files_names} + if "tokenizer_file" in vocab_files: + # Try to get the tokenizer config to see if there are versioned tokenizer files.
+ fast_tokenizer_file = FULL_TOKENIZER_FILE + resolved_config_file = cached_file( + pretrained_model_name_or_path, + TOKENIZER_CONFIG_FILE, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + subfolder=subfolder, + user_agent=user_agent, + _raise_exceptions_for_gated_repo=False, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + _commit_hash=commit_hash, + ) + commit_hash = extract_commit_hash(resolved_config_file, commit_hash) + if resolved_config_file is not None: + with open(resolved_config_file, encoding="utf-8") as reader: + tokenizer_config = json.load(reader) + if "fast_tokenizer_files" in tokenizer_config: + fast_tokenizer_file = get_fast_tokenizer_file(tokenizer_config["fast_tokenizer_files"]) + vocab_files["tokenizer_file"] = fast_tokenizer_file + + # Get files from url, cache, or disk depending on the case + resolved_vocab_files = {} + unresolved_files = [] + for file_id, file_path in vocab_files.items(): + if file_path is None: + resolved_vocab_files[file_id] = None + elif single_file_id == file_id: + if os.path.isfile(file_path): + resolved_vocab_files[file_id] = file_path + elif is_remote_url(file_path): + resolved_vocab_files[file_id] = download_url(file_path, proxies=proxies) + else: + resolved_vocab_files[file_id] = cached_file( + pretrained_model_name_or_path, + file_path, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + revision=revision, + subfolder=subfolder, + _raise_exceptions_for_gated_repo=False, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + _commit_hash=commit_hash, + ) + commit_hash = extract_commit_hash(resolved_vocab_files[file_id], commit_hash) + + if len(unresolved_files) > 0: + logger.info( + f"Can't load following files from cache: {unresolved_files} and cannot check if these " + "files are necessary for the tokenizer to operate." + ) + + if all(full_file_name is None for full_file_name in resolved_vocab_files.values()): + raise EnvironmentError( + f"Can't load tokenizer for '{pretrained_model_name_or_path}'. If you were trying to load it from " + "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " + f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " + f"containing all relevant files for a {cls.__name__} tokenizer." 
+ ) + + for file_id, file_path in vocab_files.items(): + if file_id not in resolved_vocab_files: + continue + + if is_local: + logger.info(f"loading file {file_path}") + else: + logger.info(f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}") + + return cls._from_pretrained( + resolved_vocab_files, + pretrained_model_name_or_path, + init_configuration, + *init_inputs, + token=token, + cache_dir=cache_dir, + local_files_only=local_files_only, + _commit_hash=commit_hash, + _is_local=is_local, + trust_remote_code=trust_remote_code, + **kwargs, + ) + + @classmethod + def _from_pretrained( + cls, + resolved_vocab_files, + pretrained_model_name_or_path, + init_configuration, + *init_inputs, + token=None, + cache_dir=None, + local_files_only=False, + _commit_hash=None, + _is_local=False, + trust_remote_code=False, + **kwargs, + ): + # We instantiate fast tokenizers based on a slow tokenizer if we don't have access to the tokenizer.json + # file or if `from_slow` is set to True. + from_slow = kwargs.get("from_slow", False) + has_tokenizer_file = resolved_vocab_files.get("tokenizer_file", None) is not None + if (from_slow or not has_tokenizer_file) and cls.slow_tokenizer_class is not None: + slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained( + copy.deepcopy(resolved_vocab_files), + pretrained_model_name_or_path, + copy.deepcopy(init_configuration), + *init_inputs, + token=token, + cache_dir=cache_dir, + local_files_only=local_files_only, + _commit_hash=_commit_hash, + **(copy.deepcopy(kwargs)), + ) + else: + slow_tokenizer = None + + # Prepare tokenizer initialization kwargs + # Did we saved some inputs and kwargs to reload ? + tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None) + if tokenizer_config_file is not None: + with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle: + init_kwargs = json.load(tokenizer_config_handle) + # First attempt. We get tokenizer_class from tokenizer_config to check mismatch between tokenizers. + config_tokenizer_class = init_kwargs.get("tokenizer_class") + init_kwargs.pop("tokenizer_class", None) + if not has_tokenizer_file: + init_kwargs.pop("tokenizer_file", None) + saved_init_inputs = init_kwargs.pop("init_inputs", ()) + if not init_inputs: + init_inputs = saved_init_inputs + else: + config_tokenizer_class = None + init_kwargs = init_configuration + + if "auto_map" in init_kwargs and not _is_local: + # For backward compatibility with odl format. + if isinstance(init_kwargs["auto_map"], (tuple, list)): + init_kwargs["auto_map"] = {"AutoTokenizer": init_kwargs["auto_map"]} + init_kwargs["auto_map"] = add_model_info_to_auto_map( + init_kwargs["auto_map"], pretrained_model_name_or_path + ) + + if config_tokenizer_class is None: + # Matt: This entire block is only used to decide if the tokenizer class matches the class in the repo. + # If not, it raises a warning, but otherwise continues. Since we mostly load tokenizers with + # AutoTokenizer these days, it seems like a lot of work (and a source of bugs) for little gain. + # Maybe we can just remove this entirely? + from .models.auto.configuration_auto import AutoConfig # tests_ignore + + # Second attempt. If we have not yet found tokenizer_class, let's try to use the config. 
+ try: + config = AutoConfig.from_pretrained( + pretrained_model_name_or_path, + token=token, + cache_dir=cache_dir, + local_files_only=local_files_only, + trust_remote_code=trust_remote_code, + _commit_hash=_commit_hash, + ) + config_tokenizer_class = config.tokenizer_class + except (OSError, ValueError, KeyError): + # skip if an error occurred. + config = None + if config_tokenizer_class is None: + # Third attempt. If we have not yet found the original type of the tokenizer, + # we are loading we see if we can infer it from the type of the configuration file + from .models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES # tests_ignore + + if hasattr(config, "model_type"): + model_type = config.model_type + else: + # Fallback: use pattern matching on the string. + model_type = None + for pattern in TOKENIZER_MAPPING_NAMES.keys(): + if pattern in str(pretrained_model_name_or_path): + model_type = pattern + break + + if model_type is not None: + config_tokenizer_class, config_tokenizer_class_fast = TOKENIZER_MAPPING_NAMES.get( + model_type, (None, None) + ) + if config_tokenizer_class is None: + config_tokenizer_class = config_tokenizer_class_fast + + if config_tokenizer_class is not None: + if cls.__name__.replace("Fast", "") != config_tokenizer_class.replace("Fast", ""): + logger.warning( + "The tokenizer class you load from this checkpoint is not the same type as the class this" + " function is called from. It may result in unexpected tokenization. \nThe tokenizer class you" + f" load from this checkpoint is '{config_tokenizer_class}'. \nThe class this function is called" + f" from is '{cls.__name__}'." + ) + + # Update with newly provided kwargs + init_kwargs.update(kwargs) + + # Merge resolved_vocab_files arguments in init_kwargs. + added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None) + special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None) + for args_name, file_path in resolved_vocab_files.items(): + if args_name not in init_kwargs: + init_kwargs[args_name] = file_path + tokenizer_file = resolved_vocab_files.pop("tokenizer_file", None) + + if slow_tokenizer is not None: + init_kwargs["__slow_tokenizer"] = slow_tokenizer + init_kwargs["name_or_path"] = pretrained_model_name_or_path + + #### Handle tokenizer serialization of added and special tokens + added_tokens_decoder: Dict[int, AddedToken] = {} + added_tokens_map: Dict[str, AddedToken] = {} + # if we have info on the slow added tokens + if "added_tokens_decoder" in init_kwargs: + for idx, token in init_kwargs["added_tokens_decoder"].items(): + if isinstance(token, dict): + token = AddedToken(**token) + if isinstance(token, AddedToken): + added_tokens_decoder[int(idx)] = token + added_tokens_map[str(token)] = token + else: + raise ValueError( + f"Found a {token.__class__} in the saved `added_tokens_decoder`, should be a dictionary or an AddedToken instance" + ) + else: + # begin legacy: read the added_tokens_file and update kwargs with special_tokens_map if modified + if special_tokens_map_file is not None: + with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle: + special_tokens_map = json.load(special_tokens_map_handle) + for key, value in special_tokens_map.items(): + if key in kwargs and kwargs[key]: + # This value has already been redefined by the kwargs + # We keep this new value and ignore the one stored in the special_tokens_map_file + continue + if isinstance(value, dict): + value = AddedToken(**value, special=True) + elif key == 
"additional_special_tokens" and isinstance(value, list): + additional_special_tokens = init_kwargs.pop("additional_special_tokens", []) or [] + for token in value: + token = AddedToken(**token, special=True) if isinstance(token, dict) else token + if token not in additional_special_tokens: + additional_special_tokens.append(token) + value = additional_special_tokens + init_kwargs[key] = value + + # slow -> slow|fast, legacy: convert the `"added_tokens.json"` file to `added_tokens_decoder`. + # this is for legacy purpose. We don't add the tokens after init for efficiency. + if added_tokens_file is not None: + special_tokens = [] + for key in cls.SPECIAL_TOKENS_ATTRIBUTES & init_kwargs.keys(): + if init_kwargs[key] is not None: + if key == "additional_special_tokens": + special_tokens += [str(token) for token in init_kwargs[key]] + else: + special_tokens.append(str(init_kwargs[key])) + + with open(added_tokens_file, encoding="utf-8") as added_tokens_handle: + added_tok_encoder = json.load(added_tokens_handle) + for str_token, index in added_tok_encoder.items(): + # if index not in added_tokens_decoder and str_token not in added_tokens_map: + special = str_token in special_tokens + added_tokens_decoder[index] = AddedToken( + str_token, rstrip=False, lstrip=False, normalized=not special, special=special + ) + added_tokens_map[str(token)] = added_tokens_decoder[index] + + # allows converting a fast -> slow: add the `tokenizer.json`'s `"added_tokens"` to the slow tokenizer + # if `tokenizer_config.json` is `None` + if tokenizer_file is not None: + # This is for slow so can be done before + with open(tokenizer_file, encoding="utf-8") as tokenizer_file_handle: + tokenizer_file_handle = json.load(tokenizer_file_handle) + added_tokens = tokenizer_file_handle.pop("added_tokens") + for serialized_tokens in added_tokens: + idx = serialized_tokens.pop("id") + added_tokens_decoder[idx] = AddedToken(**serialized_tokens) + added_tokens_map[str(added_tokens_decoder[idx])] = added_tokens_decoder[idx] + # end legacy + + # Passing AddedTokens and not strings to the class to prevent it from casting the string to a different AddedToken + # convert {'__type': 'AddedToken', 'content': '', 'lstrip': False, 'normalized': True, ...} to AddedTokens + init_kwargs["added_tokens_decoder"] = added_tokens_decoder + init_kwargs = cls.convert_added_tokens(init_kwargs, save=False) + for key in cls.SPECIAL_TOKENS_ATTRIBUTES & init_kwargs.keys(): + if added_tokens_map != {} and init_kwargs[key] is not None: + if key != "additional_special_tokens": + init_kwargs[key] = added_tokens_map.get(str(init_kwargs[key]), init_kwargs[key]) + + # Instantiate the tokenizer. + try: + tokenizer = cls(*init_inputs, **init_kwargs) + except OSError: + raise OSError( + "Unable to load vocabulary from file. " + "Please check that the provided vocabulary is accessible and not corrupted." + ) + + if added_tokens_decoder != {} and max(list(added_tokens_decoder.keys())[-1], 0) > tokenizer.vocab_size: + logger.warning_advice( + "Special tokens have been added in the vocabulary, make sure the associated word embeddings are" + " fine-tuned or trained." + ) + return tokenizer + + @staticmethod + def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length): + # This method should be deleted in Transformers v5 + # Its only purpose is to potentially throw a warning + # that incorrectly defined max lengths of T5's tokenizer are used + # which we will correct in Transformers v5. 
+ return max_model_length + + @classmethod + def convert_added_tokens(cls, obj: Union[AddedToken, Any], save=False, add_type_field=True): + if isinstance(obj, dict) and "__type" in obj and obj["__type"] == "AddedToken": + obj.pop("__type") + return AddedToken(**obj) + if isinstance(obj, AddedToken) and save: + obj = obj.__getstate__() + if add_type_field: + obj["__type"] = "AddedToken" + else: + # Don't save "special" for previous tokenizers + obj.pop("special") + return obj + elif isinstance(obj, (list, tuple)): + return [cls.convert_added_tokens(o, save=save, add_type_field=add_type_field) for o in obj] + elif isinstance(obj, dict): + return {k: cls.convert_added_tokens(v, save=save, add_type_field=add_type_field) for k, v in obj.items()} + return obj + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + legacy_format: Optional[bool] = None, + filename_prefix: Optional[str] = None, + push_to_hub: bool = False, + **kwargs, + ) -> Tuple[str]: + """ + Save the full tokenizer state. + + + This method makes sure the full tokenizer can then be re-loaded using the + [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`] class method. + + Warning: This won't save modifications you may have applied to the tokenizer after the instantiation (for + instance, modifying `tokenizer.do_lower_case` after creation). + + Args: + save_directory (`str` or `os.PathLike`): The path to a directory where the tokenizer will be saved. + legacy_format (`bool`, *optional*): + Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON + format as well as in legacy format if it exists, i.e. with tokenizer specific vocabulary and a separate + added_tokens file. + + If `False`, will only save the tokenizer in the unified JSON format. This format is incompatible with + "slow" tokenizers (not powered by the *tokenizers* library), so the tokenizer will not be able to be + loaded in the corresponding "slow" tokenizer. + + If `True`, will save the tokenizer in legacy format. If the "slow" tokenizer doesn't exist, a value + error is raised. + filename_prefix (`str`, *optional*): + A prefix to add to the names of the files saved by the tokenizer. + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. + + Returns: + A tuple of `str`: The files saved. + """ + use_auth_token = kwargs.pop("use_auth_token", None) + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+ ) + kwargs["token"] = use_auth_token + + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + + os.makedirs(save_directory, exist_ok=True) + + if push_to_hub: + commit_message = kwargs.pop("commit_message", None) + repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) + repo_id = self._create_repo(repo_id, **kwargs) + files_timestamps = self._get_files_timestamps(save_directory) + + special_tokens_map_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + SPECIAL_TOKENS_MAP_FILE + ) + tokenizer_config_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_CONFIG_FILE + ) + + tokenizer_config = copy.deepcopy(self.init_kwargs) + + # Let's save the init kwargs + target_keys = set(self.init_kwargs.keys()) + # Let's save the special tokens map (only the strings) + target_keys.update(["model_max_length", "clean_up_tokenization_spaces"]) + + for k in target_keys: + if hasattr(self, k): + tokenizer_config[k] = getattr(self, k) + + # Let's make sure we properly save the special tokens. + tokenizer_config.update(self.special_tokens_map) + + if self.chat_template is not None: + if isinstance(self.chat_template, dict): + # Chat template dicts are saved to the config as lists of dicts with fixed key names. + # They will be reconstructed as a single dict during loading. + tokenizer_config["chat_template"] = [{"name": k, "template": v} for k, v in self.chat_template.items()] + else: + tokenizer_config["chat_template"] = self.chat_template + + if len(self.init_inputs) > 0: + tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs) + for file_id in self.vocab_files_names.keys(): + tokenizer_config.pop(file_id, None) + + # no typefields, this way old fast and slow can load it + tokenizer_config = self.convert_added_tokens(tokenizer_config, add_type_field=True, save=True) + + # Process added tokens seperatly: allows previous versions to ignore it! + added_tokens = {} + for key, value in self.added_tokens_decoder.items(): + added_tokens[key] = value.__getstate__() + tokenizer_config["added_tokens_decoder"] = added_tokens + + # Add tokenizer class to the tokenizer config to be able to reload it with from_pretrained + tokenizer_class = self.__class__.__name__ + # Remove the Fast at the end unless we have a special `PreTrainedTokenizerFast` + if tokenizer_class.endswith("Fast") and tokenizer_class != "PreTrainedTokenizerFast": + tokenizer_class = tokenizer_class[:-4] + tokenizer_config["tokenizer_class"] = tokenizer_class + if getattr(self, "_auto_map", None) is not None: + tokenizer_config["auto_map"] = self._auto_map + if getattr(self, "_processor_class", None) is not None: + tokenizer_config["processor_class"] = self._processor_class + + # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be + # loaded from the Hub. 
+ if self._auto_class is not None: + custom_object_save(self, save_directory, config=tokenizer_config) + + # remove private information + if "name_or_path" in tokenizer_config: + tokenizer_config.pop("name_or_path") + tokenizer_config.pop("special_tokens_map_file", None) + tokenizer_config.pop("tokenizer_file", None) + + with open(tokenizer_config_file, "w", encoding="utf-8") as f: + out_str = json.dumps(tokenizer_config, indent=2, sort_keys=True, ensure_ascii=False) + "\n" + f.write(out_str) + logger.info(f"tokenizer config file saved in {tokenizer_config_file}") + + # Sanitize AddedTokens in special_tokens_map + + # kept for forward compatibility, will be removed in transoformers 5. Typefields are not saved for FC, special should not be save either + write_dict = self.convert_added_tokens(self.special_tokens_map_extended, save=True, add_type_field=False) + with open(special_tokens_map_file, "w", encoding="utf-8") as f: + out_str = json.dumps(write_dict, indent=2, sort_keys=True, ensure_ascii=False) + "\n" + f.write(out_str) + logger.info(f"Special tokens file saved in {special_tokens_map_file}") + + file_names = (tokenizer_config_file, special_tokens_map_file) + + save_files = self._save_pretrained( + save_directory=save_directory, + file_names=file_names, + legacy_format=legacy_format, + filename_prefix=filename_prefix, + ) + + if push_to_hub: + self._upload_modified_files( + save_directory, + repo_id, + files_timestamps, + commit_message=commit_message, + token=kwargs.get("token"), + ) + + return save_files + + def _save_pretrained( + self, + save_directory: Union[str, os.PathLike], + file_names: Tuple[str], + legacy_format: Optional[bool] = None, + filename_prefix: Optional[str] = None, + ) -> Tuple[str]: + """ + Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens. + + Fast tokenizers can also be saved in a unique JSON file containing {config + vocab + added-tokens} using the + specific [`~tokenization_utils_fast.PreTrainedTokenizerFast._save_pretrained`] + """ + if legacy_format is False: + raise ValueError( + "Only fast tokenizers (instances of PreTrainedTokenizerFast) can be saved in non legacy format." + ) + + save_directory = str(save_directory) + + added_tokens_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE + ) + # the new get_added_vocab() also returns special tokens and tokens that have an index < vocab_size + added_vocab = {tok: index for tok, index in self.added_tokens_encoder.items() if index >= self.vocab_size} + if added_vocab: + with open(added_tokens_file, "w", encoding="utf-8") as f: + out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n" + f.write(out_str) + logger.info(f"added tokens file saved in {added_tokens_file}") + + vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix) + + return file_names + vocab_files + (added_tokens_file,) + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + """ + Save only the vocabulary of the tokenizer (vocabulary + added tokens). + + This method won't save the configuration and special token mappings of the tokenizer. Use + [`~PreTrainedTokenizerFast._save_pretrained`] to save the whole state of the tokenizer. + + Args: + save_directory (`str`): + The directory in which to save the vocabulary. + filename_prefix (`str`, *optional*): + An optional prefix to add to the named of the saved files. 
+ + Returns: + `Tuple(str)`: Paths to the files saved. + """ + raise NotImplementedError + + def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]: + """ + Converts a string into a sequence of tokens, replacing unknown tokens with the `unk_token`. + + Args: + text (`str`): + The sequence to be encoded. + pair (`str`, *optional*): + A second sequence to be encoded with the first. + add_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not to add the special tokens associated with the corresponding model. + kwargs (additional keyword arguments, *optional*): + Will be passed to the underlying model specific encode method. See details in + [`~PreTrainedTokenizerBase.__call__`] + + Returns: + `List[str]`: The list of tokens. + """ + raise NotImplementedError + + @add_end_docstrings( + ENCODE_KWARGS_DOCSTRING, + """ + **kwargs: Passed along to the `.tokenize()` method. + """, + """ + Returns: + `List[int]`, `torch.Tensor`, `tf.Tensor` or `np.ndarray`: The tokenized ids of the text. + """, + ) + def encode( + self, + text: Union[TextInput, PreTokenizedInput, EncodedInput], + text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = None, + max_length: Optional[int] = None, + stride: int = 0, + return_tensors: Optional[Union[str, TensorType]] = None, + **kwargs, + ) -> List[int]: + """ + Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary. + + Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`. + + Args: + text (`str`, `List[str]` or `List[int]`): + The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the + `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` + method). + text_pair (`str`, `List[str]` or `List[int]`, *optional*): + Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using + the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` + method). + """ + encoded_inputs = self.encode_plus( + text, + text_pair=text_pair, + add_special_tokens=add_special_tokens, + padding=padding, + truncation=truncation, + max_length=max_length, + stride=stride, + return_tensors=return_tensors, + **kwargs, + ) + + return encoded_inputs["input_ids"] + + def num_special_tokens_to_add(self, pair: bool = False) -> int: + raise NotImplementedError + + def _get_padding_truncation_strategies( + self, padding=False, truncation=None, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs + ): + """ + Find the correct padding/truncation strategy with backward compatibility for old arguments (truncation_strategy + and pad_to_max_length) and behaviors. 
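Editor's note: an illustrative sketch (not part of the patch) of how the user-facing `padding`/`truncation` arguments are resolved by this helper; `tokenizer` stands for any hypothetical tokenizer instance:

    # padding=True, truncation=True and an explicit max_length resolve to:
    strategies = tokenizer._get_padding_truncation_strategies(padding=True, truncation=True, max_length=8)
    # strategies == (PaddingStrategy.LONGEST, TruncationStrategy.LONGEST_FIRST, 8, {})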
+ """ + old_truncation_strategy = kwargs.pop("truncation_strategy", "do_not_truncate") + old_pad_to_max_length = kwargs.pop("pad_to_max_length", False) + + # Backward compatibility for previous behavior, maybe we should deprecate it: + # If you only set max_length, it activates truncation for max_length + if max_length is not None and padding is False and truncation is None: + if verbose: + if not self.deprecation_warnings.get("Truncation-not-explicitly-activated", False): + logger.warning( + "Truncation was not explicitly activated but `max_length` is provided a specific value, please" + " use `truncation=True` to explicitly truncate examples to max length. Defaulting to" + " 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the" + " tokenizer you can select this strategy more precisely by providing a specific strategy to" + " `truncation`." + ) + self.deprecation_warnings["Truncation-not-explicitly-activated"] = True + truncation = "longest_first" + + # Get padding strategy + if padding is False and old_pad_to_max_length: + if verbose: + warnings.warn( + "The `pad_to_max_length` argument is deprecated and will be removed in a future version, " + "use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or " + "use `padding='max_length'` to pad to a max length. In this case, you can give a specific " + "length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the " + "maximal input size of the model (e.g. 512 for Bert).", + FutureWarning, + ) + if max_length is None: + padding_strategy = PaddingStrategy.LONGEST + else: + padding_strategy = PaddingStrategy.MAX_LENGTH + elif padding is not False: + if padding is True: + if verbose: + if max_length is not None and ( + truncation is None or truncation is False or truncation == "do_not_truncate" + ): + warnings.warn( + "`max_length` is ignored when `padding`=`True` and there is no truncation strategy. " + "To pad to max length, use `padding='max_length'`." + ) + if old_pad_to_max_length is not False: + warnings.warn("Though `pad_to_max_length` = `True`, it is ignored because `padding`=`True`.") + padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch + elif not isinstance(padding, PaddingStrategy): + padding_strategy = PaddingStrategy(padding) + elif isinstance(padding, PaddingStrategy): + padding_strategy = padding + else: + padding_strategy = PaddingStrategy.DO_NOT_PAD + + # Get truncation strategy + if truncation is None and old_truncation_strategy != "do_not_truncate": + if verbose: + warnings.warn( + "The `truncation_strategy` argument is deprecated and will be removed in a future version, use" + " `truncation=True` to truncate examples to a max length. You can give a specific length with" + " `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the maximal input" + " size of the model (e.g. 512 for Bert). 
If you have pairs of inputs, you can give a specific" + " truncation strategy selected among `truncation='only_first'` (will only truncate the first" + " sentence in the pairs) `truncation='only_second'` (will only truncate the second sentence in the" + " pairs) or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence" + " in the pairs).", + FutureWarning, + ) + truncation_strategy = TruncationStrategy(old_truncation_strategy) + elif truncation is not False and truncation is not None: + if truncation is True: + truncation_strategy = ( + TruncationStrategy.LONGEST_FIRST + ) # Default to truncate the longest sequences in pairs of inputs + elif not isinstance(truncation, TruncationStrategy): + truncation_strategy = TruncationStrategy(truncation) + elif isinstance(truncation, TruncationStrategy): + truncation_strategy = truncation + else: + truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE + + # Set max length if needed + if max_length is None: + if padding_strategy == PaddingStrategy.MAX_LENGTH: + if self.model_max_length > LARGE_INTEGER: + if verbose: + if not self.deprecation_warnings.get("Asking-to-pad-to-max_length", False): + logger.warning( + "Asking to pad to max_length but no maximum length is provided and the model has no" + " predefined maximum length. Default to no padding." + ) + self.deprecation_warnings["Asking-to-pad-to-max_length"] = True + padding_strategy = PaddingStrategy.DO_NOT_PAD + else: + max_length = self.model_max_length + + if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE: + if self.model_max_length > LARGE_INTEGER: + if verbose: + if not self.deprecation_warnings.get("Asking-to-truncate-to-max_length", False): + logger.warning( + "Asking to truncate to max_length but no maximum length is provided and the model has" + " no predefined maximum length. Default to no truncation." + ) + self.deprecation_warnings["Asking-to-truncate-to-max_length"] = True + truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE + else: + max_length = self.model_max_length + + # Test if we have a padding token + if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.pad_token is None or self.pad_token_id < 0): + raise ValueError( + "Asking to pad but the tokenizer does not have a padding token. " + "Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` " + "or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`." + ) + + # Check that we will truncate to a multiple of pad_to_multiple_of if both are provided + if ( + truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE + and padding_strategy != PaddingStrategy.DO_NOT_PAD + and pad_to_multiple_of is not None + and max_length is not None + and (max_length % pad_to_multiple_of != 0) + ): + raise ValueError( + "Truncation and padding are both activated but " + f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})." 
+ ) + + return padding_strategy, truncation_strategy, max_length, kwargs + + @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) + def __call__( + self, + text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, + text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, + text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, + text_pair_target: Optional[ + Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] + ] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = None, + max_length: Optional[int] = None, + stride: int = 0, + is_split_into_words: bool = False, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs, + ) -> BatchEncoding: + """ + Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of + sequences. + + Args: + text (`str`, `List[str]`, `List[List[str]]`, *optional*): + The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings + (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set + `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + text_pair (`str`, `List[str]`, `List[List[str]]`, *optional*): + The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings + (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set + `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + text_target (`str`, `List[str]`, `List[List[str]]`, *optional*): + The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a + list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), + you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + text_pair_target (`str`, `List[str]`, `List[List[str]]`, *optional*): + The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a + list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), + you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). 
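Editor's note: a minimal usage sketch for `__call__` (illustrative only; the checkpoint name is a placeholder and the exact ids depend on the vocabulary):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    batch = tokenizer(["Hello world", "Hi"], padding=True, return_tensors="pt")
    # batch is a BatchEncoding whose "input_ids" and "attention_mask" tensors have shape (2, longest_length)
    enc = tokenizer("Hello world", text_target="Hallo Welt")
    # when both `text` and `text_target` are given, the target ids are attached as enc["labels"]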
+ """ + # To avoid duplicating + all_kwargs = { + "add_special_tokens": add_special_tokens, + "padding": padding, + "truncation": truncation, + "max_length": max_length, + "stride": stride, + "is_split_into_words": is_split_into_words, + "pad_to_multiple_of": pad_to_multiple_of, + "return_tensors": return_tensors, + "return_token_type_ids": return_token_type_ids, + "return_attention_mask": return_attention_mask, + "return_overflowing_tokens": return_overflowing_tokens, + "return_special_tokens_mask": return_special_tokens_mask, + "return_offsets_mapping": return_offsets_mapping, + "return_length": return_length, + "verbose": verbose, + } + all_kwargs.update(kwargs) + if text is None and text_target is None: + raise ValueError("You need to specify either `text` or `text_target`.") + if text is not None: + # The context manager will send the inputs as normal texts and not text_target, but we shouldn't change the + # input mode in this case. + if not self._in_target_context_manager: + self._switch_to_input_mode() + encodings = self._call_one(text=text, text_pair=text_pair, **all_kwargs) + if text_target is not None: + self._switch_to_target_mode() + target_encodings = self._call_one(text=text_target, text_pair=text_pair_target, **all_kwargs) + # Leave back tokenizer in input mode + self._switch_to_input_mode() + + if text_target is None: + return encodings + elif text is None: + return target_encodings + else: + encodings["labels"] = target_encodings["input_ids"] + return encodings + + def _call_one( + self, + text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], + text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = None, + max_length: Optional[int] = None, + stride: int = 0, + is_split_into_words: bool = False, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs, + ) -> BatchEncoding: + # Input type checking for clearer error + def _is_valid_text_input(t): + if isinstance(t, str): + # Strings are fine + return True + elif isinstance(t, (list, tuple)): + # List are fine as long as they are... + if len(t) == 0: + # ... empty + return True + elif isinstance(t[0], str): + # ... list of strings + return True + elif isinstance(t[0], (list, tuple)): + # ... list with an empty list or with a list of strings + return len(t[0]) == 0 or isinstance(t[0][0], str) + else: + return False + else: + return False + + if not _is_valid_text_input(text): + raise ValueError( + "text input must be of type `str` (single example), `List[str]` (batch or single pretokenized example) " + "or `List[List[str]]` (batch of pretokenized examples)." + ) + + if text_pair is not None and not _is_valid_text_input(text_pair): + raise ValueError( + "text input must be of type `str` (single example), `List[str]` (batch or single pretokenized example) " + "or `List[List[str]]` (batch of pretokenized examples)." 
+ ) + + if is_split_into_words: + is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) + else: + is_batched = isinstance(text, (list, tuple)) + + if is_batched: + if isinstance(text_pair, str): + raise TypeError( + "when tokenizing batches of text, `text_pair` must be a list or tuple with the same length as" + " `text`." + ) + if text_pair is not None and len(text) != len(text_pair): + raise ValueError( + f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" + f" {len(text_pair)}." + ) + batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text + return self.batch_encode_plus( + batch_text_or_text_pairs=batch_text_or_text_pairs, + add_special_tokens=add_special_tokens, + padding=padding, + truncation=truncation, + max_length=max_length, + stride=stride, + is_split_into_words=is_split_into_words, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + else: + return self.encode_plus( + text=text, + text_pair=text_pair, + add_special_tokens=add_special_tokens, + padding=padding, + truncation=truncation, + max_length=max_length, + stride=stride, + is_split_into_words=is_split_into_words, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + + @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) + def encode_plus( + self, + text: Union[TextInput, PreTokenizedInput, EncodedInput], + text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = None, + max_length: Optional[int] = None, + stride: int = 0, + is_split_into_words: bool = False, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs, + ) -> BatchEncoding: + """ + Tokenize and prepare for the model a sequence or a pair of sequences. + + + + This method is deprecated, `__call__` should be used instead. + + + + Args: + text (`str`, `List[str]` or `List[int]` (the latter only for not-fast tokenizers)): + The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the + `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` + method). + text_pair (`str`, `List[str]` or `List[int]`, *optional*): + Optional second sequence to be encoded. 
This can be a string, a list of strings (tokenized string using + the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` + method). + """ + + # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' + padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( + padding=padding, + truncation=truncation, + max_length=max_length, + pad_to_multiple_of=pad_to_multiple_of, + verbose=verbose, + **kwargs, + ) + + return self._encode_plus( + text=text, + text_pair=text_pair, + add_special_tokens=add_special_tokens, + padding_strategy=padding_strategy, + truncation_strategy=truncation_strategy, + max_length=max_length, + stride=stride, + is_split_into_words=is_split_into_words, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + + def _encode_plus( + self, + text: Union[TextInput, PreTokenizedInput, EncodedInput], + text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, + add_special_tokens: bool = True, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + max_length: Optional[int] = None, + stride: int = 0, + is_split_into_words: bool = False, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs, + ) -> BatchEncoding: + raise NotImplementedError + + @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) + def batch_encode_plus( + self, + batch_text_or_text_pairs: Union[ + List[TextInput], + List[TextInputPair], + List[PreTokenizedInput], + List[PreTokenizedInputPair], + List[EncodedInput], + List[EncodedInputPair], + ], + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = None, + max_length: Optional[int] = None, + stride: int = 0, + is_split_into_words: bool = False, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs, + ) -> BatchEncoding: + """ + Tokenize and prepare for the model a list of sequences or a list of pairs of sequences. + + + + This method is deprecated, `__call__` should be used instead. + + + + Args: + batch_text_or_text_pairs (`List[str]`, `List[Tuple[str, str]]`, `List[List[str]]`, `List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also `List[List[int]]`, `List[Tuple[List[int], List[int]]]`): + Batch of sequences or pair of sequences to be encoded. 
This can be a list of + string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see + details in `encode_plus`). + """ + + # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' + padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( + padding=padding, + truncation=truncation, + max_length=max_length, + pad_to_multiple_of=pad_to_multiple_of, + verbose=verbose, + **kwargs, + ) + + return self._batch_encode_plus( + batch_text_or_text_pairs=batch_text_or_text_pairs, + add_special_tokens=add_special_tokens, + padding_strategy=padding_strategy, + truncation_strategy=truncation_strategy, + max_length=max_length, + stride=stride, + is_split_into_words=is_split_into_words, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + + def _batch_encode_plus( + self, + batch_text_or_text_pairs: Union[ + List[TextInput], + List[TextInputPair], + List[PreTokenizedInput], + List[PreTokenizedInputPair], + List[EncodedInput], + List[EncodedInputPair], + ], + add_special_tokens: bool = True, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + max_length: Optional[int] = None, + stride: int = 0, + is_split_into_words: bool = False, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs, + ) -> BatchEncoding: + raise NotImplementedError + + def pad( + self, + encoded_inputs: Union[ + BatchEncoding, + List[BatchEncoding], + Dict[str, EncodedInput], + Dict[str, List[EncodedInput]], + List[Dict[str, EncodedInput]], + ], + padding: Union[bool, str, PaddingStrategy] = True, + max_length: Optional[int] = None, + pad_to_multiple_of: Optional[int] = None, + return_attention_mask: Optional[bool] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + verbose: bool = True, + ) -> BatchEncoding: + """ + Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length + in the batch. + + Padding side (left/right) padding token ids are defined at the tokenizer level (with `self.padding_side`, + `self.pad_token_id` and `self.pad_token_type_id`). + + Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the + text followed by a call to the `pad` method to get a padded encoding. + + + + If the `encoded_inputs` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the + result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of + PyTorch tensors, you will lose the specific device of your tensors however. 
+ + + + Args: + encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str, List[List[int]]` or `List[Dict[str, List[int]]]`): + Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of + tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str, + List[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader + collate function. + + Instead of `List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see + the note above for the return type. + padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): + Select a strategy to pad the returned sequences (according to the model's padding side and padding + index) among: + + - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single + sequence if provided). + - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum + acceptable input length for the model if that argument is not provided. + - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different + lengths). + max_length (`int`, *optional*): + Maximum length of the returned list and optionally padding length (see above). + pad_to_multiple_of (`int`, *optional*): + If set will pad the sequence to a multiple of the provided value. + + This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability + `>= 7.5` (Volta). + return_attention_mask (`bool`, *optional*): + Whether to return the attention mask. If left to the default, will return the attention mask according + to the specific tokenizer's default, defined by the `return_outputs` attribute. + + [What are attention masks?](../glossary#attention-mask) + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors instead of list of python integers. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return Numpy `np.ndarray` objects. + verbose (`bool`, *optional*, defaults to `True`): + Whether or not to print more information and warnings. + """ + if self.__class__.__name__.endswith("Fast"): + if not self.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False): + logger.warning_advice( + f"You're using a {self.__class__.__name__} tokenizer. Please note that with a fast tokenizer," + " using the `__call__` method is faster than using a method to encode the text followed by a call" + " to the `pad` method to get a padded encoding." 
+ ) + self.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True + + # If we have a list of dicts, let's convert it in a dict of lists + # We do this to allow using this method as a collate_fn function in PyTorch Dataloader + if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping): + encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()} + + # The model's main input name, usually `input_ids`, has be passed for padding + if self.model_input_names[0] not in encoded_inputs: + raise ValueError( + "You should supply an encoding or a list of encodings to this method " + f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}" + ) + + required_input = encoded_inputs[self.model_input_names[0]] + + if required_input is None or (isinstance(required_input, Sized) and len(required_input) == 0): + if return_attention_mask: + encoded_inputs["attention_mask"] = [] + return encoded_inputs + + # If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects + # and rebuild them afterwards if no return_tensors is specified + # Note that we lose the specific device the tensor may be on for PyTorch + + first_element = required_input[0] + if isinstance(first_element, (list, tuple)): + # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. + for item in required_input: + if len(item) != 0: + first_element = item[0] + break + # At this state, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do. + if not isinstance(first_element, (int, list, tuple)): + if is_tf_tensor(first_element): + return_tensors = "tf" if return_tensors is None else return_tensors + elif is_torch_tensor(first_element): + return_tensors = "pt" if return_tensors is None else return_tensors + elif isinstance(first_element, np.ndarray): + return_tensors = "np" if return_tensors is None else return_tensors + else: + raise ValueError( + f"type of {first_element} unknown: {type(first_element)}. " + "Should be one of a python, numpy, pytorch or tensorflow object." + ) + + for key, value in encoded_inputs.items(): + encoded_inputs[key] = to_py_obj(value) + + # Convert padding_strategy in PaddingStrategy + padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies( + padding=padding, max_length=max_length, verbose=verbose + ) + + required_input = encoded_inputs[self.model_input_names[0]] + if required_input and not isinstance(required_input[0], (list, tuple)): + encoded_inputs = self._pad( + encoded_inputs, + max_length=max_length, + padding_strategy=padding_strategy, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + ) + return BatchEncoding(encoded_inputs, tensor_type=return_tensors) + + batch_size = len(required_input) + assert all( + len(v) == batch_size for v in encoded_inputs.values() + ), "Some items in the output dictionary have a different batch size than others." 
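Editor's note (illustrative, not part of the patch): from this point on the input is known to be a batch. For a hypothetical tokenizer with `pad_token_id == 0`, `padding_side == "right"` and `"attention_mask"` among its model inputs, the remaining code behaves like:

    tokenizer.pad({"input_ids": [[1, 2, 3], [4, 5]]}, padding=True)
    # -> BatchEncoding({"input_ids": [[1, 2, 3], [4, 5, 0]], "attention_mask": [[1, 1, 1], [1, 1, 0]]})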
+ + if padding_strategy == PaddingStrategy.LONGEST: + max_length = max(len(inputs) for inputs in required_input) + padding_strategy = PaddingStrategy.MAX_LENGTH + + batch_outputs = {} + for i in range(batch_size): + inputs = {k: v[i] for k, v in encoded_inputs.items()} + outputs = self._pad( + inputs, + max_length=max_length, + padding_strategy=padding_strategy, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + ) + + for key, value in outputs.items(): + if key not in batch_outputs: + batch_outputs[key] = [] + batch_outputs[key].append(value) + + return BatchEncoding(batch_outputs, tensor_type=return_tensors) + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create the token type IDs corresponding to the sequences passed. [What are token type + IDs?](../glossary#token-type-ids) + + Should be overridden in a subclass if the model has a special way of building those. + + Args: + token_ids_0 (`List[int]`): The first tokenized sequence. + token_ids_1 (`List[int]`, *optional*): The second tokenized sequence. + + Returns: + `List[int]`: The token type ids. + """ + if token_ids_1 is None: + return len(token_ids_0) * [0] + return [0] * len(token_ids_0) + [1] * len(token_ids_1) + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. + + This implementation does not add special tokens and this method should be overridden in a subclass. + + Args: + token_ids_0 (`List[int]`): The first tokenized sequence. + token_ids_1 (`List[int]`, *optional*): The second tokenized sequence. + + Returns: + `List[int]`: The model input with special tokens. + """ + if token_ids_1 is None: + return token_ids_0 + return token_ids_0 + token_ids_1 + + @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) + def prepare_for_model( + self, + ids: List[int], + pair_ids: Optional[List[int]] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = None, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + prepend_batch_axis: bool = False, + **kwargs, + ) -> BatchEncoding: + """ + Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It + adds special tokens, truncates sequences if overflowing while taking into account the special tokens and + manages a moving window (with user defined stride) for overflowing tokens. Please Note, for *pair_ids* + different than `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return + overflowing tokens. Such a combination of arguments will raise an error. + + Args: + ids (`List[int]`): + Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and + `convert_tokens_to_ids` methods. 
+ pair_ids (`List[int]`, *optional*): + Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` + and `convert_tokens_to_ids` methods. + """ + + # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' + padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( + padding=padding, + truncation=truncation, + max_length=max_length, + pad_to_multiple_of=pad_to_multiple_of, + verbose=verbose, + **kwargs, + ) + + pair = bool(pair_ids is not None) + len_ids = len(ids) + len_pair_ids = len(pair_ids) if pair else 0 + + if return_token_type_ids and not add_special_tokens: + raise ValueError( + "Asking to return token_type_ids while setting add_special_tokens to False " + "results in an undefined behavior. Please set add_special_tokens to True or " + "set return_token_type_ids to None." + ) + + if ( + return_overflowing_tokens + and truncation_strategy == TruncationStrategy.LONGEST_FIRST + and pair_ids is not None + ): + raise ValueError( + "Not possible to return overflowing tokens for pair of sequences with the " + "`longest_first`. Please select another truncation strategy than `longest_first`, " + "for instance `only_second` or `only_first`." + ) + + # Load from model defaults + if return_token_type_ids is None: + return_token_type_ids = "token_type_ids" in self.model_input_names + if return_attention_mask is None: + return_attention_mask = "attention_mask" in self.model_input_names + + encoded_inputs = {} + + # Compute the total size of the returned encodings + total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) + + # Truncation: Handle max sequence length + overflowing_tokens = [] + if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: + ids, pair_ids, overflowing_tokens = self.truncate_sequences( + ids, + pair_ids=pair_ids, + num_tokens_to_remove=total_len - max_length, + truncation_strategy=truncation_strategy, + stride=stride, + ) + + if return_overflowing_tokens: + encoded_inputs["overflowing_tokens"] = overflowing_tokens + encoded_inputs["num_truncated_tokens"] = total_len - max_length + + # Add special tokens + if add_special_tokens: + sequence = self.build_inputs_with_special_tokens(ids, pair_ids) + token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) + else: + sequence = ids + pair_ids if pair else ids + token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) + + # Build output dictionary + encoded_inputs["input_ids"] = sequence + if return_token_type_ids: + encoded_inputs["token_type_ids"] = token_type_ids + if return_special_tokens_mask: + if add_special_tokens: + encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) + else: + encoded_inputs["special_tokens_mask"] = [0] * len(sequence) + + # Check lengths + self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) + + # Padding + if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: + encoded_inputs = self.pad( + encoded_inputs, + max_length=max_length, + padding=padding_strategy.value, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + ) + + if return_length: + encoded_inputs["length"] = len(encoded_inputs["input_ids"]) + + batch_outputs = BatchEncoding( + encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis + ) + + return batch_outputs 
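Editor's note: a short usage sketch showing how `prepare_for_model` composes with `tokenize` and `convert_tokens_to_ids` (illustrative only; the checkpoint name is a placeholder):

    from transformers import BertTokenizer

    tok = BertTokenizer.from_pretrained("bert-base-uncased")
    ids = tok.convert_tokens_to_ids(tok.tokenize("hello world"))
    enc = tok.prepare_for_model(ids, add_special_tokens=True)
    assert enc["input_ids"] == [tok.cls_token_id] + ids + [tok.sep_token_id]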
+ + def truncate_sequences( + self, + ids: List[int], + pair_ids: Optional[List[int]] = None, + num_tokens_to_remove: int = 0, + truncation_strategy: Union[str, TruncationStrategy] = "longest_first", + stride: int = 0, + ) -> Tuple[List[int], List[int], List[int]]: + """ + Truncates a sequence pair in-place following the strategy. + + Args: + ids (`List[int]`): + Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and + `convert_tokens_to_ids` methods. + pair_ids (`List[int]`, *optional*): + Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` + and `convert_tokens_to_ids` methods. + num_tokens_to_remove (`int`, *optional*, defaults to 0): + Number of tokens to remove using the truncation strategy. + truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): + The strategy to follow for truncation. Can be: + + - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the + maximum acceptable input length for the model if that argument is not provided. This will truncate + token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a + batch of pairs) is provided. + - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the + maximum acceptable input length for the model if that argument is not provided. This will only + truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the + maximum acceptable input length for the model if that argument is not provided. This will only + truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater + than the model maximum admissible input size). + stride (`int`, *optional*, defaults to 0): + If set to a positive number, the overflowing tokens returned will contain some tokens from the main + sequence returned. The value of this argument defines the number of additional tokens. + + Returns: + `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of + overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair + of sequences (or a batch of pairs) is provided. + """ + if num_tokens_to_remove <= 0: + return ids, pair_ids, [] + + if not isinstance(truncation_strategy, TruncationStrategy): + truncation_strategy = TruncationStrategy(truncation_strategy) + + overflowing_tokens = [] + if truncation_strategy == TruncationStrategy.ONLY_FIRST or ( + truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None + ): + if len(ids) > num_tokens_to_remove: + window_len = min(len(ids), stride + num_tokens_to_remove) + if self.truncation_side == "left": + overflowing_tokens = ids[:window_len] + ids = ids[num_tokens_to_remove:] + elif self.truncation_side == "right": + overflowing_tokens = ids[-window_len:] + ids = ids[:-num_tokens_to_remove] + else: + raise ValueError(f"invalid truncation strategy: {self.truncation_side}, use 'left' or 'right'.") + + else: + error_msg = ( + f"We need to remove {num_tokens_to_remove} to truncate the input " + f"but the first sequence has a length {len(ids)}. 
" + ) + if truncation_strategy == TruncationStrategy.ONLY_FIRST: + error_msg = ( + error_msg + "Please select another truncation strategy than " + f"{truncation_strategy}, for instance 'longest_first' or 'only_second'." + ) + logger.error(error_msg) + elif truncation_strategy == TruncationStrategy.LONGEST_FIRST: + logger.warning( + "Be aware, overflowing tokens are not returned for the setting you have chosen," + f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' " + "truncation strategy. So the returned list will always be empty even if some " + "tokens have been removed." + ) + len_pair_ids = len(pair_ids) if pair_ids is not None else 0 + len_ids = len(ids) + first_remove = min(abs(len_pair_ids - len_ids), num_tokens_to_remove) + second_remove = num_tokens_to_remove - first_remove + if len_ids > len_pair_ids: + ids_to_move = first_remove + second_remove // 2 + pair_ids_to_move = second_remove - second_remove // 2 + else: + ids_to_move = second_remove // 2 + pair_ids_to_move = first_remove + second_remove - (second_remove // 2) + + if self.truncation_side == "right": + ids = ids[:-ids_to_move] if ids_to_move > 0 else ids + pair_ids = pair_ids[:-pair_ids_to_move] if pair_ids is not None and pair_ids_to_move > 0 else pair_ids + elif self.truncation_side == "left": + ids = ids[ids_to_move:] + pair_ids = pair_ids[pair_ids_to_move:] if pair_ids is not None else None + else: + raise ValueError(f"invalid truncation strategy:{self.truncation_side}") + + elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None: + if len(pair_ids) > num_tokens_to_remove: + window_len = min(len(pair_ids), stride + num_tokens_to_remove) + if self.truncation_side == "right": + overflowing_tokens = pair_ids[-window_len:] + pair_ids = pair_ids[:-num_tokens_to_remove] + elif self.truncation_side == "left": + overflowing_tokens = pair_ids[:window_len] + pair_ids = pair_ids[num_tokens_to_remove:] + else: + raise ValueError(f"invalid truncation strategy:{self.truncation_side}") + else: + logger.error( + f"We need to remove {num_tokens_to_remove} to truncate the input " + f"but the second sequence has a length {len(pair_ids)}. " + f"Please select another truncation strategy than {truncation_strategy}, " + "for instance 'longest_first' or 'only_first'." + ) + + return (ids, pair_ids, overflowing_tokens) + + def _pad( + self, + encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], + max_length: Optional[int] = None, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + pad_to_multiple_of: Optional[int] = None, + return_attention_mask: Optional[bool] = None, + ) -> dict: + """ + Pad encoded inputs (on left/right and up to predefined length or max length in the batch) + + Args: + encoded_inputs: + Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). + max_length: maximum length of the returned list and optionally padding length (see below). + Will truncate by taking into account the special tokens. + padding_strategy: PaddingStrategy to use for padding. + + - PaddingStrategy.LONGEST Pad to the longest sequence in the batch + - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) + - PaddingStrategy.DO_NOT_PAD: Do not pad + The tokenizer padding sides are defined in self.padding_side: + + - 'left': pads on the left of the sequences + - 'right': pads on the right of the sequences + pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. 
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability + `>= 7.5` (Volta). + return_attention_mask: + (optional) Set to False to avoid returning attention mask (default: set to model specifics) + """ + # Load from model defaults + if return_attention_mask is None: + return_attention_mask = "attention_mask" in self.model_input_names + + required_input = encoded_inputs[self.model_input_names[0]] + + if padding_strategy == PaddingStrategy.LONGEST: + max_length = len(required_input) + + if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): + max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of + + needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length + + # Initialize attention mask if not present. + if return_attention_mask and "attention_mask" not in encoded_inputs: + encoded_inputs["attention_mask"] = [1] * len(required_input) + + if needs_to_be_padded: + difference = max_length - len(required_input) + + if self.padding_side == "right": + if return_attention_mask: + encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference + if "token_type_ids" in encoded_inputs: + encoded_inputs["token_type_ids"] = ( + encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference + ) + if "special_tokens_mask" in encoded_inputs: + encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference + encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference + elif self.padding_side == "left": + if return_attention_mask: + encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] + if "token_type_ids" in encoded_inputs: + encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ + "token_type_ids" + ] + if "special_tokens_mask" in encoded_inputs: + encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] + encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input + else: + raise ValueError(f"Invalid padding strategy:{self.padding_side}") + + return encoded_inputs + + def convert_tokens_to_string(self, tokens: List[str]) -> str: + """ + Converts a sequence of tokens in a single string. The most simple way to do it is `" ".join(tokens)` but we + often want to remove sub-word tokenization artifacts at the same time. + + Args: + tokens (`List[str]`): The token to join in a string. + + Returns: + `str`: The joined tokens. + """ + raise NotImplementedError + + def batch_decode( + self, + sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"], + skip_special_tokens: bool = False, + clean_up_tokenization_spaces: bool = None, + **kwargs, + ) -> List[str]: + """ + Convert a list of lists of token ids into a list of strings by calling decode. + + Args: + sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`): + List of tokenized input ids. Can be obtained using the `__call__` method. + skip_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not to remove special tokens in the decoding. + clean_up_tokenization_spaces (`bool`, *optional*): + Whether or not to clean up the tokenization spaces. If `None`, will default to + `self.clean_up_tokenization_spaces`. 
+ kwargs (additional keyword arguments, *optional*): + Will be passed to the underlying model specific decode method. + + Returns: + `List[str]`: The list of decoded sentences. + """ + return [ + self.decode( + seq, + skip_special_tokens=skip_special_tokens, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + **kwargs, + ) + for seq in sequences + ] + + def decode( + self, + token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], + skip_special_tokens: bool = False, + clean_up_tokenization_spaces: bool = None, + **kwargs, + ) -> str: + """ + Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special + tokens and clean up tokenization spaces. + + Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. + + Args: + token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`): + List of tokenized input ids. Can be obtained using the `__call__` method. + skip_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not to remove special tokens in the decoding. + clean_up_tokenization_spaces (`bool`, *optional*): + Whether or not to clean up the tokenization spaces. If `None`, will default to + `self.clean_up_tokenization_spaces`. + kwargs (additional keyword arguments, *optional*): + Will be passed to the underlying model specific decode method. + + Returns: + `str`: The decoded sentence. + """ + # Convert inputs to python lists + token_ids = to_py_obj(token_ids) + + return self._decode( + token_ids=token_ids, + skip_special_tokens=skip_special_tokens, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + **kwargs, + ) + + def _decode( + self, + token_ids: Union[int, List[int]], + skip_special_tokens: bool = False, + clean_up_tokenization_spaces: bool = None, + **kwargs, + ) -> str: + raise NotImplementedError + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. + + Args: + token_ids_0 (`List[int]`): + List of ids of the first sequence. + token_ids_1 (`List[int]`, *optional*): + List of ids of the second sequence. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + assert already_has_special_tokens and token_ids_1 is None, ( + "You cannot use ``already_has_special_tokens=False`` with this tokenizer. " + "Please use a slow (full python) tokenizer to activate this argument. " + "Or set `return_special_tokens_mask=True` when calling the encoding method " + "to get the special tokens mask in any tokenizer. " + ) + + all_special_ids = self.all_special_ids # cache the property + + special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0] + + return special_tokens_mask + + @staticmethod + def clean_up_tokenization(out_string: str) -> str: + """ + Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms. + + Args: + out_string (`str`): The text to clean up. + + Returns: + `str`: The cleaned-up string. 
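+
+ For example, applying the replacement rules implemented below to a whitespace-joined token string (the input here is only illustrative):
+
+ ```python
+ PreTrainedTokenizerBase.clean_up_tokenization("do n't worry , it 's fine .")
+ # -> "don't worry, it's fine."
+ ```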
+ """ + out_string = ( + out_string.replace(" .", ".") + .replace(" ?", "?") + .replace(" !", "!") + .replace(" ,", ",") + .replace(" ' ", "'") + .replace(" n't", "n't") + .replace(" 'm", "'m") + .replace(" 's", "'s") + .replace(" 've", "'ve") + .replace(" 're", "'re") + ) + return out_string + + def _eventual_warn_about_too_long_sequence(self, ids: List[int], max_length: Optional[int], verbose: bool): + """ + Depending on the input and internal state we might trigger a warning about a sequence that is too long for its + corresponding model + + Args: + ids (`List[str]`): The ids produced by the tokenization + max_length (`int`, *optional*): The max_length desired (does not trigger a warning if it is set) + verbose (`bool`): Whether or not to print more information and warnings. + + """ + if max_length is None and len(ids) > self.model_max_length and verbose: + if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False): + logger.warning( + "Token indices sequence length is longer than the specified maximum sequence length " + f"for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model " + "will result in indexing errors" + ) + self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True + + def _switch_to_input_mode(self): + """ + Private method to put the tokenizer in input mode (when it has different modes for input/outputs) + """ + pass + + def _switch_to_target_mode(self): + """ + Private method to put the tokenizer in target mode (when it has different modes for input/outputs) + """ + pass + + @contextmanager + def as_target_tokenizer(self): + """ + Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to + sequence-to-sequence models that need a slightly different processing for the labels. + """ + warnings.warn( + "`as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your " + "labels by using the argument `text_target` of the regular `__call__` method (either in the same call as " + "your input texts if you use the same keyword arguments, or in a separate call." + ) + self._switch_to_target_mode() + self._in_target_context_manager = True + yield + self._in_target_context_manager = False + self._switch_to_input_mode() + + @classmethod + def register_for_auto_class(cls, auto_class="AutoTokenizer"): + """ + Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the + library are already mapped with `AutoTokenizer`. + + + + This API is experimental and may have some slight breaking changes in the next releases. + + + + Args: + auto_class (`str` or `type`, *optional*, defaults to `"AutoTokenizer"`): + The auto class to register this new tokenizer with. + """ + if not isinstance(auto_class, str): + auto_class = auto_class.__name__ + + import transformers.models.auto as auto_module + + if not hasattr(auto_module, auto_class): + raise ValueError(f"{auto_class} is not a valid auto class.") + + cls._auto_class = auto_class + + def prepare_seq2seq_batch( + self, + src_texts: List[str], + tgt_texts: Optional[List[str]] = None, + max_length: Optional[int] = None, + max_target_length: Optional[int] = None, + padding: str = "longest", + return_tensors: str = None, + truncation: bool = True, + **kwargs, + ) -> BatchEncoding: + """ + Prepare model inputs for translation. For best performance, translate one sentence at a time. 
+ + Arguments: + src_texts (`List[str]`): + List of documents to summarize or source language texts. + tgt_texts (`list`, *optional*): + List of summaries or target language texts. + max_length (`int`, *optional*): + Controls the maximum length for encoder inputs (documents to summarize or source language texts) If + left unset or set to `None`, this will use the predefined model maximum length if a maximum length is + required by one of the truncation/padding parameters. If the model has no specific maximum input length + (like XLNet) truncation/padding to a maximum length will be deactivated. + max_target_length (`int`, *optional*): + Controls the maximum length of decoder inputs (target language texts or summaries) If left unset or set + to `None`, this will use the max_length value. + padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): + Activates and controls padding. Accepts the following values: + + - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single + sequence if provided). + - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum + acceptable input length for the model if that argument is not provided. + - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different + lengths). + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors instead of list of python integers. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return Numpy `np.ndarray` objects. + truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `True`): + Activates and controls truncation. Accepts the following values: + + - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or + to the maximum acceptable input length for the model if that argument is not provided. This will + truncate token by token, removing a token from the longest sequence in the pair if a pair of + sequences (or a batch of pairs) is provided. + - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the + maximum acceptable input length for the model if that argument is not provided. This will only + truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the + maximum acceptable input length for the model if that argument is not provided. This will only + truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths + greater than the model maximum admissible input size). + **kwargs: + Additional keyword arguments passed along to `self.__call__`. + + Return: + [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: + + - **input_ids** -- List of token ids to be fed to the encoder. + - **attention_mask** -- List of indices specifying which tokens should be attended to by the model. + - **labels** -- List of token ids for tgt_texts. + + The full set of keys `[input_ids, attention_mask, labels]`, will only be returned if tgt_texts is passed. + Otherwise, input_ids, attention_mask will be the only keys. 
+ """ + # docstyle-ignore + formatted_warning = """ +`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of HuggingFace Transformers. Use the regular +`__call__` method to prepare your inputs and targets. + +Here is a short example: + +model_inputs = tokenizer(src_texts, text_target=tgt_texts, ...) + +If you either need to use different keyword arguments for the source and target texts, you should do two calls like +this: + +model_inputs = tokenizer(src_texts, ...) +labels = tokenizer(text_target=tgt_texts, ...) +model_inputs["labels"] = labels["input_ids"] + +See the documentation of your specific tokenizer for more details on the specific arguments to the tokenizer of choice. +For a more complete example, see the implementation of `prepare_seq2seq_batch`. +""" + warnings.warn(formatted_warning, FutureWarning) + # mBART-specific kwargs that should be ignored by other models. + kwargs.pop("src_lang", None) + kwargs.pop("tgt_lang", None) + if max_length is None: + max_length = self.model_max_length + model_inputs = self( + src_texts, + add_special_tokens=True, + return_tensors=return_tensors, + max_length=max_length, + padding=padding, + truncation=truncation, + **kwargs, + ) + if tgt_texts is None: + return model_inputs + # Process tgt_texts + if max_target_length is None: + max_target_length = max_length + with self.as_target_tokenizer(): + labels = self( + tgt_texts, + add_special_tokens=True, + return_tensors=return_tensors, + padding=padding, + max_length=max_target_length, + truncation=truncation, + **kwargs, + ) + model_inputs["labels"] = labels["input_ids"] + return model_inputs + + +def get_fast_tokenizer_file(tokenization_files: List[str]) -> str: + """ + Get the tokenization file to use for this version of transformers. + + Args: + tokenization_files (`List[str]`): The list of available configuration files. + + Returns: + `str`: The tokenization file to use. + """ + tokenizer_files_map = {} + for file_name in tokenization_files: + search = _re_tokenizer_file.search(file_name) + if search is not None: + v = search.groups()[0] + tokenizer_files_map[v] = file_name + available_versions = sorted(tokenizer_files_map.keys()) + + # Defaults to FULL_TOKENIZER_FILE and then try to look at some newer versions. + tokenizer_file = FULL_TOKENIZER_FILE + transformers_version = version.parse(__version__) + for v in available_versions: + if version.parse(v) <= transformers_version: + tokenizer_file = tokenizer_files_map[v] + else: + # No point going further since the versions are sorted. + break + + return tokenizer_file + + +# To update the docstring, we need to copy the method, otherwise we change the original docstring. +PreTrainedTokenizerBase.push_to_hub = copy_func(PreTrainedTokenizerBase.push_to_hub) +if PreTrainedTokenizerBase.push_to_hub.__doc__ is not None: + PreTrainedTokenizerBase.push_to_hub.__doc__ = PreTrainedTokenizerBase.push_to_hub.__doc__.format( + object="tokenizer", object_class="AutoTokenizer", object_files="tokenizer files" + ) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py b/llmeval-env/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..b1daa1ec1be92fb52df0e62d11456632067abb03 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py @@ -0,0 +1,846 @@ +# coding=utf-8 +# Copyright 2020 The HuggingFace Inc. team. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + Tokenization classes for fast tokenizers (provided by HuggingFace's tokenizers library). For slow (python) tokenizers + see tokenization_utils.py +""" +import copy +import json +import os +from collections import defaultdict +from typing import Any, Dict, List, Optional, Tuple, Union + +import tokenizers.pre_tokenizers as pre_tokenizers_fast +from tokenizers import Encoding as EncodingFast +from tokenizers import Tokenizer as TokenizerFast +from tokenizers.decoders import Decoder as DecoderFast +from tokenizers.trainers import BpeTrainer, UnigramTrainer, WordLevelTrainer, WordPieceTrainer + +from .convert_slow_tokenizer import convert_slow_tokenizer +from .tokenization_utils import PreTrainedTokenizer +from .tokenization_utils_base import ( + INIT_TOKENIZER_DOCSTRING, + AddedToken, + BatchEncoding, + PreTokenizedInput, + PreTokenizedInputPair, + PreTrainedTokenizerBase, + SpecialTokensMixin, + TextInput, + TextInputPair, + TruncationStrategy, +) +from .utils import PaddingStrategy, add_end_docstrings, logging + + +logger = logging.get_logger(__name__) + +# Fast tokenizers (provided by HuggingFace tokenizer's library) can be saved in a single file +TOKENIZER_FILE = "tokenizer.json" +SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json" +TOKENIZER_CONFIG_FILE = "tokenizer_config.json" + +# Slow tokenizers have an additional added tokens files +ADDED_TOKENS_FILE = "added_tokens.json" + +INIT_TOKENIZER_DOCSTRING += """ + tokenizer_object ([`tokenizers.Tokenizer`]): + A [`tokenizers.Tokenizer`] object from 🤗 tokenizers to instantiate from. See [Using tokenizers from 🤗 + tokenizers](../fast_tokenizers) for more information. + tokenizer_file ([`str`]): + A path to a local JSON file representing a previously serialized [`tokenizers.Tokenizer`] object from 🤗 + tokenizers. +""" + +MODEL_TO_TRAINER_MAPPING = { + "BPE": BpeTrainer, + "Unigram": UnigramTrainer, + "WordLevel": WordLevelTrainer, + "WordPiece": WordPieceTrainer, +} + +VOCAB_FILES_NAMES = {"tokenizer_file": TOKENIZER_FILE} + + +@add_end_docstrings(INIT_TOKENIZER_DOCSTRING) +class PreTrainedTokenizerFast(PreTrainedTokenizerBase): + """ + Base class for all fast tokenizers (wrapping HuggingFace tokenizers library). + + Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`]. + + Handles all the shared methods for tokenization and special tokens, as well as methods for + downloading/caching/loading pretrained tokenizers, as well as adding tokens to the vocabulary. + + This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the + specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...). 
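+
+ For example (a minimal sketch; the checkpoint name is purely illustrative, any checkpoint that ships a fast tokenizer works):
+
+ ```python
+ from transformers import AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=True)
+ encoding = tokenizer("Hello world!")
+ encoding["input_ids"]
+ ```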
+ """ + + vocab_files_names = VOCAB_FILES_NAMES + slow_tokenizer_class: PreTrainedTokenizer = None + + def __init__(self, *args, **kwargs): + tokenizer_object = kwargs.pop("tokenizer_object", None) + slow_tokenizer = kwargs.pop("__slow_tokenizer", None) + fast_tokenizer_file = kwargs.pop("tokenizer_file", None) + from_slow = kwargs.pop("from_slow", False) + added_tokens_decoder = kwargs.pop("added_tokens_decoder", {}) + + if from_slow and slow_tokenizer is None and self.slow_tokenizer_class is None: + raise ValueError( + "Cannot instantiate this tokenizer from a slow version. If it's based on sentencepiece, make sure you " + "have sentencepiece installed." + ) + + if tokenizer_object is not None: + fast_tokenizer = copy.deepcopy(tokenizer_object) + elif fast_tokenizer_file is not None and not from_slow: + # We have a serialization from tokenizers which let us directly build the backend + fast_tokenizer = TokenizerFast.from_file(fast_tokenizer_file) + elif slow_tokenizer is not None: + # We need to convert a slow tokenizer to build the backend + fast_tokenizer = convert_slow_tokenizer(slow_tokenizer) + elif self.slow_tokenizer_class is not None: + # We need to create and convert a slow tokenizer to build the backend + slow_tokenizer = self.slow_tokenizer_class(*args, **kwargs) + fast_tokenizer = convert_slow_tokenizer(slow_tokenizer) + else: + raise ValueError( + "Couldn't instantiate the backend tokenizer from one of: \n" + "(1) a `tokenizers` library serialization file, \n" + "(2) a slow tokenizer instance to convert or \n" + "(3) an equivalent slow tokenizer class to instantiate and convert. \n" + "You need to have sentencepiece installed to convert a slow tokenizer to a fast one." + ) + + self._tokenizer = fast_tokenizer + + if slow_tokenizer is not None: + kwargs.update(slow_tokenizer.init_kwargs) + + self._decode_use_source_tokenizer = False + + _truncation = self._tokenizer.truncation + + if _truncation is not None: + self._tokenizer.enable_truncation(**_truncation) + kwargs.setdefault("max_length", _truncation["max_length"]) + kwargs.setdefault("truncation_side", _truncation["direction"]) + kwargs.setdefault("stride", _truncation["stride"]) + kwargs.setdefault("truncation_strategy", _truncation["strategy"]) + else: + self._tokenizer.no_truncation() + + _padding = self._tokenizer.padding + if _padding is not None: + self._tokenizer.enable_padding(**_padding) + kwargs.setdefault("pad_token", _padding["pad_token"]) + kwargs.setdefault("pad_token_type_id", _padding["pad_type_id"]) + kwargs.setdefault("padding_side", _padding["direction"]) + kwargs.setdefault("max_length", _padding["length"]) + kwargs.setdefault("pad_to_multiple_of", _padding["pad_to_multiple_of"]) + + # We call this after having initialized the backend tokenizer because we update it. + super().__init__(**kwargs) + + # The following logic will be replace with a single add_tokens once a fix is pushed to tokenizers + # allows converting a slow -> fast, non-legacy: if the `tokenizer.json` does not have all the added tokens + # uses the information stored in `added_tokens_decoder`. + # this is costly for fast tokenizers as we re-compute the regex again. 
But not all tokens are added tokens + tokens_to_add = [ + token + for index, token in sorted(added_tokens_decoder.items(), key=lambda x: x[0]) + if token not in self.added_tokens_decoder + ] + encoder = list(self.added_tokens_encoder.keys()) + [str(token) for token in tokens_to_add] + # if some of the special tokens are strings, we check if we don't already have a token + tokens_to_add += [ + token for token in self.all_special_tokens_extended if token not in encoder and token not in tokens_to_add + ] + if len(tokens_to_add) > 0: + # super hack: if a token.special is set, tokenizer ignores it for now so FIXME @ArthurZ + # Accumulate added tokens into batches of special/non-special tokens, because calling add_tokens() for + # individual tokens would repeatedly rebuild a trie, which can be slow. + is_last_special = None + tokens = [] + special_tokens = self.all_special_tokens + for token in tokens_to_add: + is_special = ( + (token.special or str(token) in special_tokens) + if isinstance(token, AddedToken) + else str(token) in special_tokens + ) + if is_last_special is None or is_last_special == is_special: + tokens.append(token) + else: + self._add_tokens(tokens, special_tokens=is_last_special) + tokens = [token] + is_last_special = is_special + if tokens: + self._add_tokens(tokens, special_tokens=is_last_special) + + @property + def is_fast(self) -> bool: + return True + + @property + def can_save_slow_tokenizer(self) -> bool: + """ + `bool`: Whether or not the slow tokenizer can be saved. Usually for sentencepiece based slow tokenizer, this + can only be `True` if the original `"sentencepiece.model"` was not deleted. + """ + return True + + @property + def vocab_size(self) -> int: + """ + `int`: Size of the base vocabulary (without the added tokens). + """ + return self._tokenizer.get_vocab_size(with_added_tokens=False) + + def get_vocab(self) -> Dict[str, int]: + return self._tokenizer.get_vocab(with_added_tokens=True) + + @property + def vocab(self) -> Dict[str, int]: + return self.get_vocab() + + @property + def added_tokens_encoder(self) -> Dict[str, int]: + """ + Returns the sorted mapping from string to index. The added tokens encoder is cached for performance + optimisation in `self._added_tokens_encoder` for the slow tokenizers. + """ + return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])} + + @property + def added_tokens_decoder(self) -> Dict[int, AddedToken]: + """ + Returns the added tokens in the vocabulary as a dictionary of index to AddedToken. + + Returns: + `Dict[str, int]`: The added tokens. + """ + return self._tokenizer.get_added_tokens_decoder() + + def get_added_vocab(self) -> Dict[str, int]: + """ + Returns the added tokens in the vocabulary as a dictionary of token to index. + + Returns: + `Dict[str, int]`: The added tokens. + """ + return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])} + + def __len__(self) -> int: + """ + Size of the full vocabulary with the added tokens. + """ + return self._tokenizer.get_vocab_size(with_added_tokens=True) + + @property + def backend_tokenizer(self) -> TokenizerFast: + """ + `tokenizers.implementations.BaseTokenizer`: The Rust tokenizer used as a backend. + """ + return self._tokenizer + + @property + def decoder(self) -> DecoderFast: + """ + `tokenizers.decoders.Decoder`: The Rust decoder for this tokenizer. 
+ """ + return self._tokenizer.decoder + + def _convert_encoding( + self, + encoding: EncodingFast, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + ) -> Tuple[Dict[str, Any], List[EncodingFast]]: + """ + Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict and a list + of encodings, take care of building a batch from overflowing tokens. + + Overflowing tokens are converted to additional examples (like batches) so the output values of the dict are + lists (overflows) of lists (tokens). + + Output shape: (overflows, sequence length) + """ + if return_token_type_ids is None: + return_token_type_ids = "token_type_ids" in self.model_input_names + if return_attention_mask is None: + return_attention_mask = "attention_mask" in self.model_input_names + + if return_overflowing_tokens and encoding.overflowing is not None: + encodings = [encoding] + encoding.overflowing + else: + encodings = [encoding] + + encoding_dict = defaultdict(list) + for e in encodings: + encoding_dict["input_ids"].append(e.ids) + + if return_token_type_ids: + encoding_dict["token_type_ids"].append(e.type_ids) + if return_attention_mask: + encoding_dict["attention_mask"].append(e.attention_mask) + if return_special_tokens_mask: + encoding_dict["special_tokens_mask"].append(e.special_tokens_mask) + if return_offsets_mapping: + encoding_dict["offset_mapping"].append(e.offsets) + if return_length: + encoding_dict["length"].append(len(e.ids)) + + return encoding_dict, encodings + + def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]: + """ + Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the + vocabulary. + + Args: + tokens (`str` or `List[str]`): One or several token(s) to convert to token id(s). + + Returns: + `int` or `List[int]`: The token id or list of token ids. + """ + if tokens is None: + return None + + if isinstance(tokens, str): + return self._convert_token_to_id_with_added_voc(tokens) + + return [self._convert_token_to_id_with_added_voc(token) for token in tokens] + + def _convert_token_to_id_with_added_voc(self, token: str) -> int: + index = self._tokenizer.token_to_id(token) + if index is None: + return self.unk_token_id + return index + + def _convert_id_to_token(self, index: int) -> Optional[str]: + return self._tokenizer.id_to_token(int(index)) + + def _add_tokens(self, new_tokens: List[Union[str, AddedToken]], special_tokens=False) -> int: + if special_tokens: + return self._tokenizer.add_special_tokens(new_tokens) + + return self._tokenizer.add_tokens(new_tokens) + + def num_special_tokens_to_add(self, pair: bool = False) -> int: + """ + Returns the number of added tokens when encoding a sequence with special tokens. + + + + This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put + this inside your training loop. + + + + Args: + pair (`bool`, *optional*, defaults to `False`): + Whether the number of added tokens should be computed in the case of a sequence pair or a single + sequence. + + Returns: + `int`: Number of special tokens added to sequences. 
+ """ + return self._tokenizer.num_special_tokens_to_add(pair) + + def convert_ids_to_tokens( + self, ids: Union[int, List[int]], skip_special_tokens: bool = False + ) -> Union[str, List[str]]: + """ + Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and + added tokens. + + Args: + ids (`int` or `List[int]`): + The token id (or token ids) to convert to tokens. + skip_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not to remove special tokens in the decoding. + + Returns: + `str` or `List[str]`: The decoded token(s). + """ + if isinstance(ids, int): + return self._tokenizer.id_to_token(ids) + tokens = [] + for index in ids: + index = int(index) + if skip_special_tokens and index in self.all_special_ids: + continue + tokens.append(self._tokenizer.id_to_token(index)) + return tokens + + def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]: + return self.encode_plus(text=text, text_pair=pair, add_special_tokens=add_special_tokens, **kwargs).tokens() + + def set_truncation_and_padding( + self, + padding_strategy: PaddingStrategy, + truncation_strategy: TruncationStrategy, + max_length: int, + stride: int, + pad_to_multiple_of: Optional[int], + ): + """ + Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers + library) and restore the tokenizer settings afterwards. + + The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer set a + padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed + section. + + Args: + padding_strategy ([`~utils.PaddingStrategy`]): + The kind of padding that will be applied to the input + truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`]): + The kind of truncation that will be applied to the input + max_length (`int`): + The maximum size of a sequence. + stride (`int`): + The stride to use when handling overflow. + pad_to_multiple_of (`int`, *optional*): + If set will pad the sequence to a multiple of the provided value. This is especially useful to enable + the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). + """ + _truncation = self._tokenizer.truncation + _padding = self._tokenizer.padding + # Set truncation and padding on the backend tokenizer + if truncation_strategy == TruncationStrategy.DO_NOT_TRUNCATE: + if _truncation is not None: + self._tokenizer.no_truncation() + else: + target = { + "max_length": max_length, + "stride": stride, + "strategy": truncation_strategy.value, + "direction": self.truncation_side, + } + + # _truncation might contain more keys that the target `transformers` + # supports. Use only the target keys to trigger `enable_truncation`. + # This should enable this code to works on various `tokenizers` + # targets. 
+ if _truncation is None: + current = None + else: + current = {k: _truncation.get(k, None) for k in target} + + if current != target: + self._tokenizer.enable_truncation(**target) + + if padding_strategy == PaddingStrategy.DO_NOT_PAD: + if _padding is not None: + self._tokenizer.no_padding() + else: + length = max_length if padding_strategy == PaddingStrategy.MAX_LENGTH else None + target = { + "length": length, + "direction": self.padding_side, + "pad_id": self.pad_token_id, + "pad_token": self.pad_token, + "pad_type_id": self.pad_token_type_id, + "pad_to_multiple_of": pad_to_multiple_of, + } + if _padding != target: + self._tokenizer.enable_padding(**target) + + def _batch_encode_plus( + self, + batch_text_or_text_pairs: Union[ + List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair] + ], + add_special_tokens: bool = True, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + max_length: Optional[int] = None, + stride: int = 0, + is_split_into_words: bool = False, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[str] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + ) -> BatchEncoding: + if not isinstance(batch_text_or_text_pairs, (tuple, list)): + raise TypeError( + f"batch_text_or_text_pairs has to be a list or a tuple (got {type(batch_text_or_text_pairs)})" + ) + + # Set the truncation and padding strategy and restore the initial configuration + self.set_truncation_and_padding( + padding_strategy=padding_strategy, + truncation_strategy=truncation_strategy, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + ) + + encodings = self._tokenizer.encode_batch( + batch_text_or_text_pairs, + add_special_tokens=add_special_tokens, + is_pretokenized=is_split_into_words, + ) + + # Convert encoding to dict + # `Tokens` has type: Tuple[ + # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]], + # List[EncodingFast] + # ] + # with nested dimensions corresponding to batch, overflows, sequence length + tokens_and_encodings = [ + self._convert_encoding( + encoding=encoding, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + ) + for encoding in encodings + ] + + # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension + # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length) + # (we say ~ because the number of overflow varies with the example in the batch) + # + # To match each overflowing sample with the original sample in the batch + # we add an overflow_to_sample_mapping array (see below) + sanitized_tokens = {} + for key in tokens_and_encodings[0][0].keys(): + stack = [e for item, _ in tokens_and_encodings for e in item[key]] + sanitized_tokens[key] = stack + sanitized_encodings = [e for _, item in tokens_and_encodings for e in item] + + # If returning overflowing tokens, we need to return a mapping + # from the batch idx to the 
original sample + if return_overflowing_tokens: + overflow_to_sample_mapping = [] + for i, (toks, _) in enumerate(tokens_and_encodings): + overflow_to_sample_mapping += [i] * len(toks["input_ids"]) + sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping + + for input_ids in sanitized_tokens["input_ids"]: + self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose) + return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors) + + def _encode_plus( + self, + text: Union[TextInput, PreTokenizedInput], + text_pair: Optional[Union[TextInput, PreTokenizedInput]] = None, + add_special_tokens: bool = True, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + max_length: Optional[int] = None, + stride: int = 0, + is_split_into_words: bool = False, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[bool] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs, + ) -> BatchEncoding: + batched_input = [(text, text_pair)] if text_pair else [text] + batched_output = self._batch_encode_plus( + batched_input, + is_split_into_words=is_split_into_words, + add_special_tokens=add_special_tokens, + padding_strategy=padding_strategy, + truncation_strategy=truncation_strategy, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + + # Return tensor is None, then we can remove the leading batch axis + # Overflowing tokens are returned as a batch of output so we keep them in this case + if return_tensors is None and not return_overflowing_tokens: + batched_output = BatchEncoding( + { + key: value[0] if len(value) > 0 and isinstance(value[0], list) else value + for key, value in batched_output.items() + }, + batched_output.encodings, + ) + + self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose) + + return batched_output + + def convert_tokens_to_string(self, tokens: List[str]) -> str: + return self.backend_tokenizer.decoder.decode(tokens) + + def _decode( + self, + token_ids: Union[int, List[int]], + skip_special_tokens: bool = False, + clean_up_tokenization_spaces: bool = None, + **kwargs, + ) -> str: + self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False) + + if isinstance(token_ids, int): + token_ids = [token_ids] + text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens) + + clean_up_tokenization_spaces = ( + clean_up_tokenization_spaces + if clean_up_tokenization_spaces is not None + else self.clean_up_tokenization_spaces + ) + if clean_up_tokenization_spaces: + clean_text = self.clean_up_tokenization(text) + return clean_text + else: + return text + + def _save_pretrained( + self, + save_directory: Union[str, os.PathLike], + file_names: Tuple[str], + legacy_format: Optional[bool] = None, + filename_prefix: Optional[str] = None, + 
) -> Tuple[str]: + """ + Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens as well as in a unique JSON + file containing {config + vocab + added-tokens}. + """ + save_directory = str(save_directory) + + if self.slow_tokenizer_class is None and legacy_format is True: + raise ValueError( + "Your tokenizer does not have a legacy version defined and therefore cannot register this version. You" + " might consider leaving the legacy_format at `None` or setting it to `False`." + ) + + save_slow = ( + (legacy_format is None or legacy_format is True) + and self.slow_tokenizer_class is not None + and self.can_save_slow_tokenizer + ) + save_fast = legacy_format is None or legacy_format is False + + if save_slow: + added_tokens_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE + ) + # make sure to be foward compatible + added_vocab = {tok: index for tok, index in self.added_tokens_encoder.items() if index >= self.vocab_size} + if added_vocab: + with open(added_tokens_file, "w", encoding="utf-8") as f: + out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n" + f.write(out_str) + + vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix) + file_names = file_names + vocab_files + (added_tokens_file,) + + if save_fast: + tokenizer_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_FILE + ) + self.backend_tokenizer.save(tokenizer_file) + file_names = file_names + (tokenizer_file,) + + return file_names + + def train_new_from_iterator( + self, + text_iterator, + vocab_size, + length=None, + new_special_tokens=None, + special_tokens_map=None, + **kwargs, + ): + """ + Trains a tokenizer on a new corpus with the same defaults (in terms of special tokens or tokenization pipeline) + as the current one. + + Args: + text_iterator (generator of `List[str]`): + The training corpus. Should be a generator of batches of texts, for instance a list of lists of texts + if you have everything in memory. + vocab_size (`int`): + The size of the vocabulary you want for your tokenizer. + length (`int`, *optional*): + The total number of sequences in the iterator. This is used to provide meaningful progress tracking + new_special_tokens (list of `str` or `AddedToken`, *optional*): + A list of new special tokens to add to the tokenizer you are training. + special_tokens_map (`Dict[str, str]`, *optional*): + If you want to rename some of the special tokens this tokenizer uses, pass along a mapping old special + token name to new special token name in this argument. + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the trainer from the 🤗 Tokenizers library. + + Returns: + [`PreTrainedTokenizerFast`]: A new tokenizer of the same type as the original one, trained on + `text_iterator`. 
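+
+ Example (a minimal sketch; `training_corpus` and `tokenizer` are placeholder names for an iterator of
+ text batches and an existing fast tokenizer):
+
+ ```python
+ def training_corpus():
+     yield ["first batch of raw text", "another example sentence"]
+     yield ["one more batch of text"]
+
+
+ new_tokenizer = tokenizer.train_new_from_iterator(training_corpus(), vocab_size=5000)
+ ```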
+ + """ + tokenizer_json = json.loads(self._tokenizer.to_str()) + # Remove added tokens for now (uses IDs of tokens) + added_tokens = tokenizer_json.pop("added_tokens") + # Remove post processor for now (uses IDs of tokens) + post_processor = tokenizer_json.pop("post_processor") + + unk_token = None + # Remove vocab + if tokenizer_json["model"]["type"] == "BPE": + tokenizer_json["model"]["vocab"] = {} + tokenizer_json["model"]["merges"] = [] + elif tokenizer_json["model"]["type"] == "Unigram": + if tokenizer_json["model"]["unk_id"] is not None: + unk_id = tokenizer_json["model"]["unk_id"] + unk_token = tokenizer_json["model"]["vocab"][unk_id][0] + if special_tokens_map is not None and unk_token in special_tokens_map: + unk_token = special_tokens_map[unk_token] + tokenizer_json["model"]["unk_id"] = 0 + tokenizer_json["model"]["vocab"] = [[unk_token, 0.0]] + elif tokenizer_json["model"]["type"] in ["WordLevel", "WordPiece"]: + tokenizer_json["model"]["vocab"] = {} + else: + raise ValueError( + f"This method does not support this type of tokenizer (found {tokenizer_json['model']['type']}) " + "only BPE, Unigram, WordLevel and WordPiece." + ) + + if ( + special_tokens_map is not None + and "unk_token" in tokenizer_json["model"] + and tokenizer_json["model"]["unk_token"] in special_tokens_map + ): + tokenizer_json["model"]["unk_token"] = special_tokens_map[tokenizer_json["model"]["unk_token"]] + + tokenizer = TokenizerFast.from_str(json.dumps(tokenizer_json)) + + # Get the special tokens from the current tokenizer if none are specified. + special_tokens = [] + for added_token in added_tokens: + special = added_token.pop("special", None) + _ = added_token.pop("id", None) + if tokenizer_json["model"]["type"] != "Unigram" and not special: + continue + if special_tokens_map is not None and added_token["content"] in special_tokens_map: + added_token["content"] = special_tokens_map[added_token["content"]] + special_tokens.append(AddedToken(**added_token)) + + if new_special_tokens is not None: + special_tokens.extend(new_special_tokens) + + # Trainer needs to know the end of word / continuing subword thingies in BPE + if ( + tokenizer_json["model"]["type"] == "BPE" + and "continuing_subword_prefix" not in kwargs + and tokenizer_json["model"]["continuing_subword_prefix"] is not None + ): + kwargs["continuing_subword_prefix"] = tokenizer_json["model"]["continuing_subword_prefix"] + if ( + tokenizer_json["model"]["type"] == "BPE" + and "end_of_word_suffix" not in kwargs + and tokenizer_json["model"]["end_of_word_suffix"] is not None + ): + kwargs["end_of_word_suffix"] = tokenizer_json["model"]["end_of_word_suffix"] + if tokenizer_json["model"]["type"] == "Unigram" and unk_token is not None: + kwargs["unk_token"] = unk_token + if tokenizer_json["pre_tokenizer"] is not None and tokenizer_json["pre_tokenizer"]["type"] == "ByteLevel": + kwargs["initial_alphabet"] = pre_tokenizers_fast.ByteLevel.alphabet() + + trainer_class = MODEL_TO_TRAINER_MAPPING[tokenizer_json["model"]["type"]] + trainer = trainer_class(vocab_size=vocab_size, special_tokens=special_tokens, **kwargs) + tokenizer.train_from_iterator(text_iterator, length=length, trainer=trainer) + + if post_processor is not None: + trained_tokenizer_json = json.loads(tokenizer.to_str()) + # Almost done, we just have to adjust the token IDs in the post processor + if "special_tokens" in post_processor: + for key in post_processor["special_tokens"]: + tokens = post_processor["special_tokens"][key]["tokens"] + if special_tokens_map is not None: + tokens = 
[special_tokens_map.get(token, token) for token in tokens] + post_processor["special_tokens"][key]["tokens"] = tokens + post_processor["special_tokens"][key]["ids"] = [tokenizer.token_to_id(token) for token in tokens] + + for special_token in ["cls", "sep"]: + if special_token in post_processor: + token, _ = post_processor[special_token] + if special_tokens_map is not None and token in special_tokens_map: + token = special_tokens_map[token] + token_id = tokenizer.token_to_id(token) + post_processor[special_token] = [token, token_id] + + trained_tokenizer_json["post_processor"] = post_processor + tokenizer = TokenizerFast.from_str(json.dumps(trained_tokenizer_json)) + + kwargs = self.init_kwargs.copy() + # Map pad/cls/mask token at the Transformers level + special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy() + special_tokens_list.remove("additional_special_tokens") + for token in special_tokens_list: + # Get the private one to avoid unnecessary warnings. + if getattr(self, f"_{token}") is not None: + special_token = getattr(self, token) + if special_tokens_map is not None and special_token in special_tokens_map: + special_token = special_tokens_map[special_token] + + special_token_full = getattr(self, f"_{token}") + if isinstance(special_token_full, AddedToken): + # Create an added token with the same parameters except the content + kwargs[token] = AddedToken( + special_token, + single_word=special_token_full.single_word, + lstrip=special_token_full.lstrip, + rstrip=special_token_full.rstrip, + normalized=special_token_full.normalized, + special=True, + ) + else: + kwargs[token] = special_token + + additional_special_tokens = self.additional_special_tokens + if new_special_tokens is not None: + additional_special_tokens.extend(new_special_tokens) + if len(additional_special_tokens) > 0: + kwargs["additional_special_tokens"] = additional_special_tokens + + return self.__class__(tokenizer_object=tokenizer, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/trainer_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/trainer_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5c57ce0696f6348caf8c56a58fec991d93915b8c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/trainer_utils.py @@ -0,0 +1,847 @@ +# coding=utf-8 +# Copyright 2020-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +PyTorch-independent utilities for the Trainer class. 
+""" + +import copy +import functools +import gc +import inspect +import os +import random +import re +import threading +import time +from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union + +import numpy as np + +from .utils import ( + ExplicitEnum, + is_psutil_available, + is_tf_available, + is_torch_available, + is_torch_cuda_available, + is_torch_mlu_available, + is_torch_mps_available, + is_torch_npu_available, + is_torch_xla_available, + is_torch_xpu_available, + requires_backends, +) + + +if is_torch_available(): + import torch + + +def seed_worker(_): + """ + Helper function to set worker seed during Dataloader initialization. + """ + worker_seed = torch.initial_seed() % 2**32 + set_seed(worker_seed) + + +def enable_full_determinism(seed: int, warn_only: bool = False): + """ + Helper function for reproducible behavior during distributed training. See + - https://pytorch.org/docs/stable/notes/randomness.html for pytorch + - https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism for tensorflow + """ + # set seed first + set_seed(seed) + + if is_torch_available(): + # Enable PyTorch deterministic mode. This potentially requires either the environment + # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set, + # depending on the CUDA version, so we set them both here + os.environ["CUDA_LAUNCH_BLOCKING"] = "1" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" + torch.use_deterministic_algorithms(True, warn_only=warn_only) + + # Enable CUDNN deterministic mode + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + if is_tf_available(): + import tensorflow as tf + + tf.config.experimental.enable_op_determinism() + + +def set_seed(seed: int, deterministic: bool = False): + """ + Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed). + + Args: + seed (`int`): + The seed to set. + deterministic (`bool`, *optional*, defaults to `False`): + Whether to use deterministic algorithms where available. Can slow down training. + """ + random.seed(seed) + np.random.seed(seed) + if is_torch_available(): + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + # ^^ safe to call this function even if cuda is not available + if deterministic: + torch.use_deterministic_algorithms(True) + if is_torch_mlu_available(): + torch.mlu.manual_seed_all(seed) + if is_torch_npu_available(): + torch.npu.manual_seed_all(seed) + if is_torch_xpu_available(): + torch.xpu.manual_seed_all(seed) + if is_tf_available(): + import tensorflow as tf + + tf.random.set_seed(seed) + if deterministic: + tf.config.experimental.enable_op_determinism() + + +def neftune_post_forward_hook(module, input, output): + """ + Implements the NEFTune forward pass for the model using forward hooks. Note this works only for torch.nn.Embedding + layers. This method is slightly adapted from the original source code that can be found here: + https://github.com/neelsjain/NEFTune Simply add it to your model as follows: + ```python + model = ... + model.embed_tokens.neftune_noise_alpha = 0.1 + model.embed_tokens.register_forward_hook(neftune_post_forward_hook) + ``` + Args: + module (`torch.nn.Module`): + The embedding module where the hook is attached. Note that you need to set `module.neftune_noise_alpha` to + the desired noise alpha value. + input (`torch.Tensor`): + The input tensor to the model. + output (`torch.Tensor`): + The output tensor of the model (i.e. the embeddings). 
+ """ + if module.training: + dims = torch.tensor(output.size(1) * output.size(2)) + mag_norm = module.neftune_noise_alpha / torch.sqrt(dims) + output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm) + return output + + +class EvalPrediction: + """ + Evaluation output (always contains labels), to be used to compute metrics. + + Parameters: + predictions (`np.ndarray`): Predictions of the model. + label_ids (`np.ndarray`): Targets to be matched. + inputs (`np.ndarray`, *optional*): + """ + + def __init__( + self, + predictions: Union[np.ndarray, Tuple[np.ndarray]], + label_ids: Union[np.ndarray, Tuple[np.ndarray]], + inputs: Optional[Union[np.ndarray, Tuple[np.ndarray]]] = None, + ): + self.predictions = predictions + self.label_ids = label_ids + self.inputs = inputs + + def __iter__(self): + if self.inputs is not None: + return iter((self.predictions, self.label_ids, self.inputs)) + else: + return iter((self.predictions, self.label_ids)) + + def __getitem__(self, idx): + if idx < 0 or idx > 2: + raise IndexError("tuple index out of range") + if idx == 2 and self.inputs is None: + raise IndexError("tuple index out of range") + if idx == 0: + return self.predictions + elif idx == 1: + return self.label_ids + elif idx == 2: + return self.inputs + + +class EvalLoopOutput(NamedTuple): + predictions: Union[np.ndarray, Tuple[np.ndarray]] + label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]] + metrics: Optional[Dict[str, float]] + num_samples: Optional[int] + + +class PredictionOutput(NamedTuple): + predictions: Union[np.ndarray, Tuple[np.ndarray]] + label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]] + metrics: Optional[Dict[str, float]] + + +class TrainOutput(NamedTuple): + global_step: int + training_loss: float + metrics: Dict[str, float] + + +PREFIX_CHECKPOINT_DIR = "checkpoint" +_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$") + + +def get_last_checkpoint(folder): + content = os.listdir(folder) + checkpoints = [ + path + for path in content + if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path)) + ] + if len(checkpoints) == 0: + return + return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) + + +class IntervalStrategy(ExplicitEnum): + NO = "no" + STEPS = "steps" + EPOCH = "epoch" + + +class EvaluationStrategy(ExplicitEnum): + NO = "no" + STEPS = "steps" + EPOCH = "epoch" + + +class HubStrategy(ExplicitEnum): + END = "end" + EVERY_SAVE = "every_save" + CHECKPOINT = "checkpoint" + ALL_CHECKPOINTS = "all_checkpoints" + + +class BestRun(NamedTuple): + """ + The best run found by a hyperparameter search (see [`~Trainer.hyperparameter_search`]). + + Parameters: + run_id (`str`): + The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending + with run-{run_id}). + objective (`float`): + The objective that was obtained for this run. + hyperparameters (`Dict[str, Any]`): + The hyperparameters picked to get this run. + run_summary (`Optional[Any]`): + A summary of tuning experiments. `ray.tune.ExperimentAnalysis` object for Ray backend. + """ + + run_id: str + objective: Union[float, List[float]] + hyperparameters: Dict[str, Any] + run_summary: Optional[Any] = None + + +def default_compute_objective(metrics: Dict[str, float]) -> float: + """ + The default objective to maximize/minimize when doing an hyperparameter search. 
It is the evaluation loss if no + metrics are provided to the [`Trainer`], the sum of all metrics otherwise. + + Args: + metrics (`Dict[str, float]`): The metrics returned by the evaluate method. + + Return: + `float`: The objective to minimize or maximize + """ + metrics = copy.deepcopy(metrics) + loss = metrics.pop("eval_loss", None) + _ = metrics.pop("epoch", None) + # Remove speed metrics + speed_metrics = [ + m + for m in metrics.keys() + if m.endswith("_runtime") or m.endswith("_per_second") or m.endswith("_compilation_time") + ] + for sm in speed_metrics: + _ = metrics.pop(sm, None) + return loss if len(metrics) == 0 else sum(metrics.values()) + + +def default_hp_space_optuna(trial) -> Dict[str, float]: + from .integrations import is_optuna_available + + assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`" + return { + "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True), + "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5), + "seed": trial.suggest_int("seed", 1, 40), + "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]), + } + + +def default_hp_space_ray(trial) -> Dict[str, float]: + from .integrations import is_ray_tune_available + + assert is_ray_tune_available(), "This function needs ray installed: `pip install ray[tune]`" + from ray import tune + + return { + "learning_rate": tune.loguniform(1e-6, 1e-4), + "num_train_epochs": tune.choice(list(range(1, 6))), + "seed": tune.uniform(1, 40), + "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]), + } + + +def default_hp_space_sigopt(trial): + return [ + {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformamtion": "log"}, + {"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"}, + {"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"}, + { + "categorical_values": ["4", "8", "16", "32", "64"], + "name": "per_device_train_batch_size", + "type": "categorical", + }, + ] + + +def default_hp_space_wandb(trial) -> Dict[str, float]: + from .integrations import is_wandb_available + + if not is_wandb_available(): + raise ImportError("This function needs wandb installed: `pip install wandb`") + + return { + "method": "random", + "metric": {"name": "objective", "goal": "minimize"}, + "parameters": { + "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4}, + "num_train_epochs": {"distribution": "int_uniform", "min": 1, "max": 6}, + "seed": {"distribution": "int_uniform", "min": 1, "max": 40}, + "per_device_train_batch_size": {"values": [4, 8, 16, 32, 64]}, + }, + } + + +class HPSearchBackend(ExplicitEnum): + OPTUNA = "optuna" + RAY = "ray" + SIGOPT = "sigopt" + WANDB = "wandb" + + +def is_main_process(local_rank): + """ + Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on + `local_rank`. + """ + if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + return xm.get_ordinal() == 0 + return local_rank in [-1, 0] + + +def total_processes_number(local_rank): + """ + Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs. 
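+
+ Falls back to `1` when no TPU is available and `local_rank == -1`.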
+ """ + if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + return xm.xrt_world_size() + elif local_rank != -1 and is_torch_available(): + import torch + + return torch.distributed.get_world_size() + return 1 + + +def speed_metrics(split, start_time, num_samples=None, num_steps=None, num_tokens=None): + """ + Measure and return speed performance metrics. + + This function requires a time snapshot `start_time` before the operation to be measured starts and this function + should be run immediately after the operation to be measured has completed. + + Args: + - split: name to prefix metric (like train, eval, test...) + - start_time: operation start time + - num_samples: number of samples processed + - num_steps: number of steps processed + - num_tokens: number of tokens processed + """ + runtime = time.time() - start_time + result = {f"{split}_runtime": round(runtime, 4)} + if runtime == 0: + return result + if num_samples is not None: + samples_per_second = num_samples / runtime + result[f"{split}_samples_per_second"] = round(samples_per_second, 3) + if num_steps is not None: + steps_per_second = num_steps / runtime + result[f"{split}_steps_per_second"] = round(steps_per_second, 3) + if num_tokens is not None: + tokens_per_second = num_tokens / runtime + result[f"{split}_tokens_per_second"] = round(tokens_per_second, 3) + return result + + +class SchedulerType(ExplicitEnum): + LINEAR = "linear" + COSINE = "cosine" + COSINE_WITH_RESTARTS = "cosine_with_restarts" + POLYNOMIAL = "polynomial" + CONSTANT = "constant" + CONSTANT_WITH_WARMUP = "constant_with_warmup" + INVERSE_SQRT = "inverse_sqrt" + REDUCE_ON_PLATEAU = "reduce_lr_on_plateau" + COSINE_WITH_MIN_LR = "cosine_with_min_lr" + + +class TrainerMemoryTracker: + """ + A helper class that tracks cpu and gpu memory. + + This class will silently skip unless `psutil` is available. Install with `pip install psutil`. + + When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage. + + Example : + + ```python + self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics) + self._memory_tracker.start() + # code ... + metrics = {"train_runtime": 10.5} + self._memory_tracker.stop_and_update_metrics(metrics) + ``` + + At the moment GPU tracking is only for `pytorch`, but can be extended to support `tensorflow`. + + To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`]. 
+ """ + + # map trainer methods to metrics prefix + stages = { + "__init__": "init", + "train": "train", + "_inner_training_loop": "train", + "evaluate": "eval", + "predict": "test", + } + + def __init__(self, skip_memory_metrics=False): + self.skip_memory_metrics = skip_memory_metrics + + if not is_psutil_available(): + # soft dependency on psutil + self.skip_memory_metrics = True + + if self.skip_memory_metrics: + return + + import psutil # noqa + + if is_torch_cuda_available() or is_torch_mlu_available(): + import torch + + self.torch = torch + self.gpu = {} + elif is_torch_mps_available(): + import torch + + self.torch = torch + self.gpu = {} + elif is_torch_xpu_available(): + import torch + + self.torch = torch + self.gpu = {} + elif is_torch_npu_available(): + import torch + + self.torch = torch + self.gpu = {} + else: + self.torch = None + + self.process = psutil.Process() + + self.cur_stage = None + self.cpu = {} + self.init_reported = False + + def derive_stage(self): + """derives the stage/caller name automatically""" + caller = inspect.currentframe().f_back.f_back.f_code.co_name + if caller in self.stages: + return self.stages[caller] + else: + raise ValueError( + f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}" + ) + + def cpu_mem_used(self): + """get resident set size memory for the current process""" + return self.process.memory_info().rss + + def peak_monitor_func(self): + self.cpu_mem_used_peak = -1 + + while True: + self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak) + + # can't sleep or will not catch the peak right (this comment is here on purpose) + # time.sleep(0.001) # 1msec + + if not self.peak_monitoring: + break + + def start(self): + """start tracking for the caller's stage""" + if self.skip_memory_metrics: + return + + stage = self.derive_stage() + # deal with nested calls of eval during train - simply ignore those + if self.cur_stage is not None and self.cur_stage != stage: + return + + self.cur_stage = stage + + gc.collect() + + if self.torch is not None: + if torch.cuda.is_available(): + self.torch.cuda.reset_peak_memory_stats() + self.torch.cuda.empty_cache() + elif is_torch_mlu_available(): + self.torch.mlu.reset_peak_memory_stats() + self.torch.mlu.empty_cache() + elif is_torch_xpu_available(): + self.torch.xpu.reset_peak_memory_stats() + self.torch.xpu.empty_cache() + elif is_torch_npu_available(): + self.torch.npu.reset_peak_memory_stats() + self.torch.npu.empty_cache() + elif is_torch_mps_available(): + self.torch.mps.empty_cache() + + # gpu + if self.torch is not None: + if torch.cuda.is_available(): + self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated() + elif is_torch_mlu_available(): + self.gpu_mem_used_at_start = self.torch.mlu.memory_allocated() + elif is_torch_xpu_available(): + self.gpu_mem_used_at_start = self.torch.xpu.memory_allocated() + elif is_torch_npu_available(): + self.gpu_mem_used_at_start = self.torch.npu.memory_allocated() + elif is_torch_mps_available(): + self.gpu_mem_used_at_start = self.torch.mps.current_allocated_memory() + + # cpu + self.cpu_mem_used_at_start = self.cpu_mem_used() + + self.peak_monitoring = True + peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) + peak_monitor_thread.daemon = True + peak_monitor_thread.start() + + def stop(self, stage): + """stop tracking for the passed stage""" + + # deal with nested calls of eval during train - simply ignore those + if self.cur_stage is not None and self.cur_stage != stage: + 
return + + # this sends a signal to peak_monitor_func to complete its loop + self.peak_monitoring = False + + # first ensure all objects get collected and their memory is freed + gc.collect() + + if self.torch is not None: + if torch.cuda.is_available(): + self.torch.cuda.empty_cache() + elif is_torch_mlu_available(): + self.torch.mlu.empty_cache() + elif is_torch_xpu_available(): + self.torch.xpu.empty_cache() + elif is_torch_npu_available(): + self.torch.npu.empty_cache() + elif is_torch_mps_available(): + self.torch.mps.empty_cache() + + # concepts: + # - alloc_delta: the difference of allocated memory between the end and the start + # - peaked_delta: the difference between the peak memory and the current memory + # in order to know how much memory the measured code consumed one needs to sum these two + + # gpu + if self.torch is not None: + if torch.cuda.is_available(): + self.gpu_mem_used_now = self.torch.cuda.memory_allocated() + self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated() + elif is_torch_mlu_available(): + self.gpu_mem_used_now = self.torch.mlu.memory_allocated() + self.gpu_mem_used_peak = self.torch.mlu.max_memory_allocated() + elif is_torch_xpu_available(): + self.gpu_mem_used_now = self.torch.xpu.memory_allocated() + self.gpu_mem_used_peak = self.torch.xpu.max_memory_allocated() + elif is_torch_npu_available(): + self.gpu_mem_used_now = self.torch.npu.memory_allocated() + self.gpu_mem_used_peak = self.torch.npu.max_memory_allocated() + elif is_torch_mps_available(): + self.gpu_mem_used_now = self.torch.mps.current_allocated_memory() + # self.torch.mps.max_memory_allocated() does not exist yet + self.gpu_mem_used_peak = None + + else: + raise ValueError("No available GPU device found!") + + self.gpu[self.cur_stage] = { + "begin": self.gpu_mem_used_at_start, + "end": self.gpu_mem_used_now, + "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start), + } + if self.gpu_mem_used_peak is not None: + self.gpu[self.cur_stage]["peaked"] = max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now) + else: + self.gpu[self.cur_stage]["peaked"] = "Not available" + + # cpu + self.cpu_mem_used_now = self.cpu_mem_used() + self.cpu[self.cur_stage] = { + "begin": self.cpu_mem_used_at_start, + "end": self.cpu_mem_used_now, + "alloc": (self.cpu_mem_used_now - self.cpu_mem_used_at_start), + "peaked": max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now), + } + + # reset - cycle finished + self.cur_stage = None + + def update_metrics(self, stage, metrics): + """updates the metrics""" + if self.skip_memory_metrics: + return + + # deal with nested calls of eval during train - simply ignore those + if self.cur_stage is not None and self.cur_stage != stage: + return + + # since we don't have a way to return init metrics, we push them into the first of train/val/predict + stages = [stage] + if not self.init_reported: + stages.insert(0, "init") + self.init_reported = True + + for stage in stages: + for t in ["alloc", "peaked"]: + if stage in self.cpu and t in self.cpu[stage]: + metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t] + if self.torch is not None and stage in self.gpu and t in self.gpu[stage]: + metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t] + # if we need additional debug info, enable the following + # for t in ["begin", "end"]: + # if stage in self.cpu and t in self.cpu[stage]: + # metrics[f"{stage}_mem_cpu_{t}"] = self.cpu[stage][t] + # if self.torch is not None and stage in self.gpu and t in self.gpu[stage]: + # metrics[f"{stage}_mem_gpu_{t}"] = 
self.gpu[stage][t] + + # since memory can be allocated before init, and it might be difficult to track overall + # memory usage, in particular for GPU, let's report memory usage at the point init was called + if stages[0] == "init": + metrics["before_init_mem_cpu"] = self.cpu["init"]["begin"] + if self.torch is not None: + metrics["before_init_mem_gpu"] = self.gpu["init"]["begin"] + # if we also wanted to report any additional memory allocations in between init and + # whatever the next stage was we could also report this: + # if self.cpu["init"]["end"] != self.cpu[stage]["begin"]: + # metrics[f"after_init_mem_cpu_delta"] = self.cpu[stage]["begin"] - self.cpu["init"]["end"] + # if self.torch is not None and self.gpu["init"]["end"] != self.gpu[stage]["begin"]: + # metrics[f"after_init_mem_gpu_delta"] = self.gpu[stage]["begin"] - self.gpu["init"]["end"] + + def stop_and_update_metrics(self, metrics=None): + """combine stop and metrics update in one call for simpler code""" + if self.skip_memory_metrics: + return + + stage = self.derive_stage() + self.stop(stage) + + # init doesn't have metrics to update so we just save that data for later stages to retrieve + if metrics is not None: + self.update_metrics(stage, metrics) + + +def has_length(dataset): + """ + Checks if the dataset implements __len__() and it doesn't raise an error + """ + try: + return len(dataset) is not None + except TypeError: + # TypeError: len() of unsized object + return False + + +def denumpify_detensorize(metrics): + """ + Recursively calls `.item()` on the element of the dictionary passed + """ + if isinstance(metrics, (list, tuple)): + return type(metrics)(denumpify_detensorize(m) for m in metrics) + elif isinstance(metrics, dict): + return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()}) + elif isinstance(metrics, np.generic): + return metrics.item() + elif is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1: + return metrics.item() + return metrics + + +def number_of_arguments(func): + """ + Return the number of arguments of the passed function, even if it's a partial function. + """ + if isinstance(func, functools.partial): + total_args = len(inspect.signature(func.func).parameters) + return total_args - len(func.args) - len(func.keywords) + return len(inspect.signature(func).parameters) + + +def find_executable_batch_size( + function: callable = None, starting_batch_size: int = 128, auto_find_batch_size: bool = False +): + """ + Args: + A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or + CUDNN, the batch size is cut in half and passed to `function`. `function` must take in a `batch_size` parameter as + its first argument. 
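A brief sketch of the non-`accelerate` path of this helper: when `auto_find_batch_size` is left off, it simply binds `starting_batch_size` to the wrapped callable. The `run_training` function below is a placeholder:

```python
from transformers.trainer_utils import find_executable_batch_size

def run_training(batch_size):
    print(f"training with batch_size={batch_size}")

# Without auto_find_batch_size, the starting batch size is bound as a keyword argument.
wrapped = find_executable_batch_size(run_training, starting_batch_size=32)
wrapped()  # prints: training with batch_size=32
```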
+ function (`callable`, *optional*) + A function to wrap + starting_batch_size (`int`, *optional*) + The batch size to try and fit into memory + auto_find_batch_size (`bool`, *optional*) + If False, will just execute `function` + """ + if function is None: + return functools.partial( + find_executable_batch_size, + starting_batch_size=starting_batch_size, + auto_find_batch_size=auto_find_batch_size, + ) + + if auto_find_batch_size: + requires_backends(find_executable_batch_size, "accelerate") + from accelerate.utils import find_executable_batch_size as accelerate_find_executable_batch_size + + return accelerate_find_executable_batch_size(function=function, starting_batch_size=starting_batch_size) + + return functools.partial(function, batch_size=starting_batch_size) + + +class FSDPOption(ExplicitEnum): + FULL_SHARD = "full_shard" + SHARD_GRAD_OP = "shard_grad_op" + NO_SHARD = "no_shard" + HYBRID_SHARD = "hybrid_shard" + HYBRID_SHARD_ZERO2 = "hybrid_shard_zero2" + OFFLOAD = "offload" + AUTO_WRAP = "auto_wrap" + + +class RemoveColumnsCollator: + """Wrap the data collator to remove unused columns before they are passed to the collator.""" + + def __init__( + self, + data_collator, + signature_columns, + logger=None, + model_name: Optional[str] = None, + description: Optional[str] = None, + ): + self.data_collator = data_collator + self.signature_columns = signature_columns + self.logger = logger + self.description = description + self.model_name = model_name + self.message_logged = False + + def _remove_columns(self, feature: dict) -> dict: + if not isinstance(feature, dict): + return feature + if not self.message_logged and self.logger and self.model_name: + ignored_columns = list(set(feature.keys()) - set(self.signature_columns)) + if len(ignored_columns) > 0: + dset_description = "" if self.description is None else f"in the {self.description} set" + self.logger.info( + f"The following columns {dset_description} don't have a corresponding argument in " + f"`{self.model_name}.forward` and have been ignored: {', '.join(ignored_columns)}." + f" If {', '.join(ignored_columns)} are not expected by `{self.model_name}.forward`, " + " you can safely ignore this message." + ) + self.message_logged = True + return {k: v for k, v in feature.items() if k in self.signature_columns} + + def __call__(self, features: List[dict]): + features = [self._remove_columns(feature) for feature in features] + return self.data_collator(features) + + +def check_target_module_exists(optim_target_modules, key: str, return_is_regex: bool = False): + """A helper method to check if the passed module's key name matches any of the target modules in the optim_target_modules. + + Args: + optim_target_modules (`Union[str, List[str]]`): + A list of strings to try to match. Can be also a full string. + key (`str`): + A key to search any matches in optim_target_modules + return_is_regex (`bool`): + If set to `True`, the method will return whether the passed `optim_target_modules` + is a regex or not. + + Returns: + `bool` : True of match object if key matches any target modules from config, False or + None if no match found + `bool` : If the matched target module is a regex to silence out the warnings in Trainer + for extra modules being found (only if `target_module_found=True` for an array of regex). 
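For illustration, how `check_target_module_exists` behaves for plain substrings versus regex patterns; the module key names below are hypothetical:

```python
from transformers.trainer_utils import check_target_module_exists

# Substring match against a list of target module names.
check_target_module_exists(["q_proj", "v_proj"], "model.layers.0.self_attn.q_proj")
# -> True

# Full-match regex passed as a single string; the second return value flags that a regex matched.
check_target_module_exists(r".*\.mlp\..*", "model.layers.0.mlp.up_proj", return_is_regex=True)
# -> (True, True)
```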
+ """ + target_module_found = False + is_regex = False + + if isinstance(optim_target_modules, str): + target_module_found = bool(re.fullmatch(optim_target_modules, key)) + is_regex = True if not optim_target_modules == key else False + elif key in optim_target_modules: # from here, target_module_found must be a list of str + # this module is specified directly in target_modules + target_module_found = True + elif any(target_key in key for target_key in optim_target_modules): + target_module_found = True + elif any(bool(re.fullmatch(optim_target_module, key)) for optim_target_module in optim_target_modules): + target_module_found = True + is_regex = True + + if return_is_regex: + return target_module_found, is_regex + + return target_module_found diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/training_args.py b/llmeval-env/lib/python3.10/site-packages/transformers/training_args.py new file mode 100644 index 0000000000000000000000000000000000000000..338bb116dddecee0091dc830bee9cac7b2fb4293 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/training_args.py @@ -0,0 +1,2880 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib +import io +import json +import math +import os +import warnings +from dataclasses import asdict, dataclass, field, fields +from datetime import timedelta +from enum import Enum +from pathlib import Path +from typing import Any, Dict, List, Optional, Union + +from huggingface_hub import get_full_repo_name +from packaging import version + +from .debug_utils import DebugOption +from .trainer_utils import ( + EvaluationStrategy, + FSDPOption, + HubStrategy, + IntervalStrategy, + SchedulerType, +) +from .utils import ( + ACCELERATE_MIN_VERSION, + ExplicitEnum, + cached_property, + is_accelerate_available, + is_safetensors_available, + is_sagemaker_dp_enabled, + is_sagemaker_mp_enabled, + is_torch_available, + is_torch_bf16_cpu_available, + is_torch_bf16_gpu_available, + is_torch_mlu_available, + is_torch_neuroncore_available, + is_torch_npu_available, + is_torch_tf32_available, + is_torch_xla_available, + is_torch_xpu_available, + logging, + requires_backends, +) +from .utils.generic import strtobool +from .utils.import_utils import is_optimum_neuron_available + + +logger = logging.get_logger(__name__) +log_levels = logging.get_log_levels_dict().copy() +trainer_log_levels = dict(**log_levels, passive=-1) + +if is_torch_available(): + import torch + import torch.distributed as dist + + from .pytorch_utils import is_torch_greater_or_equal_than_2_0 + +if is_accelerate_available(): + from accelerate.state import AcceleratorState, PartialState + from accelerate.utils import DistributedType + + from .trainer_pt_utils import AcceleratorConfig + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + +if is_torch_neuroncore_available(check_device=False): + # torchrun support + # https://github.com/pytorch/xla/pull/3609 + if 
os.environ.get("TORCHELASTIC_RUN_ID"): + if is_optimum_neuron_available(): + logger.info( + "Make sure that you are performing the training with the TrainiumTrainer from optimum[neuron], this " + "will fail otherwise." + ) + else: + logger.warning( + "Please use the TrainiumTrainer from optimum[neuron] instead of the Transformers library to perform " + "training on AWS Trainium instances. More information here: " + "https://github.com/huggingface/optimum-neuron" + ) + import torch_xla.distributed.xla_backend as xbn + + if not isinstance(dist.group.WORLD, xbn.ProcessGroupXla): + dist.init_process_group(backend="xla") + if not isinstance(dist.group.WORLD, xbn.ProcessGroupXla): + raise AssertionError("Failed to initialize torch.distributed process group using XLA backend.") + + +if is_sagemaker_mp_enabled(): + import smdistributed.modelparallel.torch as smp + + smp.init() + + +def default_logdir() -> str: + """ + Same default as PyTorch + """ + import socket + from datetime import datetime + + current_time = datetime.now().strftime("%b%d_%H-%M-%S") + return os.path.join("runs", current_time + "_" + socket.gethostname()) + + +def get_int_from_env(env_keys, default): + """Returns the first positive env value found in the `env_keys` list or the default.""" + for e in env_keys: + val = int(os.environ.get(e, -1)) + if val >= 0: + return val + return default + + +def get_xla_device_type(device: "torch.device") -> Optional[str]: + """ + Returns the xla device type (CPU|GPU|TPU) or None if the device is a non-xla device. + """ + if is_torch_xla_available(): + if device.type == "cpu": + return "CPU" + return xm.xla_real_devices([device])[0].split(":")[0] + return None + + +class OptimizerNames(ExplicitEnum): + """ + Stores the acceptable string identifiers for optimizers. + """ + + ADAMW_HF = "adamw_hf" + ADAMW_TORCH = "adamw_torch" + ADAMW_TORCH_FUSED = "adamw_torch_fused" + ADAMW_TORCH_XLA = "adamw_torch_xla" + ADAMW_TORCH_NPU_FUSED = "adamw_torch_npu_fused" + ADAMW_APEX_FUSED = "adamw_apex_fused" + ADAFACTOR = "adafactor" + ADAMW_ANYPRECISION = "adamw_anyprecision" + SGD = "sgd" + ADAGRAD = "adagrad" + ADAMW_BNB = "adamw_bnb_8bit" + ADAMW_8BIT = "adamw_8bit" # just an alias for adamw_bnb_8bit + LION_8BIT = "lion_8bit" + LION = "lion_32bit" + PAGED_ADAMW = "paged_adamw_32bit" + PAGED_ADAMW_8BIT = "paged_adamw_8bit" + PAGED_LION = "paged_lion_32bit" + PAGED_LION_8BIT = "paged_lion_8bit" + RMSPROP = "rmsprop" + RMSPROP_BNB = "rmsprop_bnb" + RMSPROP_8BIT = "rmsprop_bnb_8bit" + RMSPROP_32BIT = "rmsprop_bnb_32bit" + GALORE_ADAMW = "galore_adamw" + GALORE_ADAMW_8BIT = "galore_adamw_8bit" + GALORE_ADAFACTOR = "galore_adafactor" + GALORE_ADAMW_LAYERWISE = "galore_adamw_layerwise" + GALORE_ADAMW_8BIT_LAYERWISE = "galore_adamw_8bit_layerwise" + GALORE_ADAFACTOR_LAYERWISE = "galore_adafactor_layerwise" + + +# Sometimes users will pass in a `str` repr of a dict in the CLI +# We need to track what fields those can be. Each time a new arg +# has a dict type, it must be added to this list. +# Important: These should be typed with Optional[Union[dict,str,...]] +_VALID_DICT_FIELDS = [ + "accelerator_config", + "fsdp_config", + "deepspeed", + "gradient_checkpointing_kwargs", + "lr_scheduler_kwargs", +] + + +def _convert_str_dict(passed_value: dict): + "Safely checks that a passed value is a dictionary and converts any string values to their appropriate types." 
+ for key, value in passed_value.items(): + if isinstance(value, dict): + passed_value[key] = _convert_str_dict(value) + elif isinstance(value, str): + # First check for bool and convert + if value.lower() in ("true", "false"): + passed_value[key] = value.lower() == "true" + # Check for digit + elif value.isdigit(): + passed_value[key] = int(value) + elif value.replace(".", "", 1).isdigit(): + passed_value[key] = float(value) + + return passed_value + + +# TODO: `TrainingArguments` users rely on it being fully mutable. In the future see if we can narrow this to a few keys: https://github.com/huggingface/transformers/pull/25903 +@dataclass +class TrainingArguments: + """ + TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop + itself**. + + Using [`HfArgumentParser`] we can turn this class into + [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the + command line. + + Parameters: + output_dir (`str`): + The output directory where the model predictions and checkpoints will be written. + overwrite_output_dir (`bool`, *optional*, defaults to `False`): + If `True`, overwrite the content of the output directory. Use this to continue training if `output_dir` + points to a checkpoint directory. + do_train (`bool`, *optional*, defaults to `False`): + Whether to run training or not. This argument is not directly used by [`Trainer`], it's intended to be used + by your training/evaluation scripts instead. See the [example + scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. + do_eval (`bool`, *optional*): + Whether to run evaluation on the validation set or not. Will be set to `True` if `evaluation_strategy` is + different from `"no"`. This argument is not directly used by [`Trainer`], it's intended to be used by your + training/evaluation scripts instead. See the [example + scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. + do_predict (`bool`, *optional*, defaults to `False`): + Whether to run predictions on the test set or not. This argument is not directly used by [`Trainer`], it's + intended to be used by your training/evaluation scripts instead. See the [example + scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. + evaluation_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"no"`): + The evaluation strategy to adopt during training. Possible values are: + + - `"no"`: No evaluation is done during training. + - `"steps"`: Evaluation is done (and logged) every `eval_steps`. + - `"epoch"`: Evaluation is done at the end of each epoch. + + prediction_loss_only (`bool`, *optional*, defaults to `False`): + When performing evaluation and generating predictions, only returns the loss. + per_device_train_batch_size (`int`, *optional*, defaults to 8): + The batch size per GPU/XPU/TPU/MPS/NPU core/CPU for training. + per_device_eval_batch_size (`int`, *optional*, defaults to 8): + The batch size per GPU/XPU/TPU/MPS/NPU core/CPU for evaluation. + gradient_accumulation_steps (`int`, *optional*, defaults to 1): + Number of updates steps to accumulate the gradients for, before performing a backward/update pass. + + + + When using gradient accumulation, one step is counted as one step with backward pass. Therefore, logging, + evaluation, save will be conducted every `gradient_accumulation_steps * xxx_step` training examples. 
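To make the accounting above concrete, a short sketch of how the effective batch size and the number of examples between optimizer steps work out; the GPU count is an assumption made for the example:

```python
per_device_train_batch_size = 8
gradient_accumulation_steps = 4
num_gpus = 2  # illustrative

# Each optimizer step (one "step" for logging/eval/save purposes) consumes this many examples:
effective_batch_size = per_device_train_batch_size * gradient_accumulation_steps * num_gpus
print(effective_batch_size)  # 64

# With logging_steps=500, a log entry is written every 500 optimizer steps,
# i.e. every 500 * 64 = 32000 training examples in this configuration.
```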
+ + + + eval_accumulation_steps (`int`, *optional*): + Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. If + left unset, the whole predictions are accumulated on GPU/NPU/TPU before being moved to the CPU (faster but + requires more memory). + eval_delay (`float`, *optional*): + Number of epochs or steps to wait for before the first evaluation can be performed, depending on the + evaluation_strategy. + learning_rate (`float`, *optional*, defaults to 5e-5): + The initial learning rate for [`AdamW`] optimizer. + weight_decay (`float`, *optional*, defaults to 0): + The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in [`AdamW`] + optimizer. + adam_beta1 (`float`, *optional*, defaults to 0.9): + The beta1 hyperparameter for the [`AdamW`] optimizer. + adam_beta2 (`float`, *optional*, defaults to 0.999): + The beta2 hyperparameter for the [`AdamW`] optimizer. + adam_epsilon (`float`, *optional*, defaults to 1e-8): + The epsilon hyperparameter for the [`AdamW`] optimizer. + max_grad_norm (`float`, *optional*, defaults to 1.0): + Maximum gradient norm (for gradient clipping). + num_train_epochs(`float`, *optional*, defaults to 3.0): + Total number of training epochs to perform (if not an integer, will perform the decimal part percents of + the last epoch before stopping training). + max_steps (`int`, *optional*, defaults to -1): + If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. + For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until + `max_steps` is reached. + lr_scheduler_type (`str` or [`SchedulerType`], *optional*, defaults to `"linear"`): + The scheduler type to use. See the documentation of [`SchedulerType`] for all possible values. + lr_scheduler_kwargs ('dict', *optional*, defaults to {}): + The extra arguments for the lr_scheduler. See the documentation of each scheduler for possible values. + warmup_ratio (`float`, *optional*, defaults to 0.0): + Ratio of total training steps used for a linear warmup from 0 to `learning_rate`. + warmup_steps (`int`, *optional*, defaults to 0): + Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of `warmup_ratio`. + log_level (`str`, *optional*, defaults to `passive`): + Logger log level to use on the main process. Possible choices are the log levels as strings: 'debug', + 'info', 'warning', 'error' and 'critical', plus a 'passive' level which doesn't set anything and keeps the + current log level for the Transformers library (which will be `"warning"` by default). + log_level_replica (`str`, *optional*, defaults to `"warning"`): + Logger log level to use on replicas. Same choices as `log_level`" + log_on_each_node (`bool`, *optional*, defaults to `True`): + In multinode distributed training, whether to log using `log_level` once per node, or only on the main + node. + logging_dir (`str`, *optional*): + [TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to + *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***. + logging_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`): + The logging strategy to adopt during training. Possible values are: + + - `"no"`: No logging is done during training. + - `"epoch"`: Logging is done at the end of each epoch. + - `"steps"`: Logging is done every `logging_steps`. 
+ + logging_first_step (`bool`, *optional*, defaults to `False`): + Whether to log the first `global_step` or not. + logging_steps (`int` or `float`, *optional*, defaults to 500): + Number of update steps between two logs if `logging_strategy="steps"`. Should be an integer or a float in + range `[0,1)`. If smaller than 1, will be interpreted as ratio of total training steps. + logging_nan_inf_filter (`bool`, *optional*, defaults to `True`): + Whether to filter `nan` and `inf` losses for logging. If set to `True` the loss of every step that is `nan` + or `inf` is filtered and the average loss of the current logging window is taken instead. + + + + `logging_nan_inf_filter` only influences the logging of loss values, it does not change the behavior the + gradient is computed or applied to the model. + + + + save_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`): + The checkpoint save strategy to adopt during training. Possible values are: + + - `"no"`: No save is done during training. + - `"epoch"`: Save is done at the end of each epoch. + - `"steps"`: Save is done every `save_steps`. + save_steps (`int` or `float`, *optional*, defaults to 500): + Number of updates steps before two checkpoint saves if `save_strategy="steps"`. Should be an integer or a + float in range `[0,1)`. If smaller than 1, will be interpreted as ratio of total training steps. + save_total_limit (`int`, *optional*): + If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in + `output_dir`. When `load_best_model_at_end` is enabled, the "best" checkpoint according to + `metric_for_best_model` will always be retained in addition to the most recent ones. For example, for + `save_total_limit=5` and `load_best_model_at_end`, the four last checkpoints will always be retained + alongside the best model. When `save_total_limit=1` and `load_best_model_at_end`, it is possible that two + checkpoints are saved: the last one and the best one (if they are different). + save_safetensors (`bool`, *optional*, defaults to `True`): + Use [safetensors](https://huggingface.co/docs/safetensors) saving and loading for state dicts instead of + default `torch.load` and `torch.save`. + save_on_each_node (`bool`, *optional*, defaults to `False`): + When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on + the main one. + + This should not be activated when the different nodes use the same storage as the files will be saved with + the same names for each node. + save_only_model (`bool`, *optional*, defaults to `False`): + When checkpointing, whether to only save the model, or also the optimizer, scheduler & rng state. + Note that when this is true, you won't be able to resume training from checkpoint. + This enables you to save storage by not storing the optimizer, scheduler & rng state. + You can only load the model using `from_pretrained` with this option set to `True`. + use_cpu (`bool`, *optional*, defaults to `False`): + Whether or not to use cpu. If set to False, we will use cuda or mps device if available. + seed (`int`, *optional*, defaults to 42): + Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the + [`~Trainer.model_init`] function to instantiate the model if it has some randomly initialized parameters. + data_seed (`int`, *optional*): + Random seed to be used with data samplers. 
If not set, random generators for data sampling will use the + same seed as `seed`. This can be used to ensure reproducibility of data sampling, independent of the model + seed. + jit_mode_eval (`bool`, *optional*, defaults to `False`): + Whether or not to use PyTorch jit trace for inference. + use_ipex (`bool`, *optional*, defaults to `False`): + Use Intel extension for PyTorch when it is available. [IPEX + installation](https://github.com/intel/intel-extension-for-pytorch). + bf16 (`bool`, *optional*, defaults to `False`): + Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training. Requires Ampere or higher + NVIDIA architecture or using CPU (use_cpu) or Ascend NPU. This is an experimental API and it may change. + fp16 (`bool`, *optional*, defaults to `False`): + Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training. + fp16_opt_level (`str`, *optional*, defaults to 'O1'): + For `fp16` training, Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details on + the [Apex documentation](https://nvidia.github.io/apex/amp). + fp16_backend (`str`, *optional*, defaults to `"auto"`): + This argument is deprecated. Use `half_precision_backend` instead. + half_precision_backend (`str`, *optional*, defaults to `"auto"`): + The backend to use for mixed precision training. Must be one of `"auto", "apex", "cpu_amp"`. `"auto"` will + use CPU/CUDA AMP or APEX depending on the PyTorch version detected, while the other choices will force the + requested backend. + bf16_full_eval (`bool`, *optional*, defaults to `False`): + Whether to use full bfloat16 evaluation instead of 32-bit. This will be faster and save memory but can harm + metric values. This is an experimental API and it may change. + fp16_full_eval (`bool`, *optional*, defaults to `False`): + Whether to use full float16 evaluation instead of 32-bit. This will be faster and save memory but can harm + metric values. + tf32 (`bool`, *optional*): + Whether to enable the TF32 mode, available in Ampere and newer GPU architectures. The default value depends + on PyTorch's version default of `torch.backends.cuda.matmul.allow_tf32`. For more details please refer to + the [TF32](https://huggingface.co/docs/transformers/performance#tf32) documentation. This is an + experimental API and it may change. + local_rank (`int`, *optional*, defaults to -1): + Rank of the process during distributed training. + ddp_backend (`str`, *optional*): + The backend to use for distributed training. Must be one of `"nccl"`, `"mpi"`, `"ccl"`, `"gloo"`, `"hccl"`. + tpu_num_cores (`int`, *optional*): + When training on TPU, the number of TPU cores (automatically passed by launcher script). + dataloader_drop_last (`bool`, *optional*, defaults to `False`): + Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) + or not. + eval_steps (`int` or `float`, *optional*): + Number of update steps between two evaluations if `evaluation_strategy="steps"`. Will default to the same + value as `logging_steps` if not set. Should be an integer or a float in range `[0,1)`. If smaller than 1, + will be interpreted as ratio of total training steps. + dataloader_num_workers (`int`, *optional*, defaults to 0): + Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the + main process. 
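Pulling a few of the arguments documented so far together, a hedged example of a typical configuration; all values are illustrative, not recommendations:

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="out",                 # the only required argument
    evaluation_strategy="steps",
    eval_steps=500,
    per_device_train_batch_size=8,
    gradient_accumulation_steps=4,
    learning_rate=5e-5,
    warmup_ratio=0.1,
    logging_steps=100,
    save_strategy="steps",
    save_steps=500,
    fp16=True,                        # needs a CUDA (or similarly supported) accelerator
    dataloader_num_workers=2,
)
```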
+ past_index (`int`, *optional*, defaults to -1): + Some models like [TransformerXL](../model_doc/transformerxl) or [XLNet](../model_doc/xlnet) can make use of + the past hidden states for their predictions. If this argument is set to a positive int, the `Trainer` will + use the corresponding output (usually index 2) as the past state and feed it to the model at the next + training step under the keyword argument `mems`. + run_name (`str`, *optional*): + A descriptor for the run. Typically used for [wandb](https://www.wandb.com/) and + [mlflow](https://www.mlflow.org/) logging. + disable_tqdm (`bool`, *optional*): + Whether or not to disable the tqdm progress bars and table of metrics produced by + [`~notebook.NotebookTrainingTracker`] in Jupyter Notebooks. Will default to `True` if the logging level is + set to warn or lower (default), `False` otherwise. + remove_unused_columns (`bool`, *optional*, defaults to `True`): + Whether or not to automatically remove the columns unused by the model forward method. + label_names (`List[str]`, *optional*): + The list of keys in your dictionary of inputs that correspond to the labels. + + Will eventually default to the list of argument names accepted by the model that contain the word "label", + except if the model used is one of the `XxxForQuestionAnswering` in which case it will also include the + `["start_positions", "end_positions"]` keys. + load_best_model_at_end (`bool`, *optional*, defaults to `False`): + Whether or not to load the best model found during training at the end of training. When this option is + enabled, the best checkpoint will always be saved. See + [`save_total_limit`](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments.save_total_limit) + for more. + + + + When set to `True`, the parameters `save_strategy` needs to be the same as `evaluation_strategy`, and in + the case it is "steps", `save_steps` must be a round multiple of `eval_steps`. + + + + metric_for_best_model (`str`, *optional*): + Use in conjunction with `load_best_model_at_end` to specify the metric to use to compare two different + models. Must be the name of a metric returned by the evaluation with or without the prefix `"eval_"`. Will + default to `"loss"` if unspecified and `load_best_model_at_end=True` (to use the evaluation loss). + + If you set this value, `greater_is_better` will default to `True`. Don't forget to set it to `False` if + your metric is better when lower. + greater_is_better (`bool`, *optional*): + Use in conjunction with `load_best_model_at_end` and `metric_for_best_model` to specify if better models + should have a greater metric or not. Will default to: + + - `True` if `metric_for_best_model` is set to a value that isn't `"loss"` or `"eval_loss"`. + - `False` if `metric_for_best_model` is not set, or set to `"loss"` or `"eval_loss"`. + ignore_data_skip (`bool`, *optional*, defaults to `False`): + When resuming training, whether or not to skip the epochs and batches to get the data loading at the same + stage as in the previous training. If set to `True`, the training will begin faster (as that skipping step + can take a long time) but will not yield the same results as the interrupted training would have. + fsdp (`bool`, `str` or list of [`~trainer_utils.FSDPOption`], *optional*, defaults to `''`): + Use PyTorch Distributed Parallel Training (in distributed training only). + + A list of options along the following: + + - `"full_shard"`: Shard parameters, gradients and optimizer states. 
+ - `"shard_grad_op"`: Shard optimizer states and gradients. + - `"hybrid_shard"`: Apply `FULL_SHARD` within a node, and replicate parameters across nodes. + - `"hybrid_shard_zero2"`: Apply `SHARD_GRAD_OP` within a node, and replicate parameters across nodes. + - `"offload"`: Offload parameters and gradients to CPUs (only compatible with `"full_shard"` and + `"shard_grad_op"`). + - `"auto_wrap"`: Automatically recursively wrap layers with FSDP using `default_auto_wrap_policy`. + fsdp_config (`str` or `dict`, *optional*): + Config to be used with fsdp (Pytorch Distributed Parallel Training). The value is either a location of + fsdp json config file (e.g., `fsdp_config.json`) or an already loaded json file as `dict`. + + A List of config and its options: + - min_num_params (`int`, *optional*, defaults to `0`): + FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `fsdp` field is + passed). + - transformer_layer_cls_to_wrap (`List[str]`, *optional*): + List of transformer layer class names (case-sensitive) to wrap, e.g, `BertLayer`, `GPTJBlock`, + `T5Block` .... (useful only when `fsdp` flag is passed). + - backward_prefetch (`str`, *optional*) + FSDP's backward prefetch mode. Controls when to prefetch next set of parameters (useful only when + `fsdp` field is passed). + + A list of options along the following: + + - `"backward_pre"` : Prefetches the next set of parameters before the current set of parameter's + gradient + computation. + - `"backward_post"` : This prefetches the next set of parameters after the current set of + parameter’s + gradient computation. + - forward_prefetch (`bool`, *optional*, defaults to `False`) + FSDP's forward prefetch mode (useful only when `fsdp` field is passed). + If `"True"`, then FSDP explicitly prefetches the next upcoming all-gather while executing in the + forward pass. + - limit_all_gathers (`bool`, *optional*, defaults to `False`) + FSDP's limit_all_gathers (useful only when `fsdp` field is passed). + If `"True"`, FSDP explicitly synchronizes the CPU thread to prevent too many in-flight + all-gathers. + - use_orig_params (`bool`, *optional*, defaults to `True`) + If `"True"`, allows non-uniform `requires_grad` during init, which means support for interspersed + frozen and trainable paramteres. Useful in cases such as parameter-efficient fine-tuning. Please + refer this + [blog](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019 + - sync_module_states (`bool`, *optional*, defaults to `True`) + If `"True"`, each individually wrapped FSDP unit will broadcast module parameters from rank 0 to + ensure they are the same across all ranks after initialization + - activation_checkpointing (`bool`, *optional*, defaults to `False`): + If `"True"`, activation checkpointing is a technique to reduce memory usage by clearing activations of + certain layers and recomputing them during a backward pass. Effectively, this trades extra + computation time for reduced memory usage. + - xla (`bool`, *optional*, defaults to `False`): + Whether to use PyTorch/XLA Fully Sharded Data Parallel Training. This is an experimental feature + and its API may evolve in the future. + - xla_fsdp_settings (`dict`, *optional*) + The value is a dictionary which stores the XLA FSDP wrapping parameters. + + For a complete list of options, please see [here]( + https://github.com/pytorch/xla/blob/master/torch_xla/distributed/fsdp/xla_fully_sharded_data_parallel.py). 
+ - xla_fsdp_grad_ckpt (`bool`, *optional*, defaults to `False`): + Will use gradient checkpointing over each nested XLA FSDP wrapped layer. This setting can only be + used when the xla flag is set to true, and an auto wrapping policy is specified through + fsdp_min_num_params or fsdp_transformer_layer_cls_to_wrap. + + deepspeed (`str` or `dict`, *optional*): + Use [Deepspeed](https://github.com/microsoft/deepspeed). This is an experimental feature and its API may + evolve in the future. The value is either the location of DeepSpeed json config file (e.g., + `ds_config.json`) or an already loaded json file as a `dict`" + + + If enabling any Zero-init, make sure that your model is not initialized until + *after* initializing the `TrainingArguments`, else it will not be applied. + + + accelerator_config (`str`, `dict`, or `AcceleratorConfig`, *optional*): + Config to be used with the internal `Accelerator` implementation. The value is either a location of + accelerator json config file (e.g., `accelerator_config.json`), an already loaded json file as `dict`, + or an instance of [`~trainer_pt_utils.AcceleratorConfig`]. + + A list of config and its options: + - split_batches (`bool`, *optional*, defaults to `False`): + Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If + `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a + round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set + in your script multiplied by the number of processes. + - dispatch_batches (`bool`, *optional*): + If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process + and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose + underlying dataset is an `IterableDataset`, `False` otherwise. + - even_batches (`bool`, *optional*, defaults to `True`): + If set to `True`, in cases where the total batch size across all processes does not exactly divide the + dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among + all workers. + - use_seedable_sampler (`bool`, *optional*, defaults to `True`): + Whether or not use a fully seedable random sampler ([`accelerate.data_loader.SeedableRandomSampler`]). Ensures + training results are fully reproducable using a different sampling technique. While seed-to-seed results + may differ, on average the differences are neglible when using multiple different seeds to compare. Should + also be ran with [`~utils.set_seed`] for the best results. + + label_smoothing_factor (`float`, *optional*, defaults to 0.0): + The label smoothing factor to use. Zero means no label smoothing, otherwise the underlying onehot-encoded + labels are changed from 0s and 1s to `label_smoothing_factor/num_labels` and `1 - label_smoothing_factor + + label_smoothing_factor/num_labels` respectively. + debug (`str` or list of [`~debug_utils.DebugOption`], *optional*, defaults to `""`): + Enable one or more debug features. This is an experimental feature. + + Possible options are: + + - `"underflow_overflow"`: detects overflow in model's input/outputs and reports the last frames that led to + the event + - `"tpu_metrics_debug"`: print debug metrics on TPU + + The options should be separated by whitespaces. 
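As a worked example of the label smoothing formula above, with an assumed class count of five and a smoothing factor of 0.1:

```python
label_smoothing_factor = 0.1
num_labels = 5

off_target = label_smoothing_factor / num_labels                                 # 0.02
on_target = 1 - label_smoothing_factor + label_smoothing_factor / num_labels     # 0.92

# The smoothed one-hot row still sums to 1:
print(on_target + (num_labels - 1) * off_target)  # 1.0
```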
+ optim (`str` or [`training_args.OptimizerNames`], *optional*, defaults to `"adamw_torch"`): + The optimizer to use: adamw_hf, adamw_torch, adamw_torch_fused, adamw_apex_fused, adamw_anyprecision or + adafactor. + optim_args (`str`, *optional*): + Optional arguments that are supplied to AnyPrecisionAdamW. + group_by_length (`bool`, *optional*, defaults to `False`): + Whether or not to group together samples of roughly the same length in the training dataset (to minimize + padding applied and be more efficient). Only useful if applying dynamic padding. + length_column_name (`str`, *optional*, defaults to `"length"`): + Column name for precomputed lengths. If the column exists, grouping by length will use these values rather + than computing them on train startup. Ignored unless `group_by_length` is `True` and the dataset is an + instance of `Dataset`. + report_to (`str` or `List[str]`, *optional*, defaults to `"all"`): + The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`, + `"clearml"`, `"codecarbon"`, `"comet_ml"`, `"dagshub"`, `"dvclive"`, `"flyte"`, `"mlflow"`, `"neptune"`, + `"tensorboard"`, and `"wandb"`. Use `"all"` to report to all integrations installed, `"none"` for no + integrations. + ddp_find_unused_parameters (`bool`, *optional*): + When using distributed training, the value of the flag `find_unused_parameters` passed to + `DistributedDataParallel`. Will default to `False` if gradient checkpointing is used, `True` otherwise. + ddp_bucket_cap_mb (`int`, *optional*): + When using distributed training, the value of the flag `bucket_cap_mb` passed to `DistributedDataParallel`. + ddp_broadcast_buffers (`bool`, *optional*): + When using distributed training, the value of the flag `broadcast_buffers` passed to + `DistributedDataParallel`. Will default to `False` if gradient checkpointing is used, `True` otherwise. + dataloader_pin_memory (`bool`, *optional*, defaults to `True`): + Whether you want to pin memory in data loaders or not. Will default to `True`. + dataloader_persistent_workers (`bool`, *optional*, defaults to `False`): + If True, the data loader will not shut down the worker processes after a dataset has been consumed once. + This allows to maintain the workers Dataset instances alive. Can potentially speed up training, but will + increase RAM usage. Will default to `False`. + dataloader_prefetch_factor (`int`, *optional*): + Number of batches loaded in advance by each worker. + 2 means there will be a total of 2 * num_workers batches prefetched across all workers. + skip_memory_metrics (`bool`, *optional*, defaults to `True`): + Whether to skip adding of memory profiler reports to metrics. This is skipped by default because it slows + down the training and evaluation speed. + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push the model to the Hub every time the model is saved. If this is activated, + `output_dir` will begin a git directory synced with the repo (determined by `hub_model_id`) and the content + will be pushed each time a save is triggered (depending on your `save_strategy`). Calling + [`~Trainer.save_model`] will also trigger a push. + + + + If `output_dir` exists, it needs to be a local clone of the repository to which the [`Trainer`] will be + pushed. + + + + resume_from_checkpoint (`str`, *optional*): + The path to a folder with a valid checkpoint for your model. 
This argument is not directly used by + [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example + scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. + hub_model_id (`str`, *optional*): + The name of the repository to keep in sync with the local *output_dir*. It can be a simple model ID in + which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, + for instance `"user_name/model"`, which allows you to push to an organization you are a member of with + `"organization_name/model"`. Will default to `user_name/output_dir_name` with *output_dir_name* being the + name of `output_dir`. + + Will default to the name of `output_dir`. + hub_strategy (`str` or [`~trainer_utils.HubStrategy`], *optional*, defaults to `"every_save"`): + Defines the scope of what is pushed to the Hub and when. Possible values are: + + - `"end"`: push the model, its configuration, the tokenizer (if passed along to the [`Trainer`]) and a + draft of a model card when the [`~Trainer.save_model`] method is called. + - `"every_save"`: push the model, its configuration, the tokenizer (if passed along to the [`Trainer`]) and + a draft of a model card each time there is a model save. The pushes are asynchronous to not block + training, and in case the save are very frequent, a new push is only attempted if the previous one is + finished. A last push is made with the final model at the end of training. + - `"checkpoint"`: like `"every_save"` but the latest checkpoint is also pushed in a subfolder named + last-checkpoint, allowing you to resume training easily with + `trainer.train(resume_from_checkpoint="last-checkpoint")`. + - `"all_checkpoints"`: like `"checkpoint"` but all checkpoints are pushed like they appear in the output + folder (so you will get one checkpoint folder per folder in your final repository) + + hub_token (`str`, *optional*): + The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with + `huggingface-cli login`. + hub_private_repo (`bool`, *optional*, defaults to `False`): + If True, the Hub repo will be set to private. + hub_always_push (`bool`, *optional*, defaults to `False`): + Unless this is `True`, the `Trainer` will skip pushing a checkpoint when the previous push is not finished. + gradient_checkpointing (`bool`, *optional*, defaults to `False`): + If True, use gradient checkpointing to save memory at the expense of slower backward pass. + gradient_checkpointing_kwargs (`dict`, *optional*, defaults to `None`): + Key word arguments to be passed to the `gradient_checkpointing_enable` method. + include_inputs_for_metrics (`bool`, *optional*, defaults to `False`): + Whether or not the inputs will be passed to the `compute_metrics` function. This is intended for metrics + that need inputs, predictions and references for scoring calculation in Metric class. + eval_do_concat_batches (`bool`, *optional*, defaults to `True`): + Whether to recursively concat inputs/losses/labels/predictions across batches. If `False`, + will instead store them as lists, with each batch kept separate. + auto_find_batch_size (`bool`, *optional*, defaults to `False`) + Whether to find a batch size that will fit into memory automatically through exponential decay, avoiding + CUDA Out-of-Memory errors. 
Requires accelerate to be installed (`pip install accelerate`) + full_determinism (`bool`, *optional*, defaults to `False`) + If `True`, [`enable_full_determinism`] is called instead of [`set_seed`] to ensure reproducible results in + distributed training. Important: this will negatively impact the performance, so only use it for debugging. + torchdynamo (`str`, *optional*): + If set, the backend compiler for TorchDynamo. Possible choices are `"eager"`, `"aot_eager"`, `"inductor"`, + `"nvfuser"`, `"aot_nvfuser"`, `"aot_cudagraphs"`, `"ofi"`, `"fx2trt"`, `"onnxrt"` and `"ipex"`. + ray_scope (`str`, *optional*, defaults to `"last"`): + The scope to use when doing hyperparameter search with Ray. By default, `"last"` will be used. Ray will + then use the last checkpoint of all trials, compare those, and select the best one. However, other options + are also available. See the [Ray documentation]( + https://docs.ray.io/en/latest/tune/api_docs/analysis.html#ray.tune.ExperimentAnalysis.get_best_trial) for + more options. + ddp_timeout (`int`, *optional*, defaults to 1800): + The timeout for `torch.distributed.init_process_group` calls, used to avoid GPU socket timeouts when + performing slow operations in distributed runnings. Please refer the [PyTorch documentation] + (https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more + information. + use_mps_device (`bool`, *optional*, defaults to `False`): + This argument is deprecated.`mps` device will be used if it is available similar to `cuda` device. + torch_compile (`bool`, *optional*, defaults to `False`): + Whether or not to compile the model using PyTorch 2.0 + [`torch.compile`](https://pytorch.org/get-started/pytorch-2.0/). + + This will use the best defaults for the [`torch.compile` + API](https://pytorch.org/docs/stable/generated/torch.compile.html?highlight=torch+compile#torch.compile). + You can customize the defaults with the argument `torch_compile_backend` and `torch_compile_mode` but we + don't guarantee any of them will work as the support is progressively rolled in in PyTorch. + + This flag and the whole compile API is experimental and subject to change in future releases. + torch_compile_backend (`str`, *optional*): + The backend to use in `torch.compile`. If set to any value, `torch_compile` will be set to `True`. + + Refer to the PyTorch doc for possible values and note that they may change across PyTorch versions. + + This flag is experimental and subject to change in future releases. + torch_compile_mode (`str`, *optional*): + The mode to use in `torch.compile`. If set to any value, `torch_compile` will be set to `True`. + + Refer to the PyTorch doc for possible values and note that they may change across PyTorch versions. + + This flag is experimental and subject to change in future releases. + split_batches (`bool`, *optional*): + Whether or not the accelerator should split the batches yielded by the dataloaders across the devices + during distributed training. If + + set to `True`, the actual batch size used will be the same on any kind of distributed processes, but it + must be a + + round multiple of the number of processes you are using (such as GPUs). + include_tokens_per_second (`bool`, *optional*): + Whether or not to compute the number of tokens per second per device for training speed metrics. + + This will iterate over the entire training dataloader once beforehand, + + and will slow down the entire process. 
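A short sketch of opting into the compilation and distributed-timeout flags described above; backend availability depends on the installed PyTorch version (2.0 or newer for `torch.compile`), so treat the values as assumptions:

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="out",
    torch_compile=True,                  # use the torch.compile defaults
    # torch_compile_backend="inductor",  # setting a backend also implies torch_compile=True
    ddp_timeout=3600,                    # seconds passed to init_process_group for slow collective ops
)
```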
+ + include_num_input_tokens_seen (`bool`, *optional*): + Whether or not to track the number of input tokens seen throughout training. + + May be slower in distributed training as gather operations must be called. + + neftune_noise_alpha (`Optional[float]`): + If not `None`, this will activate NEFTune noise embeddings. This can drastically improve model performance + for instruction fine-tuning. Check out the [original paper](https://arxiv.org/abs/2310.05914) and the + [original code](https://github.com/neelsjain/NEFTune). Support transformers `PreTrainedModel` and also + `PeftModel` from peft. + optim_target_modules (`Union[str, List[str]]`, *optional*): + The target modules to optimize, i.e. the module names that you would like to train, right now this is used only for GaLore algorithm + https://arxiv.org/abs/2403.03507 + See: https://github.com/jiaweizzhao/GaLore for more details. You need to make sure to pass a valid GaloRe + optimizer, e.g. one of: "galore_adamw", "galore_adamw_8bit", "galore_adafactor" and make sure that the target modules are `nn.Linear` modules + only. + """ + + framework = "pt" + output_dir: str = field( + metadata={"help": "The output directory where the model predictions and checkpoints will be written."}, + ) + overwrite_output_dir: bool = field( + default=False, + metadata={ + "help": ( + "Overwrite the content of the output directory. " + "Use this to continue training if output_dir points to a checkpoint directory." + ) + }, + ) + + do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) + do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."}) + do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."}) + evaluation_strategy: Union[IntervalStrategy, str] = field( + default="no", + metadata={"help": "The evaluation strategy to use."}, + ) + prediction_loss_only: bool = field( + default=False, + metadata={"help": "When performing evaluation and predictions, only returns the loss."}, + ) + + per_device_train_batch_size: int = field( + default=8, metadata={"help": "Batch size per GPU/TPU/MPS/NPU core/CPU for training."} + ) + per_device_eval_batch_size: int = field( + default=8, metadata={"help": "Batch size per GPU/TPU/MPS/NPU core/CPU for evaluation."} + ) + + per_gpu_train_batch_size: Optional[int] = field( + default=None, + metadata={ + "help": ( + "Deprecated, the use of `--per_device_train_batch_size` is preferred. " + "Batch size per GPU/TPU core/CPU for training." + ) + }, + ) + per_gpu_eval_batch_size: Optional[int] = field( + default=None, + metadata={ + "help": ( + "Deprecated, the use of `--per_device_eval_batch_size` is preferred. " + "Batch size per GPU/TPU core/CPU for evaluation." + ) + }, + ) + + gradient_accumulation_steps: int = field( + default=1, + metadata={"help": "Number of updates steps to accumulate before performing a backward/update pass."}, + ) + eval_accumulation_steps: Optional[int] = field( + default=None, + metadata={"help": "Number of predictions steps to accumulate before moving the tensors to the CPU."}, + ) + + eval_delay: Optional[float] = field( + default=0, + metadata={ + "help": ( + "Number of epochs or steps to wait for before the first evaluation can be performed, depending on the" + " evaluation_strategy." 
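+ # Illustrative sketch of the core batch-size fields defined above; values are placeholders. With 8
+ # samples per device and 4 accumulation steps, each optimizer update covers 32 samples per device.
+ from transformers import TrainingArguments
+
+ args = TrainingArguments(
+     output_dir="basic-run",            # placeholder path
+     do_train=True,
+     per_device_train_batch_size=8,
+     per_device_eval_batch_size=16,
+     gradient_accumulation_steps=4,
+     evaluation_strategy="steps",
+     eval_steps=500,
+ )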
+ ) + }, + ) + + learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."}) + weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."}) + adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"}) + adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"}) + adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}) + max_grad_norm: float = field(default=1.0, metadata={"help": "Max gradient norm."}) + + num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."}) + max_steps: int = field( + default=-1, + metadata={"help": "If > 0: set total number of training steps to perform. Override num_train_epochs."}, + ) + lr_scheduler_type: Union[SchedulerType, str] = field( + default="linear", + metadata={"help": "The scheduler type to use."}, + ) + lr_scheduler_kwargs: Optional[Union[dict, str]] = field( + default_factory=dict, + metadata={ + "help": ( + "Extra parameters for the lr_scheduler such as {'num_cycles': 1} for the cosine with hard restarts." + ) + }, + ) + warmup_ratio: float = field( + default=0.0, metadata={"help": "Linear warmup over warmup_ratio fraction of total steps."} + ) + warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."}) + + log_level: Optional[str] = field( + default="passive", + metadata={ + "help": ( + "Logger log level to use on the main node. Possible choices are the log levels as strings: 'debug'," + " 'info', 'warning', 'error' and 'critical', plus a 'passive' level which doesn't set anything and" + " lets the application set the level. Defaults to 'passive'." + ), + "choices": trainer_log_levels.keys(), + }, + ) + log_level_replica: Optional[str] = field( + default="warning", + metadata={ + "help": "Logger log level to use on replica nodes. Same choices and defaults as ``log_level``", + "choices": trainer_log_levels.keys(), + }, + ) + log_on_each_node: bool = field( + default=True, + metadata={ + "help": ( + "When doing a multinode distributed training, whether to log once per node or just once on the main" + " node." + ) + }, + ) + logging_dir: Optional[str] = field(default=None, metadata={"help": "Tensorboard log dir."}) + logging_strategy: Union[IntervalStrategy, str] = field( + default="steps", + metadata={"help": "The logging strategy to use."}, + ) + logging_first_step: bool = field(default=False, metadata={"help": "Log the first global_step"}) + logging_steps: float = field( + default=500, + metadata={ + "help": ( + "Log every X updates steps. Should be an integer or a float in range `[0,1)`. " + "If smaller than 1, will be interpreted as ratio of total training steps." + ) + }, + ) + logging_nan_inf_filter: bool = field(default=True, metadata={"help": "Filter nan and inf losses for logging."}) + save_strategy: Union[IntervalStrategy, str] = field( + default="steps", + metadata={"help": "The checkpoint save strategy to use."}, + ) + save_steps: float = field( + default=500, + metadata={ + "help": ( + "Save checkpoint every X updates steps. Should be an integer or a float in range `[0,1)`. " + "If smaller than 1, will be interpreted as ratio of total training steps." + ) + }, + ) + save_total_limit: Optional[int] = field( + default=None, + metadata={ + "help": ( + "If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in" + " `output_dir`. 
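+ # Illustrative sketch of the scheduler, warmup and logging/checkpoint cadence fields above; the
+ # lr_scheduler_kwargs dict is forwarded to the scheduler and the values shown are placeholders.
+ from transformers import TrainingArguments
+
+ args = TrainingArguments(
+     output_dir="sched-run",                    # placeholder path
+     learning_rate=5e-5,
+     lr_scheduler_type="cosine_with_restarts",
+     lr_scheduler_kwargs={"num_cycles": 2},
+     warmup_ratio=0.1,                          # linear warmup over 10% of total steps
+     logging_strategy="steps",
+     logging_steps=50,
+     save_strategy="steps",
+     save_steps=500,
+ )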
When `load_best_model_at_end` is enabled, the 'best' checkpoint according to" + " `metric_for_best_model` will always be retained in addition to the most recent ones. For example," + " for `save_total_limit=5` and `load_best_model_at_end=True`, the four last checkpoints will always be" + " retained alongside the best model. When `save_total_limit=1` and `load_best_model_at_end=True`," + " it is possible that two checkpoints are saved: the last one and the best one (if they are different)." + " Default is unlimited checkpoints" + ) + }, + ) + save_safetensors: Optional[bool] = field( + default=True, + metadata={ + "help": "Use safetensors saving and loading for state dicts instead of default torch.load and torch.save." + }, + ) + save_on_each_node: bool = field( + default=False, + metadata={ + "help": ( + "When doing multi-node distributed training, whether to save models and checkpoints on each node, or" + " only on the main one" + ) + }, + ) + save_only_model: bool = field( + default=False, + metadata={ + "help": ( + "When checkpointing, whether to only save the model, or also the optimizer, scheduler & rng state." + "Note that when this is true, you won't be able to resume training from checkpoint." + "This enables you to save storage by not storing the optimizer, scheduler & rng state." + "You can only load the model using from_pretrained with this option set to True." + ) + }, + ) + no_cuda: bool = field( + default=False, + metadata={"help": "This argument is deprecated. It will be removed in version 5.0 of 🤗 Transformers."}, + ) + use_cpu: bool = field( + default=False, + metadata={ + "help": " Whether or not to use cpu. If set to False, we will use cuda/tpu/mps/npu device if available." + }, + ) + use_mps_device: bool = field( + default=False, + metadata={ + "help": "This argument is deprecated. `mps` device will be used if available similar to `cuda` device." + " It will be removed in version 5.0 of 🤗 Transformers" + }, + ) + seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."}) + data_seed: Optional[int] = field(default=None, metadata={"help": "Random seed to be used with data samplers."}) + jit_mode_eval: bool = field( + default=False, metadata={"help": "Whether or not to use PyTorch jit trace for inference"} + ) + use_ipex: bool = field( + default=False, + metadata={ + "help": ( + "Use Intel extension for PyTorch when it is available, installation:" + " 'https://github.com/intel/intel-extension-for-pytorch'" + ) + }, + ) + bf16: bool = field( + default=False, + metadata={ + "help": ( + "Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA" + " architecture or using CPU (use_cpu) or Ascend NPU. This is an experimental API and it may change." + ) + }, + ) + fp16: bool = field( + default=False, + metadata={"help": "Whether to use fp16 (mixed) precision instead of 32-bit"}, + ) + fp16_opt_level: str = field( + default="O1", + metadata={ + "help": ( + "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " + "See details at https://nvidia.github.io/apex/amp.html" + ) + }, + ) + half_precision_backend: str = field( + default="auto", + metadata={ + "help": "The backend to be used for half precision.", + "choices": ["auto", "apex", "cpu_amp"], + }, + ) + bf16_full_eval: bool = field( + default=False, + metadata={ + "help": ( + "Whether to use full bfloat16 evaluation instead of 32-bit. This is an experimental API and it may" + " change." 
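+ # Illustrative sketch of the checkpoint-retention and mixed-precision fields above; values are
+ # placeholders, and bf16=True is only accepted on hardware that __post_init__ recognises as bf16-capable.
+ from transformers import TrainingArguments
+
+ args = TrainingArguments(
+     output_dir="mixed-precision-run",  # placeholder path
+     bf16=True,                         # bfloat16 mixed precision (mutually exclusive with fp16=True)
+     save_total_limit=3,                # keep only the 3 most recent checkpoints
+     save_safetensors=True,             # store state dicts with safetensors
+     save_only_model=True,              # drop optimizer/scheduler/rng state; resuming is not possible
+ )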
+ ) + }, + ) + fp16_full_eval: bool = field( + default=False, + metadata={"help": "Whether to use full float16 evaluation instead of 32-bit"}, + ) + tf32: Optional[bool] = field( + default=None, + metadata={ + "help": ( + "Whether to enable tf32 mode, available in Ampere and newer GPU architectures. This is an experimental" + " API and it may change." + ) + }, + ) + local_rank: int = field(default=-1, metadata={"help": "For distributed training: local_rank"}) + ddp_backend: Optional[str] = field( + default=None, + metadata={ + "help": "The backend to be used for distributed training", + "choices": ["nccl", "gloo", "mpi", "ccl", "hccl", "cncl"], + }, + ) + tpu_num_cores: Optional[int] = field( + default=None, metadata={"help": "TPU: Number of TPU cores (automatically passed by launcher script)"} + ) + tpu_metrics_debug: bool = field( + default=False, + metadata={ + "help": ( + "Deprecated, the use of `--debug tpu_metrics_debug` is preferred. TPU: Whether to print debug metrics" + ) + }, + ) + debug: Union[str, List[DebugOption]] = field( + default="", + metadata={ + "help": ( + "Whether or not to enable debug mode. Current options: " + "`underflow_overflow` (Detect underflow and overflow in activations and weights), " + "`tpu_metrics_debug` (print debug metrics on TPU)." + ) + }, + ) + + dataloader_drop_last: bool = field( + default=False, metadata={"help": "Drop the last incomplete batch if it is not divisible by the batch size."} + ) + eval_steps: Optional[float] = field( + default=None, + metadata={ + "help": ( + "Run an evaluation every X steps. Should be an integer or a float in range `[0,1)`. " + "If smaller than 1, will be interpreted as ratio of total training steps." + ) + }, + ) + dataloader_num_workers: int = field( + default=0, + metadata={ + "help": ( + "Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded" + " in the main process." + ) + }, + ) + dataloader_prefetch_factor: Optional[int] = field( + default=None if not is_torch_available() or is_torch_greater_or_equal_than_2_0 else 2, + metadata={ + "help": ( + "Number of batches loaded in advance by each worker. " + "2 means there will be a total of 2 * num_workers batches prefetched across all workers. " + "Default is 2 for PyTorch < 2.0.0 and otherwise None." + ) + }, + ) + past_index: int = field( + default=-1, + metadata={"help": "If >=0, uses the corresponding part of the output as the past state for next step."}, + ) + + run_name: Optional[str] = field( + default=None, metadata={"help": "An optional descriptor for the run. Notably used for wandb logging."} + ) + disable_tqdm: Optional[bool] = field( + default=None, metadata={"help": "Whether or not to disable the tqdm progress bars."} + ) + + remove_unused_columns: Optional[bool] = field( + default=True, metadata={"help": "Remove columns not required by the model when using an nlp.Dataset."} + ) + label_names: Optional[List[str]] = field( + default=None, metadata={"help": "The list of keys in your dictionary of inputs that correspond to the labels."} + ) + load_best_model_at_end: Optional[bool] = field( + default=False, + metadata={ + "help": ( + "Whether or not to load the best model found during training at the end of training. When this option" + " is enabled, the best checkpoint will always be saved. See `save_total_limit` for more." 
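+ # Illustrative sketch of the dataloader fields above; dataloader_prefetch_factor is only accepted
+ # when dataloader_num_workers > 0, since prefetching happens in the worker processes.
+ from transformers import TrainingArguments
+
+ args = TrainingArguments(
+     output_dir="dataloader-run",    # placeholder path
+     dataloader_num_workers=4,
+     dataloader_prefetch_factor=2,   # 2 * num_workers batches prefetched in total
+     dataloader_pin_memory=True,
+     dataloader_drop_last=True,      # drop the last incomplete batch
+ )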
+ ) + }, + ) + metric_for_best_model: Optional[str] = field( + default=None, metadata={"help": "The metric to use to compare two different models."} + ) + greater_is_better: Optional[bool] = field( + default=None, metadata={"help": "Whether the `metric_for_best_model` should be maximized or not."} + ) + ignore_data_skip: bool = field( + default=False, + metadata={ + "help": ( + "When resuming training, whether or not to skip the first epochs and batches to get to the same" + " training data." + ) + }, + ) + fsdp: Optional[Union[List[FSDPOption], str]] = field( + default="", + metadata={ + "help": ( + "Whether or not to use PyTorch Fully Sharded Data Parallel (FSDP) training (in distributed training" + " only). The base option should be `full_shard`, `shard_grad_op` or `no_shard` and you can add" + " CPU-offload to `full_shard` or `shard_grad_op` like this: full_shard offload` or `shard_grad_op" + " offload`. You can add auto-wrap to `full_shard` or `shard_grad_op` with the same syntax: full_shard" + " auto_wrap` or `shard_grad_op auto_wrap`." + ), + }, + ) + fsdp_min_num_params: int = field( + default=0, + metadata={ + "help": ( + "This parameter is deprecated. FSDP's minimum number of parameters for Default Auto Wrapping. (useful" + " only when `fsdp` field is passed)." + ) + }, + ) + fsdp_config: Optional[Union[dict, str]] = field( + default=None, + metadata={ + "help": ( + "Config to be used with FSDP (Pytorch Fully Sharded Data Parallel). The value is either a " + "fsdp json config file (e.g., `fsdp_config.json`) or an already loaded json file as `dict`." + ) + }, + ) + fsdp_transformer_layer_cls_to_wrap: Optional[str] = field( + default=None, + metadata={ + "help": ( + "This parameter is deprecated. Transformer layer class name (case-sensitive) to wrap, e.g," + " `BertLayer`, `GPTJBlock`, `T5Block` .... (useful only when `fsdp` flag is passed)." + ) + }, + ) + accelerator_config: Optional[Union[dict, str]] = field( + default=None, + metadata={ + "help": ( + "Config to be used with the internal Accelerator object initializtion. The value is either a " + "accelerator json config file (e.g., `accelerator_config.json`) or an already loaded json file as `dict`." + ) + }, + ) + deepspeed: Optional[Union[dict, str]] = field( + default=None, + metadata={ + "help": ( + "Enable deepspeed and pass the path to deepspeed json config file (e.g. 
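+ # Illustrative sketch of the FSDP fields above; "BertLayer" is a placeholder and must name a module
+ # class that actually occurs in the model being trained.
+ from transformers import TrainingArguments
+
+ args = TrainingArguments(
+     output_dir="fsdp-run",           # placeholder path
+     fsdp="full_shard auto_wrap",     # base sharding strategy plus auto-wrapping
+     fsdp_config={
+         "transformer_layer_cls_to_wrap": ["BertLayer"],  # placeholder layer class name
+     },
+ )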
`ds_config.json`) or an already" + " loaded json file as a dict" + ) + }, + ) + label_smoothing_factor: float = field( + default=0.0, metadata={"help": "The label smoothing epsilon to apply (zero means no label smoothing)."} + ) + + default_optim = "adamw_torch" + # XXX: enable when pytorch==2.0.1 comes out - we want to give it time to get all the bugs sorted out + # if is_torch_available() and version.parse(version.parse(torch.__version__).base_version) >= version.parse("2.1.0"): + # default_optim = "adamw_torch_fused" + # and update the doc above to: + # optim (`str` or [`training_args.OptimizerNames`], *optional*, defaults to `"adamw_torch_fused"` (for torch<2.1.0 `"adamw_torch"`): + optim: Union[OptimizerNames, str] = field( + default=default_optim, + metadata={"help": "The optimizer to use."}, + ) + optim_args: Optional[str] = field(default=None, metadata={"help": "Optional arguments to supply to optimizer."}) + adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."}) + group_by_length: bool = field( + default=False, + metadata={"help": "Whether or not to group samples of roughly the same length together when batching."}, + ) + length_column_name: Optional[str] = field( + default="length", + metadata={"help": "Column name with precomputed lengths to use when grouping by length."}, + ) + report_to: Union[None, str, List[str]] = field( + default=None, metadata={"help": "The list of integrations to report the results and logs to."} + ) + ddp_find_unused_parameters: Optional[bool] = field( + default=None, + metadata={ + "help": ( + "When using distributed training, the value of the flag `find_unused_parameters` passed to " + "`DistributedDataParallel`." + ) + }, + ) + ddp_bucket_cap_mb: Optional[int] = field( + default=None, + metadata={ + "help": ( + "When using distributed training, the value of the flag `bucket_cap_mb` passed to " + "`DistributedDataParallel`." + ) + }, + ) + ddp_broadcast_buffers: Optional[bool] = field( + default=None, + metadata={ + "help": ( + "When using distributed training, the value of the flag `broadcast_buffers` passed to " + "`DistributedDataParallel`." + ) + }, + ) + dataloader_pin_memory: bool = field( + default=True, metadata={"help": "Whether or not to pin memory for DataLoader."} + ) + dataloader_persistent_workers: bool = field( + default=False, + metadata={ + "help": "If True, the data loader will not shut down the worker processes after a dataset has been consumed once. This allows to maintain the workers Dataset instances alive. Can potentially speed up training, but will increase RAM usage." 
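+ # Illustrative sketch of the optimizer-selection and reporting fields above; values are placeholders.
+ from transformers import TrainingArguments
+
+ args = TrainingArguments(
+     output_dir="optim-run",       # placeholder path
+     optim="adamw_torch",          # the default optimizer name
+     group_by_length=True,         # batch samples of similar length together
+     report_to=["tensorboard"],    # or "none" to disable all logging integrations
+ )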
+ }, + ) + skip_memory_metrics: bool = field( + default=True, metadata={"help": "Whether or not to skip adding of memory profiler reports to metrics."} + ) + use_legacy_prediction_loop: bool = field( + default=False, metadata={"help": "Whether or not to use the legacy prediction_loop in the Trainer."} + ) + push_to_hub: bool = field( + default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."} + ) + resume_from_checkpoint: Optional[str] = field( + default=None, + metadata={"help": "The path to a folder with a valid checkpoint for your model."}, + ) + hub_model_id: Optional[str] = field( + default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."} + ) + hub_strategy: Union[HubStrategy, str] = field( + default="every_save", + metadata={"help": "The hub strategy to use when `--push_to_hub` is activated."}, + ) + hub_token: Optional[str] = field(default=None, metadata={"help": "The token to use to push to the Model Hub."}) + hub_private_repo: bool = field(default=False, metadata={"help": "Whether the model repository is private or not."}) + hub_always_push: bool = field( + default=False, + metadata={"help": "Unless `True`, the Trainer will skip pushes if the previous one wasn't finished yet."}, + ) + gradient_checkpointing: bool = field( + default=False, + metadata={ + "help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass." + }, + ) + gradient_checkpointing_kwargs: Optional[Union[dict, str]] = field( + default=None, + metadata={ + "help": "Gradient checkpointing key word arguments such as `use_reentrant`. Will be passed to `torch.utils.checkpoint.checkpoint` through `model.gradient_checkpointing_enable`." + }, + ) + include_inputs_for_metrics: bool = field( + default=False, metadata={"help": "Whether or not the inputs will be passed to the `compute_metrics` function."} + ) + eval_do_concat_batches: bool = field( + default=True, + metadata={ + "help": "Whether to recursively concat inputs/losses/labels/predictions across batches. If `False`, will instead store them as lists, with each batch kept separate." + }, + ) + # Deprecated arguments + fp16_backend: str = field( + default="auto", + metadata={ + "help": "Deprecated. Use half_precision_backend instead", + "choices": ["auto", "apex", "cpu_amp"], + }, + ) + push_to_hub_model_id: Optional[str] = field( + default=None, metadata={"help": "The name of the repository to which push the `Trainer`."} + ) + push_to_hub_organization: Optional[str] = field( + default=None, metadata={"help": "The name of the organization in with to which push the `Trainer`."} + ) + push_to_hub_token: Optional[str] = field( + default=None, metadata={"help": "The token to use to push to the Model Hub."} + ) + _n_gpu: int = field(init=False, repr=False, default=-1) + mp_parameters: str = field( + default="", + metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in Trainer"}, + ) + + auto_find_batch_size: bool = field( + default=False, + metadata={ + "help": ( + "Whether to automatically decrease the batch size in half and rerun the training loop again each time" + " a CUDA Out-of-Memory was reached" + ) + }, + ) + full_determinism: bool = field( + default=False, + metadata={ + "help": ( + "Whether to call enable_full_determinism instead of set_seed for reproducibility in distributed" + " training. Important: this will negatively impact the performance, so only use it for debugging." 
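+ # Illustrative sketch of the Hub-synchronisation fields above; "my-user/my-model" is a placeholder
+ # repository id, and hub_token falls back to the cached `huggingface-cli login` token when unset.
+ from transformers import TrainingArguments
+
+ args = TrainingArguments(
+     output_dir="hub-run",              # placeholder path
+     push_to_hub=True,
+     hub_model_id="my-user/my-model",   # placeholder repo id
+     hub_strategy="every_save",
+     hub_private_repo=True,
+ )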
+ ) + }, + ) + torchdynamo: Optional[str] = field( + default=None, + metadata={ + "help": "This argument is deprecated, use `--torch_compile_backend` instead.", + }, + ) + ray_scope: Optional[str] = field( + default="last", + metadata={ + "help": ( + 'The scope to use when doing hyperparameter search with Ray. By default, `"last"` will be used. Ray' + " will then use the last checkpoint of all trials, compare those, and select the best one. However," + " other options are also available. See the Ray documentation" + " (https://docs.ray.io/en/latest/tune/api_docs/analysis.html" + "#ray.tune.ExperimentAnalysis.get_best_trial)" + " for more options." + ) + }, + ) + ddp_timeout: Optional[int] = field( + default=1800, + metadata={ + "help": "Overrides the default timeout for distributed training (value should be given in seconds)." + }, + ) + torch_compile: bool = field( + default=False, metadata={"help": "If set to `True`, the model will be wrapped in `torch.compile`."} + ) + torch_compile_backend: Optional[str] = field( + default=None, + metadata={ + "help": "Which backend to use with `torch.compile`, passing one will trigger a model compilation.", + }, + ) + torch_compile_mode: Optional[str] = field( + default=None, + metadata={ + "help": "Which mode to use with `torch.compile`, passing one will trigger a model compilation.", + }, + ) + + dispatch_batches: Optional[bool] = field( + default=None, + metadata={"help": "Deprecated. Pass {'dispatch_batches':VALUE} to `accelerator_config`."}, + ) + + split_batches: Optional[bool] = field( + default=None, + metadata={"help": "Deprecated. Pass {'split_batches':True} to `accelerator_config`."}, + ) + + include_tokens_per_second: Optional[bool] = field( + default=False, + metadata={"help": "If set to `True`, the speed metrics will include `tgs` (tokens per second per device)."}, + ) + + include_num_input_tokens_seen: Optional[bool] = field( + default=False, + metadata={ + "help": "If set to `True`, will track the number of input tokens seen throughout training. (May be slower in distributed training)" + }, + ) + + neftune_noise_alpha: Optional[float] = field( + default=None, + metadata={ + "help": "Activates neftune noise embeddings into the model. NEFTune has been proven to drastically improve model performances for instrcution fine-tuning. Check out the original paper here: https://arxiv.org/abs/2310.05914 and the original code here: https://github.com/neelsjain/NEFTune. Only supported for `PreTrainedModel` and `PeftModel` classes." + }, + ) + + optim_target_modules: Union[None, str, List[str]] = field( + default=None, + metadata={ + "help": "Target modules for the optimizer defined in the `optim` argument. Only used for the GaLore optimizer at the moment." 
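+ # Illustrative sketch of the GaLore and NEFTune fields above; the optimizer name comes from the
+ # documented choices, while the target-module patterns are placeholders that must match nn.Linear modules.
+ from transformers import TrainingArguments
+
+ args = TrainingArguments(
+     output_dir="galore-run",               # placeholder path
+     optim="galore_adamw",                  # one of the documented GaLore optimizers
+     optim_target_modules=["attn", "mlp"],  # placeholder module-name patterns
+     neftune_noise_alpha=5.0,               # optional: activate NEFTune noise embeddings
+ )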
+ }, + ) + + def __post_init__(self): + # Parse in args that could be `dict` sent in from the CLI as a string + for field in _VALID_DICT_FIELDS: + passed_value = getattr(self, field) + # We only want to do this if the str starts with a bracket to indiciate a `dict` + # else its likely a filename if supported + if isinstance(passed_value, str) and passed_value.startswith("{"): + loaded_dict = json.loads(passed_value) + # Convert str values to types if applicable + loaded_dict = _convert_str_dict(loaded_dict) + setattr(self, field, loaded_dict) + + # expand paths, if not os.makedirs("~/bar") will make directory + # in the current directory instead of the actual home + # see https://github.com/huggingface/transformers/issues/10628 + if self.output_dir is not None: + self.output_dir = os.path.expanduser(self.output_dir) + if self.logging_dir is None and self.output_dir is not None: + self.logging_dir = os.path.join(self.output_dir, default_logdir()) + if self.logging_dir is not None: + self.logging_dir = os.path.expanduser(self.logging_dir) + + if self.disable_tqdm is None: + self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN + + if isinstance(self.evaluation_strategy, EvaluationStrategy): + warnings.warn( + "using `EvaluationStrategy` for `evaluation_strategy` is deprecated and will be removed in version 5" + " of 🤗 Transformers. Use `IntervalStrategy` instead", + FutureWarning, + ) + # Go back to the underlying string or we won't be able to instantiate `IntervalStrategy` on it. + self.evaluation_strategy = self.evaluation_strategy.value + if self.no_cuda: + warnings.warn( + "using `no_cuda` is deprecated and will be removed in version 5.0 of 🤗 Transformers. " + "Use `use_cpu` instead", + FutureWarning, + ) + self.use_cpu = self.no_cuda + + self.evaluation_strategy = IntervalStrategy(self.evaluation_strategy) + self.logging_strategy = IntervalStrategy(self.logging_strategy) + self.save_strategy = IntervalStrategy(self.save_strategy) + self.hub_strategy = HubStrategy(self.hub_strategy) + + self.lr_scheduler_type = SchedulerType(self.lr_scheduler_type) + if self.do_eval is False and self.evaluation_strategy != IntervalStrategy.NO: + self.do_eval = True + + # eval_steps has to be defined and non-zero, fallbacks to logging_steps if the latter is non-zero + if self.evaluation_strategy == IntervalStrategy.STEPS and (self.eval_steps is None or self.eval_steps == 0): + if self.logging_steps > 0: + logger.info(f"using `logging_steps` to initialize `eval_steps` to {self.logging_steps}") + self.eval_steps = self.logging_steps + else: + raise ValueError( + f"evaluation strategy {self.evaluation_strategy} requires either non-zero --eval_steps or" + " --logging_steps" + ) + + # logging_steps must be non-zero for logging_strategy that is other than 'no' + if self.logging_strategy == IntervalStrategy.STEPS and self.logging_steps == 0: + raise ValueError(f"logging strategy {self.logging_strategy} requires non-zero --logging_steps") + + if self.logging_strategy == IntervalStrategy.STEPS and self.logging_steps > 1: + if self.logging_steps != int(self.logging_steps): + raise ValueError(f"--logging_steps must be an integer if bigger than 1: {self.logging_steps}") + self.logging_steps = int(self.logging_steps) + if self.evaluation_strategy == IntervalStrategy.STEPS and self.eval_steps > 1: + if self.eval_steps != int(self.eval_steps): + raise ValueError(f"--eval_steps must be an integer if bigger than 1: {self.eval_steps}") + self.eval_steps = int(self.eval_steps) + if self.save_strategy == 
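+ # Illustrative sketch of the dict-field parsing performed above: dict-typed arguments can be passed on
+ # the command line as JSON strings and __post_init__ json-loads anything starting with "{", assuming
+ # lr_scheduler_kwargs is among the _VALID_DICT_FIELDS handled there. The argv list is a placeholder.
+ from transformers import HfArgumentParser, TrainingArguments
+
+ parser = HfArgumentParser(TrainingArguments)
+ (args,) = parser.parse_args_into_dataclasses(
+     args=[
+         "--output_dir", "cli-run",
+         "--lr_scheduler_type", "cosine_with_restarts",
+         "--lr_scheduler_kwargs", '{"num_cycles": 2}',
+     ]
+ )
+ # args.lr_scheduler_kwargs is now the parsed dict {"num_cycles": 2}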
IntervalStrategy.STEPS and self.save_steps > 1: + if self.save_steps != int(self.save_steps): + raise ValueError(f"--save_steps must be an integer if bigger than 1: {self.save_steps}") + self.save_steps = int(self.save_steps) + + # Sanity checks for load_best_model_at_end: we require save and eval strategies to be compatible. + if self.load_best_model_at_end: + if self.evaluation_strategy != self.save_strategy: + raise ValueError( + "--load_best_model_at_end requires the save and eval strategy to match, but found\n- Evaluation " + f"strategy: {self.evaluation_strategy}\n- Save strategy: {self.save_strategy}" + ) + if self.evaluation_strategy == IntervalStrategy.STEPS and self.save_steps % self.eval_steps != 0: + if self.eval_steps < 1 or self.save_steps < 1: + if not (self.eval_steps < 1 and self.save_steps < 1): + raise ValueError( + "--load_best_model_at_end requires the saving steps to be a multiple of the evaluation " + "steps, which cannot get guaranteed when mixing ratio and absolute steps for save_steps " + f"{self.save_steps} and eval_steps {self.eval_steps}." + ) + # Work around floating point precision issues + LARGE_MULTIPLIER = 1_000_000 + if (self.save_steps * LARGE_MULTIPLIER) % (self.eval_steps * LARGE_MULTIPLIER) != 0: + raise ValueError( + "--load_best_model_at_end requires the saving steps to be a multiple of the evaluation " + f"steps, but found {self.save_steps}, which is not a multiple of {self.eval_steps}." + ) + raise ValueError( + "--load_best_model_at_end requires the saving steps to be a round multiple of the evaluation " + f"steps, but found {self.save_steps}, which is not a round multiple of {self.eval_steps}." + ) + + safetensors_available = is_safetensors_available() + if self.save_safetensors and not safetensors_available: + raise ValueError(f"--save_safetensors={self.save_safetensors} requires safetensors to be installed!") + if not self.save_safetensors and safetensors_available: + logger.info( + f"Found safetensors installation, but --save_safetensors={self.save_safetensors}. " + f"Safetensors should be a preferred weights saving format due to security and performance reasons. " + f"If your model cannot be saved by safetensors please feel free to open an issue at " + f"https://github.com/huggingface/safetensors!" + ) + + if ( + self.load_best_model_at_end or self.lr_scheduler_type == SchedulerType.REDUCE_ON_PLATEAU + ) and self.metric_for_best_model is None: + self.metric_for_best_model = "loss" + if self.greater_is_better is None and self.metric_for_best_model is not None: + self.greater_is_better = self.metric_for_best_model not in ["loss", "eval_loss"] + if self.run_name is None: + self.run_name = self.output_dir + if self.framework == "pt" and is_torch_available(): + if self.fp16_backend and self.fp16_backend != "auto": + warnings.warn( + "`fp16_backend` is deprecated and will be removed in version 5 of 🤗 Transformers. Use" + " `half_precision_backend` instead", + FutureWarning, + ) + self.half_precision_backend = self.fp16_backend + + if self.bf16 or self.bf16_full_eval: + if self.use_cpu and not is_torch_bf16_cpu_available() and not is_torch_xla_available(): + # cpu + raise ValueError("Your setup doesn't support bf16/(cpu, tpu, neuroncore). You need torch>=1.10") + elif not self.use_cpu: + if torch.cuda.is_available() and not is_torch_bf16_gpu_available(): + # gpu + raise ValueError( + "Your setup doesn't support bf16/gpu. 
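+ # Illustrative sketch of a combination that passes the load_best_model_at_end checks above: the eval
+ # and save strategies match and save_steps is a round multiple of eval_steps. Values are placeholders.
+ from transformers import TrainingArguments
+
+ args = TrainingArguments(
+     output_dir="best-model-run",    # placeholder path
+     evaluation_strategy="steps",
+     eval_steps=100,
+     save_strategy="steps",
+     save_steps=500,                 # a multiple of eval_steps, so the sanity check passes
+     load_best_model_at_end=True,
+     metric_for_best_model="loss",   # also the value filled in automatically when left unset
+ )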
You need torch>=1.10, using Ampere GPU with cuda>=11.0" + ) + elif not is_torch_xpu_available(): + # xpu + from .pytorch_utils import is_torch_greater_or_equal_than_1_12 + + if not is_torch_greater_or_equal_than_1_12: + raise ValueError( + "Your setup doesn't support bf16/xpu. You need torch>=1.12, using Intel XPU/GPU with IPEX installed" + ) + + if self.fp16 and self.bf16: + raise ValueError("At most one of fp16 and bf16 can be True, but not both") + + if self.fp16_full_eval and self.bf16_full_eval: + raise ValueError("At most one of fp16 and bf16 can be True for full eval, but not both") + + if self.bf16: + if self.half_precision_backend == "apex": + raise ValueError(" `--half_precision_backend apex`: GPU bf16 is not supported by apex.") + + if self.lr_scheduler_type == SchedulerType.REDUCE_ON_PLATEAU: + if self.evaluation_strategy == IntervalStrategy.NO: + raise ValueError("lr_scheduler_type reduce_lr_on_plateau requires an eval strategy") + if not is_torch_available(): + raise ValueError("lr_scheduler_type reduce_lr_on_plateau requires torch>=0.2.0") + + self.optim = OptimizerNames(self.optim) + if self.adafactor: + warnings.warn( + "`--adafactor` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `--optim" + " adafactor` instead", + FutureWarning, + ) + self.optim = OptimizerNames.ADAFACTOR + if self.optim == OptimizerNames.ADAMW_TORCH_FUSED and is_torch_available(): + if version.parse(version.parse(torch.__version__).base_version) < version.parse("2.0.0"): + raise ValueError("--optim adamw_torch_fused requires PyTorch 2.0 or higher") + # there is a bug in fp16/AMP in pt-2.0.0 + if version.parse(version.parse(torch.__version__).base_version) == version.parse("2.0.0") and self.fp16: + raise ValueError("--optim adamw_torch_fused with --fp16 requires PyTorch>2.0") + + if ( + self.framework == "pt" + and is_torch_available() + and (self.device.type != "cuda") + and (self.device.type != "mlu") + and (self.device.type != "npu") + and (self.device.type != "xpu") + and (get_xla_device_type(self.device) not in ["GPU", "CUDA"]) + and (self.fp16 or self.fp16_full_eval) + ): + raise ValueError( + "FP16 Mixed precision training with AMP or APEX (`--fp16`) and FP16 half precision evaluation" + " (`--fp16_full_eval`) can only be used on CUDA or MLU devices or NPU devices or certain XPU devices (with IPEX)." + ) + + if ( + self.framework == "pt" + and is_torch_available() + and (self.device.type != "cuda") + and (self.device.type != "mlu") + and (self.device.type != "npu") + and (self.device.type != "xpu") + and (get_xla_device_type(self.device) not in ["GPU", "CUDA"]) + and (get_xla_device_type(self.device) != "TPU") + and (self.device.type != "cpu") + and (self.bf16 or self.bf16_full_eval) + ): + raise ValueError( + "BF16 Mixed precision training with AMP (`--bf16`) and BF16 half precision evaluation" + " (`--bf16_full_eval`) can only be used on CUDA, XPU (with IPEX), NPU, MLU or CPU/TPU/NeuronCore devices." + ) + + if self.torchdynamo is not None: + warnings.warn( + "`torchdynamo` is deprecated and will be removed in version 5 of 🤗 Transformers. 
Use" + " `torch_compile_backend` instead", + FutureWarning, + ) + self.torch_compile_backend = self.torchdynamo + if (self.torch_compile_mode is not None or self.torch_compile_backend is not None) and not self.torch_compile: + self.torch_compile = True + if self.torch_compile and self.torch_compile_backend is None: + self.torch_compile_backend = "inductor" + + # accelerate integration for torch compile + if self.torch_compile: + # set env vars for accelerate + prefix = "ACCELERATE_DYNAMO_" + os.environ[prefix + "BACKEND"] = self.torch_compile_backend + if self.torch_compile_mode is not None: + os.environ[prefix + "MODE"] = self.torch_compile_mode + + if self.framework == "pt" and is_torch_available() and self.torch_compile: + if is_torch_tf32_available(): + if self.tf32 is None and not self.fp16 or self.bf16: + logger.info( + "Setting TF32 in CUDA backends to speedup torch compile, you won't see any improvement" + " otherwise." + ) + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.allow_tf32 = True + else: + logger.warning( + "The speedups for torchdynamo mostly come wih GPU Ampere or higher and which is not detected here." + ) + if self.framework == "pt" and is_torch_available() and self.tf32 is not None: + if self.tf32: + if is_torch_tf32_available(): + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.allow_tf32 = True + else: + raise ValueError("--tf32 requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7") + else: + if is_torch_tf32_available(): + torch.backends.cuda.matmul.allow_tf32 = False + torch.backends.cudnn.allow_tf32 = False + # no need to assert on else + + # if training args is specified, it will override the one specified in the accelerate config + if self.half_precision_backend != "apex": + mixed_precision_dtype = os.environ.get("ACCELERATE_MIXED_PRECISION", "no") + if self.fp16: + mixed_precision_dtype = "fp16" + elif self.bf16: + mixed_precision_dtype = "bf16" + os.environ["ACCELERATE_MIXED_PRECISION"] = mixed_precision_dtype + + if self.report_to is None: + logger.info( + "The default value for the training argument `--report_to` will change in v5 (from all installed " + "integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as " + "now. You should start updating your code and make this info disappear :-)." + ) + self.report_to = "all" + if self.report_to == "all" or self.report_to == ["all"]: + # Import at runtime to avoid a circular import. + from .integrations import get_available_reporting_integrations + + self.report_to = get_available_reporting_integrations() + elif self.report_to == "none" or self.report_to == ["none"]: + self.report_to = [] + elif not isinstance(self.report_to, list): + self.report_to = [self.report_to] + + if self.warmup_ratio < 0 or self.warmup_ratio > 1: + raise ValueError("warmup_ratio must lie in range [0,1]") + elif self.warmup_ratio > 0 and self.warmup_steps > 0: + logger.info( + "Both warmup_ratio and warmup_steps given, warmup_steps will override any effect of warmup_ratio" + " during training" + ) + + if isinstance(self.fsdp, bool): + self.fsdp = "full_shard" if self.fsdp else "" + if isinstance(self.fsdp, str): + self.fsdp = [FSDPOption(s) for s in self.fsdp.split()] + if self.fsdp == [FSDPOption.OFFLOAD]: + raise ValueError( + "`--fsdp offload` can't work on its own. It needs to be added to `--fsdp full_shard` or " + '`--fsdp shard_grad_op`. For example, `--fsdp "full_shard offload"`.' 
+ ) + elif FSDPOption.FULL_SHARD in self.fsdp and FSDPOption.SHARD_GRAD_OP in self.fsdp: + raise ValueError("`--fsdp full_shard` is not compatible with `--fsdp shard_grad_op`.") + + if self.fsdp_config is None: + self.fsdp_config = {} + + if isinstance(self.fsdp_config, str): + if len(self.fsdp) == 0: + warnings.warn("`--fsdp_config` is useful only when `--fsdp` is specified.") + with io.open(self.fsdp_config, "r", encoding="utf-8") as f: + self.fsdp_config = json.load(f) + for k in list(self.fsdp_config.keys()): + if k.startswith("fsdp_"): + v = self.fsdp_config.pop(k) + self.fsdp_config[k[5:]] = v + + if self.fsdp_min_num_params > 0: + warnings.warn("using `--fsdp_min_num_params` is deprecated. Use fsdp_config instead ", FutureWarning) + + self.fsdp_config["min_num_params"] = max(self.fsdp_config.get("min_num_params", 0), self.fsdp_min_num_params) + + # if fsdp_config["transformer_layer_cls_to_wrap"] is specified as a string, convert it to a list with a single object + if isinstance(self.fsdp_config.get("transformer_layer_cls_to_wrap", None), str): + self.fsdp_config["transformer_layer_cls_to_wrap"] = [self.fsdp_config["transformer_layer_cls_to_wrap"]] + + if self.fsdp_transformer_layer_cls_to_wrap is not None: + warnings.warn( + "using `--fsdp_transformer_layer_cls_to_wrap` is deprecated. Use fsdp_config instead ", FutureWarning + ) + self.fsdp_config["transformer_layer_cls_to_wrap"] = self.fsdp_config.get( + "transformer_layer_cls_to_wrap", [] + ) + [self.fsdp_transformer_layer_cls_to_wrap] + + if len(self.fsdp) == 0 and self.fsdp_config["min_num_params"] > 0: + warnings.warn("`min_num_params` is useful only when `--fsdp` is specified.") + + if len(self.fsdp) == 0 and self.fsdp_config.get("transformer_layer_cls_to_wrap", None) is not None: + warnings.warn("`transformer_layer_cls_to_wrap` is useful only when `--fsdp` is specified.") + + if ( + len(self.fsdp) > 0 + and self.fsdp_config["min_num_params"] > 0 + and self.fsdp_config.get("transformer_layer_cls_to_wrap", None) is not None + ): + raise ValueError("`min_num_params` and `transformer_layer_cls_to_wrap` are mutually exclusive.") + self.fsdp_config["xla"] = self.fsdp_config.get("xla", False) + self.fsdp_config["xla_fsdp_v2"] = self.fsdp_config.get("xla_fsdp_v2", False) + self.fsdp_config["xla_fsdp_grad_ckpt"] = self.fsdp_config.get("xla_fsdp_grad_ckpt", False) + if self.fsdp_config["xla"]: + if len(self.fsdp) > 0: + # store XLA fsdp configuration parameters into a dictionary + # Copy the config to avoid modifying the original config (which may be used for JSON serialization) + self.xla_fsdp_config = self.fsdp_config.get("xla_fsdp_settings", {}).copy() + # apply appropriate string to torch.dtype conversions for parameters + if "compute_dtype" in self.xla_fsdp_config: + self.xla_fsdp_config["compute_dtype"] = getattr(torch, self.xla_fsdp_config["compute_dtype"]) + if "buffer_dtype" in self.xla_fsdp_config: + self.xla_fsdp_config["buffer_dtype"] = getattr(torch, self.xla_fsdp_config["buffer_dtype"]) + else: + warnings.warn("XLA FSDP can be used only when `--fsdp` is specified.") + else: + if self.fsdp_config["xla_fsdp_grad_ckpt"]: + warnings.warn("`--xla_fsdp_grad_ckpt` is useful only when `--xla` is set to true.") + + # accelerate integration for FSDP + if len(self.fsdp) > 0 and not self.fsdp_config["xla"]: + os.environ["ACCELERATE_USE_FSDP"] = "true" + from accelerate.utils.constants import ( + FSDP_AUTO_WRAP_POLICY, + FSDP_SHARDING_STRATEGY, + ) + + prefix = "FSDP_" + for fsdp_option in self.fsdp: + if fsdp_option.upper() in 
FSDP_SHARDING_STRATEGY: + # set environment variable for FSDP sharding strategy + os.environ[f"{prefix}SHARDING_STRATEGY"] = ( + str(FSDP_SHARDING_STRATEGY.index(fsdp_option.upper()) + 1) + if is_accelerate_available("0.26.0") + else fsdp_option.upper() + ) + elif fsdp_option == FSDPOption.OFFLOAD: + os.environ[f"{prefix}OFFLOAD_PARAMS"] = "true" + elif fsdp_option == FSDPOption.AUTO_WRAP: + os.environ[f"{prefix}AUTO_WRAP_POLICY"] = FSDP_AUTO_WRAP_POLICY[0] + if self.fsdp_config["min_num_params"] > 0: + os.environ[f"{prefix}MIN_NUM_PARAMS"] = str(self.fsdp_config["min_num_params"]) + os.environ[f"{prefix}AUTO_WRAP_POLICY"] = FSDP_AUTO_WRAP_POLICY[1] + elif self.fsdp_config.get("transformer_layer_cls_to_wrap", None) is not None: + os.environ[f"{prefix}TRANSFORMER_CLS_TO_WRAP"] = ",".join( + self.fsdp_config["transformer_layer_cls_to_wrap"] + ) + prefetch_policy = self.fsdp_config.get("backward_prefetch", "NO_PREFETCH") + os.environ[f"{prefix}BACKWARD_PREFETCH"] = prefetch_policy.upper() + os.environ[f"{prefix}FORWARD_PREFETCH"] = self.fsdp_config.get("forward_prefetch", "false") + os.environ[f"{prefix}SYNC_MODULE_STATES"] = self.fsdp_config.get("sync_module_states", "true") + os.environ[f"{prefix}USE_ORIG_PARAMS"] = self.fsdp_config.get("use_orig_params", "true") + + if is_accelerate_available(): + if not isinstance(self.accelerator_config, (AcceleratorConfig)): + if self.accelerator_config is None: + self.accelerator_config = AcceleratorConfig() + elif isinstance(self.accelerator_config, dict): + self.accelerator_config = AcceleratorConfig(**self.accelerator_config) + # Check that a user didn't pass in the class instantiator + # such as `accelerator_config = AcceleratorConfig` + elif isinstance(self.accelerator_config, type): + raise NotImplementedError( + "Tried passing in a callable to `accelerator_config`, but this is not supported. " + "Please pass in a fully constructed `AcceleratorConfig` object instead." + ) + else: + self.accelerator_config = AcceleratorConfig.from_json_file(self.accelerator_config) + if self.dispatch_batches is not None: + warnings.warn( + "Using `--dispatch_batches` is deprecated and will be removed in version 4.41 of 🤗 Transformers. Use" + " `--accelerator_config {'dispatch_batches':VALUE} instead", + FutureWarning, + ) + self.accelerator_config.dispatch_batches = self.dispatch_batches + + if self.split_batches is not None: + warnings.warn( + "Using `--split_batches` is deprecated and will be removed in version 4.41 of 🤗 Transformers. Use" + " `--accelerator_config {'split_batches':VALUE} instead", + FutureWarning, + ) + self.accelerator_config.split_batches = self.split_batches + + if self.tpu_metrics_debug: + warnings.warn( + "using `--tpu_metrics_debug` is deprecated and will be removed in version 5 of 🤗 Transformers. Use" + " `--debug tpu_metrics_debug` instead", + FutureWarning, + ) + if self.debug is None: + self.debug = " tpu_metrics_debug" + else: + self.debug += " tpu_metrics_debug" + self.tpu_metrics_debug = False + + if isinstance(self.debug, str): + self.debug = [DebugOption(s) for s in self.debug.split()] + elif self.debug is None: + self.debug = [] + + self.deepspeed_plugin = None + if self.deepspeed: + # - must be run very last in arg parsing, since it will use a lot of these settings. + # - must be run before the model is created. 
+ if not is_accelerate_available(): + raise ValueError("--deepspeed requires Accelerate to be installed: `pip install accelerate`.") + from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig + + # will be used later by the Trainer + # note: leave self.deepspeed unmodified in case a user relies on it not to be modified) + self.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.deepspeed) + self.hf_deepspeed_config.trainer_config_process(self) + + # Accelerate DeepSpeed Plugin + from accelerate.utils import DeepSpeedPlugin + + os.environ["ACCELERATE_USE_DEEPSPEED"] = "true" + self.deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.hf_deepspeed_config) + elif strtobool(os.environ.get("ACCELERATE_USE_DEEPSPEED", "false")): + # Accelerate DeepSpeed Plugin + from accelerate.utils import DeepSpeedPlugin + + self.deepspeed_plugin = DeepSpeedPlugin() + mixed_precision = os.environ.get("ACCELERATE_MIXED_PRECISION", "no") + self.deepspeed_plugin.set_mixed_precision(mixed_precision) + self.deepspeed_plugin.set_deepspeed_weakref() + + if self.use_cpu: + self.dataloader_pin_memory = False + + if ( + (not is_torch_available() or is_torch_greater_or_equal_than_2_0) + and self.dataloader_num_workers == 0 + and self.dataloader_prefetch_factor is not None + ): + raise ValueError( + "--dataloader_prefetch_factor can only be set when data is loaded in a different process, i.e." + " when --dataloader_num_workers > 1." + ) + + if self.push_to_hub_token is not None: + warnings.warn( + "`--push_to_hub_token` is deprecated and will be removed in version 5 of 🤗 Transformers. Use " + "`--hub_token` instead.", + FutureWarning, + ) + self.hub_token = self.push_to_hub_token + + if self.push_to_hub_model_id is not None: + self.hub_model_id = get_full_repo_name( + self.push_to_hub_model_id, organization=self.push_to_hub_organization, token=self.hub_token + ) + if self.push_to_hub_organization is not None: + warnings.warn( + "`--push_to_hub_model_id` and `--push_to_hub_organization` are deprecated and will be removed in " + "version 5 of 🤗 Transformers. Use `--hub_model_id` instead and pass the full repo name to this " + f"argument (in this case {self.hub_model_id}).", + FutureWarning, + ) + else: + warnings.warn( + "`--push_to_hub_model_id` is deprecated and will be removed in version 5 of 🤗 Transformers. Use " + "`--hub_model_id` instead and pass the full repo name to this argument (in this case " + f"{self.hub_model_id}).", + FutureWarning, + ) + elif self.push_to_hub_organization is not None: + self.hub_model_id = f"{self.push_to_hub_organization}/{Path(self.output_dir).name}" + warnings.warn( + "`--push_to_hub_organization` is deprecated and will be removed in version 5 of 🤗 Transformers. Use " + "`--hub_model_id` instead and pass the full repo name to this argument (in this case " + f"{self.hub_model_id}).", + FutureWarning, + ) + + def __str__(self): + self_as_dict = asdict(self) + + # Remove deprecated arguments. That code should be removed once + # those deprecated arguments are removed from TrainingArguments. 
(TODO: v5) + del self_as_dict["per_gpu_train_batch_size"] + del self_as_dict["per_gpu_eval_batch_size"] + + self_as_dict = {k: f"<{k.upper()}>" if k.endswith("_token") else v for k, v in self_as_dict.items()} + + attrs_as_str = [f"{k}={v},\n" for k, v in sorted(self_as_dict.items())] + return f"{self.__class__.__name__}(\n{''.join(attrs_as_str)})" + + __repr__ = __str__ + + @property + def train_batch_size(self) -> int: + """ + The actual batch size for training (may differ from `per_gpu_train_batch_size` in distributed training). + """ + if self.per_gpu_train_batch_size: + logger.warning( + "Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future " + "version. Using `--per_device_train_batch_size` is preferred." + ) + per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size + train_batch_size = per_device_batch_size * max(1, self.n_gpu) + return train_batch_size + + @property + def eval_batch_size(self) -> int: + """ + The actual batch size for evaluation (may differ from `per_gpu_eval_batch_size` in distributed training). + """ + if self.per_gpu_eval_batch_size: + logger.warning( + "Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future " + "version. Using `--per_device_eval_batch_size` is preferred." + ) + per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size + eval_batch_size = per_device_batch_size * max(1, self.n_gpu) + return eval_batch_size + + @property + def ddp_timeout_delta(self) -> timedelta: + """ + The actual timeout for torch.distributed.init_process_group since it expects a timedelta variable. + """ + return timedelta(seconds=self.ddp_timeout) + + @cached_property + def _setup_devices(self) -> "torch.device": + requires_backends(self, ["torch"]) + logger.info("PyTorch: setting up devices") + if not is_sagemaker_mp_enabled(): + if not is_accelerate_available(): + raise ImportError( + f"Using the `Trainer` with `PyTorch` requires `accelerate>={ACCELERATE_MIN_VERSION}`: " + "Please run `pip install transformers[torch]` or `pip install accelerate -U`" + ) + AcceleratorState._reset_state(reset_partial_state=True) + self.distributed_state = None + if not self.use_ipex and "ACCELERATE_USE_IPEX" not in os.environ: + os.environ["ACCELERATE_USE_IPEX"] = "false" + if self.use_cpu or strtobool(os.environ.get("ACCELERATE_USE_CPU", "False")): + self.distributed_state = PartialState(cpu=True, backend=self.ddp_backend) + self._n_gpu = 0 + elif is_sagemaker_mp_enabled(): + local_rank = smp.local_rank() + device = torch.device("cuda", local_rank) + self._n_gpu = 1 + torch.cuda.set_device(device) + elif is_sagemaker_dp_enabled(): + self.distributed_state = PartialState(_use_sagemaker_dp=True) + self._n_gpu = 1 + elif self.deepspeed: + # Need to do similar for Accelerator init + os.environ["ACCELERATE_USE_DEEPSPEED"] = "true" + self.distributed_state = PartialState(timeout=timedelta(seconds=self.ddp_timeout)) + del os.environ["ACCELERATE_USE_DEEPSPEED"] + self._n_gpu = 1 + else: + self.distributed_state = PartialState( + backend=self.ddp_backend, timeout=timedelta(seconds=self.ddp_timeout) + ) + self._n_gpu = 1 + if not is_sagemaker_mp_enabled(): + device = self.distributed_state.device + self.local_rank = self.distributed_state.local_process_index + if dist.is_available() and dist.is_initialized() and self.parallel_mode != ParallelMode.DISTRIBUTED: + logger.warning( + "torch.distributed process group is initialized, but parallel_mode != 
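+ # Illustrative sketch of the train_batch_size property above: the per-device value is multiplied by
+ # max(1, n_gpu), and gradient accumulation further scales the effective update size. Numbers are placeholders.
+ from transformers import TrainingArguments
+
+ args = TrainingArguments(
+     output_dir="batch-math",            # placeholder path
+     per_device_train_batch_size=8,
+     gradient_accumulation_steps=4,
+ )
+ # On a single-GPU (or CPU) machine args.train_batch_size == 8, so each optimizer
+ # update covers 8 * 4 = 32 samples.
+ effective_update_size = args.train_batch_size * args.gradient_accumulation_steps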
ParallelMode.DISTRIBUTED. " + "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" + ) + if is_torch_xla_available(): + device = self.distributed_state.device + self._n_gpu = 0 + elif is_sagemaker_dp_enabled() or is_sagemaker_mp_enabled(): + # Already set _n_gpu + pass + elif self.distributed_state.distributed_type == DistributedType.NO: + if self.use_mps_device: + warnings.warn( + "`use_mps_device` is deprecated and will be removed in version 5.0 of 🤗 Transformers. " + "`mps` device will be used by default if available similar to the way `cuda` device is used." + "Therefore, no action from user is required. " + ) + if device.type != "mps": + raise ValueError( + "Either you do not have an MPS-enabled device on this machine or MacOS version is not 12.3+ " + "or current PyTorch install was not built with MPS enabled." + ) + if device.type == "mps": + self._n_gpu = 1 + elif self.use_cpu: + device = torch.device("cpu") + self._n_gpu = 0 + elif is_torch_xpu_available(): + device = torch.device("xpu:0") + torch.xpu.set_device(device) + self._n_gpu = 1 + elif is_torch_mlu_available(): + device = torch.device("mlu:0") + torch.mlu.set_device(device) + self._n_gpu = 1 + elif is_torch_npu_available(): + device = torch.device("npu:0") + torch.npu.set_device(device) + self._n_gpu = 1 + else: + # if n_gpu is > 1 we'll use nn.DataParallel. + # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` + # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will + # trigger an error that a device index is missing. Index 0 takes into account the + # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` + # will use the first GPU in that env, i.e. GPU#1 + device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at + # the default value. + self._n_gpu = torch.cuda.device_count() + if device.type == "cuda": + torch.cuda.set_device(device) + return device + + @property + def device(self) -> "torch.device": + """ + The device used by this process. + """ + requires_backends(self, ["torch"]) + return self._setup_devices + + @property + def n_gpu(self): + """ + The number of GPUs used by this process. + + Note: + This will only be greater than one when you have multiple GPUs available but are not using distributed + training. For distributed training, it will always be 1. + """ + requires_backends(self, ["torch"]) + # Make sure `self._n_gpu` is properly setup. + if not hasattr(self, "_n_gpu"): + _ = self._setup_devices + return self._n_gpu + + @property + def parallel_mode(self): + """ + The current mode used for parallelism if multiple GPUs/TPU cores are available. One of: + + - `ParallelMode.NOT_PARALLEL`: no parallelism (CPU or one GPU). + - `ParallelMode.NOT_DISTRIBUTED`: several GPUs in one single process (uses `torch.nn.DataParallel`). + - `ParallelMode.DISTRIBUTED`: several GPUs, each having its own process (uses + `torch.nn.DistributedDataParallel`). + - `ParallelMode.TPU`: several TPU cores. 
+ """ + requires_backends(self, ["torch"]) + if is_torch_xla_available(): + return ParallelMode.TPU + elif is_sagemaker_mp_enabled(): + return ParallelMode.SAGEMAKER_MODEL_PARALLEL + elif is_sagemaker_dp_enabled(): + return ParallelMode.SAGEMAKER_DATA_PARALLEL + elif ( + self.distributed_state is not None and self.distributed_state.distributed_type != DistributedType.NO + ) or (self.distributed_state is None and self.local_rank != -1): + return ParallelMode.DISTRIBUTED + elif self.n_gpu > 1: + return ParallelMode.NOT_DISTRIBUTED + else: + return ParallelMode.NOT_PARALLEL + + @property + def world_size(self): + """ + The number of processes used in parallel. + """ + requires_backends(self, ["torch"]) + if self.distributed_state is not None: + return self.distributed_state.num_processes + elif is_sagemaker_mp_enabled(): + return smp.dp_size() if not smp.state.cfg.prescaled_batch else smp.rdp_size() + return 1 + + @property + def process_index(self): + """ + The index of the current process used. + """ + requires_backends(self, ["torch"]) + if self.distributed_state is not None: + return self.distributed_state.process_index + elif is_sagemaker_mp_enabled(): + return smp.dp_rank() if not smp.state.cfg.prescaled_batch else smp.rdp_rank() + return 0 + + @property + def local_process_index(self): + """ + The index of the local process used. + """ + requires_backends(self, ["torch"]) + + if self.distributed_state is not None: + return self.distributed_state.local_process_index + elif is_sagemaker_mp_enabled(): + return smp.local_rank() + return 0 + + @property + def should_log(self): + """ + Whether or not the current process should produce log. + """ + if self.log_on_each_node: + return self.local_process_index == 0 + else: + if is_sagemaker_mp_enabled(): + return smp.rank() == 0 + else: + return self.process_index == 0 + + @property + def should_save(self): + """ + Whether or not the current process should write to disk, e.g., to save models and checkpoints. + """ + if self.save_on_each_node: + return self.local_process_index == 0 + else: + if is_sagemaker_mp_enabled(): + return smp.rank() == 0 + else: + return self.process_index == 0 + + def get_process_log_level(self): + """ + Returns the log level to be used depending on whether this process is the main process of node 0, main process + of node non-0, or a non-main process. + + For the main process the log level defaults to the logging level set (`logging.WARNING` if you didn't do + anything) unless overridden by `log_level` argument. + + For the replica processes the log level defaults to `logging.WARNING` unless overridden by `log_level_replica` + argument. + + The choice between the main and replica process settings is made according to the return value of `should_log`. + """ + + # convert to int + log_level = trainer_log_levels[self.log_level] + log_level_replica = trainer_log_levels[self.log_level_replica] + + log_level_main_node = logging.get_verbosity() if log_level == -1 else log_level + log_level_replica_node = logging.get_verbosity() if log_level_replica == -1 else log_level_replica + return log_level_main_node if self.should_log else log_level_replica_node + + @property + def place_model_on_device(self): + """ + Can be subclassed and overridden for some specific integrations. + """ + return not is_sagemaker_mp_enabled() + + @property + def _no_sync_in_gradient_accumulation(self): + """ + Whether or not to use no_sync for the gradients when doing gradient accumulation. 
+ """ + return not ( + self.deepspeed or is_sagemaker_dp_enabled() or is_sagemaker_mp_enabled() or is_torch_neuroncore_available() + ) + + @contextlib.contextmanager + def main_process_first(self, local=True, desc="work"): + """ + A context manager for torch distributed environment where on needs to do something on the main process, while + blocking replicas, and when it's finished releasing the replicas. + + One such use is for `datasets`'s `map` feature which to be efficient should be run once on the main process, + which upon completion saves a cached version of results and which then automatically gets loaded by the + replicas. + + Args: + local (`bool`, *optional*, defaults to `True`): + if `True` first means process of rank 0 of each node if `False` first means process of rank 0 of node + rank 0 In multi-node environment with a shared filesystem you most likely will want to use + `local=False` so that only the main process of the first node will do the processing. If however, the + filesystem is not shared, then the main process of each node will need to do the processing, which is + the default behavior. + desc (`str`, *optional*, defaults to `"work"`): + a work description to be used in debug logs + + """ + if is_torch_available() and self.world_size > 1: + main_process_desc = "main local process" if local else "main process" + if self.distributed_state is not None: + is_main_process = ( + self.distributed_state.is_local_main_process if local else self.distributed_state.is_main_process + ) + elif is_sagemaker_mp_enabled(): + is_main_process = smp.rank() == 0 + + try: + if not is_main_process: + # tell all replicas to wait + logger.debug(f"{self.process_index}: waiting for the {main_process_desc} to perform {desc}") + + if is_torch_xla_available(): + xm.rendezvous(desc) + else: + dist.barrier() + yield + finally: + if is_main_process: + # the wait is over + logger.debug(f"{self.process_index}: {main_process_desc} completed {desc}, releasing all replicas") + if is_torch_xla_available(): + xm.rendezvous(desc) + else: + dist.barrier() + else: + yield + + def get_warmup_steps(self, num_training_steps: int): + """ + Get number of steps used for a linear warmup. + """ + warmup_steps = ( + self.warmup_steps if self.warmup_steps > 0 else math.ceil(num_training_steps * self.warmup_ratio) + ) + return warmup_steps + + def to_dict(self): + """ + Serializes this instance while replace `Enum` by their values (for JSON serialization support). It obfuscates + the token values by removing their value. + """ + # filter out fields that are defined as field(init=False) + d = {field.name: getattr(self, field.name) for field in fields(self) if field.init} + + for k, v in d.items(): + if isinstance(v, Enum): + d[k] = v.value + if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum): + d[k] = [x.value for x in v] + if k.endswith("_token"): + d[k] = f"<{k.upper()}>" + # Handle the accelerator_config if passed + if is_accelerate_available() and isinstance(v, AcceleratorConfig): + d[k] = v.to_dict() + return d + + def to_json_string(self): + """ + Serializes this instance to a JSON string. 
+ """ + return json.dumps(self.to_dict(), indent=2) + + def to_sanitized_dict(self) -> Dict[str, Any]: + """ + Sanitized serialization to use with TensorBoard’s hparams + """ + d = self.to_dict() + d = {**d, **{"train_batch_size": self.train_batch_size, "eval_batch_size": self.eval_batch_size}} + + valid_types = [bool, int, float, str] + if is_torch_available(): + valid_types.append(torch.Tensor) + + return {k: v if type(v) in valid_types else str(v) for k, v in d.items()} + + # The following methods are there to simplify the instantiation of `TrainingArguments` + def set_training( + self, + learning_rate: float = 5e-5, + batch_size: int = 8, + weight_decay: float = 0, + num_epochs: float = 3, + max_steps: int = -1, + gradient_accumulation_steps: int = 1, + seed: int = 42, + gradient_checkpointing: bool = False, + ): + """ + A method that regroups all basic arguments linked to the training. + + + + Calling this method will automatically set `self.do_train` to `True`. + + + + Args: + learning_rate (`float`, *optional*, defaults to 5e-5): + The initial learning rate for the optimizer. + batch_size (`int` *optional*, defaults to 8): + The batch size per device (GPU/TPU core/CPU...) used for training. + weight_decay (`float`, *optional*, defaults to 0): + The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in the + optimizer. + num_train_epochs(`float`, *optional*, defaults to 3.0): + Total number of training epochs to perform (if not an integer, will perform the decimal part percents + of the last epoch before stopping training). + max_steps (`int`, *optional*, defaults to -1): + If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. + For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until + `max_steps` is reached. + gradient_accumulation_steps (`int`, *optional*, defaults to 1): + Number of updates steps to accumulate the gradients for, before performing a backward/update pass. + + + + When using gradient accumulation, one step is counted as one step with backward pass. Therefore, + logging, evaluation, save will be conducted every `gradient_accumulation_steps * xxx_step` training + examples. + + + + seed (`int`, *optional*, defaults to 42): + Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use + the [`~Trainer.model_init`] function to instantiate the model if it has some randomly initialized + parameters. + gradient_checkpointing (`bool`, *optional*, defaults to `False`): + If True, use gradient checkpointing to save memory at the expense of slower backward pass. 
+ + Example: + + ```py + >>> from transformers import TrainingArguments + + >>> args = TrainingArguments("working_dir") + >>> args = args.set_training(learning_rate=1e-4, batch_size=32) + >>> args.learning_rate + 1e-4 + ``` + """ + self.do_train = True + self.learning_rate = learning_rate + self.per_device_train_batch_size = batch_size + self.weight_decay = weight_decay + self.num_train_epochs = num_epochs + self.max_steps = max_steps + self.gradient_accumulation_steps = gradient_accumulation_steps + self.seed = seed + self.gradient_checkpointing = gradient_checkpointing + return self + + def set_evaluate( + self, + strategy: Union[str, IntervalStrategy] = "no", + steps: int = 500, + batch_size: int = 8, + accumulation_steps: Optional[int] = None, + delay: Optional[float] = None, + loss_only: bool = False, + jit_mode: bool = False, + ): + """ + A method that regroups all arguments linked to evaluation. + + Args: + strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"no"`): + The evaluation strategy to adopt during training. Possible values are: + + - `"no"`: No evaluation is done during training. + - `"steps"`: Evaluation is done (and logged) every `steps`. + - `"epoch"`: Evaluation is done at the end of each epoch. + + Setting a `strategy` different from `"no"` will set `self.do_eval` to `True`. + steps (`int`, *optional*, defaults to 500): + Number of update steps between two evaluations if `strategy="steps"`. + batch_size (`int` *optional*, defaults to 8): + The batch size per device (GPU/TPU core/CPU...) used for evaluation. + accumulation_steps (`int`, *optional*): + Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. + If left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster + but requires more memory). + delay (`float`, *optional*): + Number of epochs or steps to wait for before the first evaluation can be performed, depending on the + evaluation_strategy. + loss_only (`bool`, *optional*, defaults to `False`): + Ignores all outputs except the loss. + jit_mode (`bool`, *optional*): + Whether or not to use PyTorch jit trace for inference. + + Example: + + ```py + >>> from transformers import TrainingArguments + + >>> args = TrainingArguments("working_dir") + >>> args = args.set_evaluate(strategy="steps", steps=100) + >>> args.eval_steps + 100 + ``` + """ + self.evaluation_strategy = IntervalStrategy(strategy) + if self.evaluation_strategy == IntervalStrategy.STEPS and steps == 0: + raise ValueError("Setting `strategy` as 'steps' requires a positive value for `steps`.") + self.do_eval = self.evaluation_strategy != IntervalStrategy.NO + self.eval_steps = steps + self.per_device_eval_batch_size = batch_size + self.eval_accumulation_steps = accumulation_steps + self.eval_delay = delay + self.prediction_loss_only = loss_only + self.jit_mode_eval = jit_mode + return self + + def set_testing( + self, + batch_size: int = 8, + loss_only: bool = False, + jit_mode: bool = False, + ): + """ + A method that regroups all basic arguments linked to testing on a held-out dataset. + + + + Calling this method will automatically set `self.do_predict` to `True`. + + + + Args: + batch_size (`int` *optional*, defaults to 8): + The batch size per device (GPU/TPU core/CPU...) used for testing. + loss_only (`bool`, *optional*, defaults to `False`): + Ignores all outputs except the loss. + jit_mode (`bool`, *optional*): + Whether or not to use PyTorch jit trace for inference. 
+ + Example: + + ```py + >>> from transformers import TrainingArguments + + >>> args = TrainingArguments("working_dir") + >>> args = args.set_testing(batch_size=32) + >>> args.per_device_eval_batch_size + 32 + ``` + """ + self.do_predict = True + self.per_device_eval_batch_size = batch_size + self.prediction_loss_only = loss_only + self.jit_mode_eval = jit_mode + return self + + def set_save( + self, + strategy: Union[str, IntervalStrategy] = "steps", + steps: int = 500, + total_limit: Optional[int] = None, + on_each_node: bool = False, + ): + """ + A method that regroups all arguments linked to checkpoint saving. + + Args: + strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`): + The checkpoint save strategy to adopt during training. Possible values are: + + - `"no"`: No save is done during training. + - `"epoch"`: Save is done at the end of each epoch. + - `"steps"`: Save is done every `save_steps`. + + steps (`int`, *optional*, defaults to 500): + Number of update steps between two checkpoint saves if `strategy="steps"`. + total_limit (`int`, *optional*): + If a value is passed, will limit the total number of checkpoints. Deletes the older checkpoints in + `output_dir`. + on_each_node (`bool`, *optional*, defaults to `False`): + When doing multi-node distributed training, whether to save models and checkpoints on each node, or + only on the main one. + + This should not be activated when the different nodes use the same storage, as the files will be saved + with the same names for each node. + + Example: + + ```py + >>> from transformers import TrainingArguments + + >>> args = TrainingArguments("working_dir") + >>> args = args.set_save(strategy="steps", steps=100) + >>> args.save_steps + 100 + ``` + """ + self.save_strategy = IntervalStrategy(strategy) + if self.save_strategy == IntervalStrategy.STEPS and steps == 0: + raise ValueError("Setting `strategy` as 'steps' requires a positive value for `steps`.") + self.save_steps = steps + self.save_total_limit = total_limit + self.save_on_each_node = on_each_node + return self + + def set_logging( + self, + strategy: Union[str, IntervalStrategy] = "steps", + steps: int = 500, + report_to: Union[str, List[str]] = "none", + level: str = "passive", + first_step: bool = False, + nan_inf_filter: bool = False, + on_each_node: bool = False, + replica_level: str = "passive", + ): + """ + A method that regroups all arguments linked to logging. + + Args: + strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`): + The logging strategy to adopt during training. Possible values are: + + - `"no"`: No logging is done during training. + - `"epoch"`: Logging is done at the end of each epoch. + - `"steps"`: Logging is done every `logging_steps`. + + steps (`int`, *optional*, defaults to 500): + Number of update steps between two logs if `strategy="steps"`. + level (`str`, *optional*, defaults to `"passive"`): + Logger log level to use on the main process. Possible choices are the log levels as strings: `"debug"`, + `"info"`, `"warning"`, `"error"` and `"critical"`, plus a `"passive"` level which doesn't set anything + and lets the application set the level. + report_to (`str` or `List[str]`, *optional*, defaults to `"none"`): + The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`, + `"clearml"`, `"codecarbon"`, `"comet_ml"`, `"dagshub"`, `"dvclive"`, `"flyte"`, `"mlflow"`, + `"neptune"`, `"tensorboard"`, and `"wandb"`.
Use `"all"` to report to all integrations installed, + `"none"` for no integrations. + first_step (`bool`, *optional*, defaults to `False`): + Whether to log and evaluate the first `global_step` or not. + nan_inf_filter (`bool`, *optional*, defaults to `True`): + Whether to filter `nan` and `inf` losses for logging. If set to `True` the loss of every step that is + `nan` or `inf` is filtered and the average loss of the current logging window is taken instead. + + + + `nan_inf_filter` only influences the logging of loss values, it does not change the behavior the + gradient is computed or applied to the model. + + + + on_each_node (`bool`, *optional*, defaults to `True`): + In multinode distributed training, whether to log using `log_level` once per node, or only on the main + node. + replica_level (`str`, *optional*, defaults to `"passive"`): + Logger log level to use on replicas. Same choices as `log_level` + + Example: + + ```py + >>> from transformers import TrainingArguments + + >>> args = TrainingArguments("working_dir") + >>> args = args.set_logging(strategy="steps", steps=100) + >>> args.logging_steps + 100 + ``` + """ + self.logging_strategy = IntervalStrategy(strategy) + if self.logging_strategy == IntervalStrategy.STEPS and steps == 0: + raise ValueError("Setting `strategy` as 'steps' requires a positive value for `steps`.") + self.logging_steps = steps + self.report_to = report_to + self.log_level = level + self.logging_first_step = first_step + self.logging_nan_inf_filter = nan_inf_filter + self.log_on_each_node = on_each_node + self.log_level_replica = replica_level + return self + + def set_push_to_hub( + self, + model_id: str, + strategy: Union[str, HubStrategy] = "every_save", + token: Optional[str] = None, + private_repo: bool = False, + always_push: bool = False, + ): + """ + A method that regroups all arguments linked to synchronizing checkpoints with the Hub. + + + + Calling this method will set `self.push_to_hub` to `True`, which means the `output_dir` will begin a git + directory synced with the repo (determined by `model_id`) and the content will be pushed each time a save is + triggered (depending on`self.save_strategy`). Calling [`~Trainer.save_model`] will also trigger a push. + + + + Args: + model_id (`str`): + The name of the repository to keep in sync with the local *output_dir*. It can be a simple model ID in + which case the model will be pushed in your namespace. Otherwise it should be the whole repository + name, for instance `"user_name/model"`, which allows you to push to an organization you are a member of + with `"organization_name/model"`. + strategy (`str` or [`~trainer_utils.HubStrategy`], *optional*, defaults to `"every_save"`): + Defines the scope of what is pushed to the Hub and when. Possible values are: + + - `"end"`: push the model, its configuration, the tokenizer (if passed along to the [`Trainer`]) and a + draft of a model card when the [`~Trainer.save_model`] method is called. + - `"every_save"`: push the model, its configuration, the tokenizer (if passed along to the [`Trainer`]) + and + a draft of a model card each time there is a model save. The pushes are asynchronous to not block + training, and in case the save are very frequent, a new push is only attempted if the previous one is + finished. A last push is made with the final model at the end of training. 
+ - `"checkpoint"`: like `"every_save"` but the latest checkpoint is also pushed in a subfolder named + last-checkpoint, allowing you to resume training easily with + `trainer.train(resume_from_checkpoint="last-checkpoint")`. + - `"all_checkpoints"`: like `"checkpoint"` but all checkpoints are pushed like they appear in the + output + folder (so you will get one checkpoint folder per folder in your final repository) + + token (`str`, *optional*): + The token to use to push the model to the Hub. Will default to the token in the cache folder obtained + with `huggingface-cli login`. + private_repo (`bool`, *optional*, defaults to `False`): + If True, the Hub repo will be set to private. + always_push (`bool`, *optional*, defaults to `False`): + Unless this is `True`, the `Trainer` will skip pushing a checkpoint when the previous push is not + finished. + + Example: + + ```py + >>> from transformers import TrainingArguments + + >>> args = TrainingArguments("working_dir") + >>> args = args.set_push_to_hub("me/awesome-model") + >>> args.hub_model_id + 'me/awesome-model' + ``` + """ + self.push_to_hub = True + self.hub_model_id = model_id + self.hub_strategy = HubStrategy(strategy) + self.hub_token = token + self.hub_private_repo = private_repo + self.hub_always_push = always_push + return self + + def set_optimizer( + self, + name: Union[str, OptimizerNames] = "adamw_torch", + learning_rate: float = 5e-5, + weight_decay: float = 0, + beta1: float = 0.9, + beta2: float = 0.999, + epsilon: float = 1e-8, + args: Optional[str] = None, + ): + """ + A method that regroups all arguments linked to the optimizer and its hyperparameters. + + Args: + name (`str` or [`training_args.OptimizerNames`], *optional*, defaults to `"adamw_torch"`): + The optimizer to use: `"adamw_hf"`, `"adamw_torch"`, `"adamw_torch_fused"`, `"adamw_apex_fused"`, + `"adamw_anyprecision"` or `"adafactor"`. + learning_rate (`float`, *optional*, defaults to 5e-5): + The initial learning rate. + weight_decay (`float`, *optional*, defaults to 0): + The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights. + beta1 (`float`, *optional*, defaults to 0.9): + The beta1 hyperparameter for the adam optimizer or its variants. + beta2 (`float`, *optional*, defaults to 0.999): + The beta2 hyperparameter for the adam optimizer or its variants. + epsilon (`float`, *optional*, defaults to 1e-8): + The epsilon hyperparameter for the adam optimizer or its variants. + args (`str`, *optional*): + Optional arguments that are supplied to AnyPrecisionAdamW (only useful when + `optim="adamw_anyprecision"`). + + Example: + + ```py + >>> from transformers import TrainingArguments + + >>> args = TrainingArguments("working_dir") + >>> args = args.set_optimizer(name="adamw_torch", beta1=0.8) + >>> args.optim + 'adamw_torch' + ``` + """ + self.optim = OptimizerNames(name) + self.learning_rate = learning_rate + self.weight_decay = weight_decay + self.adam_beta1 = beta1 + self.adam_beta2 = beta2 + self.adam_epsilon = epsilon + self.optim_args = args + return self + + def set_lr_scheduler( + self, + name: Union[str, SchedulerType] = "linear", + num_epochs: float = 3.0, + max_steps: int = -1, + warmup_ratio: float = 0, + warmup_steps: int = 0, + ): + """ + A method that regroups all arguments linked to the learning rate scheduler and its hyperparameters. + + Args: + name (`str` or [`SchedulerType`], *optional*, defaults to `"linear"`): + The scheduler type to use. See the documentation of [`SchedulerType`] for all possible values. 
+ num_epochs(`float`, *optional*, defaults to 3.0): + Total number of training epochs to perform (if not an integer, will perform the decimal part percents + of the last epoch before stopping training). + max_steps (`int`, *optional*, defaults to -1): + If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. + For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until + `max_steps` is reached. + warmup_ratio (`float`, *optional*, defaults to 0.0): + Ratio of total training steps used for a linear warmup from 0 to `learning_rate`. + warmup_steps (`int`, *optional*, defaults to 0): + Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of + `warmup_ratio`. + + Example: + + ```py + >>> from transformers import TrainingArguments + + >>> args = TrainingArguments("working_dir") + >>> args = args.set_lr_scheduler(name="cosine", warmup_ratio=0.05) + >>> args.warmup_ratio + 0.05 + ``` + """ + self.lr_scheduler_type = SchedulerType(name) + self.num_train_epochs = num_epochs + self.max_steps = max_steps + self.warmup_ratio = warmup_ratio + self.warmup_steps = warmup_steps + return self + + def set_dataloader( + self, + train_batch_size: int = 8, + eval_batch_size: int = 8, + drop_last: bool = False, + num_workers: int = 0, + pin_memory: bool = True, + persistent_workers: bool = False, + prefetch_factor: Optional[int] = None, + auto_find_batch_size: bool = False, + ignore_data_skip: bool = False, + sampler_seed: Optional[int] = None, + ): + """ + A method that regroups all arguments linked to the dataloaders creation. + + Args: + drop_last (`bool`, *optional*, defaults to `False`): + Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch + size) or not. + num_workers (`int`, *optional*, defaults to 0): + Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in + the main process. + pin_memory (`bool`, *optional*, defaults to `True`): + Whether you want to pin memory in data loaders or not. Will default to `True`. + persistent_workers (`bool`, *optional*, defaults to `False`): + If True, the data loader will not shut down the worker processes after a dataset has been consumed + once. This allows to maintain the workers Dataset instances alive. Can potentially speed up training, + but will increase RAM usage. Will default to `False`. + prefetch_factor (`int`, *optional*): + Number of batches loaded in advance by each worker. + 2 means there will be a total of 2 * num_workers batches prefetched across all workers. + auto_find_batch_size (`bool`, *optional*, defaults to `False`) + Whether to find a batch size that will fit into memory automatically through exponential decay, + avoiding CUDA Out-of-Memory errors. Requires accelerate to be installed (`pip install accelerate`) + ignore_data_skip (`bool`, *optional*, defaults to `False`): + When resuming training, whether or not to skip the epochs and batches to get the data loading at the + same stage as in the previous training. If set to `True`, the training will begin faster (as that + skipping step can take a long time) but will not yield the same results as the interrupted training + would have. + sampler_seed (`int`, *optional*): + Random seed to be used with data samplers. If not set, random generators for data sampling will use the + same seed as `self.seed`. 
This can be used to ensure reproducibility of data sampling, independent of + the model seed. + + Example: + + ```py + >>> from transformers import TrainingArguments + + >>> args = TrainingArguments("working_dir") + >>> args = args.set_dataloader(train_batch_size=16, eval_batch_size=64) + >>> args.per_device_train_batch_size + 16 + ``` + """ + self.per_device_train_batch_size = train_batch_size + self.per_device_eval_batch_size = eval_batch_size + self.dataloader_drop_last = drop_last + self.dataloader_num_workers = num_workers + self.dataloader_pin_memory = pin_memory + self.dataloader_persistent_workers = persistent_workers + self.dataloader_prefetch_factor = prefetch_factor + self.auto_find_batch_size = auto_find_batch_size + self.ignore_data_skip = ignore_data_skip + self.data_seed = sampler_seed + return self + + +class ParallelMode(Enum): + NOT_PARALLEL = "not_parallel" + NOT_DISTRIBUTED = "not_distributed" + DISTRIBUTED = "distributed" + SAGEMAKER_MODEL_PARALLEL = "sagemaker_model_parallel" + SAGEMAKER_DATA_PARALLEL = "sagemaker_data_parallel" + TPU = "tpu" diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/training_args_seq2seq.py b/llmeval-env/lib/python3.10/site-packages/transformers/training_args_seq2seq.py new file mode 100644 index 0000000000000000000000000000000000000000..88ae662570abefe5c6908d1b51510932264a6067 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/training_args_seq2seq.py @@ -0,0 +1,96 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from dataclasses import dataclass, field +from pathlib import Path +from typing import Optional, Union + +from .generation.configuration_utils import GenerationConfig +from .training_args import TrainingArguments +from .utils import add_start_docstrings + + +logger = logging.getLogger(__name__) + + +@dataclass +@add_start_docstrings(TrainingArguments.__doc__) +class Seq2SeqTrainingArguments(TrainingArguments): + """ + Args: + sortish_sampler (`bool`, *optional*, defaults to `False`): + Whether to use a *sortish sampler* or not. Only possible if the underlying datasets are *Seq2SeqDataset* + for now but will become generally available in the near future. + + It sorts the inputs according to lengths in order to minimize the padding size, with a bit of randomness + for the training set. + predict_with_generate (`bool`, *optional*, defaults to `False`): + Whether to use generate to calculate generative metrics (ROUGE, BLEU). + generation_max_length (`int`, *optional*): + The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default to the + `max_length` value of the model configuration. + generation_num_beams (`int`, *optional*): + The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default to the + `num_beams` value of the model configuration. 
+ generation_config (`str` or `Path` or [`~generation.GenerationConfig`], *optional*): + Allows loading a [`~generation.GenerationConfig`] from the `from_pretrained` method. This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. + - a path to a *directory* containing a configuration file saved using the + [`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`. + - a [`~generation.GenerationConfig`] object. + """ + + sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."}) + predict_with_generate: bool = field( + default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} + ) + generation_max_length: Optional[int] = field( + default=None, + metadata={ + "help": ( + "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " + "to the `max_length` value of the model configuration." + ) + }, + ) + generation_num_beams: Optional[int] = field( + default=None, + metadata={ + "help": ( + "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " + "to the `num_beams` value of the model configuration." + ) + }, + ) + generation_config: Optional[Union[str, Path, GenerationConfig]] = field( + default=None, + metadata={ + "help": "Model id, file path or URL pointing to a GenerationConfig JSON file, to use during prediction." + }, + ) + + def to_dict(self): + """ + Serializes this instance while replacing `Enum` members by their values and `GenerationConfig` by dictionaries + (for JSON serialization support). It obfuscates the token values by replacing them with placeholders. + """ + # the parent `to_dict` already filters out fields defined as field(init=False) + d = super().to_dict() + for k, v in d.items(): + if isinstance(v, GenerationConfig): + d[k] = v.to_dict() + return d
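+
+
+if __name__ == "__main__":
+    # NOTE: illustrative usage sketch, not part of the upstream transformers module. It assumes
+    # torch and accelerate are installed, and uses a hypothetical scratch directory name. It shows
+    # how the overridden `to_dict` above turns a nested `GenerationConfig` into a plain dict so
+    # that the arguments remain JSON-serializable.
+    import json
+
+    args = Seq2SeqTrainingArguments(
+        output_dir="tmp_seq2seq_args",
+        predict_with_generate=True,
+        generation_config=GenerationConfig(max_new_tokens=32, num_beams=4),
+    )
+    serialized = args.to_dict()
+    assert isinstance(serialized["generation_config"], dict)
+    print(json.dumps(serialized, indent=2)[:500])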