diff --git a/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__init__.py b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4cb36263b095ebba9e9a1150f9c38f6ac6019be8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__init__.py @@ -0,0 +1,13 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +from .__version__ import __author__, __copyright__, __email__, __license__, __version__ +from ._func import detect_file_encoding +from ._mbstrdecoder import MultiByteStrDecoder + + +__all__ = ( + "detect_file_encoding", + "MultiByteStrDecoder", +) diff --git a/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78834f5b91bb6acb86379212b6ee88d37e35b7ef Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__version__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__version__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1acb83d13e6d6f8bcf8ffb845c9e60f3b4011c15 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__version__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_binary_ext_checker.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_binary_ext_checker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e8d7f3d38d7c407aa7b55a70e5c80eb699accab Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_binary_ext_checker.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_func.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_func.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ed5489e6cdd08a1a87a2cfb56afd0639d38333b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_func.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_mbstrdecoder.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_mbstrdecoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f3a95cb0f86b8978951649b7eb9803fbf3bccba Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_mbstrdecoder.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__version__.py b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__version__.py new file mode 100644 index 0000000000000000000000000000000000000000..1047f5e5099e9ee9c18279e8520927f558c5b26b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/__version__.py @@ -0,0 +1,6 @@ +__author__ = "Tsuyoshi Hombashi" +__copyright__ = f"Copyright 2016, {__author__}" +__license__ = "MIT License" +__version__ = "1.1.3" +__maintainer__ = __author__ +__email__ = "tsuyoshi.hombashi@gmail.com" diff --git a/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/_binary_ext_checker.py 
b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/_binary_ext_checker.py new file mode 100644 index 0000000000000000000000000000000000000000..217e29c9afdd9fb4949855381f76c0516902b784 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/_binary_ext_checker.py @@ -0,0 +1,264 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import os.path + + +# list from https://github.com/sindresorhus/binary-extensions +binary_exts = ( + "3dm", + "3ds", + "3g2", + "3gp", + "7z", + "a", + "aac", + "adp", + "ai", + "aif", + "aiff", + "alz", + "ape", + "apk", + "ar", + "arj", + "asf", + "au", + "avi", + "bak", + "baml", + "bh", + "bin", + "bk", + "bmp", + "btif", + "bz2", + "bzip2", + "cab", + "caf", + "cgm", + "class", + "cmx", + "cpio", + "cr2", + "cur", + "dat", + "dcm", + "deb", + "dex", + "djvu", + "dll", + "dmg", + "dng", + "doc", + "docm", + "docx", + "dot", + "dotm", + "dra", + "DS_Store", + "dsk", + "dts", + "dtshd", + "dvb", + "dwg", + "dxf", + "ecelp4800", + "ecelp7470", + "ecelp9600", + "egg", + "eol", + "eot", + "epub", + "exe", + "f4v", + "fbs", + "fh", + "fla", + "flac", + "fli", + "flv", + "fpx", + "fst", + "fvt", + "g3", + "gh", + "gif", + "graffle", + "gz", + "gzip", + "h261", + "h263", + "h264", + "icns", + "ico", + "ief", + "img", + "ipa", + "iso", + "jar", + "jpeg", + "jpg", + "jpgv", + "jpm", + "jxr", + "key", + "ktx", + "lha", + "lib", + "lvp", + "lz", + "lzh", + "lzma", + "lzo", + "m3u", + "m4a", + "m4v", + "mar", + "mdi", + "mht", + "mid", + "midi", + "mj2", + "mka", + "mkv", + "mmr", + "mng", + "mobi", + "mov", + "movie", + "mp3", + "mp4", + "mp4a", + "mpeg", + "mpg", + "mpga", + "mxu", + "nef", + "npx", + "numbers", + "nupkg", + "o", + "oga", + "ogg", + "ogv", + "otf", + "pages", + "pbm", + "pcx", + "pdb", + "pdf", + "pea", + "pgm", + "pic", + "png", + "pnm", + "pot", + "potm", + "potx", + "ppa", + "ppam", + "ppm", + "pps", + "ppsm", + "ppsx", + "ppt", + "pptm", + "pptx", + "psd", + "pya", + "pyc", + "pyo", + "pyv", + "qt", + "rar", + "ras", + "raw", + "resources", + "rgb", + "rip", + "rlc", + "rmf", + "rmvb", + "rtf", + "rz", + "s3m", + "s7z", + "scpt", + "sgi", + "shar", + "sil", + "sketch", + "slk", + "smv", + "snk", + "so", + "stl", + "suo", + "sub", + "swf", + "tar", + "tbz", + "tbz2", + "tga", + "tgz", + "thmx", + "tif", + "tiff", + "tlz", + "ttc", + "ttf", + "txz", + "udf", + "uvh", + "uvi", + "uvm", + "uvp", + "uvs", + "uvu", + "viv", + "vob", + "war", + "wav", + "wax", + "wbmp", + "wdp", + "weba", + "webm", + "webp", + "whl", + "wim", + "wm", + "wma", + "wmv", + "wmx", + "woff", + "woff2", + "wrm", + "wvx", + "xbm", + "xif", + "xla", + "xlam", + "xls", + "xlsb", + "xlsm", + "xlsx", + "xlt", + "xltm", + "xltx", + "xm", + "xmind", + "xpi", + "xpm", + "xwd", + "xz", + "z", + "zip", + "zipx", +) + + +def is_binary_ext_path(filepath) -> bool: + return os.path.splitext(filepath)[1].lstrip(".") in binary_exts diff --git a/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/_func.py b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/_func.py new file mode 100644 index 0000000000000000000000000000000000000000..c0e31dbecbd167e1426a7b73aaa91a0c63722878 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/_func.py @@ -0,0 +1,56 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +import os +import stat +from errno import EBADF, ENOENT, ENOTDIR +from typing import Optional, Union + +from ._binary_ext_checker import is_binary_ext_path + + +def is_fifo(file_path: Union[int, bytes, str]) -> bool: + try: + return stat.S_ISFIFO(os.stat(file_path).st_mode) + except OSError as e: + if e.errno not in (ENOENT, ENOTDIR, EBADF): + raise + + return False + except ValueError: + return False + + +def to_codec_name(name: Optional[str]) -> Optional[str]: + if not name: + return None + + return name.lower().replace("-", "_") + + +def detect_file_encoding(file_path) -> Optional[str]: + from chardet.universaldetector import UniversalDetector + + if not os.path.isfile(file_path) or is_binary_ext_path(file_path) or is_fifo(file_path): + return None + + detector = UniversalDetector() + READ_SIZE = 4 * 1024 + + try: + with open(file_path, mode="rb") as f: + while True: + binary = f.read(READ_SIZE) + if not binary: + break + + detector.feed(binary) + if detector.done: + break + except OSError: + return None + finally: + detector.close() + + return to_codec_name(detector.result.get("encoding")) diff --git a/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/_mbstrdecoder.py b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/_mbstrdecoder.py new file mode 100644 index 0000000000000000000000000000000000000000..1ed86446290bfde7b84da935b8b9b4beb9c38f99 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/_mbstrdecoder.py @@ -0,0 +1,291 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import copy +import re +from typing import List, Optional, Sequence + +from ._func import to_codec_name + + +def b(s: str) -> bytes: + return s.encode("latin-1") + + +class MultiByteStrDecoder: + """ + Reference: + https://docs.python.org/3/library/codecs.html + """ + + __CODECS = [ + "utf_7", + "utf_8", + "utf_8_sig", + "utf_16", + "utf_16_be", + "utf_16_le", + "utf_32", + "utf_32_be", + "utf_32_le", + "big5", + "big5hkscs", + "cp037", + "cp424", + "cp437", + "cp500", + "cp720", + "cp737", + "cp775", + "cp850", + "cp852", + "cp855", + "cp856", + "cp857", + "cp858", + "cp860", + "cp861", + "cp862", + "cp863", + "cp864", + "cp865", + "cp866", + "cp869", + "cp874", + "cp875", + "cp932", + "cp949", + "cp950", + "cp1006", + "cp1026", + "cp1140", + "cp1250", + "cp1251", + "cp1252", + "cp1253", + "cp1254", + "cp1255", + "cp1256", + "cp1257", + "cp1258", + "euc_jp", + "euc_jis_2004", + "euc_jisx0213", + "euc_kr", + "gb2312", + "gbk", + "gb18030", + "hz", + "iso2022_jp", + "iso2022_jp_1", + "iso2022_jp_2", + "iso2022_jp_2004", + "iso2022_jp_3", + "iso2022_jp_ext", + "iso2022_kr", + "latin_1", + "iso8859_2", + "iso8859_3", + "iso8859_4", + "iso8859_5", + "iso8859_6", + "iso8859_7", + "iso8859_8", + "iso8859_9", + "iso8859_10", + "iso8859_11", + "iso8859_13", + "iso8859_14", + "iso8859_15", + "iso8859_16", + "johab", + "koi8_r", + "koi8_u", + "mac_cyrillic", + "mac_greek", + "mac_iceland", + "mac_latin2", + "mac_roman", + "mac_turkish", + "ptcp154", + "shift_jis", + "shift_jis_2004", + "shift_jisx0213", + "base64_codec", + "bz2_codec", + "hex_codec", + "idna", + "mbcs", + "palmos", + "punycode", + "quopri_codec", + "raw_unicode_escape", + "rot_13", + "string_escape", + "unicode_escape", + "unicode_internal", + "uu_codec", + "zlib_codec", + ] + __RE_UTF7 = re.compile(b("[+].*?[-]")) + + @property + def unicode_str(self) -> str: + return self.__unicode_str + + @property + def codec(self) -> Optional[str]: + return self.__codec + + def __init__(self, value, 
codec_candidates: Optional[Sequence[str]] = None) -> None: + self.__encoded_str = value + self.__codec: Optional[str] = None + if codec_candidates is None: + self.__codec_candidate_list: List[str] = [] + else: + self.__codec_candidate_list = list(codec_candidates) + + self.__validate_str() + + self.__unicode_str = self.__to_unicode() + + def __repr__(self) -> str: + return f"codec={self.codec:s}, unicode={self.unicode_str:s}" + + def __validate_str(self) -> None: + if isinstance(self.__encoded_str, (str, bytes)): + return + + raise ValueError(f"value must be a string: actual={type(self.__encoded_str)}") + + def __is_buffer(self) -> bool: + return isinstance(self.__encoded_str, memoryview) + + def __is_multibyte_utf7(self, encoded_str) -> bool: + if self.__codec != "utf_7": + return False + + utf7_symbol_count = encoded_str.count(b("+")) + if utf7_symbol_count <= 0: + return False + + if utf7_symbol_count != encoded_str.count(b("-")): + return False + + return utf7_symbol_count == len(self.__RE_UTF7.findall(encoded_str)) + + def __get_encoded_str(self) -> str: + if self.__is_buffer(): + return str(self.__encoded_str) + + return self.__encoded_str + + @staticmethod + def __detect_encoding_helper(encoded_str) -> Optional[str]: + import chardet + + try: + detect = chardet.detect(encoded_str) + except TypeError: + detect = {} # type: ignore + + detect_encoding = detect.get("encoding") + confidence = detect.get("confidence") + + if detect_encoding not in ["ascii", "utf-8"] and confidence and confidence > 0.7: + # utf7 tend to be misrecognized as ascii + return detect_encoding + + return None + + def __get_codec_candidate_list(self, encoded_str) -> List[str]: + codec_candidate_list = copy.deepcopy(self.__CODECS) + detect_encoding = self.__detect_encoding_helper(encoded_str) + + if detect_encoding: + try: + codec_candidate_list.remove(detect_encoding) + except ValueError: + pass + + codec_candidate_list.insert(0, detect_encoding) + + for codec_candidate in self.__codec_candidate_list: + try: + codec_candidate_list.remove(codec_candidate) + except ValueError: + pass + + return self.__codec_candidate_list + codec_candidate_list + + def __to_unicode(self): + encoded_str = self.__get_encoded_str() + + if encoded_str == b"": + self.__codec = "unicode" + return "" + + for codec in self.__get_codec_candidate_list(encoded_str): + if not codec: + continue + + try: + self.__codec = to_codec_name(codec) + decoded_str = encoded_str.decode(codec) + break + except UnicodeDecodeError: + self.__codec = None + continue + except AttributeError: + if isinstance(encoded_str, str): + # already a unicode string (python 3) + self.__codec = "unicode" + + if not encoded_str: + return encoded_str + + return encoded_str + + self.__codec = None + + try: + return f"{encoded_str}" + except UnicodeDecodeError: + # some of the objects that cannot convertible to a string + # may reach this line + raise TypeError("argument must be a string") + else: + self.__codec = None + + try: + message = f"unknown codec: encoded_str={encoded_str}" + except UnicodeDecodeError: + message = f"unknown codec: value-type={type(encoded_str)}" + + raise UnicodeDecodeError(message) + + if self.codec == "utf_7": + return self.__process_utf7(encoded_str, decoded_str) + + return decoded_str + + def __process_utf7(self, encoded_str, decoded_str) -> str: + if not encoded_str: + self.__codec = "unicode" + + return encoded_str + + if self.__is_multibyte_utf7(encoded_str): + try: + decoded_str.encode("ascii") + + self.__codec = "ascii" + + return 
encoded_str.decode("ascii") + except UnicodeEncodeError: + return decoded_str + + self.__codec = "ascii" + + return encoded_str.decode("ascii") diff --git a/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/py.typed b/env-llmeval/lib/python3.10/site-packages/mbstrdecoder/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/peft/__init__.py b/env-llmeval/lib/python3.10/site-packages/peft/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee379166be720d029fde23df47580c1a7a49eb3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/__init__.py @@ -0,0 +1,90 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +# coding=utf-8 +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__version__ = "0.10.0" + +from .auto import ( + AutoPeftModel, + AutoPeftModelForCausalLM, + AutoPeftModelForSequenceClassification, + AutoPeftModelForSeq2SeqLM, + AutoPeftModelForTokenClassification, + AutoPeftModelForQuestionAnswering, + AutoPeftModelForFeatureExtraction, +) +from .mapping import ( + MODEL_TYPE_TO_PEFT_MODEL_MAPPING, + PEFT_TYPE_TO_CONFIG_MAPPING, + get_peft_config, + get_peft_model, + inject_adapter_in_model, +) +from .mixed_model import PeftMixedModel +from .peft_model import ( + PeftModel, + PeftModelForCausalLM, + PeftModelForSeq2SeqLM, + PeftModelForSequenceClassification, + PeftModelForTokenClassification, + PeftModelForQuestionAnswering, + PeftModelForFeatureExtraction, +) +from .tuners import ( + AdaptionPromptConfig, + AdaptionPromptModel, + LoraConfig, + LoftQConfig, + LoraModel, + LoHaConfig, + LoHaModel, + LoKrConfig, + LoKrModel, + IA3Config, + IA3Model, + AdaLoraConfig, + AdaLoraModel, + PrefixEncoder, + PrefixTuningConfig, + PromptEmbedding, + PromptEncoder, + PromptEncoderConfig, + PromptEncoderReparameterizationType, + PromptTuningConfig, + PromptTuningInit, + MultitaskPromptTuningConfig, + MultitaskPromptTuningInit, + OFTConfig, + OFTModel, + PolyConfig, + PolyModel, +) +from .utils import ( + TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, + PeftType, + TaskType, + bloom_model_postprocess_past_key_value, + get_peft_model_state_dict, + prepare_model_for_kbit_training, + replace_lora_weights_loftq, + set_peft_model_state_dict, + shift_tokens_right, + load_peft_weights, + cast_mixed_precision_params, +) +from .config import PeftConfig, PromptLearningConfig diff --git a/env-llmeval/lib/python3.10/site-packages/peft/auto.py b/env-llmeval/lib/python3.10/site-packages/peft/auto.py new file mode 100644 index 0000000000000000000000000000000000000000..353c9e2f84c48bd61194102da6d6e83dfdcd42db --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/auto.py @@ -0,0 +1,170 @@ +# Copyright 2023-present the HuggingFace Inc. team. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import importlib +import os +from typing import Optional + +from transformers import ( + AutoModel, + AutoModelForCausalLM, + AutoModelForQuestionAnswering, + AutoModelForSeq2SeqLM, + AutoModelForSequenceClassification, + AutoModelForTokenClassification, + AutoTokenizer, +) + +from .config import PeftConfig +from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING +from .peft_model import ( + PeftModel, + PeftModelForCausalLM, + PeftModelForFeatureExtraction, + PeftModelForQuestionAnswering, + PeftModelForSeq2SeqLM, + PeftModelForSequenceClassification, + PeftModelForTokenClassification, +) +from .utils.constants import TOKENIZER_CONFIG_NAME +from .utils.other import check_file_exists_on_hf_hub + + +class _BaseAutoPeftModel: + _target_class = None + _target_peft_class = None + + def __init__(self, *args, **kwargs): + # For consistency with transformers: https://github.com/huggingface/transformers/blob/91d7df58b6537d385e90578dac40204cb550f706/src/transformers/models/auto/auto_factory.py#L400 + raise EnvironmentError( # noqa: UP024 + f"{self.__class__.__name__} is designed to be instantiated " + f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " + f"`{self.__class__.__name__}.from_config(config)` methods." + ) + + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path, + adapter_name: str = "default", + is_trainable: bool = False, + config: Optional[PeftConfig] = None, + **kwargs, + ): + r""" + A wrapper around all the preprocessing steps a user needs to perform in order to load a PEFT model. The kwargs + are passed along to `PeftConfig` that automatically takes care of filtering the kwargs of the Hub methods and + the config object init. + """ + peft_config = PeftConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) + base_model_path = peft_config.base_model_name_or_path + + task_type = getattr(peft_config, "task_type", None) + + if cls._target_class is not None: + target_class = cls._target_class + elif cls._target_class is None and task_type is not None: + # this is only in the case where we use `AutoPeftModel` + raise ValueError( + "Cannot use `AutoPeftModel` with a task type, please use a specific class for your task type. (e.g. `AutoPeftModelForCausalLM` for `task_type='CAUSAL_LM'`)" + ) + + if task_type is not None: + expected_target_class = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[task_type] + if cls._target_peft_class.__name__ != expected_target_class.__name__: + raise ValueError( + f"Expected target PEFT class: {expected_target_class.__name__}, but you have asked for: {cls._target_peft_class.__name__ }" + " make sure that you are loading the correct model for your task type." 
+ ) + elif task_type is None and getattr(peft_config, "auto_mapping", None) is not None: + auto_mapping = getattr(peft_config, "auto_mapping", None) + base_model_class = auto_mapping["base_model_class"] + parent_library_name = auto_mapping["parent_library"] + + parent_library = importlib.import_module(parent_library_name) + target_class = getattr(parent_library, base_model_class) + else: + raise ValueError( + "Cannot infer the auto class from the config, please make sure that you are loading the correct model for your task type." + ) + + base_model = target_class.from_pretrained(base_model_path, **kwargs) + + tokenizer_exists = False + if os.path.exists(os.path.join(pretrained_model_name_or_path, TOKENIZER_CONFIG_NAME)): + tokenizer_exists = True + else: + token = kwargs.get("token", None) + if token is None: + token = kwargs.get("use_auth_token", None) + + tokenizer_exists = check_file_exists_on_hf_hub( + repo_id=pretrained_model_name_or_path, + filename=TOKENIZER_CONFIG_NAME, + revision=kwargs.get("revision", None), + repo_type=kwargs.get("repo_type", None), + token=token, + ) + + if tokenizer_exists: + tokenizer = AutoTokenizer.from_pretrained( + pretrained_model_name_or_path, trust_remote_code=kwargs.get("trust_remote_code", False) + ) + base_model.resize_token_embeddings(len(tokenizer)) + + return cls._target_peft_class.from_pretrained( + base_model, + pretrained_model_name_or_path, + adapter_name=adapter_name, + is_trainable=is_trainable, + config=config, + **kwargs, + ) + + +class AutoPeftModel(_BaseAutoPeftModel): + _target_class = None + _target_peft_class = PeftModel + + +class AutoPeftModelForCausalLM(_BaseAutoPeftModel): + _target_class = AutoModelForCausalLM + _target_peft_class = PeftModelForCausalLM + + +class AutoPeftModelForSeq2SeqLM(_BaseAutoPeftModel): + _target_class = AutoModelForSeq2SeqLM + _target_peft_class = PeftModelForSeq2SeqLM + + +class AutoPeftModelForSequenceClassification(_BaseAutoPeftModel): + _target_class = AutoModelForSequenceClassification + _target_peft_class = PeftModelForSequenceClassification + + +class AutoPeftModelForTokenClassification(_BaseAutoPeftModel): + _target_class = AutoModelForTokenClassification + _target_peft_class = PeftModelForTokenClassification + + +class AutoPeftModelForQuestionAnswering(_BaseAutoPeftModel): + _target_class = AutoModelForQuestionAnswering + _target_peft_class = PeftModelForQuestionAnswering + + +class AutoPeftModelForFeatureExtraction(_BaseAutoPeftModel): + _target_class = AutoModel + _target_peft_class = PeftModelForFeatureExtraction diff --git a/env-llmeval/lib/python3.10/site-packages/peft/config.py b/env-llmeval/lib/python3.10/site-packages/peft/config.py new file mode 100644 index 0000000000000000000000000000000000000000..99aff43ca41b88ccb4ee8887b2969482d7fec936 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/config.py @@ -0,0 +1,270 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import inspect +import json +import os +from dataclasses import asdict, dataclass, field +from typing import Dict, Optional, Union + +from huggingface_hub import hf_hub_download +from transformers.utils import PushToHubMixin + +from .utils import CONFIG_NAME, PeftType, TaskType + + +@dataclass +class PeftConfigMixin(PushToHubMixin): + r""" + This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all + PEFT adapter models. This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to + push your model to the Hub. The method `save_pretrained` will save the configuration of your adapter model in a + directory. The method `from_pretrained` will load the configuration of your adapter model from a directory. + + Args: + peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. + """ + + peft_type: Optional[PeftType] = field(default=None, metadata={"help": "The type of PEFT model."}) + auto_mapping: Optional[dict] = field( + default=None, metadata={"help": "An auto mapping dict to help retrieve the base model class if needed."} + ) + + def to_dict(self) -> Dict: + r""" + Returns the configuration for your adapter model as a dictionary. + """ + return asdict(self) + + def save_pretrained(self, save_directory: str, **kwargs) -> None: + r""" + This method saves the configuration of your adapter model in a directory. + + Args: + save_directory (`str`): + The directory where the configuration will be saved. + kwargs (additional keyword arguments, *optional*): + Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`] + method. + """ + if os.path.isfile(save_directory): + raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") + + os.makedirs(save_directory, exist_ok=True) + auto_mapping_dict = kwargs.pop("auto_mapping_dict", None) + + output_dict = asdict(self) + # converting set type to list + for key, value in output_dict.items(): + if isinstance(value, set): + output_dict[key] = list(value) + + output_path = os.path.join(save_directory, CONFIG_NAME) + + # Add auto mapping details for custom models. + if auto_mapping_dict is not None: + output_dict["auto_mapping"] = auto_mapping_dict + + # save it + with open(output_path, "w") as writer: + writer.write(json.dumps(output_dict, indent=2, sort_keys=True)) + + @classmethod + def from_peft_type(cls, **kwargs): + r""" + This method loads the configuration of your adapter model from a set of kwargs. + + The appropriate configuration type is determined by the `peft_type` argument. If `peft_type` is not provided, + the calling class type is instantiated. + + Args: + kwargs (configuration keyword arguments): + Keyword arguments passed along to the configuration initialization. + """ + # Avoid circular dependency .. TODO: fix this with a larger refactor + from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING + + # TODO: this hack is needed to fix the following issue (on commit 702f937): + # if someone saves a default config and loads it back with `PeftConfig` class it yields to + # not loading the correct config class. + + # from peft import AdaLoraConfig, PeftConfig + # peft_config = AdaLoraConfig() + # print(peft_config) + # >>> AdaLoraConfig(peft_type=, auto_mapping=None, base_model_name_or_path=None, + # revision=None, task_type=None, inference_mode=False, r=8, target_modules=None, lora_alpha=8, lora_dropout=0.0, ... 
+ # + # peft_config.save_pretrained("./test_config") + # peft_config = PeftConfig.from_pretrained("./test_config") + # print(peft_config) + # >>> PeftConfig(peft_type='ADALORA', auto_mapping=None, base_model_name_or_path=None, revision=None, task_type=None, inference_mode=False) + + if "peft_type" in kwargs: + peft_type = kwargs["peft_type"] + config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type] + else: + config_cls = cls + + return config_cls(**kwargs) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: str, subfolder: Optional[str] = None, **kwargs): + r""" + This method loads the configuration of your adapter model from a directory. + + Args: + pretrained_model_name_or_path (`str`): + The directory or the Hub repository id where the configuration is saved. + kwargs (additional keyword arguments, *optional*): + Additional keyword arguments passed along to the child class initialization. + """ + path = ( + os.path.join(pretrained_model_name_or_path, subfolder) + if subfolder is not None + else pretrained_model_name_or_path + ) + + hf_hub_download_kwargs, class_kwargs, _ = cls._split_kwargs(kwargs) + + if os.path.isfile(os.path.join(path, CONFIG_NAME)): + config_file = os.path.join(path, CONFIG_NAME) + else: + try: + config_file = hf_hub_download( + pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs + ) + except Exception: + raise ValueError(f"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'") + + loaded_attributes = cls.from_json_file(config_file) + kwargs = {**class_kwargs, **loaded_attributes} + return cls.from_peft_type(**kwargs) + + @classmethod + def from_json_file(cls, path_json_file: str, **kwargs): + r""" + Loads a configuration file from a json file. + + Args: + path_json_file (`str`): + The path to the json file. + """ + with open(path_json_file) as file: + json_object = json.load(file) + + return json_object + + @classmethod + def _split_kwargs(cls, kwargs): + hf_hub_download_kwargs = {} + class_kwargs = {} + other_kwargs = {} + + for key, value in kwargs.items(): + if key in inspect.signature(hf_hub_download).parameters: + hf_hub_download_kwargs[key] = value + elif key in list(cls.__annotations__): + class_kwargs[key] = value + else: + other_kwargs[key] = value + + return hf_hub_download_kwargs, class_kwargs, other_kwargs + + @classmethod + def _get_peft_type( + cls, + model_id: str, + **hf_hub_download_kwargs, + ): + subfolder = hf_hub_download_kwargs.get("subfolder", None) + + path = os.path.join(model_id, subfolder) if subfolder is not None else model_id + + if os.path.isfile(os.path.join(path, CONFIG_NAME)): + config_file = os.path.join(path, CONFIG_NAME) + else: + try: + config_file = hf_hub_download( + model_id, + CONFIG_NAME, + **hf_hub_download_kwargs, + ) + except Exception: + raise ValueError(f"Can't find '{CONFIG_NAME}' at '{model_id}'") + + loaded_attributes = cls.from_json_file(config_file) + return loaded_attributes["peft_type"] + + @property + def is_prompt_learning(self) -> bool: + r""" + Utility method to check if the configuration is for prompt learning. + """ + return False + + @property + def is_adaption_prompt(self) -> bool: + """Return True if this is an adaption prompt config.""" + return False + + +@dataclass +class PeftConfig(PeftConfigMixin): + """ + This is the base configuration class to store the configuration of a [`PeftModel`]. + + Args: + peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. 
+ task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform. + inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode. + """ + + base_model_name_or_path: Optional[str] = field( + default=None, metadata={"help": "The name of the base model to use."} + ) + revision: Optional[str] = field(default=None, metadata={"help": "The specific model version to use."}) + peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={"help": "Peft type"}) + task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={"help": "Task type"}) + inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"}) + + +@dataclass +class PromptLearningConfig(PeftConfig): + """ + This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or + [`PromptTuning`]. + + Args: + num_virtual_tokens (`int`): The number of virtual tokens to use. + token_dim (`int`): The hidden embedding dimension of the base transformer model. + num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model. + num_attention_heads (`int`): The number of attention heads in the base transformer model. + num_layers (`int`): The number of layers in the base transformer model. + """ + + num_virtual_tokens: int = field(default=None, metadata={"help": "Number of virtual tokens"}) + token_dim: int = field( + default=None, metadata={"help": "The hidden embedding dimension of the base transformer model"} + ) + num_transformer_submodules: Optional[int] = field( + default=None, metadata={"help": "Number of transformer submodules"} + ) + num_attention_heads: Optional[int] = field(default=None, metadata={"help": "Number of attention heads"}) + num_layers: Optional[int] = field(default=None, metadata={"help": "Number of transformer layers"}) + + @property + def is_prompt_learning(self) -> bool: + r""" + Utility method to check if the configuration is for prompt learning. 
+ """ + return True diff --git a/env-llmeval/lib/python3.10/site-packages/peft/helpers.py b/env-llmeval/lib/python3.10/site-packages/peft/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..8875ff7fc493ae4dfff11a1d8e4485b330cb27dc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/helpers.py @@ -0,0 +1,113 @@ +import inspect +from copy import deepcopy +from functools import update_wrapper +from types import MethodType + +from .peft_model import PeftModel + + +def update_forward_signature(model: PeftModel) -> None: + """ + Args: + Updates the forward signature of the PeftModel to include parents class signature + model (`PeftModel`): Peft model to update the forward signature + Example: + + ```python + >>> from transformers import WhisperForConditionalGeneration + >>> from peft import get_peft_model, LoraConfig, update_forward_signature + + >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + >>> peft_config = LoraConfig(r=8, lora_alpha=32, lora_dropout=0.1, target_modules=["q_proj", "v_proj"]) + + >>> peft_model = get_peft_model(model, peft_config) + >>> update_forward_signature(peft_model) + ``` + """ + + # Only update signature when the current forward signature only has *args and **kwargs + current_signature = inspect.signature(model.forward) + if ( + len(current_signature.parameters) == 2 + and "args" in current_signature.parameters + and "kwargs" in current_signature.parameters + ): + forward = deepcopy(model.forward.__func__) + update_wrapper( + forward, type(model.get_base_model()).forward, assigned=("__doc__", "__name__", "__annotations__") + ) + model.forward = MethodType(forward, model) + + +def update_generate_signature(model: PeftModel) -> None: + """ + Args: + Updates the generate signature of a PeftModel with overriding generate to include parents class signature + model (`PeftModel`): Peft model to update the generate signature + Example: + + ```python + >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer + >>> from peft import get_peft_model, LoraConfig, TaskType, update_generate_signature + + >>> model_name_or_path = "bigscience/mt0-large" + >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) + >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) + + >>> peft_config = LoraConfig( + ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 + ... 
) + >>> peft_model = get_peft_model(model, peft_config) + >>> update_generate_signature(peft_model) + >>> help(peft_model.generate) + ``` + """ + if not hasattr(model, "generate"): + return + current_signature = inspect.signature(model.generate) + if ( + len(current_signature.parameters) == 2 + and "args" in current_signature.parameters + and "kwargs" in current_signature.parameters + ) or (len(current_signature.parameters) == 1 and "kwargs" in current_signature.parameters): + generate = deepcopy(model.generate.__func__) + update_wrapper( + generate, + type(model.get_base_model()).generate, + assigned=("__doc__", "__name__", "__annotations__"), + ) + model.generate = MethodType(generate, model) + + +def update_signature(model: PeftModel, method: str = "all") -> None: + """ + Args: + Updates the signature of a PeftModel include parents class signature for forward or generate method + model (`PeftModel`): Peft model to update generate or forward signature method (`str`): method to update + signature choose one of "forward", "generate", "all" + Example: + ```python + >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer + >>> from peft import get_peft_model, LoraConfig, TaskType, update_signature + + >>> model_name_or_path = "bigscience/mt0-large" + >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) + >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) + + >>> peft_config = LoraConfig( + ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 + ... ) + >>> peft_model = get_peft_model(model, peft_config) + >>> update_signature(peft_model) + >>> help(peft_model.generate) + ``` + """ + if method == "forward": + update_forward_signature(model) + elif method == "generate": + update_generate_signature(model) + elif method == "all": + update_forward_signature(model) + update_generate_signature(model) + else: + raise ValueError(f"method {method} is not supported please choose one of ['forward', 'generate', 'all']") diff --git a/env-llmeval/lib/python3.10/site-packages/peft/import_utils.py b/env-llmeval/lib/python3.10/site-packages/peft/import_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6c32d96d52e74bd5de879c06c732fbf82417a8b6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/import_utils.py @@ -0,0 +1,73 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import importlib +import importlib.metadata as importlib_metadata +from functools import lru_cache + +import packaging.version + + +def is_bnb_available() -> bool: + return importlib.util.find_spec("bitsandbytes") is not None + + +def is_bnb_4bit_available() -> bool: + if not is_bnb_available(): + return False + + import bitsandbytes as bnb + + return hasattr(bnb.nn, "Linear4bit") + + +def is_auto_gptq_available(): + if importlib.util.find_spec("auto_gptq") is not None: + AUTOGPTQ_MINIMUM_VERSION = packaging.version.parse("0.5.0") + version_autogptq = packaging.version.parse(importlib_metadata.version("auto_gptq")) + if AUTOGPTQ_MINIMUM_VERSION <= version_autogptq: + return True + else: + raise ImportError( + f"Found an incompatible version of auto-gptq. Found version {version_autogptq}, " + f"but only versions above {AUTOGPTQ_MINIMUM_VERSION} are supported" + ) + + +def is_optimum_available() -> bool: + return importlib.util.find_spec("optimum") is not None + + +@lru_cache +def is_torch_tpu_available(check_device=True): + "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" + if importlib.util.find_spec("torch_xla") is not None: + if check_device: + # We need to check if `xla_device` can be found, will raise a RuntimeError if not + try: + import torch_xla.core.xla_model as xm + + _ = xm.xla_device() + return True + except RuntimeError: + return False + return True + return False + + +def is_aqlm_available(): + return importlib.util.find_spec("aqlm") is not None + + +def is_auto_awq_available(): + return importlib.util.find_spec("awq") is not None diff --git a/env-llmeval/lib/python3.10/site-packages/peft/mapping.py b/env-llmeval/lib/python3.10/site-packages/peft/mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..b62ddf94aafa1a32b2711c0a6e365900065a93b4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/mapping.py @@ -0,0 +1,168 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +import torch + +from .config import PeftConfig +from .mixed_model import PeftMixedModel +from .peft_model import ( + PeftModel, + PeftModelForCausalLM, + PeftModelForFeatureExtraction, + PeftModelForQuestionAnswering, + PeftModelForSeq2SeqLM, + PeftModelForSequenceClassification, + PeftModelForTokenClassification, +) +from .tuners import ( + AdaLoraConfig, + AdaLoraModel, + AdaptionPromptConfig, + IA3Config, + IA3Model, + LoHaConfig, + LoHaModel, + LoKrConfig, + LoKrModel, + LoraConfig, + LoraModel, + MultitaskPromptTuningConfig, + OFTConfig, + OFTModel, + PolyConfig, + PolyModel, + PrefixTuningConfig, + PromptEncoderConfig, + PromptTuningConfig, +) +from .utils import _prepare_prompt_learning_config + + +if TYPE_CHECKING: + from transformers import PreTrainedModel + + +MODEL_TYPE_TO_PEFT_MODEL_MAPPING: dict[str, PeftModel] = { + "SEQ_CLS": PeftModelForSequenceClassification, + "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, + "CAUSAL_LM": PeftModelForCausalLM, + "TOKEN_CLS": PeftModelForTokenClassification, + "QUESTION_ANS": PeftModelForQuestionAnswering, + "FEATURE_EXTRACTION": PeftModelForFeatureExtraction, +} + +PEFT_TYPE_TO_CONFIG_MAPPING: dict[str, PeftConfig] = { + "ADAPTION_PROMPT": AdaptionPromptConfig, + "PROMPT_TUNING": PromptTuningConfig, + "PREFIX_TUNING": PrefixTuningConfig, + "P_TUNING": PromptEncoderConfig, + "LORA": LoraConfig, + "LOHA": LoHaConfig, + "LOKR": LoKrConfig, + "ADALORA": AdaLoraConfig, + "IA3": IA3Config, + "MULTITASK_PROMPT_TUNING": MultitaskPromptTuningConfig, + "OFT": OFTConfig, + "POLY": PolyConfig, +} + +PEFT_TYPE_TO_TUNER_MAPPING = { + "LORA": LoraModel, + "LOHA": LoHaModel, + "LOKR": LoKrModel, + "ADALORA": AdaLoraModel, + "IA3": IA3Model, + "OFT": OFTModel, + "POLY": PolyModel, +} + + +def get_peft_config(config_dict: dict[str, Any]) -> PeftConfig: + """ + Returns a Peft config object from a dictionary. + + Args: + config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. + """ + + return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict) + + +def get_peft_model( + model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default", mixed: bool = False +) -> PeftModel | PeftMixedModel: + """ + Returns a Peft model object from a model and a config. + + Args: + model ([`transformers.PreTrainedModel`]): + Model to be wrapped. + peft_config ([`PeftConfig`]): + Configuration object containing the parameters of the Peft model. + adapter_name (`str`, `optional`, defaults to `"default"`): + The name of the adapter to be injected, if not provided, the default adapter name is used ("default"). + mixed (`bool`, `optional`, defaults to `False`): + Whether to allow mixing different (compatible) adapter types. 
+ """ + model_config = getattr(model, "config", {"model_type": "custom"}) + if hasattr(model_config, "to_dict"): + model_config = model_config.to_dict() + + peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None) + + if mixed: + return PeftMixedModel(model, peft_config, adapter_name=adapter_name) + + if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning: + return PeftModel(model, peft_config, adapter_name=adapter_name) + + if peft_config.is_prompt_learning: + peft_config = _prepare_prompt_learning_config(peft_config, model_config) + return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config, adapter_name=adapter_name) + + +def inject_adapter_in_model( + peft_config: PeftConfig, model: torch.nn.Module, adapter_name: str = "default" +) -> torch.nn.Module: + r""" + A simple API to create and inject adapter in-place into a model. Currently the API does not support prompt learning + methods and adaption prompt. Make sure to have the correct `target_names` set in the `peft_config` object. The API + calls `get_peft_model` under the hood but would be restricted only to non-prompt learning methods. + + Args: + peft_config (`PeftConfig`): + Configuration object containing the parameters of the Peft model. + model (`torch.nn.Module`): + The input model where the adapter will be injected. + adapter_name (`str`, `optional`, defaults to `"default"`): + The name of the adapter to be injected, if not provided, the default adapter name is used ("default"). + """ + if peft_config.is_prompt_learning or peft_config.is_adaption_prompt: + raise ValueError("`create_and_replace` does not support prompt learning and adaption prompt yet.") + + if peft_config.peft_type not in PEFT_TYPE_TO_TUNER_MAPPING.keys(): + raise ValueError( + f"`inject_adapter_in_model` does not support {peft_config.peft_type} yet. Please use `get_peft_model`." + ) + + tuner_cls = PEFT_TYPE_TO_TUNER_MAPPING[peft_config.peft_type] + + # By instantiating a peft model we are injecting randomly initialized LoRA layers into the model's modules. + peft_model = tuner_cls(model, peft_config, adapter_name=adapter_name) + + return peft_model.model diff --git a/env-llmeval/lib/python3.10/site-packages/peft/mixed_model.py b/env-llmeval/lib/python3.10/site-packages/peft/mixed_model.py new file mode 100644 index 0000000000000000000000000000000000000000..92b9f74ecd4caace48a0d1d59288b6fdfdfad0bf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/mixed_model.py @@ -0,0 +1,409 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import os +from contextlib import contextmanager +from typing import Any, Optional, Union + +import torch +from accelerate.hooks import remove_hook_from_submodules +from torch import nn +from transformers.utils import PushToHubMixin + +from peft.tuners.mixed import COMPATIBLE_TUNER_TYPES + +from .config import PeftConfig +from .peft_model import PeftModel +from .tuners import ( + AdaLoraModel, + IA3Model, + LoHaModel, + LoKrModel, + LoraModel, + MixedModel, + OFTModel, +) +from .utils import PeftType, _set_adapter, _set_trainable + + +PEFT_TYPE_TO_MODEL_MAPPING = { + PeftType.LORA: LoraModel, + PeftType.LOHA: LoHaModel, + PeftType.LOKR: LoKrModel, + PeftType.ADALORA: AdaLoraModel, + PeftType.IA3: IA3Model, + PeftType.OFT: OFTModel, +} + + +def _prepare_model_for_gradient_checkpointing(model: nn.Module) -> None: + r""" + Prepares the model for gradient checkpointing if necessary + """ + # Note: same as PeftModel._prepare_model_for_gradient_checkpointing + if not getattr(model, "is_gradient_checkpointing", True): + return model + + if not ( + getattr(model, "is_loaded_in_8bit", False) + or getattr(model, "is_loaded_in_4bit", False) + or getattr(model, "is_quantized", False) + ): + if hasattr(model, "enable_input_require_grads"): + model.enable_input_require_grads() + elif hasattr(model, "get_input_embeddings"): + + def make_inputs_require_grad(module, input, output): + output.requires_grad_(True) + + model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) + + +def _check_config_compatible(peft_config: PeftConfig) -> None: + if peft_config.peft_type not in COMPATIBLE_TUNER_TYPES: + raise ValueError( + f"The provided `peft_type` '{peft_config.peft_type.value}' is not compatible with the `PeftMixedModel`. " + f"Compatible types are: {COMPATIBLE_TUNER_TYPES}" + ) + + +class PeftMixedModel(PushToHubMixin, torch.nn.Module): + """ + PeftMixedModel for loading mixing different types of adapters for inference. + + This class does not support loading/saving, and it shouldn't usually be initialized directly. Instead, use + `get_peft_model` with the argument `mixed=True`. + + + + Read the [Mixed adapter types](https://huggingface.co/docs/peft/en/developer_guides/mixed_models) guide to learn + more about using different adapter types. + + + + Example: + + ```py + >>> from peft import get_peft_model + + >>> base_model = ... # load the base model, e.g. from transformers + >>> peft_model = PeftMixedModel.from_pretrained(base_model, path_to_adapter1, "adapter1").eval() + >>> peft_model.load_adapter(path_to_adapter2, "adapter2") + >>> peft_model.set_adapter(["adapter1", "adapter2"]) # activate both adapters + >>> peft_model(data) # forward pass using both adapters + ``` + + Args: + model (`torch.nn.Module`): + The model to be tuned. + config (`PeftConfig`): + The config of the model to be tuned. The adapter type must be compatible. + adapter_name (`str`, `optional`, defaults to `"default"`): + The name of the first adapter. 
+ """ + + def __init__(self, model: nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: + super().__init__() + _check_config_compatible(peft_config) + _prepare_model_for_gradient_checkpointing(model) + self.modules_to_save = None + self.base_model = MixedModel(model, {adapter_name: peft_config}, adapter_name) + self.set_modules_to_save(peft_config, adapter_name) + + self.config = getattr(model, "config", {"model_type": "custom"}) + + # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid + # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected + # behavior we disable that in this line. + if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"): + self.base_model.config.pretraining_tp = 1 + + @property + def peft_config(self) -> dict[str, PeftConfig]: + return self.base_model.peft_config + + @property + def active_adapter(self) -> str: + return self.base_model.active_adapter + + @property + def active_adapters(self) -> list[str]: + return self.base_model.active_adapters + + def get_nb_trainable_parameters(self): + r""" + Returns the number of trainable parameters and number of all parameters in the model. + """ + # note: same as PeftModel.get_nb_trainable_parameters + trainable_params = 0 + all_param = 0 + for _, param in self.named_parameters(): + num_params = param.numel() + # if using DS Zero 3 and the weights are initialized empty + if num_params == 0 and hasattr(param, "ds_numel"): + num_params = param.ds_numel + + # Due to the design of 4bit linear layers from bitsandbytes + # one needs to multiply the number of parameters by 2 to get + # the correct number of parameters + if param.__class__.__name__ == "Params4bit": + num_params = num_params * 2 + + all_param += num_params + if param.requires_grad: + trainable_params += num_params + + return trainable_params, all_param + + def print_trainable_parameters(self): + """ + Prints the number of trainable parameters in the model. + + Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from + num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns + (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model. + For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for + prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number + of trainable parameters of the backbone transformer model which can be different. + """ + # note: same as PeftModel.print_trainable_parameters + trainable_params, all_param = self.get_nb_trainable_parameters() + + print( + f"trainable params: {trainable_params:,d} || " + f"all params: {all_param:,d} || " + f"trainable%: {100 * trainable_params / all_param:.4f}" + ) + + def __getattr__(self, name: str): + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self.base_model, name) + + def forward(self, *args: Any, **kwargs: Any): + """ + Forward pass of the model. + """ + return self.base_model(*args, **kwargs) + + def generate(self, *args: Any, **kwargs: Any): + """ + Generate output. 
+ """ + return self.base_model.generate(*args, **kwargs) + + @contextmanager + def disable_adapter(self): + """ + Disables the adapter module. + """ + try: + self.base_model.disable_adapter_layers() + yield + finally: + self.base_model.enable_adapter_layers() + + def add_adapter(self, adapter_name: str, peft_config: PeftConfig): + _check_config_compatible(peft_config) + + try: + self.peft_config[adapter_name] = peft_config + self.base_model.inject_adapter(self, adapter_name) + except Exception: # something went wrong, roll back + if adapter_name in self.peft_config: + del self.peft_config[adapter_name] + raise + + self.set_modules_to_save(peft_config, adapter_name) + + def set_modules_to_save(self, peft_config: PeftConfig, adapter_name: str) -> None: + if (modules_to_save := getattr(peft_config, "modules_to_save", None)) is None: + return + + if self.modules_to_save is None: + self.modules_to_save = set(modules_to_save) + else: + self.modules_to_save.update(modules_to_save) + _set_trainable(self, adapter_name) + + def set_adapter(self, adapter_name: Union[str, list[str]]) -> None: + """ + Sets the active adapter(s) for the model. + + Note that the order in which the adapters are applied during the forward pass may not be the same as the order + in which they are passed to this function. Instead, the order during the forward pass is determined by the + order in which the adapters were loaded into the model. The active adapters only determine which adapters are + active during the forward pass, but not the order in which they are applied. + + Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is + not desired, use the following code. + + ```py + >>> for name, param in model_peft.named_parameters(): + ... if ...: # some check on name (ex. if 'lora' in name) + ... param.requires_grad = False + ``` + + Args: + adapter_name (`str` or `List[str]`): + The name of the adapter(s) to be activated. + """ + if isinstance(adapter_name, str): + adapter_name = [adapter_name] + + mismatched = set(adapter_name) - set(self.peft_config.keys()) + if mismatched: + raise ValueError( + f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" + ) + + self.base_model.set_adapter(adapter_name) + _set_adapter(self, adapter_name) + + def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None: + if isinstance(adapter_name, str): + adapter_name = [adapter_name] + + mismatched = set(adapter_name) - set(self.peft_config.keys()) + if mismatched: + raise ValueError( + f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" + ) + + self.base_model.delete_adapter(adapter_name) + + def merge_and_unload(self, *args: Any, **kwargs: Any): + r""" + This method merges the adapter layers into the base model. This is needed if someone wants to use the base + model as a standalone model. + + Args: + progressbar (`bool`): + whether to show a progressbar indicating the unload and merge process + safe_merge (`bool`): + whether to activate the safe merging check to check if there is any potential Nan in the adapter + weights + adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. + """ + return self.base_model.merge_and_unload(*args, **kwargs) + + def unload(self, *args: Any, **kwargs: Any): + """ + Gets back the base model by removing all the adapter modules without merging. 
This gives back the original base + model. + """ + return self.base_model.unload(*args, **kwargs) + + @classmethod + def _split_kwargs(cls, kwargs: dict[str, Any]): + return PeftModel._split_kwargs(kwargs) + + def load_adapter(self, model_id: str, adapter_name: str, *args: Any, **kwargs: Any): + output = PeftModel.load_adapter(self, model_id, adapter_name, *args, **kwargs) + # TODO: not quite clear why this is necessary but tests fail without it + self.set_adapter(self.active_adapters) + return output + + def create_or_update_model_card(self, output_dir: str): + raise NotImplementedError(f"Model card creation is not supported for {self.__class__.__name__} (yet).") + + def save_pretrained( + self, + save_directory: str, + safe_serialization: bool = False, + selected_adapters: Optional[list[str]] = None, + **kwargs: Any, + ): + raise NotImplementedError(f"Saving is not supported for {self.__class__.__name__} (yet).") + + @classmethod + def from_pretrained( + cls, + model: nn.Module, + model_id: str | os.PathLike, + adapter_name: str = "default", + is_trainable: bool = False, + config: Optional[PeftConfig] = None, + **kwargs: Any, + ): + r""" + Instantiate a PEFT mixed model from a pretrained model and loaded PEFT weights. + + Note that the passed `model` may be modified inplace. + + Args: + model (`nn.Module`): + The model to be adapted. + model_id (`str` or `os.PathLike`): + The name of the PEFT configuration to use. Can be either: + - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face + Hub. + - A path to a directory containing a PEFT configuration file saved using the `save_pretrained` + method (`./my_peft_config_directory/`). + adapter_name (`str`, *optional*, defaults to `"default"`): + The name of the adapter to be loaded. This is useful for loading multiple adapters. + is_trainable (`bool`, *optional*, defaults to `False`): + Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and use for + inference + config ([`~peft.PeftConfig`], *optional*): + The configuration object to use instead of an automatically loaded configuration. This configuration + object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already + loaded before calling `from_pretrained`. + kwargs: (`optional`): + Additional keyword arguments passed along to the specific PEFT configuration class. 
+ """ + # note: adapted from PeftModel.from_pretrained + from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING + + # load the config + if config is None: + config = PEFT_TYPE_TO_CONFIG_MAPPING[ + PeftConfig._get_peft_type( + model_id, + subfolder=kwargs.get("subfolder", None), + revision=kwargs.get("revision", None), + cache_dir=kwargs.get("cache_dir", None), + use_auth_token=kwargs.get("use_auth_token", None), + ) + ].from_pretrained(model_id, **kwargs) + elif isinstance(config, PeftConfig): + config.inference_mode = not is_trainable + else: + raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}") + + # note: this is different from PeftModel.from_pretrained + if config.peft_type not in PEFT_TYPE_TO_MODEL_MAPPING: + raise ValueError(f"Adapter of type {config.peft_type} is not supported for mixed models.") + + if (getattr(model, "hf_device_map", None) is not None) and len( + set(model.hf_device_map.values()).intersection({"cpu", "disk"}) + ) > 0: + remove_hook_from_submodules(model) + + if config.is_prompt_learning and is_trainable: + # note: should not be possible to reach, but just in case + raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") + else: + config.inference_mode = not is_trainable + + # note: this is different from PeftModel.from_pretrained, we always return a PeftMixedModel + model = cls(model, config, adapter_name) + model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs) + return model diff --git a/env-llmeval/lib/python3.10/site-packages/peft/peft_model.py b/env-llmeval/lib/python3.10/site-packages/peft/peft_model.py new file mode 100644 index 0000000000000000000000000000000000000000..e4b78a1b9a327cc382a0b722aabcfe0ec5b016f9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/peft_model.py @@ -0,0 +1,1986 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import collections +import inspect +import os +import warnings +from contextlib import contextmanager +from copy import deepcopy +from typing import Any, Optional, Union + +import packaging.version +import torch +import transformers +from accelerate import dispatch_model, infer_auto_device_map +from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules +from accelerate.utils import get_balanced_memory +from huggingface_hub import ModelCard, ModelCardData, hf_hub_download +from safetensors.torch import save_file as safe_save_file +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from transformers import PreTrainedModel +from transformers.modeling_outputs import QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput +from transformers.utils import PushToHubMixin + +from . 
import __version__ +from .config import PeftConfig +from .tuners import ( + AdaLoraModel, + AdaptionPromptModel, + IA3Model, + LoHaModel, + LoKrModel, + LoraModel, + MultitaskPromptEmbedding, + OFTModel, + PolyModel, + PrefixEncoder, + PromptEmbedding, + PromptEncoder, +) +from .utils import ( + SAFETENSORS_WEIGHTS_NAME, + TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, + WEIGHTS_NAME, + PeftType, + TaskType, + _get_batch_size, + _prepare_prompt_learning_config, + _set_adapter, + _set_trainable, + get_peft_model_state_dict, + id_tensor_storage, + infer_device, + load_peft_weights, + set_peft_model_state_dict, + shift_tokens_right, +) + + +PEFT_TYPE_TO_MODEL_MAPPING = { + PeftType.LORA: LoraModel, + PeftType.LOHA: LoHaModel, + PeftType.LOKR: LoKrModel, + PeftType.PROMPT_TUNING: PromptEmbedding, + PeftType.P_TUNING: PromptEncoder, + PeftType.PREFIX_TUNING: PrefixEncoder, + PeftType.ADALORA: AdaLoraModel, + PeftType.ADAPTION_PROMPT: AdaptionPromptModel, + PeftType.IA3: IA3Model, + PeftType.OFT: OFTModel, + PeftType.POLY: PolyModel, +} + + +class PeftModel(PushToHubMixin, torch.nn.Module): + """ + Base model encompassing various Peft methods. + + Args: + model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft. + peft_config ([`PeftConfig`]): The configuration of the Peft model. + adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`. + + **Attributes**: + - **base_model** ([`torch.nn.Module`]) -- The base transformer model used for Peft. + - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model. + - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when + saving the model. + - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if + using [`PromptLearningConfig`]. + - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if + using [`PromptLearningConfig`]. + - **transformer_backbone_name** (`str`) -- The name of the transformer + backbone in the base model if using [`PromptLearningConfig`]. + - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone + in the base model if using [`PromptLearningConfig`]. + """ + + def __init__(self, model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default") -> None: + super().__init__() + self.modules_to_save = None + self.active_adapter = adapter_name + self.peft_type = peft_config.peft_type + # These args are special PEFT arguments that users can pass. They need to be removed before passing them to + # forward. + self.special_peft_forward_args = {"adapter_names"} + + self._is_prompt_learning = peft_config.is_prompt_learning + if self._is_prompt_learning: + self._peft_config = {adapter_name: peft_config} + self.base_model = model + self.add_adapter(adapter_name, peft_config) + else: + self._peft_config = None + cls = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type] + self.base_model = cls(model, {adapter_name: peft_config}, adapter_name) + self.set_additional_trainable_modules(peft_config, adapter_name) + + if getattr(model, "is_gradient_checkpointing", True): + model = self._prepare_model_for_gradient_checkpointing(model) + + # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid + # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected + # behavior we disable that in this line. 
+ if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"): + self.base_model.config.pretraining_tp = 1 + + @property + def peft_config(self) -> dict[str, PeftConfig]: + if self._is_prompt_learning: + return self._peft_config + return self.base_model.peft_config + + @property + def active_adapters(self) -> list[str]: + try: + adapters = self.base_model.active_adapters + except AttributeError: + adapters = self.active_adapter + if isinstance(adapters, str): + adapters = [adapters] + return adapters + + @peft_config.setter + def peft_config(self, value: dict[str, PeftConfig]): + if self._is_prompt_learning: + self._peft_config = value + else: + self.base_model.peft_config = value + + def save_pretrained( + self, + save_directory: str, + safe_serialization: bool = True, + selected_adapters: Optional[list[str]] = None, + save_embedding_layers: Union[str, bool] = "auto", + is_main_process: bool = True, + **kwargs: Any, + ) -> None: + r""" + This function saves the adapter model and the adapter configuration files to a directory, so that it can be + reloaded using the [`PeftModel.from_pretrained`] class method, and also used by the [`PeftModel.push_to_hub`] + method. + + Args: + save_directory (`str`): + Directory where the adapter model and configuration files will be saved (will be created if it does not + exist). + safe_serialization (`bool`, *optional*): + Whether to save the adapter files in safetensors format, defaults to `True`. + selected_adapters (`List[str]`, *optional*): + A list of adapters to be saved. If `None`, will default to all adapters. + save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `"auto"`): + If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common + embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available. + and automatically sets the boolean flag. This only works for 🤗 transformers models. + is_main_process (`bool`, *optional*): + Whether the process calling this is the main process or not. Will default to `True`. Will not save the + checkpoint if not on the main process, which is important for multi device setups (e.g. DDP). + kwargs (additional keyword arguments, *optional*): + Additional keyword arguments passed along to the `push_to_hub` method. + """ + if os.path.isfile(save_directory): + raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") + + if selected_adapters is None: + selected_adapters = list(self.peft_config.keys()) + else: + if any( + selected_adapter_name not in list(self.peft_config.keys()) + for selected_adapter_name in selected_adapters + ): + raise ValueError( + f"You passed an invalid `selected_adapters` arguments, current supported adapter names are" + f" {list(self.peft_config.keys())} - got {selected_adapters}." 
+ ) + + if is_main_process: + os.makedirs(save_directory, exist_ok=True) + self.create_or_update_model_card(save_directory) + + for adapter_name in selected_adapters: + peft_config = self.peft_config[adapter_name] + # save only the trainable weights + output_state_dict = get_peft_model_state_dict( + self, + state_dict=kwargs.get("state_dict", None), + adapter_name=adapter_name, + save_embedding_layers=save_embedding_layers, + ) + output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory + os.makedirs(output_dir, exist_ok=True) + + if is_main_process and safe_serialization: + # Section copied from: https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L2111-L2134 + # Safetensors does not allow tensor aliasing. + # We're going to remove aliases before saving + ptrs = collections.defaultdict(list) + for name, tensor in output_state_dict.items(): + # Sometimes in the state_dict we have non-tensor objects. + # e.g. in bitsandbytes we have some `str` objects in the state_dict + if isinstance(tensor, torch.Tensor): + ptrs[id_tensor_storage(tensor)].append(name) + else: + # In the non-tensor case, fall back to the pointer of the object itself + ptrs[id(tensor)].append(name) + + # These are all the pointers of shared tensors. + shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} + + for _, names in shared_ptrs.items(): + # Here we just clone the shared tensors to avoid tensor aliasing which is + # not supported in safetensors. + for shared_tensor_name in names[1:]: + output_state_dict[shared_tensor_name] = output_state_dict[shared_tensor_name].clone() + + safe_save_file( + output_state_dict, + os.path.join(output_dir, SAFETENSORS_WEIGHTS_NAME), + metadata={"format": "pt"}, + ) + elif is_main_process: + torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) + + # save the config and change the inference mode to `True` + if peft_config.base_model_name_or_path is None: + peft_config.base_model_name_or_path = ( + self.base_model.__dict__.get("name_or_path", None) + if peft_config.is_prompt_learning + else self.base_model.model.__dict__.get("name_or_path", None) + ) + inference_mode = peft_config.inference_mode + peft_config.inference_mode = True + + if peft_config.task_type is None: + # deal with auto mapping + base_model_class = self._get_base_model_class( + is_prompt_tuning=peft_config.is_prompt_learning, + ) + parent_library = base_model_class.__module__ + + auto_mapping_dict = { + "base_model_class": base_model_class.__name__, + "parent_library": parent_library, + } + else: + auto_mapping_dict = None + + if is_main_process: + peft_config.save_pretrained(output_dir, auto_mapping_dict=auto_mapping_dict) + peft_config.inference_mode = inference_mode + + @classmethod + def from_pretrained( + cls, + model: torch.nn.Module, + model_id: Union[str, os.PathLike], + adapter_name: str = "default", + is_trainable: bool = False, + config: Optional[PeftConfig] = None, + **kwargs: Any, + ) -> PeftModel: + r""" + Instantiate a PEFT model from a pretrained model and loaded PEFT weights. + + Note that the passed `model` may be modified inplace. + + Args: + model ([`torch.nn.Module`]): + The model to be adapted. For 🤗 Transformers models, the model should be initialized with the + [`~transformers.PreTrainedModel.from_pretrained`]. + model_id (`str` or `os.PathLike`): + The name of the PEFT configuration to use. 
Can be either: + - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face + Hub. + - A path to a directory containing a PEFT configuration file saved using the `save_pretrained` + method (`./my_peft_config_directory/`). + adapter_name (`str`, *optional*, defaults to `"default"`): + The name of the adapter to be loaded. This is useful for loading multiple adapters. + is_trainable (`bool`, *optional*, defaults to `False`): + Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be + used for inference. + config ([`~peft.PeftConfig`], *optional*): + The configuration object to use instead of an automatically loaded configuration. This configuration + object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already + loaded before calling `from_pretrained`. + kwargs: (`optional`): + Additional keyword arguments passed along to the specific PEFT configuration class. + """ + from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING + + # load the config + if config is None: + config = PEFT_TYPE_TO_CONFIG_MAPPING[ + PeftConfig._get_peft_type( + model_id, + subfolder=kwargs.get("subfolder", None), + revision=kwargs.get("revision", None), + cache_dir=kwargs.get("cache_dir", None), + use_auth_token=kwargs.get("use_auth_token", None), + token=kwargs.get("token", None), + ) + ].from_pretrained(model_id, **kwargs) + elif isinstance(config, PeftConfig): + config.inference_mode = not is_trainable + else: + raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}") + + if (getattr(model, "hf_device_map", None) is not None) and len( + set(model.hf_device_map.values()).intersection({"cpu", "disk"}) + ) > 0: + remove_hook_from_submodules(model) + + if config.is_prompt_learning and is_trainable: + raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") + else: + config.inference_mode = not is_trainable + + if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys(): + model = cls(model, config, adapter_name) + else: + model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name) + model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs) + return model + + def _setup_prompt_encoder(self, adapter_name: str): + config = self.peft_config[adapter_name] + if not hasattr(self, "prompt_encoder"): + self.prompt_encoder = torch.nn.ModuleDict({}) + self.prompt_tokens = {} + transformer_backbone = None + for name, module in self.base_model.named_children(): + for param in module.parameters(): + param.requires_grad = False + if isinstance(module, PreTrainedModel): + # Make sure to freeze Tranformers model + if transformer_backbone is None: + transformer_backbone = module + self.transformer_backbone_name = name + if transformer_backbone is None: + transformer_backbone = self.base_model + + if config.num_transformer_submodules is None: + config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1 + + for named_param, value in list(transformer_backbone.named_parameters()): + # for ZeRO-3, the tensor is sharded across accelerators and deepspeed modifies it to a tensor with shape [0] + # the actual unsharded shape is stored in "ds_shape" attribute + # special handling is needed in case the model is initialized in deepspeed.zero.Init() context or HfDeepSpeedConfig + # has been called before + # For reference refer to issue: 
https://github.com/huggingface/peft/issues/996 + deepspeed_distributed_tensor_shape = getattr(value, "ds_shape", None) + + if value.shape[0] == self.base_model.config.vocab_size or ( + deepspeed_distributed_tensor_shape is not None + and deepspeed_distributed_tensor_shape[0] == self.base_model.config.vocab_size + ): + self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(".weight", "")) + break + + if config.peft_type == PeftType.PROMPT_TUNING: + prompt_encoder = PromptEmbedding(config, self.word_embeddings) + elif config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: + prompt_encoder = MultitaskPromptEmbedding(config, self.word_embeddings) + elif config.peft_type == PeftType.P_TUNING: + prompt_encoder = PromptEncoder(config) + elif config.peft_type == PeftType.PREFIX_TUNING: + prompt_encoder = PrefixEncoder(config) + else: + raise ValueError("Not supported") + + prompt_encoder = prompt_encoder.to(self.device) + self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder})) + self.prompt_tokens[adapter_name] = torch.arange( + config.num_virtual_tokens * config.num_transformer_submodules + ).long() + + def _prepare_model_for_gradient_checkpointing(self, model: PreTrainedModel): + r""" + Prepares the model for gradient checkpointing if necessary + """ + if not ( + getattr(model, "is_loaded_in_8bit", False) + or getattr(model, "is_loaded_in_4bit", False) + or getattr(model, "is_quantized", False) + ): + if hasattr(model, "enable_input_require_grads"): + model.enable_input_require_grads() + elif hasattr(model, "get_input_embeddings"): + + def make_inputs_require_grad(module, input, output): + output.requires_grad_(True) + + model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) + return model + + def get_prompt_embedding_to_save(self, adapter_name: str) -> torch.Tensor: + """ + Returns the prompt embedding to save when saving the model. Only applicable when using a prompt learning + method. + """ + prompt_encoder = self.prompt_encoder[adapter_name] + prompt_tokens = ( + self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(prompt_encoder.embedding.weight.device) + ) + if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING: + prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens] + + if self.peft_config[adapter_name].peft_type == PeftType.MULTITASK_PROMPT_TUNING: + prompt_embeddings = super(MultitaskPromptEmbedding, prompt_encoder).forward(prompt_tokens) + else: + prompt_embeddings = prompt_encoder(prompt_tokens) + + return prompt_embeddings[0].detach().cpu() + + def get_prompt(self, batch_size: int, task_ids: Optional[torch.Tensor] = None) -> torch.Tensor: + """ + Returns the virtual prompts to use for Peft. Only applicable when using a prompt learning method. 
+ """ + peft_config = self.active_peft_config + prompt_encoder = self.prompt_encoder[self.active_adapter] + prompt_tokens = ( + self.prompt_tokens[self.active_adapter] + .unsqueeze(0) + .expand(batch_size, -1) + .to(prompt_encoder.embedding.weight.device) + ) + if peft_config.peft_type == PeftType.PREFIX_TUNING: + prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens] + if peft_config.inference_mode: + past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) + else: + past_key_values = prompt_encoder(prompt_tokens) + if self.base_model_torch_dtype is not None: + past_key_values = past_key_values.to(self.base_model_torch_dtype) + past_key_values = past_key_values.view( + batch_size, + peft_config.num_virtual_tokens, + peft_config.num_layers * 2, + peft_config.num_attention_heads, + peft_config.token_dim // peft_config.num_attention_heads, + ) + if peft_config.num_transformer_submodules == 2: + past_key_values = torch.cat([past_key_values, past_key_values], dim=2) + past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split( + peft_config.num_transformer_submodules * 2 + ) + if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None: + post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type] + past_key_values = post_process_fn(past_key_values) + return past_key_values + else: + if peft_config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: + prompts = prompt_encoder(prompt_tokens, task_ids) + else: + if peft_config.inference_mode: + prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) + else: + prompts = prompt_encoder(prompt_tokens) + return prompts + + def get_nb_trainable_parameters(self) -> tuple[int, int]: + r""" + Returns the number of trainable parameters and the number of all parameters in the model. + """ + trainable_params = 0 + all_param = 0 + for _, param in self.named_parameters(): + num_params = param.numel() + # if using DS Zero 3 and the weights are initialized empty + if num_params == 0 and hasattr(param, "ds_numel"): + num_params = param.ds_numel + + # Due to the design of 4bit linear layers from bitsandbytes + # one needs to multiply the number of parameters by 2 to get + # the correct number of parameters + if param.__class__.__name__ == "Params4bit": + num_bytes = param.quant_storage.itemsize if hasattr(param, "quant_storage") else 1 + num_params = num_params * 2 * num_bytes + + all_param += num_params + if param.requires_grad: + trainable_params += num_params + + return trainable_params, all_param + + def print_trainable_parameters(self) -> None: + """ + Prints the number of trainable parameters in the model. + + Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from + num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns + (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model. + For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for + prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number + of trainable parameters of the backbone transformer model which can be different. 
+ """ + trainable_params, all_param = self.get_nb_trainable_parameters() + + print( + f"trainable params: {trainable_params:,d} || all params: {all_param:,d} || trainable%: {100 * trainable_params / all_param}" + ) + + def __getattr__(self, name: str): + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self.base_model, name) + + @contextmanager + def _enable_peft_forward_hooks(self, *args, **kwargs): + # If the base model has a method called _enable_peft_forward_hooks, it is invoked as a context. Otherwise, this + # runs without any changes + if hasattr(self.base_model, "_enable_peft_forward_hooks"): + with self.base_model._enable_peft_forward_hooks(*args, **kwargs): + yield + return + else: + # nothing to enable + yield + return + + def forward(self, *args: Any, **kwargs: Any): + """ + Forward pass of the model. + """ + with self._enable_peft_forward_hooks(*args, **kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + return self.get_base_model()(*args, **kwargs) + + def generate(self, *args, **kwargs): + with self._enable_peft_forward_hooks(*args, **kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + return self.get_base_model().generate(*args, **kwargs) + + def _get_base_model_class(self, is_prompt_tuning=False): + """ + Returns the base model class. + """ + if not is_prompt_tuning: + return self.base_model.model.__class__ + return self.base_model.__class__ + + @contextmanager + def disable_adapter(self): + """ + Context manager that disables the adapter module. Use this to run inference on the base model. + + Example: + + ```py + >>> with model.disable_adapter(): + ... model(inputs) + ``` + """ + try: + if self.peft_config[self.active_adapter].is_prompt_learning: + # TODO: consider replacing this patching of methods with a more robust mechanism: setting a flag and + # letting the underlying methods deal with it, same as how LoRA does it. + old_forward = self.forward + self.forward = self.base_model.forward + old_prepare_inputs_for_generation = self.prepare_inputs_for_generation + self.prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation + else: + self.base_model.disable_adapter_layers() + yield + finally: + if self.peft_config[self.active_adapter].is_prompt_learning: + self.forward = old_forward + self.prepare_inputs_for_generation = old_prepare_inputs_for_generation + else: + self.base_model.enable_adapter_layers() + + def get_base_model(self) -> torch.nn.Module: + """ + Returns the base model. + """ + return ( + self.base_model + if (self.active_peft_config.is_prompt_learning or self.peft_type == PeftType.POLY) + else self.base_model.model + ) + + def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None: + """ + Add an adapter to the model based on the passed configuration. + + This adapter is not trained. To load a trained adapter, check out [`PeftModel.load_adapter`]. + + The name for the new adapter should be unique. + + The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active + adapter. + + Args: + adapter_name (`str`): + The name of the adapter to be added. + peft_config ([`PeftConfig`]): + The configuration of the adapter to be added. + """ + if peft_config.peft_type != self.peft_type: + raise ValueError( + f"Cannot combine adapters with different peft types. 
" + f"Found {self.peft_type} and {peft_config.peft_type}." + ) + + try: + if peft_config.is_prompt_learning: + self.peft_config[adapter_name] = peft_config + if hasattr(self.config, "to_dict"): + dict_config = self.config.to_dict() + else: + dict_config = self.config + + peft_config = _prepare_prompt_learning_config(peft_config, dict_config) + self._setup_prompt_encoder(adapter_name) + elif peft_config.is_adaption_prompt: + self.base_model.add_adapter(adapter_name, peft_config) + else: + self.peft_config[adapter_name] = peft_config + self.base_model.inject_adapter(self.base_model.model, adapter_name) + except Exception: # something went wrong, roll back + if adapter_name in self.peft_config: + del self.peft_config[adapter_name] + raise + + self.set_additional_trainable_modules(peft_config, adapter_name) + + def set_additional_trainable_modules(self, peft_config, adapter_name): + if getattr(peft_config, "modules_to_save", None) is not None: + if self.modules_to_save is None: + self.modules_to_save = set(peft_config.modules_to_save) + else: + self.modules_to_save.update(peft_config.modules_to_save) + _set_trainable(self, adapter_name) + + @classmethod + def _split_kwargs(cls, kwargs: dict[str, Any]): + _kwargs_not_in_hf_hub_download_signature = ("use_auth_token",) + hf_hub_download_kwargs = {} + other_kwargs = {} + + for key, value in kwargs.items(): + if key in inspect.signature(hf_hub_download).parameters or key in _kwargs_not_in_hf_hub_download_signature: + hf_hub_download_kwargs[key] = value + else: + other_kwargs[key] = value + + return hf_hub_download_kwargs, other_kwargs + + def load_adapter(self, model_id: str, adapter_name: str, is_trainable: bool = False, **kwargs: Any): + """ + Load a trained adapter into the model. + + The name for the new adapter should be unique. + + The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active + adapter. + + Args: + adapter_name (`str`): + The name of the adapter to be added. + peft_config ([`PeftConfig`]): + The configuration of the adapter to be added. + is_trainable (`bool`, *optional*, defaults to `False`): + Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be + used for inference. + kwargs: (`optional`): + Additional arguments to modify the way the adapter is loaded, e.g. the token for Hugging Face Hub. 
+ """ + from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING + + hf_hub_download_kwargs, kwargs = self._split_kwargs(kwargs) + torch_device = infer_device() + + if adapter_name not in self.peft_config: + # load the config + peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[ + PeftConfig._get_peft_type( + model_id, + **hf_hub_download_kwargs, + ) + ].from_pretrained( + model_id, + **hf_hub_download_kwargs, + ) + if peft_config.is_prompt_learning and is_trainable: + raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") + else: + peft_config.inference_mode = not is_trainable + self.add_adapter(adapter_name, peft_config) + + adapters_weights = load_peft_weights(model_id, device=torch_device, **hf_hub_download_kwargs) + + # load the weights into the model + load_result = set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name) + if ( + (getattr(self, "hf_device_map", None) is not None) + and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0) + and len(self.peft_config) == 1 + ): + device_map = kwargs.get("device_map", "auto") + max_memory = kwargs.get("max_memory", None) + offload_dir = kwargs.get("offload_folder", None) + offload_index = kwargs.get("offload_index", None) + + dispatch_model_kwargs = {} + # Safety checker for previous `accelerate` versions + # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/ + if "offload_index" in inspect.signature(dispatch_model).parameters: + dispatch_model_kwargs["offload_index"] = offload_index + + no_split_module_classes = self._no_split_modules + + if device_map != "sequential": + max_memory = get_balanced_memory( + self, + max_memory=max_memory, + no_split_module_classes=no_split_module_classes, + low_zero=(device_map == "balanced_low_0"), + ) + if isinstance(device_map, str): + device_map = infer_auto_device_map( + self, max_memory=max_memory, no_split_module_classes=no_split_module_classes + ) + dispatch_model( + self, + device_map=device_map, + offload_dir=offload_dir, + **dispatch_model_kwargs, + ) + hook = AlignDevicesHook(io_same_device=True) + if self.peft_config[adapter_name].is_prompt_learning: + remove_hook_from_submodules(self.prompt_encoder) + add_hook_to_module(self.get_base_model(), hook) + + # Set model in evaluation mode to deactivate Dropout modules by default + if not is_trainable: + self.eval() + return load_result + + def set_adapter(self, adapter_name: str) -> None: + """ + Sets the active adapter. + + Only one adapter can be active at a time. + + Additionally, this function will set the specified adapter to trainable (i.e., requires_grad=True). If this is + not desired, use the following code. + + ```py + >>> for name, param in model_peft.named_parameters(): + ... if ...: # some check on name (ex. if 'lora' in name) + ... param.requires_grad = False + ``` + + Args: + adapter_name (`str`): + The name of the adapter to be set as active. The adapter must be loaded first. 
+ """ + if adapter_name not in self.peft_config: + raise ValueError(f"Adapter {adapter_name} not found.") + self.active_adapter = adapter_name + if not self.peft_config[adapter_name].is_prompt_learning: + self.base_model.set_adapter(adapter_name) + _set_adapter(self, adapter_name) + + @property + def base_model_torch_dtype(self): + return getattr(self.base_model, "dtype", None) + + @property + def active_peft_config(self): + return self.peft_config[self.active_adapter] + + def create_or_update_model_card(self, output_dir: str): + """ + Updates or create model card to include information about peft: + 1. Adds `peft` library tag + 2. Adds peft version + 3. Adds base model info + 4. Adds quantization information if it was used + """ + + filename = os.path.join(output_dir, "README.md") + + card = ModelCard.load(filename) if os.path.exists(filename) else ModelCard.from_template(ModelCardData()) + + card.data["library_name"] = "peft" + + model_config = getattr(self, "config", None) + if hasattr(model_config, "to_dict"): + model_config = model_config.to_dict() + if model_config is not None and "_name_or_path" in model_config: + card.data["base_model"] = model_config["_name_or_path"] + + lines = card.text.splitlines() + + quantization_config = None + if hasattr(model_config, "quantization_config"): + quantization_config = self.config.quantization_config.to_dict() + training_config_text = "" + quantization_prefix = "The following `bitsandbytes` quantization config was used during training:" + # Adds quantization information if it was used + if quantization_config is not None: + training_config_text += f"\n{quantization_prefix}\n" + training_config_text += "\n".join([f"- {name}: {value}" for name, value in quantization_config.items()]) + training_config_text += "\n" + + training_procedure_heading = "## Training procedure" + if quantization_prefix not in lines and bool(training_config_text): + if training_procedure_heading in lines: + lines.insert(lines.index(training_procedure_heading) + 2, training_config_text) + else: + lines.append(f"{training_procedure_heading}\n{training_config_text}") + + # Adds peft version + framework_block_heading = "### Framework versions" + if f"- PEFT {__version__}" not in lines: + if framework_block_heading in lines: + lines.insert(lines.index(framework_block_heading) + 2, f"- PEFT {__version__}") + else: + lines.append(f"{framework_block_heading}\n\n- PEFT {__version__}") + + card.text = "\n".join(lines) + card.save(filename) + + +class PeftModelForSequenceClassification(PeftModel): + """ + Peft model for sequence classification tasks. + + Args: + model ([`~transformers.PreTrainedModel`]): Base transformer model. + peft_config ([`PeftConfig`]): Peft config. + + **Attributes**: + - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. + - **cls_layer_name** (`str`) -- The name of the classification layer. + + Example: + + ```py + >>> from transformers import AutoModelForSequenceClassification + >>> from peft import PeftModelForSequenceClassification, get_peft_config + + >>> config = { + ... "peft_type": "PREFIX_TUNING", + ... "task_type": "SEQ_CLS", + ... "inference_mode": False, + ... "num_virtual_tokens": 20, + ... "token_dim": 768, + ... "num_transformer_submodules": 1, + ... "num_attention_heads": 12, + ... "num_layers": 12, + ... "encoder_hidden_size": 768, + ... "prefix_projection": False, + ... "postprocess_past_key_value_function": None, + ... 
} + + >>> peft_config = get_peft_config(config) + >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased") + >>> peft_model = PeftModelForSequenceClassification(model, peft_config) + >>> peft_model.print_trainable_parameters() + trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117 + ``` + """ + + def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: + super().__init__(model, peft_config, adapter_name) + if self.modules_to_save is None: + self.modules_to_save = {"classifier", "score"} + else: + self.modules_to_save.update({"classifier", "score"}) + + for name, _ in self.base_model.named_children(): + if any(module_name in name for module_name in self.modules_to_save): + self.cls_layer_name = name + break + + # to make sure classifier layer is trainable + _set_trainable(self, adapter_name) + + def forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + task_ids=None, + **kwargs, + ): + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + peft_config = self.active_peft_config + if not peft_config.is_prompt_learning: + with self._enable_peft_forward_hooks(**kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + if peft_config.peft_type == PeftType.POLY: + kwargs["task_ids"] = task_ids + return self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + labels=labels, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs, + ) + + batch_size = _get_batch_size(input_ids, inputs_embeds) + if attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) + attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) + if kwargs.get("position_ids", None) is not None: + warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") + kwargs["position_ids"] = None + kwargs.update( + { + "attention_mask": attention_mask, + "labels": labels, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + } + ) + + if peft_config.peft_type == PeftType.PREFIX_TUNING: + return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) + else: + if kwargs.get("token_type_ids", None) is not None: + kwargs["token_type_ids"] = torch.cat( + ( + torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), + kwargs["token_type_ids"], + ), + dim=1, + ).long() + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) + prompts = prompts.to(inputs_embeds.dtype) + inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) + return self.base_model(inputs_embeds=inputs_embeds, **kwargs) + + def _prefix_tuning_forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + **kwargs, + ): + batch_size = _get_batch_size(input_ids, inputs_embeds) + past_key_values = self.get_prompt(batch_size) + fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) + kwargs.update( + { + "input_ids": input_ids, + "attention_mask": attention_mask, + "inputs_embeds": inputs_embeds, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + "past_key_values": past_key_values, + } + ) + if "past_key_values" in fwd_params: + return self.base_model(labels=labels, **kwargs) + else: + transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) + fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) + if "past_key_values" not in fwd_params: + raise ValueError("Model does not support past key values which are required for prefix tuning.") + outputs = transformer_backbone_name(**kwargs) + pooled_output = outputs[1] if len(outputs) > 1 else outputs[0] + if "dropout" in [name for name, _ in list(self.base_model.named_children())]: + pooled_output = self.base_model.dropout(pooled_output) + logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.base_model.num_labels == 1: + self.config.problem_type = "regression" + elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.base_model.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + 
+class PeftModelForCausalLM(PeftModel): + """ + Peft model for causal language modeling. + + Args: + model ([`~transformers.PreTrainedModel`]): Base transformer model. + peft_config ([`PeftConfig`]): Peft config. + + + Example: + + ```py + >>> from transformers import AutoModelForCausalLM + >>> from peft import PeftModelForCausalLM, get_peft_config + + >>> config = { + ... "peft_type": "PREFIX_TUNING", + ... "task_type": "CAUSAL_LM", + ... "inference_mode": False, + ... "num_virtual_tokens": 20, + ... "token_dim": 1280, + ... "num_transformer_submodules": 1, + ... "num_attention_heads": 20, + ... "num_layers": 36, + ... "encoder_hidden_size": 1280, + ... "prefix_projection": False, + ... "postprocess_past_key_value_function": None, + ... } + + >>> peft_config = get_peft_config(config) + >>> model = AutoModelForCausalLM.from_pretrained("gpt2-large") + >>> peft_model = PeftModelForCausalLM(model, peft_config) + >>> peft_model.print_trainable_parameters() + trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544 + ``` + """ + + def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: + super().__init__(model, peft_config, adapter_name) + self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation + + def forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + task_ids=None, + **kwargs, + ): + peft_config = self.active_peft_config + if not peft_config.is_prompt_learning: + if self.base_model.config.model_type == "mpt": + if inputs_embeds is not None: + raise AssertionError("forward in MPTForCausalLM does not support inputs_embeds") + return self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + labels=labels, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs, + ) + + if peft_config.peft_type == PeftType.POLY: + kwargs["task_ids"] = task_ids + + with self._enable_peft_forward_hooks(**kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + return self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + labels=labels, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs, + ) + + batch_size = _get_batch_size(input_ids, inputs_embeds) + if attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) + attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) + + if kwargs.get("position_ids", None) is not None: + warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") + kwargs["position_ids"] = None + if kwargs.get("token_type_ids", None) is not None: + warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") + kwargs["token_type_ids"] = None + kwargs.update( + { + "attention_mask": attention_mask, + "labels": labels, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + } + ) + + if peft_config.peft_type == PeftType.PREFIX_TUNING: + past_key_values = self.get_prompt(batch_size) + return self.base_model( + input_ids=input_ids, inputs_embeds=inputs_embeds, past_key_values=past_key_values, **kwargs + ) + else: + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + # concat prompt labels + if labels is not None: + prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device) + kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1) + prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) + prompts = prompts.to(inputs_embeds.dtype) + inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) + return self.base_model(inputs_embeds=inputs_embeds, **kwargs) + + def generate(self, *args, **kwargs): + peft_config = self.active_peft_config + self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation + if hasattr(self.base_model, "model"): + self.base_model.model.generation_config = self.generation_config + else: + self.base_model.generation_config = self.generation_config + try: + if not peft_config.is_prompt_learning: + with self._enable_peft_forward_hooks(*args, **kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + outputs = self.base_model.generate(*args, **kwargs) + else: + outputs = self.base_model.generate(**kwargs) + except: + self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation + raise + else: + self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation + return outputs + + def prepare_inputs_for_generation(self, *args, task_ids: Optional[torch.Tensor] = None, **kwargs): + peft_config = self.active_peft_config + model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs) + + # https://github.com/huggingface/transformers/pull/26681/ introduced new cache format + # for some architectures which requires a special fix for prompt tuning etc. + # TODO: starting with transformers 4.38, all architectures should support caching. + uses_transformers_4_38 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.38.0") + uses_transformers_4_36 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.36.0") + transformers_new_cache_archs = ["llama", "mistral", "persimmon", "phi"] + uses_cache = uses_transformers_4_38 or ( + uses_transformers_4_36 and self.base_model.config.model_type in transformers_new_cache_archs + ) + + if peft_config.peft_type == PeftType.POLY: + model_kwargs["task_ids"] = task_ids + if peft_config.is_prompt_learning: + if uses_cache and (model_kwargs["past_key_values"] is not None): + # change in the logic of `prepare_inputs_for_generation` makes the below code necessary + # In prompt learning methods, past key values are longer when compared to the `input_ids`. + # As such only consider the last input ids in the autogressive generation phase. 
+ if model_kwargs["past_key_values"][0][0].shape[-2] >= model_kwargs["input_ids"].shape[1]: + model_kwargs["input_ids"] = model_kwargs["input_ids"][:, -1:] + + if model_kwargs.get("attention_mask", None) is not None: + size = model_kwargs["input_ids"].shape[0], peft_config.num_virtual_tokens + prefix_attention_mask = torch.ones(size).to(model_kwargs["input_ids"].device) + model_kwargs["attention_mask"] = torch.cat( + (prefix_attention_mask, model_kwargs["attention_mask"]), dim=1 + ) + + if model_kwargs.get("position_ids", None) is not None: + warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") + model_kwargs["position_ids"] = None + + if kwargs.get("token_type_ids", None) is not None: + warnings.warn( + "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids" + ) + kwargs["token_type_ids"] = None + + if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING: + past_key_values = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0]) + model_kwargs["past_key_values"] = past_key_values + else: + if model_kwargs["past_key_values"] is None: + inputs_embeds = self.word_embeddings(model_kwargs["input_ids"]) + prompts = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0], task_ids=task_ids) + prompts = prompts.to(inputs_embeds.dtype) + model_kwargs["inputs_embeds"] = torch.cat((prompts, inputs_embeds), dim=1) + model_kwargs["input_ids"] = None + + # For transformers>=4.38.0 - for some architectures such as Llama, `cache_position` is + # passed in the forward pass to keep track of the position ids of the cache. We have to + # pop that from `model_kwargs` as `cache_position` is properly created by the model, using the passed + # `inputs_embeds`: https://github.com/huggingface/transformers/blob/593230f0a1150ea9c0477b9d859f25daf73c8c33/src/transformers/models/llama/modeling_llama.py#L956 + _ = model_kwargs.pop("cache_position", None) + + return model_kwargs + + +class PeftModelForSeq2SeqLM(PeftModel): + """ + Peft model for sequence-to-sequence language modeling. + + Args: + model ([`~transformers.PreTrainedModel`]): Base transformer model. + peft_config ([`PeftConfig`]): Peft config. + + + Example: + + ```py + >>> from transformers import AutoModelForSeq2SeqLM + >>> from peft import PeftModelForSeq2SeqLM, get_peft_config + + >>> config = { + ... "peft_type": "LORA", + ... "task_type": "SEQ_2_SEQ_LM", + ... "inference_mode": False, + ... "r": 8, + ... "target_modules": ["q", "v"], + ... "lora_alpha": 32, + ... "lora_dropout": 0.1, + ... "fan_in_fan_out": False, + ... "enable_lora": None, + ... "bias": "none", + ... 
} + + >>> peft_config = get_peft_config(config) + >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") + >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config) + >>> peft_model.print_trainable_parameters() + trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566 + ``` + """ + + def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: + super().__init__(model, peft_config, adapter_name) + self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation + self.base_model_prepare_encoder_decoder_kwargs_for_generation = ( + self.base_model._prepare_encoder_decoder_kwargs_for_generation + ) + + def forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + decoder_input_ids=None, + decoder_attention_mask=None, + decoder_inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + task_ids=None, + **kwargs, + ): + peft_config = self.active_peft_config + if not peft_config.is_prompt_learning: + if peft_config.peft_type == PeftType.POLY: + kwargs["task_ids"] = task_ids + + with self._enable_peft_forward_hooks(**kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + return self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + decoder_inputs_embeds=decoder_inputs_embeds, + labels=labels, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs, + ) + + batch_size = _get_batch_size(input_ids, inputs_embeds) + if decoder_attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( + decoder_attention_mask.device + ) + if peft_config.peft_type not in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]: + decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1) + + if kwargs.get("position_ids", None) is not None: + warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") + kwargs["position_ids"] = None + if kwargs.get("token_type_ids", None) is not None: + warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") + kwargs["token_type_ids"] = None + kwargs.update( + { + "attention_mask": attention_mask, + "decoder_attention_mask": decoder_attention_mask, + "labels": labels, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + } + ) + + if peft_config.peft_type == PeftType.PREFIX_TUNING: + past_key_values = self.get_prompt(batch_size) + return self.base_model( + input_ids=input_ids, + decoder_input_ids=decoder_input_ids, + decoder_inputs_embeds=decoder_inputs_embeds, + past_key_values=past_key_values, + **kwargs, + ) + elif peft_config.peft_type in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]: + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + if attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( + attention_mask.device + ) + kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1) + + prompts = self.get_prompt(batch_size=batch_size) + prompts = prompts.to(inputs_embeds.dtype) + inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) + + return self.base_model( + inputs_embeds=inputs_embeds, + decoder_input_ids=decoder_input_ids, + decoder_inputs_embeds=decoder_inputs_embeds, + **kwargs, + ) + else: + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + if decoder_inputs_embeds is None and decoder_input_ids is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + decoder_inputs_embeds = self.word_embeddings(decoder_input_ids) + + if attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( + attention_mask.device + ) + kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1) + # concat prompt labels + if labels is not None: + if peft_config.num_transformer_submodules == 1: + kwargs["labels"] = labels + elif peft_config.num_transformer_submodules == 2: + prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device) + kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1) + prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) + prompts = prompts.to(inputs_embeds.dtype) + inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) + if peft_config.num_transformer_submodules == 1: + return self.base_model(inputs_embeds=inputs_embeds, **kwargs) + elif peft_config.num_transformer_submodules == 2: + decoder_inputs_embeds = torch.cat( + (prompts[:, peft_config.num_virtual_tokens :], decoder_inputs_embeds), dim=1 + ) + return self.base_model( + inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs + ) + + def generate(self, **kwargs): + peft_config = self.active_peft_config + self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation + self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( + self._prepare_encoder_decoder_kwargs_for_generation + ) + try: + if not peft_config.is_prompt_learning: + with self._enable_peft_forward_hooks(**kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + outputs = self.base_model.generate(**kwargs) + else: + if "input_ids" not in kwargs: + raise ValueError("input_ids must be provided for 
Peft model generation") + if kwargs.get("position_ids", None) is not None: + warnings.warn( + "Position ids are not supported for parameter efficient tuning. Ignoring position ids." + ) + kwargs["position_ids"] = None + if kwargs.get("token_type_ids", None) is not None: + warnings.warn( + "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids." + ) + kwargs["token_type_ids"] = None + + if peft_config.peft_type == PeftType.PREFIX_TUNING: + outputs = self.base_model.generate(**kwargs) + elif peft_config.peft_type in [ + PeftType.PROMPT_TUNING, + PeftType.P_TUNING, + PeftType.MULTITASK_PROMPT_TUNING, + ]: + kwargs = deepcopy(kwargs) + + if "encoder_outputs" in kwargs: + del kwargs["encoder_outputs"] + warnings.warn( + "`encoder_outputs` should not be passed to `generate` when using prompt tuning. Ignoring it." + ) + + input_ids = kwargs.pop("input_ids") + inputs_embeds = self.word_embeddings(input_ids) + batch_size = inputs_embeds.shape[0] + prompts = self.get_prompt(batch_size=batch_size, task_ids=kwargs.pop("task_ids", None)) + prompts = prompts.to(inputs_embeds.dtype) + + inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) + kwargs["inputs_embeds"] = inputs_embeds + + if "attention_mask" in kwargs: + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( + kwargs["attention_mask"].device + ) + kwargs["attention_mask"] = torch.cat((prefix_attention_mask, kwargs["attention_mask"]), dim=1) + + return self.base_model.generate(**kwargs) + else: + raise NotImplementedError + except: + self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation + self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( + self.base_model_prepare_encoder_decoder_kwargs_for_generation + ) + raise + else: + self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation + self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( + self.base_model_prepare_encoder_decoder_kwargs_for_generation + ) + return outputs + + def prepare_inputs_for_generation(self, *args, task_ids: torch.Tensor = None, **kwargs): + peft_config = self.active_peft_config + model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs) + if peft_config.peft_type == PeftType.POLY: + model_kwargs["task_ids"] = task_ids + if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING: + batch_size = model_kwargs["decoder_input_ids"].shape[0] + past_key_values = self.get_prompt(batch_size) + model_kwargs["past_key_values"] = past_key_values + + return model_kwargs + + +class PeftModelForTokenClassification(PeftModel): + """ + Peft model for token classification tasks. + + Args: + model ([`~transformers.PreTrainedModel`]): Base transformer model. + peft_config ([`PeftConfig`]): Peft config. + + **Attributes**: + - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. + - **cls_layer_name** (`str`) -- The name of the classification layer. + + Example: + + ```py + >>> from transformers import AutoModelForTokenClassification + >>> from peft import PeftModelForTokenClassification, get_peft_config + + >>> config = { + ... "peft_type": "PREFIX_TUNING", + ... "task_type": "TOKEN_CLS", + ... "inference_mode": False, + ... "num_virtual_tokens": 20, + ... "token_dim": 768, + ... "num_transformer_submodules": 1, + ... "num_attention_heads": 12, + ... "num_layers": 12, + ...
"encoder_hidden_size": 768, + ... "prefix_projection": False, + ... "postprocess_past_key_value_function": None, + ... } + + >>> peft_config = get_peft_config(config) + >>> model = AutoModelForTokenClassification.from_pretrained("bert-base-cased") + >>> peft_model = PeftModelForTokenClassification(model, peft_config) + >>> peft_model.print_trainable_parameters() + trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117 + ``` + """ + + def __init__(self, model: torch.nn.Module, peft_config: PeftConfig = None, adapter_name: str = "default") -> None: + super().__init__(model, peft_config, adapter_name) + if self.modules_to_save is None: + self.modules_to_save = {"classifier", "score"} + else: + self.modules_to_save.update({"classifier", "score"}) + + for name, _ in self.base_model.named_children(): + if any(module_name in name for module_name in self.modules_to_save): + self.cls_layer_name = name + break + + # to make sure classifier layer is trainable + _set_trainable(self, adapter_name) + + def forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + task_ids=None, + **kwargs, + ): + peft_config = self.active_peft_config + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if not peft_config.is_prompt_learning: + with self._enable_peft_forward_hooks(**kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + if peft_config.peft_type == PeftType.POLY: + kwargs["task_ids"] = task_ids + return self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + labels=labels, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs, + ) + + batch_size = _get_batch_size(input_ids, inputs_embeds) + if attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) + attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) + if kwargs.get("position_ids", None) is not None: + warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") + kwargs["position_ids"] = None + kwargs.update( + { + "attention_mask": attention_mask, + "labels": labels, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + } + ) + + if peft_config.peft_type == PeftType.PREFIX_TUNING: + return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) + else: + if kwargs.get("token_type_ids", None) is not None: + kwargs["token_type_ids"] = torch.cat( + ( + torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), + kwargs["token_type_ids"], + ), + dim=1, + ).long() + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) + prompts = prompts.to(inputs_embeds.dtype) + inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) + return self.base_model(inputs_embeds=inputs_embeds, **kwargs) + + def _prefix_tuning_forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + **kwargs, + ): + batch_size = _get_batch_size(input_ids, inputs_embeds) + past_key_values = self.get_prompt(batch_size) + fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) + kwargs.update( + { + "input_ids": input_ids, + "attention_mask": attention_mask, + "inputs_embeds": inputs_embeds, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + "past_key_values": past_key_values, + } + ) + if "past_key_values" in fwd_params: + return self.base_model(labels=labels, **kwargs) + else: + transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) + fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) + if "past_key_values" not in fwd_params: + raise ValueError("Model does not support past key values which are required for prefix tuning.") + outputs = transformer_backbone_name(**kwargs) + sequence_output = outputs[0] + if "dropout" in [name for name, _ in list(self.base_model.named_children())]: + sequence_output = self.base_model.dropout(sequence_output) + logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class PeftModelForQuestionAnswering(PeftModel): + """ + Peft model for extractive question answering. + + Args: + model ([`~transformers.PreTrainedModel`]): Base transformer model. + peft_config ([`PeftConfig`]): Peft config. + + **Attributes**: + - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. + - **cls_layer_name** (`str`) -- The name of the classification layer. + + Example: + + ```py + >>> from transformers import AutoModelForQuestionAnswering + >>> from peft import PeftModelForQuestionAnswering, get_peft_config + + >>> config = { + ... "peft_type": "LORA", + ... "task_type": "QUESTION_ANS", + ... "inference_mode": False, + ... "r": 16, + ... "target_modules": ["query", "value"], + ... "lora_alpha": 32, + ... 
"lora_dropout": 0.05, + ... "fan_in_fan_out": False, + ... "bias": "none", + ... } + + >>> peft_config = get_peft_config(config) + >>> model = AutoModelForQuestionAnswering.from_pretrained("bert-base-cased") + >>> peft_model = PeftModelForQuestionAnswering(model, peft_config) + >>> peft_model.print_trainable_parameters() + trainable params: 592900 || all params: 108312580 || trainable%: 0.5473971721475013 + ``` + """ + + def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: + super().__init__(model, peft_config, adapter_name) + if self.modules_to_save is None: + self.modules_to_save = {"qa_outputs"} + else: + self.modules_to_save.update({"qa_outputs"}) + + for name, _ in self.base_model.named_children(): + if any(module_name in name for module_name in self.modules_to_save): + self.cls_layer_name = name + break + + # to make sure classifier layer is trainable + _set_trainable(self, adapter_name) + + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + inputs_embeds=None, + start_positions=None, + end_positions=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + task_ids=None, + **kwargs, + ): + peft_config = self.active_peft_config + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if not peft_config.is_prompt_learning: + if peft_config.peft_type == PeftType.POLY: + kwargs["task_ids"] = task_ids + + with self._enable_peft_forward_hooks(**kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + return self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + start_positions=start_positions, + end_positions=end_positions, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs, + ) + + batch_size = _get_batch_size(input_ids, inputs_embeds) + if attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) + attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) + if kwargs.get("position_ids", None) is not None: + warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") + kwargs["position_ids"] = None + kwargs.update( + { + "attention_mask": attention_mask, + "start_positions": start_positions, + "end_positions": end_positions, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + } + ) + + if peft_config.peft_type == PeftType.PREFIX_TUNING: + return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) + else: + if kwargs.get("token_type_ids", None) is not None: + kwargs["token_type_ids"] = torch.cat( + ( + torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), + kwargs["token_type_ids"], + ), + dim=1, + ).long() + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + prompts = self.get_prompt(batch_size=batch_size) + prompts = prompts.to(inputs_embeds.dtype) + inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) + return self.base_model(inputs_embeds=inputs_embeds, **kwargs) + + def _prefix_tuning_forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + start_positions=None, + end_positions=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + **kwargs, + ): + batch_size = _get_batch_size(input_ids, inputs_embeds) + past_key_values = self.get_prompt(batch_size) + fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) + kwargs.update( + { + "input_ids": input_ids, + "attention_mask": attention_mask, + "inputs_embeds": inputs_embeds, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + "past_key_values": past_key_values, + } + ) + if "past_key_values" in fwd_params: + return self.base_model(start_positions=start_positions, end_positions=end_positions, **kwargs) + else: + transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) + fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) + if "past_key_values" not in fwd_params: + raise ValueError("Model does not support past key values which are required for prefix tuning.") + outputs = transformer_backbone_name(**kwargs) + sequence_output = outputs[0] + if "dropout" in [name for name, _ in list(self.base_model.named_children())]: + sequence_output = self.base_model.dropout(sequence_output) + logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return 
QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class PeftModelForFeatureExtraction(PeftModel): + """ + Peft model for extracting features/embeddings from transformer models + + Args: + model ([`~transformers.PreTrainedModel`]): Base transformer model. + peft_config ([`PeftConfig`]): Peft config. + + **Attributes**: + - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. + + Example: + + ```py + >>> from transformers import AutoModel + >>> from peft import PeftModelForFeatureExtraction, get_peft_config + + >>> config = { + ... "peft_type": "LORA", + ... "task_type": "FEATURE_EXTRACTION", + ... "inference_mode": False, + ... "r": 16, + ... "target_modules": ["query", "value"], + ... "lora_alpha": 32, + ... "lora_dropout": 0.05, + ... "fan_in_fan_out": False, + ... "bias": "none", + ... } + >>> peft_config = get_peft_config(config) + >>> model = AutoModel.from_pretrained("bert-base-cased") + >>> peft_model = PeftModelForFeatureExtraction(model, peft_config) + >>> peft_model.print_trainable_parameters() + ``` + """ + + def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default"): + super().__init__(model, peft_config, adapter_name) + + def forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + task_ids=None, + **kwargs, + ): + peft_config = self.active_peft_config + if not peft_config.is_prompt_learning: + if peft_config.peft_type == PeftType.POLY: + kwargs["task_ids"] = task_ids + + with self._enable_peft_forward_hooks(**kwargs): + kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} + return self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs, + ) + + batch_size = _get_batch_size(input_ids, inputs_embeds) + if attention_mask is not None: + # concat prompt attention mask + prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) + attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) + + if kwargs.get("position_ids", None) is not None: + warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") + kwargs["position_ids"] = None + if kwargs.get("token_type_ids", None) is not None: + warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") + kwargs["token_type_ids"] = None + kwargs.update( + { + "attention_mask": attention_mask, + "output_attentions": output_attentions, + "output_hidden_states": output_hidden_states, + "return_dict": return_dict, + } + ) + + if peft_config.peft_type == PeftType.PREFIX_TUNING: + past_key_values = self.get_prompt(batch_size) + return self.base_model(input_ids=input_ids, past_key_values=past_key_values, **kwargs) + else: + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + prompts = self.get_prompt(batch_size=batch_size) + prompts = prompts.to(inputs_embeds.dtype) + inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) + return self.base_model(inputs_embeds=inputs_embeds, **kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/peft/py.typed b/env-llmeval/lib/python3.10/site-packages/peft/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/__init__.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b47baa668177ec80b3ec142f1555c5b90f13dcca --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/__init__.py @@ -0,0 +1,32 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all + +# coding=utf-8 +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .adaption_prompt import AdaptionPromptConfig, AdaptionPromptModel +from .lora import LoraConfig, LoraModel, LoftQConfig +from .loha import LoHaConfig, LoHaModel +from .lokr import LoKrConfig, LoKrModel +from .ia3 import IA3Config, IA3Model +from .adalora import AdaLoraConfig, AdaLoraModel +from .p_tuning import PromptEncoder, PromptEncoderConfig, PromptEncoderReparameterizationType +from .prefix_tuning import PrefixEncoder, PrefixTuningConfig +from .prompt_tuning import PromptEmbedding, PromptTuningConfig, PromptTuningInit +from .multitask_prompt_tuning import MultitaskPromptEmbedding, MultitaskPromptTuningConfig, MultitaskPromptTuningInit +from .oft import OFTConfig, OFTModel +from .mixed import MixedModel +from .poly import PolyConfig, PolyModel diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__init__.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4bdb8a540bed454fd95633265e8cdceb3e792e3b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__init__.py @@ -0,0 +1,37 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from peft.import_utils import is_bnb_4bit_available, is_bnb_available + +from .config import AdaLoraConfig +from .gptq import SVDQuantLinear +from .layer import AdaLoraLayer, RankAllocator, SVDLinear +from .model import AdaLoraModel + + +__all__ = ["AdaLoraConfig", "AdaLoraLayer", "AdaLoraModel", "SVDLinear", "RankAllocator", "SVDQuantLinear"] + + +def __getattr__(name): + if (name == "SVDLinear8bitLt") and is_bnb_available(): + from .bnb import SVDLinear8bitLt + + return SVDLinear8bitLt + + if (name == "SVDLinear4bit") and is_bnb_4bit_available(): + from .bnb import SVDLinear4bit + + return SVDLinear4bit + + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31c16c4b2437462e388a6dcebd9438fac9841b1c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/bnb.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/bnb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d23af483ec8e84b72159bc5ac5f3b5ffe469bec Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/bnb.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/config.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..339f49d81c9b2cdfa9cd536c2d8b9afba4e89583 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/config.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/gptq.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/gptq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..290f04310c0aace69554b832e5779f777cfd8a61 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/gptq.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/layer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e0fe7f904196d95efbf6107efa634dd38881706 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/layer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/model.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/model.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..4f3e911583451e7b116b1151de9c58037716192f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/__pycache__/model.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/bnb.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/bnb.py new file mode 100644 index 0000000000000000000000000000000000000000..b8c32a815cef22b938b840a1b6013592a338936b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/bnb.py @@ -0,0 +1,145 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any + +import torch + +from peft.import_utils import is_bnb_4bit_available, is_bnb_available + +from .layer import AdaLoraLayer + + +if is_bnb_available(): + + class SVDLinear8bitLt(torch.nn.Module, AdaLoraLayer): + # Low-rank matrix for SVD-based adaptation + def __init__( + self, + base_layer: torch.nn.Module, + adapter_name: str, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + init_lora_weights: bool = True, + **kwargs, + ) -> None: + super().__init__() + AdaLoraLayer.__init__(self, base_layer) + # Freezing the pre-trained weight matrix + self.get_base_layer().weight.requires_grad = False + + self._active_adapter = adapter_name + self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # note: no check for self.merged because merging is not supported (yet) + result = self.base_layer(x) + + if self.disable_adapters: + return result + + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + requires_conversion = not torch.is_autocast_enabled() + if requires_conversion: + expected_dtype = result.dtype + if x.dtype != torch.float32: + x = x.float() + + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + lora_E = self.lora_E[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + ranknum = self.ranknum[active_adapter] + 1e-5 + + output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T + if requires_conversion: + output = output.to(expected_dtype) + output = output * scaling / ranknum + # inplace operation on view is forbidden for MatMul8bitLtBackward, so avoid it + result = result + output + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "adalora." 
+ rep + + +if is_bnb_4bit_available(): + + class SVDLinear4bit(torch.nn.Module, AdaLoraLayer): + # Low-rank matrix for SVD-based adaptation + def __init__( + self, + base_layer: torch.nn.Module, + adapter_name: str, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + init_lora_weights: bool = True, + **kwargs, + ) -> None: + super().__init__() + AdaLoraLayer.__init__(self, base_layer) + # Freezing the pre-trained weight matrix + self.get_base_layer().weight.requires_grad = False + + self._active_adapter = adapter_name + self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) + + def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + # note: no check for self.merged because merging is not supported (yet) + result = self.base_layer(x, *args, **kwargs) + + if self.disable_adapters: + return result + + # As per Tim Dettmers, for 4bit, we need to defensively clone here. + # The reason is that in some cases, an error can occur that backprop + # does not work on a manipulated view. This issue may be solved with + # newer PyTorch versions but this would need extensive testing to be + # sure. + result = result.clone() + + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + lora_E = self.lora_E[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + ranknum = self.ranknum[active_adapter] + 1e-5 + + requires_conversion = not torch.is_autocast_enabled() + if requires_conversion: + expected_dtype = result.dtype + compute_dtype = lora_A.dtype + if x.dtype != compute_dtype: + x = x.to(compute_dtype) + + output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T + if requires_conversion: + output = output.to(expected_dtype) + output = output * scaling / ranknum + result += output + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "adalora." + rep diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/config.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/config.py new file mode 100644 index 0000000000000000000000000000000000000000..93905ff28b3fb868e71d4a266d5200c46fec7248 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/config.py @@ -0,0 +1,52 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field +from typing import Optional + +from peft.tuners.lora import LoraConfig +from peft.utils import PeftType + + +@dataclass +class AdaLoraConfig(LoraConfig): + """ + This is the configuration class to store the configuration of a [`~peft.AdaLora`]. + + Args: + target_r (`int`): The target average rank of incremental matrix. + init_r (`int`): The initial rank for each incremental matrix. + tinit (`int`): The steps of initial fine-tuning warmup. + tfinal (`int`): The step of final fine-tuning. 
+ deltaT (`int`): The time interval between two budget allocations. + beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing. + beta2 (`float`): The hyperparameter of EMA for uncertainty quantification. + orth_reg_weight (`float`): The coefficient of orthogonal regularization. + total_step (`int`): The total training steps that should be specified before training. + rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator. + """ + + target_r: int = field(default=8, metadata={"help": "Target Lora matrix dimension."}) + init_r: int = field(default=12, metadata={"help": "Initial Lora matrix dimension."}) + tinit: int = field(default=0, metadata={"help": "The steps of initial warmup."}) + tfinal: int = field(default=0, metadata={"help": "The steps of final warmup."}) + deltaT: int = field(default=1, metadata={"help": "Step interval of rank allocation."}) + beta1: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) + beta2: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) + orth_reg_weight: float = field(default=0.5, metadata={"help": "The orthogonal regularization coefficient."}) + total_step: Optional[int] = field(default=None, metadata={"help": "The total training steps."}) + rank_pattern: Optional[dict] = field(default=None, metadata={"help": "The saved rank pattern."}) + + def __post_init__(self): + self.peft_type = PeftType.ADALORA diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/gptq.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/gptq.py new file mode 100644 index 0000000000000000000000000000000000000000..910377c5db5908727ed4753fd15b24e68821ce00 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/gptq.py @@ -0,0 +1,72 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
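The `AdaLoraConfig` fields documented above map directly onto the AdaLoRA rank-allocation schedule. The following is a minimal sketch of wiring such a config into a model; the module names, step counts, and hyperparameter values are illustrative placeholders, not recommendations, and it assumes the public `peft` entry points `AdaLoraConfig` and `get_peft_model`.

```py
# Hypothetical usage sketch; values are placeholders chosen for illustration only.
from transformers import AutoModelForSeq2SeqLM
from peft import AdaLoraConfig, get_peft_model

config = AdaLoraConfig(
    task_type="SEQ_2_SEQ_LM",
    init_r=12,            # starting rank of every incremental matrix
    target_r=8,           # average rank after budget reallocation
    tinit=200,            # warmup steps before any rank is pruned
    tfinal=500,           # final steps during which the budget stays fixed
    deltaT=10,            # steps between two budget reallocations
    total_step=3000,      # should match the planned number of training steps
    target_modules=["q", "v"],
    lora_alpha=32,
    lora_dropout=0.01,
)

model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
model = get_peft_model(model, config)
model.print_trainable_parameters()
```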
+import torch + +from .layer import AdaLoraLayer + + +class SVDQuantLinear(torch.nn.Module, AdaLoraLayer): + def __init__( + self, + base_layer, + adapter_name, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + init_lora_weights: bool = True, + **kwargs, + ) -> None: + super().__init__() + AdaLoraLayer.__init__(self, base_layer) + + # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter + # for backwards compatibility + self.quant_linear_module = base_layer + self._active_adapter = adapter_name + self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + result = self.quant_linear_module(x) + + if self.disable_adapters: + return result + + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + lora_E = self.lora_E[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + ranknum = self.ranknum[active_adapter] + 1e-5 + + requires_conversion = not torch.is_autocast_enabled() + if requires_conversion: + expected_dtype = result.dtype + if x.dtype != torch.float32: + x = x.float() + + output = (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum + # TODO: here, the dtype conversion is applied on the *whole expression*, + # not the intermediate result, unlike for SVDLinear8bitLT and + # SVDLinear4bit, is that correct? + if requires_conversion: + output = output.to(expected_dtype) + result += output + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "adalora." + rep diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/layer.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/layer.py new file mode 100644 index 0000000000000000000000000000000000000000..0fb77aaadf7ac3a89d8d52538117bc28249d07a9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/layer.py @@ -0,0 +1,347 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
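Independent of the quantization backend, each AdaLora layer applies the same SVD-parameterized update on top of the frozen base layer: `dropout(x) @ (lora_A * lora_E).T @ lora_B.T`, scaled by `lora_alpha / ranknum`. Below is a standalone sketch of just that delta with made-up shapes, to make the tensor algebra concrete (dropout omitted).

```py
import torch

# Toy dimensions: in_features=16, out_features=32, rank r=4
x = torch.randn(2, 16)        # a batch of two inputs
lora_A = torch.randn(4, 16)   # right singular vectors, shape (r, in_features)
lora_B = torch.randn(32, 4)   # left singular vectors, shape (out_features, r)
lora_E = torch.randn(4, 1)    # singular values, shape (r, 1), broadcast over lora_A
scaling = 32.0                # lora_alpha
ranknum = 4.0 + 1e-5          # current rank, with the same epsilon as the layers above

# Same expression as the SVDQuantLinear/SVDLinear forward passes, minus dropout
delta = (x @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum
print(delta.shape)            # torch.Size([2, 32]), added to the base layer output
```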
+ +import warnings +from typing import Any, List, Optional + +import torch +from torch import nn + +from peft.tuners.lora import LoraLayer +from peft.tuners.tuners_utils import check_adapters_to_merge +from peft.utils import transpose + + +class AdaLoraLayer(LoraLayer): + # List all names of layers that may contain adapter weights + # Note: ranknum doesn't need to be included as it is not an nn.Module + adapter_layer_names = ("lora_A", "lora_B", "lora_E", "lora_embedding_A", "lora_embedding_B") + # other_param_names is defined in LoraLayer + + def __init__(self, base_layer: nn.Module) -> None: + super().__init__(base_layer) + self.lora_E = nn.ParameterDict({}) + self.lora_A = nn.ParameterDict({}) + self.lora_B = nn.ParameterDict({}) + self.ranknum = nn.ParameterDict({}) + + def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights): + if r < 0: + # note: r == 0 is allowed for AdaLora, see #1539 + raise ValueError(f"`r` should be a positive integer or 0, but the value passed is {r}") + + self.r[adapter_name] = r + self.lora_alpha[adapter_name] = lora_alpha + if lora_dropout > 0.0: + lora_dropout_layer = nn.Dropout(p=lora_dropout) + else: + lora_dropout_layer = nn.Identity() + + self.lora_dropout[adapter_name] = lora_dropout_layer + # Actual trainable parameters + # Right singular vectors + self.lora_A[adapter_name] = nn.Parameter(torch.randn(r, self.in_features)) + # Singular values + self.lora_E[adapter_name] = nn.Parameter(torch.randn(r, 1)) + # Left singular vectors + self.lora_B[adapter_name] = nn.Parameter(torch.randn(self.out_features, r)) + # The current rank + self.ranknum[adapter_name] = nn.Parameter(torch.randn(1), requires_grad=False) + self.ranknum[adapter_name].data.fill_(float(r)) + self.ranknum[adapter_name].requires_grad = False + self.scaling[adapter_name] = lora_alpha if lora_alpha > 0 else float(r) + if init_lora_weights: + self.reset_lora_parameters(adapter_name) + + if hasattr(self.get_base_layer(), "qweight"): + # QuantLinear + self.to(self.get_base_layer().qweight.device) + else: + self.to(self.get_base_layer().weight.device) + self.set_adapter(self.active_adapters) + + def reset_lora_parameters(self, adapter_name): + if adapter_name in self.lora_A.keys(): + nn.init.normal_(self.lora_E[adapter_name], mean=0.0, std=0.02) + nn.init.normal_(self.lora_A[adapter_name], mean=0.0, std=0.02) + nn.init.normal_(self.lora_B[adapter_name], mean=0.0, std=0.02) + + +class SVDLinear(nn.Module, AdaLoraLayer): + # SVD-based adaptation by a dense layer + def __init__( + self, + base_layer: nn.Module, + adapter_name: str, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + fan_in_fan_out: bool = False, + init_lora_weights: bool = True, + **kwargs, + ) -> None: + super().__init__() + AdaLoraLayer.__init__(self, base_layer) + # Freezing the pre-trained weight matrix + self.get_base_layer().weight.requires_grad = False + + self.fan_in_fan_out = fan_in_fan_out + self._active_adapter = adapter_name + self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) + + def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: + """ + Merge the active adapter weights into the base weights + + Args: + safe_merge (`bool`, *optional*): + If True, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. 
+ adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. + """ + adapter_names = check_adapters_to_merge(self, adapter_names) + if not adapter_names: + # no adapter to merge + return + + for active_adapter in adapter_names: + base_layer = self.get_base_layer() + if active_adapter in self.lora_A.keys(): + if safe_merge: + # Note that safe_merge will be slower than the normal merge + # because of the copy operation. + orig_weights = base_layer.weight.data.clone() + orig_weights += self.get_delta_weight(active_adapter) + + if not torch.isfinite(orig_weights).all(): + raise ValueError( + f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" + ) + + base_layer.weight.data = orig_weights + else: + base_layer.weight.data += self.get_delta_weight(active_adapter) + self.merged_adapters.append(active_adapter) + + def unmerge(self) -> None: + """ + This method unmerges all merged adapter layers from the base weights. + """ + if not self.merged: + warnings.warn("Already unmerged. Nothing to do.") + return + while len(self.merged_adapters) > 0: + active_adapter = self.merged_adapters.pop() + if active_adapter in self.lora_A.keys(): + self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) + + def get_delta_weight(self, adapter) -> torch.Tensor: + return ( + transpose(self.lora_B[adapter] @ (self.lora_A[adapter] * self.lora_E[adapter]), self.fan_in_fan_out) + * self.scaling[adapter] + / (self.ranknum[adapter] + 1e-5) + ) + + def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + if self.disable_adapters: + if self.merged: + self.unmerge() + result = self.base_layer(x, *args, **kwargs) + elif self.merged: + result = self.base_layer(x, *args, **kwargs) + else: + result = self.base_layer(x, *args, **kwargs) + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + lora_E = self.lora_E[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + ranknum = self.ranknum[active_adapter] + 1e-5 + + x = x.to(lora_A.dtype) + result += (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum + + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "adalora." + rep + + +class RankAllocator: + """ + The RankAllocator for AdaLoraModel. Paper: https://openreview.net/pdf?id=lq62uWRJjiY + + Args: + config ([`AdaLoraConfig`]): The configuration of the AdaLora model. + model: the model that we apply AdaLoRA to. 
+ + """ + + def __init__(self, model, peft_config, adapter_name): + self.peft_config = peft_config + self.adapter_name = adapter_name + self.beta1 = peft_config.beta1 + self.beta2 = peft_config.beta2 + assert self.beta1 > 0 and self.beta1 < 1 + assert self.beta2 > 0 and self.beta2 < 1 + + self.reset_ipt() + self._set_budget_scheduler(model) + + def set_total_step(self, total_step): + self.peft_config.total_step = total_step + + def reset_ipt(self): + self.ipt = {} + self.exp_avg_ipt = {} + self.exp_avg_unc = {} + + def _set_budget_scheduler(self, model): + self.init_bgt = 0 + self.name_set = set() + for n, p in model.named_parameters(): + if f"lora_A.{self.adapter_name}" in n: + self.init_bgt += p.size(0) + self.name_set.add(n.replace("lora_A", "%s")) + self.name_set = sorted(self.name_set) + # The total final rank budget + self.target_bgt = self.peft_config.target_r * len(self.name_set) + + def budget_schedule(self, step: int): + tinit = self.peft_config.tinit + tfinal = self.peft_config.tfinal + total_step = self.peft_config.total_step + # Initial warmup + if step <= tinit: + budget = self.init_bgt + mask_ind = False + # Final fine-tuning + elif step > total_step - tfinal: + budget = self.target_bgt + mask_ind = True + else: + # Budget decreasing with a cubic scheduler + mul_coeff = 1 - (step - tinit) / (total_step - tfinal - tinit) + budget = int((self.init_bgt - self.target_bgt) * (mul_coeff**3) + self.target_bgt) + mask_ind = True if step % self.peft_config.deltaT == 0 else False + return budget, mask_ind + + def update_ipt(self, model): + # Update the sensitivity and uncertainty for every weight + for n, p in model.named_parameters(): + if "lora_" in n and self.adapter_name in n: + if n not in self.ipt: + self.ipt[n] = torch.zeros_like(p) + self.exp_avg_ipt[n] = torch.zeros_like(p) + self.exp_avg_unc[n] = torch.zeros_like(p) + with torch.no_grad(): + self.ipt[n] = (p * p.grad).abs().detach() + # Sensitivity smoothing + self.exp_avg_ipt[n] = self.beta1 * self.exp_avg_ipt[n] + (1 - self.beta1) * self.ipt[n] + # Uncertainty quantification + self.exp_avg_unc[n] = ( + self.beta2 * self.exp_avg_unc[n] + (1 - self.beta2) * (self.ipt[n] - self.exp_avg_ipt[n]).abs() + ) + + def _element_score(self, n): + return self.exp_avg_ipt[n] * self.exp_avg_unc[n] + + def _combine_ipt(self, ipt_E, ipt_AB): + ipt_AB = ipt_AB.sum(dim=1, keepdim=False) + sum_ipt = ipt_E.view(-1) + ipt_AB.view(-1) + return sum_ipt + + def mask_to_budget(self, model, budget): + value_ipt = {} + vector_ipt = {} + triplet_ipt = {} + # Get the importance score for A, E, B + for n, p in model.named_parameters(): + if f"lora_A.{self.adapter_name}" in n: + entry_ipt = self._element_score(n) + comb_ipt = torch.mean(entry_ipt, dim=1, keepdim=True) + name_m = n.replace("lora_A", "%s") + if name_m not in vector_ipt: + vector_ipt[name_m] = [comb_ipt] + else: + vector_ipt[name_m].append(comb_ipt) + if f"lora_B.{self.adapter_name}" in n: + entry_ipt = self._element_score(n) + comb_ipt = torch.mean(entry_ipt, dim=0, keepdim=False).view(-1, 1) + name_m = n.replace("lora_B", "%s") + if name_m not in vector_ipt: + vector_ipt[name_m] = [comb_ipt] + else: + vector_ipt[name_m].append(comb_ipt) + if f"lora_E.{self.adapter_name}" in n: + entry_ipt = self._element_score(n) + name_m = n.replace("lora_E", "%s") + value_ipt[name_m] = entry_ipt + + all_score = [] + # Calculate the score for each triplet + for name_m in vector_ipt: + ipt_E = value_ipt[name_m] + ipt_AB = torch.cat(vector_ipt[name_m], dim=1) + sum_ipt = self._combine_ipt(ipt_E, ipt_AB) + 
name_E = name_m % "lora_E" + triplet_ipt[name_E] = sum_ipt.view(-1, 1) + all_score.append(sum_ipt.view(-1)) + + # Get the threshold by ranking ipt + mask_threshold = torch.kthvalue( + torch.cat(all_score), + k=self.init_bgt - budget, + )[0].item() + + rank_pattern = {} + # Mask the unimportant triplets + with torch.no_grad(): + for n, p in model.named_parameters(): + if f"lora_E.{self.adapter_name}" in n: + p.masked_fill_(triplet_ipt[n] <= mask_threshold, 0.0) + rank_pattern[n] = (~(triplet_ipt[n] <= mask_threshold)).view(-1).tolist() + return rank_pattern + + def update_and_allocate(self, model, global_step, force_mask=False): + # # Update the importance score and allocate the budget + if global_step < self.peft_config.total_step - self.peft_config.tfinal: + self.update_ipt(model) + budget, mask_ind = self.budget_schedule(global_step) + # Allocate the budget according to importance scores + if mask_ind or force_mask: + rank_pattern = self.mask_to_budget(model, budget) + else: + rank_pattern = None + return budget, rank_pattern + + def mask_using_rank_pattern(self, model, rank_pattern): + # Mask the unimportant triplets + is_adapter_name_truncated = False + if self.adapter_name not in next(iter(rank_pattern.keys())): + is_adapter_name_truncated = True + + with torch.no_grad(): + for n, p in model.named_parameters(): + if f"lora_E.{self.adapter_name}" in n: + key = n if not is_adapter_name_truncated else n.replace(f".{self.adapter_name}", "") + mask = torch.Tensor(rank_pattern[key]).unsqueeze(-1).to(p.device) + p.masked_fill_(~mask.bool(), 0.0) diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/model.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/model.py new file mode 100644 index 0000000000000000000000000000000000000000..bf334b39ce6caf74c05e81b180fea31363a2a62e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/adalora/model.py @@ -0,0 +1,346 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings + +import torch +from transformers.pytorch_utils import Conv1D + +from peft.import_utils import is_bnb_4bit_available, is_bnb_available +from peft.tuners.lora import LoraConfig, LoraModel +from peft.tuners.tuners_utils import BaseTunerLayer +from peft.utils import ( + TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, + _freeze_adapter, + _get_submodules, + get_auto_gptq_quant_linear, + get_quantization_config, +) + +from .gptq import SVDQuantLinear +from .layer import AdaLoraLayer, RankAllocator, SVDLinear + + +class AdaLoraModel(LoraModel): + """ + Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper: + https://openreview.net/forum?id=lq62uWRJjiY + + Args: + model ([`transformers.PreTrainedModel`]): The model to be adapted. + config ([`AdaLoraConfig`]): The configuration of the AdaLora model. + adapter_name (`str`): The name of the adapter, defaults to `"default"`. + + Returns: + `torch.nn.Module`: The AdaLora model. 
+ + Example:: + + >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import AdaLoraModel, AdaLoraConfig + >>> config = AdaLoraConfig( + peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"], + lora_dropout=0.01, + ) + >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> model = AdaLoraModel(model, config, "default") + + **Attributes**: + - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted. + - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model. + """ + + # Note: don't redefine prefix here, it should be inherited from LoraModel + + def __init__(self, model, config, adapter_name): + super().__init__(model, config, adapter_name) + + traininable_mode_counter = 0 + for config in self.peft_config.values(): + if not config.inference_mode: + traininable_mode_counter += 1 + + if traininable_mode_counter > 1: + raise ValueError( + "AdaLoraModel supports only 1 trainable adapter. " + "When using multiple adapters, set inference_mode to True for all adapters except the one you want to train." + ) + + if self.peft_config[adapter_name].inference_mode: + _freeze_adapter(self.model, adapter_name) + else: + self.trainable_adapter_name = adapter_name + self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name) + + def _check_new_adapter_config(self, config: LoraConfig) -> None: + """ + A helper method to check the config when a new adapter is being added. + + Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. + + """ + super()._check_new_adapter_config(config) + + traininable_mode_counter = 0 + for config_ in self.peft_config.values(): + if not config_.inference_mode: + traininable_mode_counter += 1 + + if traininable_mode_counter > 1: + raise ValueError( + f"{self.__class__.__name__} supports only 1 trainable adapter. " + "When using multiple adapters, set inference_mode to True for all adapters except the one " + "you want to train." + ) + + def _create_and_replace( + self, + lora_config, + adapter_name, + target, + target_name, + parent, + current_key, + ): + kwargs = { + "r": lora_config.init_r, + "lora_alpha": lora_config.lora_alpha, + "lora_dropout": lora_config.lora_dropout, + "fan_in_fan_out": lora_config.fan_in_fan_out, + "init_lora_weights": lora_config.init_lora_weights, + "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False), + "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False), + } + if (kwargs["loaded_in_8bit"] or kwargs["loaded_in_4bit"]) and not is_bnb_available(): + raise ImportError( + "To use AdaLora with 8-bit quantization, please install the `bitsandbytes` package. " + "You can install it with `pip install bitsandbytes`." 
+ ) + + quantization_config = get_quantization_config(self.model, method="gptq") + if quantization_config is not None: + kwargs["gptq_quantization_config"] = quantization_config + + # If it is not an AdaLoraLayer, create a new module, else update it with new adapters + if not isinstance(target, AdaLoraLayer): + new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs) + if adapter_name != self.active_adapter: + # adding an additional adapter: it is not automatically trainable + new_module.requires_grad_(False) + self._replace_module(parent, target_name, new_module, target) + else: + target.update_layer( + adapter_name, + lora_config.init_r, + lora_config.lora_alpha, + lora_config.lora_dropout, + lora_config.init_lora_weights, + ) + + @staticmethod + def _create_new_module(lora_config, adapter_name, target, **kwargs): + # avoid eager bnb import + if is_bnb_available(): + import bitsandbytes as bnb + + from .bnb import SVDLinear8bitLt + if is_bnb_4bit_available(): + from .bnb import SVDLinear4bit + + gptq_quantization_config = kwargs.get("gptq_quantization_config", None) + AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config) + + loaded_in_8bit = kwargs.pop("loaded_in_8bit", False) + loaded_in_4bit = kwargs.pop("loaded_in_4bit", False) + + if isinstance(target, BaseTunerLayer): + target_base_layer = target.get_base_layer() + else: + target_base_layer = target + + if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt): + kwargs.update( + { + "has_fp16_weights": target_base_layer.state.has_fp16_weights, + "memory_efficient_backward": target_base_layer.state.memory_efficient_backward, + "threshold": target_base_layer.state.threshold, + "index": target_base_layer.index, + } + ) + new_module = SVDLinear8bitLt(target, adapter_name, **kwargs) + elif loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit): + fourbit_kwargs = kwargs.copy() + fourbit_kwargs.update( + { + "compute_dtype": target_base_layer.compute_dtype, + "compress_statistics": target_base_layer.weight.compress_statistics, + "quant_type": target_base_layer.weight.quant_type, + } + ) + new_module = SVDLinear4bit(target, adapter_name, **fourbit_kwargs) + elif AutoGPTQQuantLinear is not None and isinstance(target, AutoGPTQQuantLinear): + new_module = SVDQuantLinear(target, adapter_name, **kwargs) + else: + if isinstance(target_base_layer, torch.nn.Linear): + if kwargs["fan_in_fan_out"]: + warnings.warn( + "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " + "Setting fan_in_fan_out to False." + ) + kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False + elif isinstance(target_base_layer, Conv1D): + if not kwargs["fan_in_fan_out"]: + warnings.warn( + "fan_in_fan_out is set to False but the target module is `Conv1D`. " + "Setting fan_in_fan_out to True." + ) + kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True + else: + raise ValueError( + f"Target module {target} is not supported. " + f"Currently, only `torch.nn.Linear` and `Conv1D` are supported." 
+ ) + new_module = SVDLinear(target, adapter_name, **kwargs) + + return new_module + + @staticmethod + def _prepare_adapter_config(peft_config, model_config): + if peft_config.target_modules is None: + if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING: + raise ValueError("Please specify `target_modules` in `peft_config`") + peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[ + model_config["model_type"] + ] + return peft_config + + def __getattr__(self, name: str): + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self.model, name) + + def forward(self, *args, **kwargs): + outputs = self.model.forward(*args, **kwargs) + + if (getattr(outputs, "loss", None) is not None) and isinstance(outputs.loss, torch.Tensor): + # Calculate the orthogonal regularization + orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight + + if orth_reg_weight <= 0: + raise ValueError("orth_reg_weight should be greater than 0. ") + + regu_loss = 0 + num_param = 0 + for n, p in self.model.named_parameters(): + if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n: + para_cov = p @ p.T if "lora_A" in n else p.T @ p + I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov)) # noqa: E741 + I.requires_grad = False + num_param += 1 + regu_loss += torch.norm(para_cov - I, p="fro") + if num_param > 0: + regu_loss = regu_loss / num_param + else: + regu_loss = 0 + outputs.loss += orth_reg_weight * regu_loss + return outputs + + def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name): + lora_config = self.peft_config[adapter_name] + for name, rank_idx in rank_pattern.items(): + if isinstance(rank_idx, list): + rank = sum(rank_idx) + elif isinstance(rank_idx, torch.Tensor): + rank_idx = rank_idx.view(-1) + rank = rank_idx.sum().item() + else: + raise ValueError("Unexpected type of rank_idx") + key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1]) + _, target, _ = _get_submodules(self.model, key) + lora_E_weights = target.lora_E[adapter_name][rank_idx] + lora_A_weights = target.lora_A[adapter_name][rank_idx] + lora_B_weights = target.lora_B[adapter_name][:, rank_idx] + ranknum = target.ranknum[adapter_name] + target.update_layer( + adapter_name, + rank, + lora_config.lora_alpha, + lora_config.lora_dropout, + lora_config.init_lora_weights, + ) + with torch.no_grad(): + if rank > 0: + target.lora_E[adapter_name].copy_(lora_E_weights) + target.lora_A[adapter_name].copy_(lora_A_weights) + target.lora_B[adapter_name].copy_(lora_B_weights) + # The scaling is exactly as the previous + target.ranknum[adapter_name].copy_(ranknum) + + def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name): + for name, rank_idx in rank_pattern.items(): + rank = sum(rank_idx) + prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1]) + for layer in ["lora_E", "lora_A", "lora_B"]: + key = f"base_model.model.{prefix}.{layer}.{adapter_name}" + if layer != "lora_B": + state_dict[key] = ( + state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key] + ) + else: + state_dict[key] = ( + state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key] + ) + return state_dict + + def update_and_allocate(self, global_step): + """ + This method updates 
AdaLoRA budget and mask. + + This should be called in every training step after `loss.backward()` and before `zero_grad()`. + + `tinit`, `tfinal` and `deltaT` are handled within the method. + + Args: + global_step (`int`): The current training step; it is used to calculate the AdaLoRA budget. + + Example: + + ```python + >>> loss = model(**input).loss + >>> loss.backward() + >>> optimizer.step() + >>> model.base_model.update_and_allocate(i_step) + >>> optimizer.zero_grad() + ``` + """ + lora_config = self.peft_config[self.trainable_adapter_name] + # Update the importance score and allocate the budget + if global_step < lora_config.total_step - lora_config.tfinal: + _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step) + if rank_pattern: + lora_config.rank_pattern = rank_pattern + # Finalize the budget allocation + elif global_step == lora_config.total_step - lora_config.tfinal: + _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True) + # for some reason, this freezes the trainable parameters and nothing gets updated + # self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name) + lora_config.rank_pattern = rank_pattern + self.rankallocator.reset_ipt() + # Currently using an inefficient way to mask the unimportant weights using the rank pattern + # due to the problem mentioned above + elif global_step > lora_config.total_step - lora_config.tfinal: + self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern) + # Pass the function and do forward propagation + else: + return None diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/__init__.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..763d0133a285eda4cf2aa68f624ea2c1b3a447a2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/__init__.py @@ -0,0 +1,36 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
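To make the orthogonal regularization added by `AdaLoraModel.forward` above easier to follow, here is a small standalone sketch of the same computation; the helper name and tensor shapes are illustrative and not part of the diff.

```python
# Standalone sketch (not part of the diff) of the orthogonal regularization term
# that AdaLoraModel.forward above adds to the task loss. Shapes are toy values,
# chosen to match the rank-first indexing used by resize_modules_by_rank_pattern.
import torch

def orth_regularization(lora_A: torch.Tensor, lora_B: torch.Tensor) -> torch.Tensor:
    """Average Frobenius distance of A @ A.T and B.T @ B from the identity."""
    regu_loss, num_param = 0.0, 0
    for para_cov in (lora_A @ lora_A.T, lora_B.T @ lora_B):
        eye = torch.eye(*para_cov.size(), device=para_cov.device, dtype=para_cov.dtype)
        regu_loss = regu_loss + torch.norm(para_cov - eye, p="fro")
        num_param += 1
    return regu_loss / num_param

lora_A = torch.randn(4, 16)   # (r, in_features)
lora_B = torch.randn(16, 4)   # (out_features, r)
print(orth_regularization(lora_A, lora_B))  # forward() scales this by orth_reg_weight
```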
+ +from peft.import_utils import is_bnb_4bit_available, is_bnb_available + +from .config import IA3Config +from .layer import Conv2d, IA3Layer, Linear +from .model import IA3Model + + +__all__ = ["Conv2d", "IA3Config", "IA3Layer", "IA3Model", "Linear"] + + +def __getattr__(name): + if (name == "Linear8bitLt") and is_bnb_available(): + from .bnb import Linear8bitLt + + return Linear8bitLt + + if (name == "Linear4bit") and is_bnb_4bit_available(): + from .bnb import Linear4bit + + return Linear4bit + + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/bnb.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/bnb.py new file mode 100644 index 0000000000000000000000000000000000000000..628e3ce7229528a0b3157da349b2b34153573c51 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/bnb.py @@ -0,0 +1,129 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any + +import torch + +from peft.import_utils import is_bnb_4bit_available, is_bnb_available + +from .layer import IA3Layer + + +if is_bnb_available(): + + class Linear8bitLt(torch.nn.Module, IA3Layer): + # (IA)^3 implemented in a dense layer + def __init__( + self, + base_layer: torch.nn.Module, + adapter_name: str, + is_feedforward: bool, + init_ia3_weights: bool = True, + **kwargs, + ) -> None: + super().__init__() + IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) + + # Freezing the pre-trained weight matrix + self.get_base_layer().weight.requires_grad = False + self._active_adapter = adapter_name + self.update_layer(adapter_name, init_ia3_weights) + + def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + # note: no check for self.merged because merging is not supported (yet) + if self.disable_adapters: + return self.base_layer(x) + + ia3_scaling = 1 + for active_adapter in self.active_adapters: + if active_adapter not in self.ia3_l.keys(): + continue + ia3_scaling *= self.ia3_l[active_adapter].flatten() + + requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32) + if requires_conversion: + x = x.float() + if self.is_feedforward: + result = self.base_layer(x * ia3_scaling) + expected_dtype = result.dtype + else: + result = self.base_layer(x) + expected_dtype = result.dtype + result = result * ia3_scaling + + if requires_conversion: + result = result.to(expected_dtype) + + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "ia3." 
+ rep + + +if is_bnb_4bit_available(): + + class Linear4bit(torch.nn.Module, IA3Layer): + # IA3 implemented in a dense layer + def __init__( + self, + base_layer: torch.nn.Module, + adapter_name: str, + is_feedforward: bool, + init_ia3_weights: bool = True, + **kwargs, + ) -> None: + super().__init__() + IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) + + # Freezing the pre-trained weight matrix + self.get_base_layer().weight.requires_grad = False + self._active_adapter = adapter_name + self.update_layer(adapter_name, init_ia3_weights) + + def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + # note: no check for self.merged because merging is not supported (yet) + if self.disable_adapters: + return self.base_layer(x) + + ia3_scaling = 1 + for active_adapter in self.active_adapters: + if active_adapter not in self.ia3_l.keys(): + continue + ia3_scaling *= self.ia3_l[active_adapter].flatten() + + requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32) + if requires_conversion: + x = x.float() + if self.is_feedforward: + result = self.base_layer(x * ia3_scaling) + expected_dtype = result.dtype + else: + result = self.base_layer(x) + expected_dtype = result.dtype + result = result * ia3_scaling + + result = result.clone() + # adalora.py and lora.py both suggest that this is necessary for 4-bit training on older versions of Pytorch. + # This has been duplicated here. + + if requires_conversion: + result = result.to(expected_dtype) + + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "ia3." + rep diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/layer.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/layer.py new file mode 100644 index 0000000000000000000000000000000000000000..9ea04e6873f857bbda6f5828a3fb5095a118c7cf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/ia3/layer.py @@ -0,0 +1,307 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
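The 8-bit and 4-bit (IA)^3 forwards above share one idea: feedforward targets rescale the input, other targets rescale the output. A minimal sketch with a plain float `torch.nn.Linear` (quantization and dtype handling omitted; the vector shapes follow `IA3Layer.update_layer` in the `layer.py` hunk that follows):

```python
# Minimal sketch of the (IA)^3 scaling used by Linear8bitLt / Linear4bit above,
# with a plain float torch.nn.Linear (quantization and dtype handling omitted).
import torch

def ia3_forward(base_layer: torch.nn.Linear, x: torch.Tensor,
                ia3_l: torch.Tensor, is_feedforward: bool) -> torch.Tensor:
    ia3_scaling = ia3_l.flatten()
    if is_feedforward:
        # feedforward targets rescale the *input* features before the base layer
        return base_layer(x * ia3_scaling)
    # other targets rescale the *output* features after the base layer
    return base_layer(x) * ia3_scaling

base = torch.nn.Linear(8, 4)
x = torch.randn(2, 8)
print(ia3_forward(base, x, torch.ones(1, 8), is_feedforward=True).shape)   # torch.Size([2, 4])
print(ia3_forward(base, x, torch.ones(4, 1), is_feedforward=False).shape)  # torch.Size([2, 4])
```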
+ +import warnings +from typing import Any, List, Optional + +import torch +import torch.nn as nn +from transformers.pytorch_utils import Conv1D + +from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge +from peft.utils import transpose + + +class IA3Layer(BaseTunerLayer): + # All names of layers that may contain adapter weights + adapter_layer_names = ("ia3_l",) + + def __init__(self, base_layer: nn.Module, is_feedforward: bool, **kwargs) -> None: + self.base_layer = base_layer + self.ia3_l = nn.ParameterDict({}) + # Mark the weight as unmerged + self._disable_adapters = False + self.merged_adapters = [] + self.is_feedforward = is_feedforward + + base_layer = self.get_base_layer() + if isinstance(base_layer, nn.Linear): + in_features, out_features = base_layer.in_features, base_layer.out_features + elif isinstance(base_layer, nn.Conv2d): + in_features, out_features = base_layer.in_channels, base_layer.out_channels + elif isinstance(base_layer, nn.Embedding): + in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim + elif isinstance(base_layer, Conv1D): + in_features, out_features = ( + base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape + ) + else: + raise ValueError(f"Unsupported layer type {type(base_layer)}") + self.in_features = in_features + self.out_features = out_features + + def update_layer(self, adapter_name, init_ia3_weights): + # This code works for linear layers, override for other layer types + # Actual trainable parameters + if self.is_feedforward: + weight = torch.randn((1, self.in_features)) + else: + weight = torch.randn((self.out_features, 1)) + self.ia3_l[adapter_name] = nn.Parameter(weight) + if init_ia3_weights: + self.reset_ia3_parameters(adapter_name) + self.to(self.get_base_layer().weight.device) + self.set_adapter(self.active_adapters) + + def reset_ia3_parameters(self, adapter_name): + if adapter_name in self.ia3_l.keys(): + # initialize learned vector with torch.ones + nn.init.constant_(self.ia3_l[adapter_name], 1.0) + + +class Linear(nn.Module, IA3Layer): + # (IA)^3 implemented in a dense layer + def __init__( + self, + base_layer: nn.Module, + adapter_name: str, + fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) + is_feedforward: bool = False, # Set to True if the layer is treated as a feedforward layer + is_target_conv_1d_layer: bool = False, # whether target module is a conv1d layer. useful while unloading later + init_ia3_weights: bool = True, # whether to initialize IA3 weights + **kwargs, + ) -> None: + super().__init__() + IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) + self.fan_in_fan_out = fan_in_fan_out + self.is_target_conv_1d_layer = is_target_conv_1d_layer + self._active_adapter = adapter_name + self.update_layer(adapter_name, init_ia3_weights) + + def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: + """ + Merge the active adapter weights into the base weights + + Args: + safe_merge (`bool`, *optional*): + If True, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. + adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. 
+ """ + adapter_names = check_adapters_to_merge(self, adapter_names) + if not adapter_names: + # no adapter to merge + return + + for active_adapter in adapter_names: + if active_adapter in self.ia3_l.keys(): + base_layer = self.get_base_layer() + ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out) + if safe_merge: + orig_weights = base_layer.weight.data + orig_weights = torch.mul(orig_weights, ia3_l) + + if not torch.isfinite(orig_weights).all(): + raise ValueError( + f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" + ) + base_layer.weight.data = orig_weights + else: + base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_l) + + if not self.is_feedforward and (base_layer.bias is not None): + scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) + base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data) + + self.merged_adapters.append(active_adapter) + + def unmerge(self) -> None: + """ + This method unmerges all merged adapter layers from the base weights. + """ + if not self.merged: + warnings.warn("Already unmerged. Nothing to do.") + return + + warnings.warn("Unmerge result can be inaccurate for (IA)^3.") + while len(self.merged_adapters) > 0: + active_adapter = self.merged_adapters.pop() + if active_adapter in self.ia3_l.keys(): + base_layer = self.get_base_layer() + # Add tolerace to avoid division by zero + ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out) + 1e-8 + base_layer.weight.data = torch.div(base_layer.weight.data, ia3_l) + + if not self.is_feedforward and (base_layer.bias is not None): + scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) + base_layer.bias.data = torch.div(base_layer.bias.data, scaling.data + 1e-8) + + def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + dtype = previous_dtype = x.dtype + + if self.disable_adapters: + if self.merged: + self.unmerge() + result = self.base_layer(x, *args, **kwargs) + elif self.merged: + result = self.base_layer(x, *args, **kwargs) + else: + ia3_scaling = 1 + for active_adapter in self.active_adapters: + if active_adapter not in self.ia3_l.keys(): + continue + dtype = self.ia3_l[active_adapter].dtype + ia3_scaling *= self.ia3_l[active_adapter].flatten() + + if self.is_feedforward: + x = x.to(dtype) + # TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype + # e.g. bf16 vs fp32. Is that okay? 
+ interm = (x * ia3_scaling).to(self.get_base_layer().weight.dtype) + result = self.base_layer(interm, *args, **kwargs) + else: + result = self.base_layer(x, *args, **kwargs) + result = result.to(dtype) * ia3_scaling + + result = result.to(previous_dtype) + return result + + +class Conv2d(nn.Module, IA3Layer): + def __init__( + self, + base_layer: nn.Module, + adapter_name: str, + fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) + is_feedforward: bool = False, # Set to True if the layer is treated as a feedforward layer + init_ia3_weights: bool = True, + **kwargs, + ) -> None: + super().__init__() + IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) + self.fan_in_fan_out = fan_in_fan_out + self._active_adapter = adapter_name + + self.update_layer(adapter_name, init_ia3_weights) + + def update_layer(self, adapter_name, init_ia3_weights): + # Actual trainable parameters + if self.is_feedforward: + weight = torch.randn((1, self.in_features, 1, 1)) + else: + weight = torch.randn((1, self.out_features, 1, 1)) + self.ia3_l[adapter_name] = nn.Parameter(weight) + if init_ia3_weights: + self.reset_ia3_parameters(adapter_name) + self.to(self.get_base_layer().weight.device) + self.set_adapter(self.active_adapters) + + def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: + """ + Merge the active adapter weights into the base weights + + Args: + safe_merge (`bool`, *optional*): + If True, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. + adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. + """ + adapter_names = check_adapters_to_merge(self, adapter_names) + if not adapter_names: + # no adapter to merge + return + + for active_adapter in adapter_names: + if active_adapter in self.ia3_l.keys(): + base_layer = self.get_base_layer() + ia3_scaling = self.ia3_l[active_adapter].data + if not self.is_feedforward: + ia3_scaling = ia3_scaling.permute(1, 0, 2, 3) + + if safe_merge: + output_weight = torch.mul(base_layer.weight.data, ia3_scaling).clone() + + if not torch.isfinite(output_weight).all(): + raise ValueError( + f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" + ) + + base_layer.weight.data = output_weight + else: + base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_scaling) + + if not self.is_feedforward and (base_layer.bias is not None): + scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) + base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data) + + self.merged_adapters.append(active_adapter) + + def unmerge(self) -> None: + """ + This method unmerges all merged adapter layers from the base weights. + """ + if not self.merged: + warnings.warn("Already unmerged. Nothing to do.") + return + + warnings.warn("Unmerge result can be inaccurate for (IA)^3.") + while len(self.merged_adapters) > 0: + active_adapter = self.merged_adapters.pop() + if active_adapter in self.ia3_l.keys(): + base_layer = self.get_base_layer() + # divide by (IA)^3 vector. 
Add tolerace to avoid division by zero + ia3_scaling = self.ia3_l[active_adapter].data + if not self.is_feedforward: + ia3_scaling = ia3_scaling.permute(1, 0, 2, 3) + base_layer.weight.data = torch.div(base_layer.weight.data, ia3_scaling + 1e-8) + + if not self.is_feedforward and (base_layer.bias is not None): + scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) + base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data) + + def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + dtype = previous_dtype = x.dtype + + if self.disable_adapters: + if self.merged: + self.unmerge() + result = self.base_layer(x, *args, **kwargs) + elif self.merged: + result = self.base_layer(x, *args, **kwargs) + else: + ia3_scaling = 1 + for active_adapter in self.active_adapters: + if active_adapter not in self.ia3_l.keys(): + continue + dtype = self.ia3_l[active_adapter].dtype + ia3_scaling *= self.ia3_l[active_adapter] + + if self.is_feedforward: + x = x.to(dtype) + # TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype + # e.g. bf16 vs fp32. Is that okay? + interm = (x * ia3_scaling).to(self.get_base_layer().weight.dtype) + result = self.base_layer(interm, *args, **kwargs) + else: + result = self.base_layer(x, *args, **kwargs) + result = result.to(dtype) * ia3_scaling + + result = result.to(previous_dtype) + return result diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/lycoris_utils.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/lycoris_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..39c750ae8e8c2c8ac567f76c6ea70b638f29cb24 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/lycoris_utils.py @@ -0,0 +1,428 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import warnings +from abc import abstractmethod +from dataclasses import dataclass, field +from typing import Any, Optional, Union + +import torch +import torch.nn as nn +from tqdm import tqdm + +from peft.config import PeftConfig +from peft.utils import ( + ModulesToSaveWrapper, + _get_submodules, +) + +from .tuners_utils import BaseTuner, BaseTunerLayer, check_adapters_to_merge, check_target_module_exists + + +@dataclass +class LycorisConfig(PeftConfig): + r""" + A base config for LyCORIS like adapters + """ + + rank_pattern: Optional[dict] = field( + default_factory=dict, + metadata={ + "help": ( + "The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. " + "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 8`}" + ) + }, + ) + alpha_pattern: Optional[dict] = field( + default_factory=dict, + metadata={ + "help": ( + "The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `alpha`. 
" + "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 32`}" + ) + }, + ) + + +class LycorisLayer(BaseTunerLayer): + r""" + A base layer for LyCORIS like adapters + """ + + # adapter_layer_names needs to be defined on the child class + other_param_names = ("r", "alpha", "scaling", "rank_dropout", "module_dropout") + + def __init__(self, base_layer: nn.Module) -> None: + self.base_layer = base_layer + self.r = {} + self.alpha = {} + self.scaling = {} + self.rank_dropout = {} + self.module_dropout = {} + + # Tuner info + self._disable_adapters = False + self.merged_adapters = [] + + @property + @abstractmethod + def _available_adapters(self) -> set[str]: + ... + + def _init_empty_weights(self, cls, *args, **kwargs) -> None: + # A helper method that allows to initialize the layer of the given class without spending time to initialize the + # model weights. The implementation is inspired by + # https://pytorch.org/docs/stable/generated/torch.nn.utils.skip_init.html but this function cannot be used + # directly. + # Instead of this approach, it would be possible to bypass the __init__ of the class but that runs the risk of + # omitting important logic inside that __init__. + kwargs = kwargs.copy() + final_device = kwargs.pop("device", "cpu") + cls.__init__(self, *args, device="meta", **kwargs) + self.to_empty(device=final_device) + + @abstractmethod + def create_adapter_parameters(self, adapter_name: str, r: int, **kwargs): + ... + + # TODO: refactor LoRA to use the same approach + @abstractmethod + def _get_delta_activations(self, adapter_name: str, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + """Activations added on top of the base layer output (i.e. after the base layer forward pass)""" + + @abstractmethod + def get_delta_weight(self, adapter_name: str) -> torch.Tensor: + ... + + def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: + """ + Merge the active adapter weights into the base weights + + Args: + safe_merge (`bool`, *optional*): + If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. + adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If `None`, all active adapters will be merged. + Defaults to `None`. + """ + adapter_names = check_adapters_to_merge(self, adapter_names) + if not adapter_names: + # no adapter to merge + return + + for active_adapter in adapter_names: + if active_adapter in self._available_adapters: + base_layer = self.get_base_layer() + if safe_merge: + orig_weights = base_layer.weight.data.clone() + orig_weights += self.get_delta_weight(active_adapter) + + if not torch.isfinite(orig_weights).all(): + raise ValueError( + f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" + ) + + base_layer.weight.data = orig_weights + else: + base_layer.weight.data += self.get_delta_weight(active_adapter) + self.merged_adapters.append(active_adapter) + + @abstractmethod + def reset_adapter_parameters(self, adapter_name: str): + ... 
+ + def set_scale(self, adapter, scale): + if adapter not in self._available_adapters: + # Ignore the case where the adapter is not in the layer + return + self.scaling[adapter] = scale * self.alpha[adapter] / self.r[adapter] + + def scale_layer(self, scale: float) -> None: + if scale == 1: + return + + for active_adapter in self.active_adapters: + if active_adapter not in self._available_adapters: + continue + + self.scaling[active_adapter] *= scale + + def unmerge(self) -> None: + """ + This method unmerges all merged adapter layers from the base weights. + """ + if not self.merged: + warnings.warn("Already unmerged. Nothing to do.") + return + while len(self.merged_adapters) > 0: + active_adapter = self.merged_adapters.pop() + if active_adapter in self._available_adapters: + self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) + + def unscale_layer(self, scale=None) -> None: + for active_adapter in self.active_adapters: + if active_adapter not in self._available_adapters: + continue + + if scale is None: + self.scaling[active_adapter] = self.alpha[active_adapter] / self.r[active_adapter] + else: + self.scaling[active_adapter] /= scale + + @abstractmethod + def update_layer(self, adapter_name: str, r: int, alpha: float, **kwargs): + ... + + +class LycorisTuner(BaseTuner): + r""" + A base tuner for LyCORIS like adapters + """ + + prefix: str + layers_mapping: dict[type[torch.nn.Module], type[LycorisLayer]] + + def __init__(self, model, config, adapter_name): + super().__init__(model, config, adapter_name) + + def __getattr__(self, name: str): + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self.model, name) + + @staticmethod + def _check_target_module_exists(config, key): + return check_target_module_exists(config, key) + + @abstractmethod + def _create_and_replace( + self, + config: LycorisConfig, + adapter_name: str, + target: Union[LycorisLayer, nn.Module], + target_name, + parent, + current_key, + ): + ... 
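As a quick illustration of the scaling bookkeeping defined just above (`set_scale`, `scale_layer`, `unscale_layer`), with arbitrary numbers:

```python
# Toy walk-through of the scaling bookkeeping above; the numbers are arbitrary.
alpha, r = 16.0, 8
scaling = 0.25 * alpha / r   # set_scale(adapter, 0.25) recomputes from alpha and r -> 0.5
scaling *= 2.0               # scale_layer(2.0) multiplies the current value       -> 1.0
scaling /= 2.0               # unscale_layer(2.0) divides it back                  -> 0.5
scaling = alpha / r          # unscale_layer() with no argument resets to alpha/r  -> 2.0
print(scaling)
```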
+ + @classmethod + def _create_new_module(cls, config: LycorisConfig, adapter_name: str, target: nn.Module, **kwargs) -> LycorisLayer: + # Find corresponding subtype of provided target module + new_module_cls = None + for subtype, target_cls in cls.layers_mapping.items(): + if ( + hasattr(target, "base_layer") + and isinstance(target.get_base_layer(), subtype) + and isinstance(target, BaseTunerLayer) + ): + # nested tuner layers are allowed + new_module_cls = target_cls + break + elif isinstance(target, subtype): + new_module_cls = target_cls + break + + # We didn't find corresponding type, so adapter for this layer is not supported + if new_module_cls is None: + supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys()) + raise ValueError( + f"Target module of type {type(target)} not supported, " + f"currently only adapters for {supported_modules} are supported" + ) + + if isinstance(target, BaseTunerLayer): + target_base_layer = target.get_base_layer() + else: + target_base_layer = target + + if isinstance(target_base_layer, torch.nn.Conv2d): + new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs) + elif isinstance(target_base_layer, torch.nn.Linear): + new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs) + else: + supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys()) + raise ValueError( + f"Target module of type {type(target)} not supported, " + f"currently only adapters for {supported_modules} are supported" + ) + + return new_module + + def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: + for n, p in model.named_parameters(): + if self.prefix not in n: + p.requires_grad = False + + @staticmethod + def _prepare_adapter_config(peft_config, model_config): + if peft_config.target_modules is None: + raise ValueError("Please specify `target_modules` in `peft_config`") + return peft_config + + def _replace_module(self, parent, child_name, new_module, child): + setattr(parent, child_name, new_module) + # It's not necessary to set requires_grad here, as that is handled by + # _mark_only_adapters_as_trainable + + if not hasattr(new_module, "base_layer"): + new_module.weight = child.weight + if hasattr(child, "bias"): + new_module.bias = child.bias + + if getattr(child, "state", None) is not None: + if hasattr(new_module, "base_layer"): + new_module.base_layer.state = child.state + else: + new_module.state = child.state + new_module.to(child.weight.device) + + # dispatch to correct device + for name, module in new_module.named_modules(): + if self.prefix in name: + module.to(child.weight.device) + + def _set_adapter_layers(self, enabled=True): + for module in self.model.modules(): + if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): + module.enable_adapters(enabled) + + def _unload_and_optionally_merge( + self, + merge: bool = True, + progressbar: bool = False, + safe_merge: bool = False, + adapter_names: Optional[list[str]] = None, + ): + if merge: + if getattr(self.model, "quantization_method", None) == "gptq": + raise ValueError("Cannot merge LOHA layers when the model is gptq quantized") + + self._unloading_checks(adapter_names) + key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] + desc = "Unloading " + ("and merging " if merge else "") + "model" + for key in tqdm(key_list, disable=not progressbar, desc=desc): + try: + parent, target, target_name = _get_submodules(self.model, key) + except AttributeError: + continue + + if 
hasattr(target, "base_layer"): + if merge: + target.merge(safe_merge=safe_merge, adapter_names=adapter_names) + self._replace_module(parent, target_name, target.get_base_layer(), target) + elif isinstance(target, ModulesToSaveWrapper): + # save any additional trainable modules part of `modules_to_save` + new_module = target.modules_to_save[target.active_adapter] + if hasattr(new_module, "base_layer"): + # check if the module is itself a tuner layer + if merge: + new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names) + new_module = new_module.get_base_layer() + setattr(parent, target_name, new_module) + + return self.model + + def enable_adapter_layers(self) -> None: + """Enable all adapters. + + Call this if you have previously disabled all adapters and want to re-enable them. + """ + self._set_adapter_layers(enabled=True) + + def disable_adapter_layers(self) -> None: + """Disable all adapters. + + When disabling all adapters, the model output corresponds to the output of the base model. + """ + self._set_adapter_layers(enabled=False) + + def merge_and_unload( + self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None + ) -> torch.nn.Module: + r""" + This method merges the adapter layers into the base model. This is needed if someone wants to use the base + model as a standalone model. + + Args: + progressbar (`bool`): + whether to show a progressbar indicating the unload and merge process + safe_merge (`bool`): + whether to activate the safe merging check to check if there is any potential Nan in the adapter + weights + adapter_names (`List[str]`, *optional*): + The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults + to `None`. + + """ + return self._unload_and_optionally_merge( + progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names + ) + + def unload(self) -> torch.nn.Module: + """ + Gets back the base model by removing all the lora modules without merging. This gives back the original base + model. + """ + return self._unload_and_optionally_merge(merge=False) + + def set_adapter(self, adapter_name: str | list[str]) -> None: + """Set the active adapter(s). + + Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is + not desired, use the following code. + + ```py + >>> for name, param in model_peft.named_parameters(): + ... if ...: # some check on name (ex. if 'lora' in name) + ... param.requires_grad = False + ``` + + Args: + adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated. + """ + for module in self.model.modules(): + if isinstance(module, LycorisLayer): + if module.merged: + warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.") + module.unmerge() + module.set_adapter(adapter_name) + + def delete_adapter(self, adapter_name: str) -> None: + """ + Deletes an existing adapter. + + Args: + adapter_name (`str`): Name of the adapter to be deleted. 
+ """ + if adapter_name not in list(self.peft_config.keys()): + raise ValueError(f"Adapter {adapter_name} does not exist") + del self.peft_config[adapter_name] + + key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] + new_adapter = None + for key in key_list: + _, target, _ = _get_submodules(self.model, key) + if isinstance(target, LycorisLayer): + target.delete_adapter(adapter_name) + if new_adapter is None: + new_adapter = target.active_adapters[:] + + self.active_adapter = new_adapter or [] diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__init__.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..214f7722486485bea4ede3b5c1a433aac447dd2b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit +from .model import MultitaskPromptEmbedding + + +__all__ = ["MultitaskPromptTuningConfig", "MultitaskPromptTuningInit", "MultitaskPromptEmbedding"] diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aed2762bfb7a8faf7cf65fe9ca32a89d2407933d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a982743243f29541d799ba93e0705f9c3976ec00 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06952c8f79db273e68591bf7f7c43fe1be56078b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/config.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/config.py new file mode 100644 index 
0000000000000000000000000000000000000000..67a3c323a299063900d42a6e464672898b13be7c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/config.py @@ -0,0 +1,61 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum +from dataclasses import dataclass, field +from typing import Optional, Union + +from peft.tuners.prompt_tuning import PromptTuningConfig +from peft.utils import PeftType + + +class MultitaskPromptTuningInit(str, enum.Enum): + # initialize prompt with text + TEXT = "TEXT" + # initialize prompt with random matrix + RANDOM = "RANDOM" + # average the prefix and column matrices obtained during source training + AVERAGE_SOURCE_TASKS = "AVERAGE_SOURCE_TASKS" + # pick prefix and column matrices for a particular task obtained during source training + EXACT_SOURCE_TASK = "EXACT_SOURCE_TASK" + # only use the prompt embeddings trained during source training + ONLY_SOURCE_SHARED = "ONLY_SOURCE_SHARED" + + +@dataclass +class MultitaskPromptTuningConfig(PromptTuningConfig): + prompt_tuning_init: Union[MultitaskPromptTuningInit, str] = field( + default=MultitaskPromptTuningInit.RANDOM, + metadata={ + "help": ( + "How to initialize the prompt tuning parameters. Can be one of TEXT, RANDOM, AVERAGE_SOURCE_TASKS, " + "EXACT_SOURCE_TASK, ONLY_SOURCE_SHARED." + ), + }, + ) + prompt_tuning_init_state_dict_path: Optional[str] = field( + default=None, + metadata={ + "help": ( + "The path of source state dict. This is required when training the downstream target prompt from " + "the pretrained source prompt" + ), + }, + ) + prompt_tuning_init_task: Optional[int] = field(default=0, metadata={"help": "source task id for initialization"}) + num_ranks: Optional[int] = field(default=1, metadata={"help": "ranks"}) + num_tasks: Optional[int] = field(default=1, metadata={"help": "number of tasks"}) + + def __post_init__(self): + self.peft_type = PeftType.MULTITASK_PROMPT_TUNING diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/model.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/model.py new file mode 100644 index 0000000000000000000000000000000000000000..66498c9f00deddbf3259a4f1095a0c5d4202b0d2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/multitask_prompt_tuning/model.py @@ -0,0 +1,115 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
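The `MultitaskPromptEmbedding` defined in the `model.py` hunk that follows combines a shared prompt with a per-task low-rank term. A rough standalone sketch of that composition, with toy sizes and illustrative names:

```python
# Rough sketch (toy sizes, illustrative names) of the per-task prompt composition
# performed by MultitaskPromptEmbedding.forward in the model.py hunk below.
import torch

num_tasks, num_ranks, total_virtual_tokens, token_dim = 3, 1, 10, 16

shared_prompt = torch.randn(total_virtual_tokens, token_dim)        # stands in for embedding(indices)
prefix_task_cols = torch.randn(num_tasks, total_virtual_tokens, num_ranks)
prefix_task_rows = torch.randn(num_tasks, num_ranks, token_dim)

task_ids = torch.tensor([0, 2])                                     # one task id per sample
task_cols = torch.index_select(prefix_task_cols, 0, task_ids)       # (bs, tokens, ranks)
task_rows = torch.index_select(prefix_task_rows, 0, task_ids)       # (bs, ranks, dim)
task_prompts = torch.matmul(task_cols, task_rows)                   # (bs, tokens, dim)
prompt_embeddings = shared_prompt.unsqueeze(0) * task_prompts       # element-wise gating
print(prompt_embeddings.shape)                                      # torch.Size([2, 10, 16])
```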
+ +import torch + +from peft.tuners.prompt_tuning import PromptEmbedding +from peft.utils import TaskType + +from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit + + +# This code is adapted for the paper: https://arxiv.org/abs/2303.02861 and +# constitutes the work done at MIT-IBM Watson Research Lab. + + +class MultitaskPromptEmbedding(PromptEmbedding): + def __init__(self, config: MultitaskPromptTuningConfig, word_embeddings): + super().__init__(config, word_embeddings) + + self.num_tasks = config.num_tasks + self.num_ranks = config.num_ranks + self.num_virtual_tokens = config.num_virtual_tokens + + self.num_transformer_submodules = config.num_transformer_submodules + if self.num_transformer_submodules is None: + self.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1 + + self.token_dim = config.token_dim + + total_virtual_tokens = self.num_virtual_tokens * self.num_transformer_submodules + + self.prefix_task_cols = torch.nn.Parameter( + torch.normal( + mean=0, + std=0.02, + size=(self.num_tasks, total_virtual_tokens, self.num_ranks), + ) + ) + self.prefix_task_rows = torch.nn.Parameter( + torch.normal( + mean=0, + std=0.02, + size=(self.num_tasks, self.num_ranks, self.token_dim), + ) + ) + + if config.prompt_tuning_init in [ + MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS, + MultitaskPromptTuningInit.EXACT_SOURCE_TASK, + MultitaskPromptTuningInit.ONLY_SOURCE_SHARED, + ]: + if config.prompt_tuning_init_state_dict_path is None: + raise ValueError( + f"prompt_tuning_init_state_dict_path needs to be specified with {config.prompt_tuning_init} " + "init method" + ) + + # TODO: There should be an option for safetensors + state_dict: dict = torch.load( + config.prompt_tuning_init_state_dict_path, + map_location=word_embeddings.weight.device, + ) + + if config.prompt_tuning_init in [ + MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS, + MultitaskPromptTuningInit.EXACT_SOURCE_TASK, + ]: + prefix_task_cols_: torch.Tensor = state_dict["prefix_task_cols"] + prefix_task_rows_: torch.Tensor = state_dict["prefix_task_rows"] + + if config.prompt_tuning_init == MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS: + prefix_task_cols_ = prefix_task_cols_.mean(0, keepdim=True) + prefix_task_rows_ = prefix_task_rows_.mean(0, keepdim=True) + elif config.prompt_tuning_init == MultitaskPromptTuningInit.EXACT_SOURCE_TASK: + prefix_task_cols_ = prefix_task_cols_[config.prompt_tuning_init_task, ...].unsqueeze(0) + prefix_task_rows_ = prefix_task_rows_[config.prompt_tuning_init_task, ...].unsqueeze(0) + + state_dict = { + "embedding.weight": state_dict["prompt_embeddings"], + "prefix_task_cols": prefix_task_cols_, + "prefix_task_rows": prefix_task_rows_, + } + + self.load_state_dict(state_dict, strict=True) + elif config.prompt_tuning_init == MultitaskPromptTuningInit.ONLY_SOURCE_SHARED: + state_dict = { + "embedding.weight": state_dict["prompt_embeddings"], + } + + self.load_state_dict(state_dict, strict=False) + + def forward(self, indices, task_ids): + if task_ids is None: + raise ValueError("task_ids cannot be None") + + prompt_embeddings = self.embedding(indices) + + task_cols = torch.index_select(self.prefix_task_cols, 0, task_ids) + task_rows = torch.index_select(self.prefix_task_rows, 0, task_ids) + task_prompts = torch.matmul(task_cols, task_rows) + + prompt_embeddings *= task_prompts + + return prompt_embeddings diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/config.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bde5f3f30a851d4ab7ee5e7eba19539d8e6f56aa Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/config.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/layer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4799d1cbb24b016e08e2a84f0793c15849019b7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/oft/__pycache__/layer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__init__.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b0f368695edbd7fb7bb3c68d9e918bd16752b873 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .config import PolyConfig +from .layer import Linear, PolyLayer +from .model import PolyModel + + +__all__ = ["Linear", "PolyConfig", "PolyLayer", "PolyModel"] diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e5c507c74b33c686d1b9c56771f79594a17d38b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/config.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bef3b9d3b6556da75ec381d0df873217a1dba42 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/config.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/layer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0db7aaa08bc626ba937c68ddb506d30135980010 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/layer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/model.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89ca74cdbfc4778f0579308484d29ce5c7fdc385 Binary 
files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/model.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/router.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/router.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f9c8cb36703461fdcc890bd632730ecd21353a4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/__pycache__/router.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/config.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/config.py new file mode 100644 index 0000000000000000000000000000000000000000..3abbc93b022dd53b5fd5c373b029dba9084a0b9b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/config.py @@ -0,0 +1,89 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field +from typing import List, Literal, Optional, Union + +from peft.config import PeftConfig +from peft.utils import PeftType + + +@dataclass +class PolyConfig(PeftConfig): + """ + This is the configuration class to store the configuration of a [`PolyModel`]. + - [Polytropon (Poly)](https://arxiv.org/abs/2202.13914) + - [Multi-Head Routing (MHR)](https://arxiv.org/abs/2211.03831) + + Args: + r (`int`): Attention dimension of each Lora in Poly. + target_modules (`Union[List[str],str]`): The names of the modules to apply Poly to. + modules_to_save (`List[str]`): List of modules apart from Poly layers to be set as trainable + and saved in the final checkpoint. + init_weights (bool): Whether to perform initialization of Poly weights. + poly_type (`Literal["poly"]`): The variant of the Poly module to use. Currently, only "poly" + is supported. + n_tasks (`int`): The number of tasks in a multitasking scenario. + n_skills (`int`): The number of skills (LoRA) in each Poly layer. + n_splits (`int`): The number of splits within each LoRA of a Poly layer. A value greater + than 1 indicates the use of Multi-Head Routing (MHR). + """ + + r: int = field(default=8, metadata={"help": "Lora attention dimension"}) + target_modules: Optional[Union[List[str], str]] = field( + default=None, + metadata={ + "help": "List of module names or regex expression of the module names to replace with Poly." + "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " + }, + ) + modules_to_save: Optional[List[str]] = field( + default=None, + metadata={ + "help": "List of modules apart from Poly layers to be set as trainable and saved in the final checkpoint. " + "For example, in Sequence Classification or Token Classification tasks, " + "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." 
+ }, + ) + init_weights: bool = field( + default=True, + metadata={ + "help": ( + "Whether to initialize the weights of the Poly layers with their default initialization. Don't change " + "this setting, except if you know exactly what you're doing." + ), + }, + ) + poly_type: Literal["poly"] = field( + default="poly", + metadata={"help": 'Type of Poly modules to be used. Currently only "poly" is supported.'}, + ) + n_tasks: int = field( + default=1, + metadata={"help": "Number of tasks in multitasking scenario."}, + ) + n_skills: int = field( + default=4, + metadata={"help": "Number of skills (LoRA) in each Poly layer."}, + ) + n_splits: int = field( + default=1, + metadata={"help": "Number of splits within each LoRA of a Poly layer."}, + ) + + def __post_init__(self): + self.peft_type = PeftType.POLY + self.target_modules = ( + set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules + ) diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/layer.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/layer.py new file mode 100644 index 0000000000000000000000000000000000000000..debb40beee29b1cfdf2072a293d4c61042280227 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/layer.py @@ -0,0 +1,171 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math +from typing import Any + +import torch +import torch.nn as nn + +from peft.tuners.tuners_utils import BaseTunerLayer + +from .config import PolyConfig +from .router import get_router + + +class PolyLayer(BaseTunerLayer): + # All names of layers that may contain (trainable) adapter weights + adapter_layer_names = ("poly_lora_A", "poly_lora_B", "poly_router") + # All names of other parameters that may contain adapter-related parameters + other_param_names = ("r", "n_tasks", "n_skills", "n_splits") + + def __init__(self, base_layer: nn.Module, **kwargs): + self.base_layer = base_layer + self.r = {} + self.n_tasks = {} + self.n_skills = {} + self.n_splits = {} + self.poly_type = {} + self.poly_router = nn.ModuleDict() + self.poly_lora_A = nn.ParameterDict() + self.poly_lora_B = nn.ParameterDict() + self.kwargs = kwargs + + base_layer = self.get_base_layer() + if isinstance(base_layer, nn.Linear): + in_features, out_features = base_layer.in_features, base_layer.out_features + else: + raise ValueError(f"Unsupported layer type {type(base_layer)}") + + self.in_features = in_features + self.out_features = out_features + + def update_layer(self, adapter_name, poly_config): + if poly_config.r <= 0: + raise ValueError(f"`r` should be a positive integer value but the value passed is {poly_config.r}") + + self.r[adapter_name] = poly_config.r + self.n_tasks[adapter_name] = poly_config.n_tasks + self.n_skills[adapter_name] = poly_config.n_skills + self.n_splits[adapter_name] = poly_config.n_splits + self.poly_type[adapter_name] = poly_config.poly_type + + self.poly_lora_A[adapter_name] = nn.Parameter( + torch.empty( + poly_config.n_splits, + poly_config.n_skills, + self.in_features // poly_config.n_splits, + poly_config.r, + ) + ) + self.poly_lora_B[adapter_name] = nn.Parameter( + torch.empty( + poly_config.n_splits, + poly_config.n_skills, + poly_config.r, + self.out_features // poly_config.n_splits, + ) + ) + self.poly_router[adapter_name] = get_router(poly_config) + + self.reset_poly_parameters(adapter_name, init_weights=poly_config.init_weights) + + weight = getattr(self.get_base_layer(), "weight", None) + if weight is not None: + # the layer is already completely initialized, this is an update + if weight.dtype.is_floating_point or weight.dtype.is_complex: + self.to(weight.device, dtype=weight.dtype) + else: + self.to(weight.device) + self.set_adapter(self.active_adapters) + + def reset_poly_parameters(self, adapter_name, init_weights): + if adapter_name in self.poly_lora_A.keys(): + # initialize A the same way as the default for nn.Linear + # https://github.com/microsoft/mttl/blob/ce4ca51dbca73be656feb9b3e5233633e3c5dec7/mttl/models/poly.py#L269 + n_splits, n_skills, d, r = self.poly_lora_A[adapter_name].shape + for skill in range(n_skills): + for split in range(n_splits): + param = torch.empty((r, d)) + torch.nn.init.kaiming_uniform_(param, a=math.sqrt(5)) + self.poly_lora_A[adapter_name].data[split, skill, :, :] = param.T + + if init_weights: + # initialize B to zero + torch.nn.init.zeros_(self.poly_lora_B[adapter_name]) + else: + # initialize B the same way as the default for nn.Linear + n_splits, n_skills, r, d = self.poly_lora_B[adapter_name].shape + for skill in range(n_skills): + for split in range(n_splits): + param = torch.empty((d, r)) + torch.nn.init.kaiming_uniform_(param, a=math.sqrt(5)) + self.poly_lora_B[adapter_name].data[split, skill, :, :] = param.T + + # initialized router + self.poly_router[adapter_name].reset() + + +class Linear(nn.Module, PolyLayer): + # Lora 
implemented in a dense layer + def __init__( + self, + base_layer, + adapter_name: str, + poly_config: PolyConfig, + **kwargs, + ) -> None: + super().__init__() + PolyLayer.__init__(self, base_layer, **kwargs) + + self._active_adapter = adapter_name + self.update_layer(adapter_name, poly_config) + + def forward(self, x: torch.Tensor, *args: Any, task_ids: torch.Tensor = None, **kwargs: Any) -> torch.Tensor: + previous_dtype = x.dtype + if self.disable_adapters: + result = self.base_layer(x, *args, **kwargs) + else: + result = self.base_layer(x, *args, **kwargs) + for active_adapter in self.active_adapters: + if active_adapter not in self.poly_lora_A.keys(): + continue + + r = self.r[active_adapter] + poly_router = self.poly_router[active_adapter] + poly_lora_A = self.poly_lora_A[active_adapter] + poly_lora_B = self.poly_lora_B[active_adapter] + + # Combine the output of LoRAs + # https://github.com/microsoft/mttl/blob/ce4ca51dbca73be656feb9b3e5233633e3c5dec7/mttl/models/poly.py#L293 + mixing_weights = poly_router(task_ids=task_ids, input_ids=x) + bs, n_splits, n_skills = mixing_weights.size() + + # A is n_splits, n_skills, D // n_splits, rank + # we want bs, n_splits, D // n_splits, rank + A = torch.einsum("bqs,qsdr->bqdr", (mixing_weights, poly_lora_A)) + B = torch.einsum("bqs,qsrd->bqrd", (mixing_weights, poly_lora_B)) + + A = A.reshape(bs, self.in_features, r) + B = B.transpose(1, 2).reshape(bs, r, self.out_features) + + x = x.to(A.dtype) + result += x.bmm(A).bmm(B) / r + + result = result.to(previous_dtype) + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "poly." + rep diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/model.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/model.py new file mode 100644 index 0000000000000000000000000000000000000000..943a287955ad060289b1a4aa0f9c4f1e493ccbce --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/model.py @@ -0,0 +1,187 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
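The mixing step inside Poly's `Linear.forward` above can be hard to read from the einsums alone; here is a standalone sketch with toy shapes, where the softmax stands in for the router output defined in `router.py` further below:

```python
# Standalone sketch of the LoRA mixing in Linear.forward above, with toy shapes.
# The softmax stands in for the router output defined in router.py further below.
import torch

bs, seq, in_features, out_features = 2, 5, 8, 8
n_splits, n_skills, r = 2, 4, 2

x = torch.randn(bs, seq, in_features)
mixing_weights = torch.softmax(torch.randn(bs, n_splits, n_skills), dim=-1)
poly_lora_A = torch.randn(n_splits, n_skills, in_features // n_splits, r)
poly_lora_B = torch.randn(n_splits, n_skills, r, out_features // n_splits)

A = torch.einsum("bqs,qsdr->bqdr", mixing_weights, poly_lora_A)  # (bs, n_splits, d, r)
B = torch.einsum("bqs,qsrd->bqrd", mixing_weights, poly_lora_B)  # (bs, n_splits, r, d)
A = A.reshape(bs, in_features, r)
B = B.transpose(1, 2).reshape(bs, r, out_features)
delta = x.bmm(A).bmm(B) / r          # added on top of the base layer output
print(delta.shape)                   # torch.Size([2, 5, 8])
```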
+ +from contextlib import contextmanager +from dataclasses import asdict +from enum import Enum +from typing import Any + +import torch +from torch import nn + +from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists +from peft.utils import ( + TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, + ModulesToSaveWrapper, +) + +from .config import PolyConfig +from .layer import Linear, PolyLayer + + +class PolyModel(BaseTuner): + prefix: str = "poly_" + + def __init__(self, model, config, adapter_name) -> None: + super().__init__(model, config, adapter_name) + + @staticmethod + def _check_target_module_exists(poly_config, key): + return check_target_module_exists(poly_config, key) + + def _create_and_replace( + self, + poly_config: PolyConfig, + adapter_name: str, + target: nn.Module, + target_name: str, + parent: nn.Module, + **optional_kwargs: Any, + ): + if isinstance(target, PolyLayer): + target.update_layer(adapter_name, poly_config) + else: + new_module = self._create_new_module( + poly_config, + adapter_name, + target, + ) + if adapter_name != self.active_adapter: + # adding an additional adapter: it is not automatically trainable + new_module.requires_grad_(False) + self._replace_module(parent, target_name, new_module, target) + + def _replace_module(self, parent, child_name, new_module, child): + setattr(parent, child_name, new_module) + # It's not necessary to set requires_grad here, as that is handled by + # _mark_only_adapters_as_trainable + + # child layer wraps the original module, unpack it + if hasattr(child, "base_layer"): + child = child.base_layer + + if not hasattr(new_module, "base_layer"): + new_module.weight = child.weight + if hasattr(child, "bias"): + new_module.bias = child.bias + + if getattr(child, "state", None) is not None: + if hasattr(new_module, "base_layer"): + new_module.base_layer.state = child.state + else: + new_module.state = child.state + new_module.to(child.weight.device) + + # dispatch to correct device + for name, module in new_module.named_modules(): + if (self.prefix in name) or ("ranknum" in name): + weight = child.qweight if hasattr(child, "qweight") else child.weight + module.to(weight.device) + + def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: + for n, p in model.named_parameters(): + if self.prefix not in n: + p.requires_grad = False + + @staticmethod + def _create_new_module(poly_config, adapter_name, target, **kwargs): + if isinstance(target, BaseTunerLayer): + target_base_layer = target.get_base_layer() + else: + target_base_layer = target + + if isinstance(target_base_layer, torch.nn.Linear): + return Linear(target, adapter_name, poly_config, **kwargs) + else: + raise ValueError( + f"Target module {target} is not supported. Currently, only the following modules are supported: " + "`torch.nn.Linear`." 
+ ) + + def __getattr__(self, name: str): + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self.model, name) + + def get_peft_config_as_dict(self, inference: bool = False): + config_dict = {} + for key, value in self.peft_config.items(): + config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()} + if inference: + config["inference_mode"] = True + config_dict[key] = config + return config + + def _set_adapter_layers(self, enabled=True): + for module in self.model.modules(): + if isinstance(module, (PolyLayer, ModulesToSaveWrapper)): + module.enable_adapters(enabled) + + def enable_adapter_layers(self): + self._set_adapter_layers(enabled=True) + + def disable_adapter_layers(self): + self._set_adapter_layers(enabled=False) + + def set_adapter(self, adapter_name): + for module in self.model.modules(): + if isinstance(module, PolyLayer): + module.set_adapter(adapter_name) + + def _prepare_adapter_config(self, peft_config, model_config): + if peft_config.target_modules is None: + if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING: + raise ValueError("Please specify `target_modules` in `peft_config`") + peft_config.target_modules = set( + TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]] + ) + return peft_config + + def _register_pre_hooks(self, task_ids): + """Helper method to register pre hooks.""" + if task_ids is None: + return [] + + def pre_hook(_, args, kwargs): + kwargs["task_ids"] = task_ids + return args, kwargs + + handles = [] + + for module in self.model.modules(): + if isinstance(module, Linear): + handle = module.register_forward_pre_hook(pre_hook, with_kwargs=True) + handles.append(handle) + + return handles + + @contextmanager + def _manage_pre_hooks(self, task_ids): + """Context manager to handle the lifecycle of pre hooks.""" + handles = self._register_pre_hooks(task_ids) + try: + yield + finally: + for handle in handles: + handle.remove() + + def forward(self, *args, task_ids=None, **kwargs): + with self._manage_pre_hooks(task_ids): + return self.model(*args, **kwargs) + + def generate(self, *args, task_ids=None, **kwargs): + with self._manage_pre_hooks(task_ids): + return self.model.generate(*args, **kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/router.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/router.py new file mode 100644 index 0000000000000000000000000000000000000000..0249398a9fc36d53bc0b4f022a8410514688a9f1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/poly/router.py @@ -0,0 +1,83 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
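`PolyModel.forward` and `generate` rely on temporarily registered forward pre-hooks to thread `task_ids` down to every adapted `Linear`, leaving the base model's call signature untouched otherwise. A hedged usage sketch, assuming the usual PEFT entry points (`get_peft_model`, `PolyConfig`) and an illustrative base checkpoint; the token ids and config values are placeholders:

```python
import torch
from transformers import AutoModelForSeq2SeqLM
from peft import PolyConfig, get_peft_model

base = AutoModelForSeq2SeqLM.from_pretrained("t5-small")  # illustrative choice
config = PolyConfig(task_type="SEQ_2_SEQ_LM", r=8, n_tasks=2, n_skills=4, n_splits=1)
model = get_peft_model(base, config)

input_ids = torch.tensor([[21603, 10, 100, 19, 1]])  # placeholder token ids
task_ids = torch.tensor([0])                         # one task id per example in the batch

# PolyModel.forward wraps the call in _manage_pre_hooks, so every poly Linear
# receives task_ids as a keyword argument for the duration of this call only.
outputs = model(input_ids=input_ids, labels=input_ids, task_ids=task_ids)
print(outputs.loss)
```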
+ +from abc import ABC, abstractmethod + +import torch +from torch import nn +from torch.distributions.relaxed_bernoulli import RelaxedBernoulli + +from .config import PolyConfig + + +EPS = 1e-12 + + +def get_router(poly_config: PolyConfig) -> nn.Module: + if poly_config.poly_type == "poly": + return PolyRouter(poly_config) + else: + raise ValueError( + f"Unsupported poly_type: {poly_config.poly_type}. " + "Currently, only the following types are supported: " + "`poly`." + ) + + +class Router(nn.Module, ABC): + @abstractmethod + def reset(self): + ... + + @abstractmethod + def forward(self, task_ids: torch.Tensor, input_ids: torch.Tensor): + ... + + +class PolyRouter(Router): + # It's a simplified implementation of + # https://github.com/microsoft/mttl/blob/ce4ca51dbca73be656feb9b3e5233633e3c5dec7/mttl/models/poly.py#L138 + def __init__(self, poly_config: PolyConfig): + super().__init__() + + self.poly_type = poly_config.poly_type + self.n_tasks = poly_config.n_tasks + self.n_skills = poly_config.n_skills + self.n_splits = poly_config.n_splits + + self.module_logits = nn.Parameter(torch.empty((self.n_tasks, self.n_splits * self.n_skills))) + + def reset(self): + torch.nn.init.uniform_(self.module_logits, -1e-3, 1e-3) + + def forward(self, task_ids: torch.Tensor, input_ids: torch.Tensor): + if task_ids is None: + raise ValueError("task_ids should not be None.") + if task_ids.max().item() >= self.n_tasks: + raise ValueError(f"Only {self.n_tasks} tasks available. Found task id = {task_ids.max().item()}") + + # move task id to input's device + task_ids = task_ids.to(self.module_logits.device) + + module_logits = self.module_logits[task_ids] + module_logits = module_logits.view(-1, self.n_splits, self.n_skills) + + if self.training: + module_logits = RelaxedBernoulli(temperature=1.0, logits=module_logits).rsample() + else: + module_logits = torch.sigmoid(module_logits) + + module_weights = module_logits / (module_logits.sum(dim=-1, keepdim=True) + EPS) + + return module_weights diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__init__.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..28f4bedbb43bcf2b22146d60e0e1f2fe7b19d9eb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
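The routing math in `PolyRouter.forward` above reduces to indexing a per-task logit table and normalizing over skills within each split. A standalone sketch with hypothetical sizes, following the evaluation path (sigmoid rather than a `RelaxedBernoulli` sample):

```python
import torch

EPS = 1e-12
n_tasks, n_splits, n_skills = 4, 2, 8

# The learned per-task logit table, initialized as in PolyRouter.reset().
module_logits = torch.empty(n_tasks, n_splits * n_skills).uniform_(-1e-3, 1e-3)

task_ids = torch.tensor([0, 3, 1])                                   # one id per example
logits = module_logits[task_ids].view(-1, n_splits, n_skills)

weights = torch.sigmoid(logits)                                       # eval path
weights = weights / (weights.sum(dim=-1, keepdim=True) + EPS)

print(weights.shape)    # torch.Size([3, 2, 8])
print(weights.sum(-1))  # ~1.0 per split: a soft mixture over skills
```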
+ +from .config import PrefixTuningConfig +from .model import PrefixEncoder + + +__all__ = ["PrefixTuningConfig", "PrefixEncoder"] diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b66d03bc21fb6312e0b753946e8b04a2055cb718 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/config.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..344d30c6087e9b28492967f7189674d867443a59 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/config.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/model.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d70cab6194694ef521a0813d787c293fd855d6fd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/__pycache__/model.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/config.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/config.py new file mode 100644 index 0000000000000000000000000000000000000000..39203ff7beb571f067331798051e085a49273211 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/config.py @@ -0,0 +1,41 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field + +from peft.config import PromptLearningConfig +from peft.utils import PeftType + + +@dataclass +class PrefixTuningConfig(PromptLearningConfig): + """ + This is the configuration class to store the configuration of a [`PrefixEncoder`]. + + Args: + encoder_hidden_size (`int`): The hidden size of the prompt encoder. + prefix_projection (`bool`): Whether to project the prefix embeddings. 
+ """ + + encoder_hidden_size: int = field( + default=None, + metadata={"help": "The hidden size of the encoder"}, + ) + prefix_projection: bool = field( + default=False, + metadata={"help": "Whether to project the prefix tokens"}, + ) + + def __post_init__(self): + self.peft_type = PeftType.PREFIX_TUNING diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/model.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/model.py new file mode 100644 index 0000000000000000000000000000000000000000..ffd51892a3cc074406791f6bc7d1b088d25148e3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/prefix_tuning/model.py @@ -0,0 +1,80 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Based on https://github.com/THUDM/P-tuning-v2/blob/main/model/prefix_encoder.py +# with some refactor +import torch + + +class PrefixEncoder(torch.nn.Module): + r""" + The `torch.nn` model to encode the prefix. + + Args: + config ([`PrefixTuningConfig`]): The configuration of the prefix encoder. + + Example: + + ```py + >>> from peft import PrefixEncoder, PrefixTuningConfig + + >>> config = PrefixTuningConfig( + ... peft_type="PREFIX_TUNING", + ... task_type="SEQ_2_SEQ_LM", + ... num_virtual_tokens=20, + ... token_dim=768, + ... num_transformer_submodules=1, + ... num_attention_heads=12, + ... num_layers=12, + ... encoder_hidden_size=768, + ... ) + >>> prefix_encoder = PrefixEncoder(config) + ``` + + **Attributes**: + - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder. + - **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if + `prefix_projection` is `True`. + - **prefix_projection** (`bool`) -- Whether to project the prefix embeddings. 
+ + Input shape: (`batch_size`, `num_virtual_tokens`) + + Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`) + """ + + def __init__(self, config): + super().__init__() + self.prefix_projection = config.prefix_projection + token_dim = config.token_dim + num_layers = config.num_layers + encoder_hidden_size = config.encoder_hidden_size + num_virtual_tokens = config.num_virtual_tokens + if self.prefix_projection and not config.inference_mode: + # Use a two-layer MLP to encode the prefix + self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim) + self.transform = torch.nn.Sequential( + torch.nn.Linear(token_dim, encoder_hidden_size), + torch.nn.Tanh(), + torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim), + ) + else: + self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim) + + def forward(self, prefix: torch.Tensor): + if self.prefix_projection: + prefix_tokens = self.embedding(prefix) + past_key_values = self.transform(prefix_tokens) + else: + past_key_values = self.embedding(prefix) + return past_key_values diff --git a/env-llmeval/lib/python3.10/site-packages/peft/tuners/tuners_utils.py b/env-llmeval/lib/python3.10/site-packages/peft/tuners/tuners_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..69b26a2bb1be9a2f7ee01e24503ee59ea7d90d29 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/peft/tuners/tuners_utils.py @@ -0,0 +1,767 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import copy +import logging +import re +import warnings +from abc import ABC, abstractmethod +from contextlib import contextmanager +from typing import Any, Optional, Union + +import torch +from accelerate.hooks import AlignDevicesHook +from accelerate.utils import named_module_tensors, offload_state_dict +from torch import nn +from transformers import PreTrainedModel +from transformers.pytorch_utils import Conv1D + +from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND + +from ..config import PeftConfig +from ..utils import ModulesToSaveWrapper, _get_submodules + + +logger = logging.getLogger(__name__) + + +@contextmanager +def onload_layer(layer): + r""" + A utility for modifying a module containing one or more tuners and a base layer, any of which are offloaded to the + CPU or disk. Moves a module's sub-modules to the execution device before some action is performed, after that the + base layer state dictionary is re-assigned (if that layer was offloaded to the disk) and finally the parameters are + offloaded. + + If the module has no offloaded sub-modules, this function does nothing. 
+ + Args: + layer ('torch.nn.Module'): + layer with tuners to be merged + """ + + offloaded_modules = [] + for name, module in layer.named_modules(): + if name in ["", "base_layer"]: + continue + if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) and module._hf_hook.offload: + module._hf_hook.pre_forward(module) + offloaded_modules.append(module) + + base_layer_offload = False + if hasattr(layer, "base_layer") and ( + hasattr(layer.base_layer, "_hf_hook") + and isinstance(layer.base_layer._hf_hook, AlignDevicesHook) + and layer.base_layer._hf_hook.offload + ): + if torch.device("meta") in layer.base_layer._hf_hook.original_devices.values(): + # retrieve the name of the original disk-offload directory + offload_folder = layer.base_layer._hf_hook.weights_map.dataset.save_folder + layer.base_layer._hf_hook.pre_forward(layer.base_layer) + base_layer_offload = True + + yield + + for module in offloaded_modules: + module._hf_hook.post_forward(module, torch.tensor([])) + + if base_layer_offload: + # re-make weights map (must be on cpu to send params to the disk via memmap if disk offload) + layer.base_layer._hf_hook.weights_map = { + name: param.to("cpu") for name, param in named_module_tensors(layer.base_layer) + } + # offload weights map to disk if original device is the disk + if torch.device("meta") in layer.base_layer._hf_hook.original_devices.values(): + # rewrite directory with merged weights + offload_state_dict(offload_folder, layer.base_layer._hf_hook.weights_map) + layer.base_layer._hf_hook.post_forward(layer.base_layer, torch.tensor([])) + + +class BaseTuner(nn.Module, ABC): + r""" + A base tuner model that provides the common methods and attributes for all tuners that are injectable into a + torch.nn.Module + + For adding a new Tuner class, one needs to overwrite the following methods: + + - **_prepare_adapter_config**: + A private method to eventually prepare the adapter config, for example in case the field `target_modules` is + missing. + - **_create_and_replace**: + A private method to create and replace the target module with the adapter module. + - **_check_target_module_exists**: + A private helper method to check if the passed module's key name matches any of the target modules in the + adapter_config. + + The easiest is to check what is done in the `peft.tuners.lora.LoraModel` class. + + Attributes: + model (`torch.nn.Module`): + The model to which the adapter tuner layers will be attached. + forward (`Callable`): + The forward method of the model. + peft_config (`Union[`PeftConfig`, dict[str, PeftConfig]]`): + The adapter configuration object, it should be a dictionary of `str` to `PeftConfig` objects. One can also + pass a PeftConfig object and a new adapter will be created with the default name `adapter` or create a new + dictionary with a key `adapter_name` and a value of that peft config. + config (`dict[str, Any]`): + The model configuration object, it should be a dictionary of `str` to `Any` objects. + targeted_module_names (`list[str]`): + The list of module names that were actually adapted. Can be useful to inspect if you want to quickly + double-check that the `config.target_modules` where specified correctly. 
+ """ + + def __init__(self, model, peft_config: Union[PeftConfig, dict[str, PeftConfig]], adapter_name: str) -> None: + super().__init__() + + self.model = model + self.targeted_module_names: list[str] = [] + + # For advanced developers, if you want to attach multiple adapters to your + # model, just add a `peft_config` dict attribute to your model. + if not hasattr(self, "peft_config"): + self.peft_config = {adapter_name: peft_config} if isinstance(peft_config, PeftConfig) else peft_config + else: + logger.info( + "Already found a `peft_config` attribute in the model. This will lead to having multiple adapters" + " in the model. Make sure to know what you are doing!" + ) + if isinstance(peft_config, PeftConfig): + self.peft_config[adapter_name] = peft_config + else: + # user is adding a dict of PeftConfigs + self.peft_config.update(peft_config) + + self.active_adapter = adapter_name + self.inject_adapter(self.model, adapter_name) + + # Copy the peft_config in the injected model. + self.model.peft_config = self.peft_config + + @property + def active_adapters(self) -> list[str]: + if isinstance(self.active_adapter, str): + return [self.active_adapter] + # is already a list of str + return self.active_adapter + + def forward(self, *args: Any, **kwargs: Any): + return self.model.forward(*args, **kwargs) + + @abstractmethod + def _prepare_adapter_config(self, peft_config: PeftConfig, model_config: dict) -> PeftConfig: + r""" + A private method to eventually prepare the adapter config. For transformers based models, if + `peft_config.target_modules` is None, we can automatically infer the target modules from the + `TRANSFORMERS_MODELS_TO_XXX_TARGET_MODULES_MAPPING`. This method can be further refactored in the future to + automatically infer it for all tuner models. + + Check out `peft.tuner.lora.LoraModel._prepare_adapter_config` for an example. + + Args: + peft_config (`PeftConfig`): + The adapter config. + model_config (`dict`): + The transformers model config, that config should contain the `model_type` key. + """ + ... + + def _prepare_model(self, peft_config: PeftConfig, model: nn.Module): + r""" + A private method to modify the model structure before adapter is applied. + + See `peft.tuner.lora.LoraModel._prepare_model` for an example. + + Args: + peft_config (`PeftConfig`): + The prepared adapter config. + model (`nn.Module`): + The model that is going to be adapted. + """ + pass + + @abstractmethod + def _check_target_module_exists(peft_config: PeftConfig, key: str) -> bool: + r""" + A helper private method to check if the passed module's key name matches any of the target modules in the + `peft_config.target_modules` list. If it does, return `True`, else return `False`. + + Args: + peft_config (`PeftConfig`): + The adapter config. + key (`str`): + The module's key name. + """ + ... + + @abstractmethod + def _create_and_replace( + self, + peft_config: PeftConfig, + adapter_name: str, + target: nn.Module, + target_name: str, + parent: nn.Module, + current_key: str, + ) -> None: + r""" + Inplace replacement of the target module with the adapter layer. This method needs to be overridden by all the + tuner classes. + + Check `peft.tuners.lora.LoraModel._create_and_replace` for an example. + + Args: + peft_config (`PeftConfig`): + The adapter config. + adapter_name (`str`): + The adapter name. + target (`nn.Module`): + The target module. + target_name (`str`): + The target module's name. + parent (`nn.Module`): + The parent module. 
+ current_key (`str`): + The key of the current target being adapted. + """ + ... + + @abstractmethod + def _mark_only_adapters_as_trainable(self, model: nn.Module): + r""" + A helper method to mark only the adapter layers as trainable (i.e. module.requires_grad = False) This needs to + be overridden for all tuner classes to match the correct key names. + + Check `peft.tuners.lora.LoraModel._mark_only_adapters_as_trainable` for an example. + """ + ... + + def _check_new_adapter_config(self, config: PeftConfig) -> None: + """ + A helper method to check the config when a new adapter is being added. + + Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. + + """ + pass + + def _check_merge_allowed(self): + """Helper method to check whether the adapter can be merged. + + Raise a ValueError if it is not possible to merge the adapter with the given configuration. + """ + pass + + def inject_adapter(self, model: nn.Module, adapter_name: str): + r""" + Creates adapter layers and replaces the target modules with the adapter layers. This method is called under the + hood by `peft.mapping.get_peft_model` if a non-prompt tuning adapter class is passed. + + The corresponding PEFT config is directly retrieved from the `peft_config` attribute of the BaseTuner class. + + Args: + model (`nn.Module`): + The model to be tuned. + adapter_name (`str`): + The adapter name. + """ + peft_config = self.peft_config[adapter_name] + # Note: If possible, all checks should be performed *at the start of this method*. + # This way, we can raise early if something goes wrong, without leaving the model + # in a bad (half-initialized) state. + self._check_new_adapter_config(peft_config) + + _check_for_modules_to_save = getattr(peft_config, "modules_to_save", None) is not None + _has_modules_to_save = False + + model_config = getattr(model, "config", {"model_type": "custom"}) + if hasattr(model_config, "to_dict"): + model_config = model_config.to_dict() + + peft_config = self._prepare_adapter_config(peft_config, model_config) + + self._prepare_model(peft_config, model) + is_target_modules_in_base_model = False + key_list = [key for key, _ in model.named_modules()] + + # update peft_config.target_modules if required + peft_config = _maybe_include_all_linear_layers(peft_config, model) + + for key in key_list: + # Check for modules_to_save in case + if _check_for_modules_to_save and any( + key.endswith(f"{module_to_save}") for module_to_save in peft_config.modules_to_save + ): + # Optionally set the modules to save + parent, target, target_name = _get_submodules(model, key) + + if not isinstance(target, ModulesToSaveWrapper): + new_module = ModulesToSaveWrapper(target, adapter_name) + setattr(parent, target_name, new_module) + else: + target.update(adapter_name) + + _has_modules_to_save = True + continue + + if not self._check_target_module_exists(peft_config, key): + continue + + self.targeted_module_names.append(key) + is_target_modules_in_base_model = True + parent, target, target_name = _get_submodules(model, key) + self._create_and_replace(peft_config, adapter_name, target, target_name, parent, current_key=key) + + if not is_target_modules_in_base_model: + raise ValueError( + f"Target modules {peft_config.target_modules} not found in the base model. " + f"Please check the target modules and try again." 
+ ) + + self._mark_only_adapters_as_trainable(model) + + if self.peft_config[adapter_name].inference_mode: + for n, p in model.named_parameters(): + if adapter_name in n: + p.requires_grad = False + + if _has_modules_to_save: + if not hasattr(model, "modules_to_save"): + model.modules_to_save = set(peft_config.modules_to_save) + else: + model.modules_to_save.update(set(peft_config.modules_to_save)) + + def merge_adapter(self, adapter_names: Optional[list[str]] = None) -> None: + """ + This method merges the adapter layers into the base model. + + Merging adapters can lead to a speed up of the forward pass. A copy of the adapter weights is still kept in + memory, which is required to unmerge the adapters. In order to merge the adapter weights without keeping them + in memory, please call `merge_and_unload`. + + Args: + safe_merge (`bool`, *optional*): + If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs + before merging the weights. This is useful if you want to check if the merge operation will produce + NaNs. Defaults to `False`. + adapter_names (`list[str]`, *optional*): + The list of adapter names that should be merged. If `None`, all active adapters will be merged. + Defaults to `None`. + """ + self._check_merge_allowed() + for module in self.model.modules(): + if isinstance(module, BaseTunerLayer): + with onload_layer(module): + module.merge(adapter_names=adapter_names) + + def unmerge_adapter(self): + """ + This method unmerges all merged adapter layers from the base model. + """ + for module in self.model.modules(): + if isinstance(module, BaseTunerLayer): + with onload_layer(module): + module.unmerge() + + def _unloading_checks(self, adapter_names: Optional[list[str]]): + adapters_to_consider = adapter_names or self.active_adapters + is_modules_to_save_available = any( + self.peft_config[adapter].modules_to_save for adapter in adapters_to_consider + ) + if is_modules_to_save_available and len(adapters_to_consider) > 1: + raise ValueError("Cannot unload multiple adapters that specify `modules_to_save`.") + + +class BaseTunerLayer(ABC): + r""" + A tuner layer mixin that provides the common methods and attributes for all tuners. + + Args: + is_pluggable (`bool`, *optional*): + Whether the adapter layer can be plugged to any pytorch module + active_adapters (Union[List[`str`], `str`], *optional*): + The name of the active adapter. + """ + + active_adapter = None + + # All names of layers that may contain adapter (trainable) weights + adapter_layer_names: tuple[str] = () + # All names of other parameters that may contain adapter-related parameters + other_param_names: tuple[str] = () + + # indicates whether all adapters should be disabled + _disable_adapters: bool = False + + # the currently active adapter(s) + _active_adapter: str | list[str] = "default" + + # List all merged adapters + merged_adapters: list[str] = [] + + def get_base_layer(self) -> nn.Module: + """ + (Recursively) get the base_layer. + + This is necessary for the case that the tuner layer wraps another tuner layer. + + """ + base_layer = self + while hasattr(base_layer, "base_layer"): + base_layer = base_layer.base_layer + return base_layer + + @property + def weight(self) -> torch.Tensor: + # This is required for some transformers code, e.g. for T5, weight is accessed as: + # self.wo.weight + # where "wo" is the adapter layer. 
+ # https://github.com/huggingface/transformers/blob/78f6ed6c70b29c1560780e3869a7ad4c6b3d2710/src/transformers + # /models/t5/modeling_t5.py#L292 + base_layer = self.get_base_layer() + if hasattr(base_layer, "qweight"): + # QuantLinear + weight = base_layer.qweight + else: + # Other layers + weight = base_layer.weight + return weight + + @property + def bias(self) -> torch.Tensor: + base_layer = self.get_base_layer() + return base_layer.bias + + def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: + raise NotImplementedError + + def unmerge(self) -> None: + raise NotImplementedError + + @property + def merged(self) -> bool: + return bool(self.merged_adapters) + + @property + def disable_adapters(self) -> bool: + # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method + return self._disable_adapters + + @property + def active_adapter(self) -> str: + # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method + return self._active_adapter + + @property + def active_adapters(self): + if isinstance(self.active_adapter, str): + return [self.active_adapter] + # is already a list of str + return self.active_adapter + + def enable_adapters(self, enabled: bool) -> None: + """Toggle the enabling and disabling of adapters + + Takes care of setting the requires_grad flag for the adapter weights. + + Args: + enabled (bool): True to enable adapters, False to disable adapters + """ + if enabled: + self.set_adapter(self.active_adapters) + self._disable_adapters = False + else: + # disable grads on all adapter layers + for layer_name in self.adapter_layer_names: + layer = getattr(self, layer_name) + layer.requires_grad_(False) + self._disable_adapters = True + + def set_adapter(self, adapter_names: str | list[str]) -> None: + """Set the active adapter(s). + + Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is + not desired, use the following code. + + ```py + >>> for name, param in model_peft.named_parameters(): + ... if ...: # some check on name (ex. if 'lora' in name) + ... param.requires_grad = False + ``` + + Args: + adapter_name (`str` or `List[str]`): Name of the adapter(s) to be activated. + """ + if isinstance(adapter_names, str): + adapter_names = [adapter_names] + + # Deactivate grads on the inactive adapter and activate grads on the active adapter + for layer_name in self.adapter_layer_names: + module_dict = getattr(self, layer_name) + for key, layer in module_dict.items(): + if key in adapter_names: + # Note: It is possible that not a single layer is called with requires_grad_(True) here. This may + # happen if a completely different adapter layer is being activated. 
+ layer.requires_grad_(True) + else: + layer.requires_grad_(False) + + self._active_adapter = adapter_names + + def _all_available_adapter_names(self) -> list[str]: + """Return a sorted list of all available adapter names""" + adapter_names = set() + for name in self.adapter_layer_names + self.other_param_names: + # we check each possible attribute and if it's a dict or ModuleDict, we assume that the keys are the adapter + # names + attr = getattr(self, name) + if hasattr(attr, "keys"): + adapter_names.update(attr.keys()) + return sorted(adapter_names) + + def delete_adapter(self, adapter_name: str) -> None: + """ + Delete an adapter from the layer + + This should be called on all adapter layers, or else we will get an inconsistent state. + + This method will also set a new active adapter if the deleted adapter was an active adapter. It is important + that the new adapter is chosen in a deterministic way, so that the same adapter is chosen on all layers. + + Args: + adapter_name (`str`): The name of the adapter to delete + + """ + for attr in self.adapter_layer_names + self.other_param_names: + if adapter_name in getattr(self, attr): + del getattr(self, attr)[adapter_name] + + if adapter_name in self.active_adapters: + # choose a new active adapter + active_adapters = self.active_adapters[:] + active_adapters.remove(adapter_name) + if active_adapters: + self.set_adapter(active_adapters) + else: + # no active adapters left, set a new default adapter + # here we get the list of all adapters existing adapter names and choose the first one + remaining_adapters = self._all_available_adapter_names() + if not remaining_adapters: + self.set_adapter([]) + else: + new_active_adapter = remaining_adapters[0] + warnings.warn( + f"Adapter {adapter_name} was active which is now deleted. Setting active adapter to " + f"{new_active_adapter}." + ) + self.set_adapter(remaining_adapters[0]) + + +def check_target_module_exists(config, key: str) -> bool | re.Match[str] | None: + """A helper method to check if the passed module's key name matches any of the target modules in the adapter_config. 
+ + Args: + config (`LoraConfig` | `LycorisConfig`): A config to match target modules from + key (`str`): A key to search any matches in config + + Returns: + `bool` | `re.Match[str]` | `None`: True of match object if key matches any target modules from config, False or + None if no match found + """ + if isinstance(config.target_modules, str): + target_module_found = re.fullmatch(config.target_modules, key) + elif key in config.target_modules: + # this module is specified directly in target_modules + target_module_found = True + else: + target_module_found = any(key.endswith(f".{target_key}") for target_key in config.target_modules) + + layer_indexes = getattr(config, "layers_to_transform", None) + layers_pattern = getattr(config, "layers_pattern", None) + + is_using_layer_indexes = layer_indexes is not None and ( + len(layer_indexes) != 0 if isinstance(layer_indexes, list) else True + ) + if is_using_layer_indexes and target_module_found: + layer_index = None + # TODO: It's still unclear how empty layers_pattern (None, [], or "") should behave + # For now, empty layers_pattern means any layer pattern is ok + if layers_pattern is None or len(layers_pattern) == 0: + layer_index = re.match(r".*\.[^.]*\.(\d+)\.", key) + else: + layers_pattern = [layers_pattern] if isinstance(layers_pattern, str) else layers_pattern + for pattern in layers_pattern: + layer_index = re.match(rf".*\.{pattern}\.(\d+)\.", key) + if layer_index is not None: + break + + if layer_index is None: + target_module_found = False + else: + layer_index = int(layer_index.group(1)) + if isinstance(layer_indexes, int): + target_module_found = layer_index == layer_indexes + else: + target_module_found = layer_index in layer_indexes + + return target_module_found + + +def inspect_matched_modules(tuner: BaseTuner, adapter_name: str = "default") -> dict: + """ + A helper function to inspect the set of matched and unmatched modules for a PEFT model and the given adapter. + """ + config = tuner.peft_config[adapter_name] + key_list = [key for key, _ in tuner.model.named_modules()] + module_dict = {"matched": [], "unmatched": []} + for key in key_list: + if tuner._check_target_module_exists(config, key): + module_dict["matched"].append(key) + else: + module_dict["unmatched"].append(key) + return module_dict + + +def _maybe_include_all_linear_layers(peft_config: PeftConfig, model: nn.Module) -> PeftConfig: + """ + Helper function to update `target_modules` to all linear/Conv1D layers if provided as 'all-linear'. Adapted from + the QLoRA repository: https://github.com/artidoro/qlora/blob/main/qlora.py + """ + + # if `target_modules` is a string, convert to lower case and check if it matches "all-linear" + if not ( + isinstance(peft_config.target_modules, str) + and peft_config.target_modules.lower() == INCLUDE_LINEAR_LAYERS_SHORTHAND + ): + return peft_config + + if not isinstance(model, PreTrainedModel): + raise ValueError( + f"Only instances of PreTrainedModel support `target_modules={INCLUDE_LINEAR_LAYERS_SHORTHAND!r}`" + ) + + linear_classes = (torch.nn.Linear, Conv1D) + + linear_module_names = set() + for name, module in model.named_modules(): + # match with all linear classes. 
+ if isinstance(module, linear_classes): + names = name.rsplit(".", 1)[-1] # get the base name + linear_module_names.add(names) + + # ignore the last classification head for text generation models + output_emb = model.get_output_embeddings() + if output_emb is not None: + last_module_name = [name for name, module in model.named_modules() if module is output_emb][0] + linear_module_names -= {last_module_name} + peft_config.target_modules = linear_module_names + return peft_config + + +def check_adapters_to_merge(module: BaseTunerLayer, adapter_names: Optional[list[str]] = None) -> list[str]: + """ + Helper function to check which adapters should be merged. + + Only return those adapters that are not already merged. Give a warning if some or all of the adapters are already + merged. + + """ + if adapter_names is None: + adapter_names = module.active_adapters + + if module.merged: + merged_adapters = set(module.merged_adapters) + adapter_names = [name for name in adapter_names if name not in merged_adapters] + + if adapter_names: + warnings.warn( + f"Already following adapters were merged {','.join(module.merged_adapters)}. " + f"You are now additionally merging {','.join(adapter_names)}." + ) + else: + warnings.warn("All adapters are already merged, nothing to do.") + + return adapter_names + + +def clone_module(module: nn.Module, share_weights=False): + """Clone a module in a pytorch model. + + Clones a module of a model, optionally sharing all the parameters between the original and the clone. Simplifies + reusing a module when manipulating the architecture of a model. + """ + clone = copy.deepcopy(module) + + def _share_weights(src: nn.Module, dst: nn.Module): + for name, param in src.named_parameters(recurse=False): + dst.register_parameter(name, param) + + if share_weights: + for name, submodule in module.named_modules(): + _share_weights(submodule, clone.get_submodule(name)) + + return clone + + +def replicate_layers(model: nn.Module, layer_map: list[tuple[int, int]]): + """Replicate layers in a transfomer model with weight sharing. + + This function looks for a module list attribute at model[(.model)*].layers and replicates the layers in the module + list according to the layer map. For example the map `[[0, 4], [2, 5]]` will take the set of layers `[0, 1, 2, 3, + 4]` and replace them with a module list containing `[0, 1, 2, 3, 2, 3, 4]`. + """ + while hasattr(model, "model"): + model = model.model + # Some variants of the bert model nest the main model under the bert attribute. + if hasattr(model, "bert"): + model = model.bert + + model_type = None + layers: nn.ModuleList = None + if hasattr(model, "layers"): + model_type = "llama" + layers = model.layers + elif hasattr(model, "encoder") and hasattr(model.encoder, "layer"): + model_type = "bert" + layers = model.encoder.layer + elif hasattr(model, "h"): + model_type = "falcon" + layers = model.h + if not model_type or not isinstance(layers, nn.ModuleList): + raise ValueError( + "Could not locate the layers attribute in the model. " + "Expected Llama, Bert or Falcon compatible architectures." + ) + + new_layers = [] + for start, end in layer_map: + for i in range(start, end): + current_idx = len(new_layers) + new_layers.append(clone_module(layers[i], share_weights=True)) + # This is a hack needed to work around the layer_idx introduced in HF transformers. 
+ for submodule in new_layers[-1].modules(): + if hasattr(submodule, "layer_idx"): + submodule.layer_idx = current_idx + layers = nn.ModuleList(new_layers) + if model_type == "llama": + model.layers = layers + elif model_type == "bert": + model.encoder.layer = layers + elif model_type == "falcon": + model.h = layers + else: + raise ValueError("Unexpected model type, need to handle post-processing of layers.") + if hasattr(model.config, "num_hidden_layers"): # Common to Llama, Bert, Falcon. + model.config.num_hidden_layers = len(new_layers) diff --git a/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..da03c107ad92ade6a1ee86dc766b9892bbd4f85e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/METADATA @@ -0,0 +1,220 @@ +Metadata-Version: 2.1 +Name: pybind11 +Version: 2.12.0 +Summary: Seamless operability between C++11 and Python +Home-page: https://github.com/pybind/pybind11 +Download-URL: https://github.com/pybind/pybind11/tarball/v2.12.0 +Author: Wenzel Jakob +Author-email: wenzel.jakob@epfl.ch +License: BSD +Project-URL: Documentation, https://pybind11.readthedocs.io/ +Project-URL: Bug Tracker, https://github.com/pybind/pybind11/issues +Project-URL: Discussions, https://github.com/pybind/pybind11/discussions +Project-URL: Changelog, https://pybind11.readthedocs.io/en/latest/changelog.html +Project-URL: Chat, https://gitter.im/pybind/Lobby +Keywords: C++11,Python bindings +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Utilities +Classifier: Programming Language :: C++ +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: C++ +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=3.6 +Description-Content-Type: text/x-rst +License-File: LICENSE +Provides-Extra: global +Requires-Dist: pybind11-global ==2.12.0 ; extra == 'global' + +.. 
figure:: https://github.com/pybind/pybind11/raw/master/docs/pybind11-logo.png + :alt: pybind11 logo + +**pybind11 — Seamless operability between C++11 and Python** + +|Latest Documentation Status| |Stable Documentation Status| |Gitter chat| |GitHub Discussions| |CI| |Build status| + +|Repology| |PyPI package| |Conda-forge| |Python Versions| + +`Setuptools example `_ +• `Scikit-build example `_ +• `CMake example `_ + +.. start + + +**pybind11** is a lightweight header-only library that exposes C++ types +in Python and vice versa, mainly to create Python bindings of existing +C++ code. Its goals and syntax are similar to the excellent +`Boost.Python `_ +library by David Abrahams: to minimize boilerplate code in traditional +extension modules by inferring type information using compile-time +introspection. + +The main issue with Boost.Python—and the reason for creating such a +similar project—is Boost. Boost is an enormously large and complex suite +of utility libraries that works with almost every C++ compiler in +existence. This compatibility has its cost: arcane template tricks and +workarounds are necessary to support the oldest and buggiest of compiler +specimens. Now that C++11-compatible compilers are widely available, +this heavy machinery has become an excessively large and unnecessary +dependency. + +Think of this library as a tiny self-contained version of Boost.Python +with everything stripped away that isn't relevant for binding +generation. Without comments, the core header files only require ~4K +lines of code and depend on Python (3.6+, or PyPy) and the C++ +standard library. This compact implementation was possible thanks to +some C++11 language features (specifically: tuples, lambda functions and +variadic templates). Since its creation, this library has grown beyond +Boost.Python in many ways, leading to dramatically simpler binding code in many +common situations. + +Tutorial and reference documentation is provided at +`pybind11.readthedocs.io `_. +A PDF version of the manual is available +`here `_. +And the source code is always available at +`github.com/pybind/pybind11 `_. + + +Core features +------------- + + +pybind11 can map the following core C++ features to Python: + +- Functions accepting and returning custom data structures per value, + reference, or pointer +- Instance methods and static methods +- Overloaded functions +- Instance attributes and static attributes +- Arbitrary exception types +- Enumerations +- Callbacks +- Iterators and ranges +- Custom operators +- Single and multiple inheritance +- STL data structures +- Smart pointers with reference counting like ``std::shared_ptr`` +- Internal references with correct reference counting +- C++ classes with virtual (and pure virtual) methods can be extended + in Python +- Integrated NumPy support (NumPy 2 requires pybind11 2.12+) + +Goodies +------- + +In addition to the core functionality, pybind11 provides some extra +goodies: + +- Python 3.6+, and PyPy3 7.3 are supported with an implementation-agnostic + interface (pybind11 2.9 was the last version to support Python 2 and 3.5). + +- It is possible to bind C++11 lambda functions with captured + variables. The lambda capture data is stored inside the resulting + Python function object. + +- pybind11 uses C++11 move constructors and move assignment operators + whenever possible to efficiently transfer custom data types. + +- It's easy to expose the internal storage of custom data types through + Pythons' buffer protocols. This is handy e.g. 
for fast conversion + between C++ matrix classes like Eigen and NumPy without expensive + copy operations. + +- pybind11 can automatically vectorize functions so that they are + transparently applied to all entries of one or more NumPy array + arguments. + +- Python's slice-based access and assignment operations can be + supported with just a few lines of code. + +- Everything is contained in just a few header files; there is no need + to link against any additional libraries. + +- Binaries are generally smaller by a factor of at least 2 compared to + equivalent bindings generated by Boost.Python. A recent pybind11 + conversion of PyRosetta, an enormous Boost.Python binding project, + `reported `_ + a binary size reduction of **5.4x** and compile time reduction by + **5.8x**. + +- Function signatures are precomputed at compile time (using + ``constexpr``), leading to smaller binaries. + +- With little extra effort, C++ types can be pickled and unpickled + similar to regular Python objects. + +Supported compilers +------------------- + +1. Clang/LLVM 3.3 or newer (for Apple Xcode's clang, this is 5.0.0 or + newer) +2. GCC 4.8 or newer +3. Microsoft Visual Studio 2017 or newer +4. Intel classic C++ compiler 18 or newer (ICC 20.2 tested in CI) +5. Cygwin/GCC (previously tested on 2.5.1) +6. NVCC (CUDA 11.0 tested in CI) +7. NVIDIA PGI (20.9 tested in CI) + +About +----- + +This project was created by `Wenzel +Jakob `_. Significant features and/or +improvements to the code were contributed by Jonas Adler, Lori A. Burns, +Sylvain Corlay, Eric Cousineau, Aaron Gokaslan, Ralf Grosse-Kunstleve, Trent Houliston, Axel +Huebl, @hulucc, Yannick Jadoul, Sergey Lyskov, Johan Mabille, Tomasz Miąsko, +Dean Moldovan, Ben Pritchard, Jason Rhinelander, Boris Schäling, Pim +Schellart, Henry Schreiner, Ivan Smirnov, Boris Staletic, and Patrick Stewart. + +We thank Google for a generous financial contribution to the continuous +integration infrastructure used by this project. + + +Contributing +~~~~~~~~~~~~ + +See the `contributing +guide `_ +for information on building and contributing to pybind11. + +License +~~~~~~~ + +pybind11 is provided under a BSD-style license that can be found in the +`LICENSE `_ +file. By using, distributing, or contributing to this project, you agree +to the terms and conditions of this license. + +.. |Latest Documentation Status| image:: https://readthedocs.org/projects/pybind11/badge?version=latest + :target: http://pybind11.readthedocs.org/en/latest +.. |Stable Documentation Status| image:: https://img.shields.io/badge/docs-stable-blue.svg + :target: http://pybind11.readthedocs.org/en/stable +.. |Gitter chat| image:: https://img.shields.io/gitter/room/gitterHQ/gitter.svg + :target: https://gitter.im/pybind/Lobby +.. |CI| image:: https://github.com/pybind/pybind11/workflows/CI/badge.svg + :target: https://github.com/pybind/pybind11/actions +.. |Build status| image:: https://ci.appveyor.com/api/projects/status/riaj54pn4h08xy40?svg=true + :target: https://ci.appveyor.com/project/wjakob/pybind11 +.. |PyPI package| image:: https://img.shields.io/pypi/v/pybind11.svg + :target: https://pypi.org/project/pybind11/ +.. |Conda-forge| image:: https://img.shields.io/conda/vn/conda-forge/pybind11.svg + :target: https://github.com/conda-forge/pybind11-feedstock +.. |Repology| image:: https://repology.org/badge/latest-versions/python:pybind11.svg + :target: https://repology.org/project/python:pybind11/versions +.. 
|Python Versions| image:: https://img.shields.io/pypi/pyversions/pybind11.svg + :target: https://pypi.org/project/pybind11/ +.. |GitHub Discussions| image:: https://img.shields.io/static/v1?label=Discussions&message=Ask&color=blue&logo=github + :target: https://github.com/pybind/pybind11/discussions diff --git a/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..f425a727285b18f998f80ef3bf33bef37d797ce1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/RECORD @@ -0,0 +1,60 @@ +../../../bin/pybind11-config,sha256=lRTlOOGrVDn1dIspMasn48Ye61llM5pDIYo6dGaSyuw,242 +pybind11-2.12.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pybind11-2.12.0.dist-info/LICENSE,sha256=g5ZbhDuY9nDTqFvQQe1LNyyOxQ17SlmVqDrGl7pnXcs,1684 +pybind11-2.12.0.dist-info/METADATA,sha256=MeLhLdk3U_NbRXD34xF1zQZbkETbU5TPjOB7QROSUh0,9514 +pybind11-2.12.0.dist-info/RECORD,, +pybind11-2.12.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 +pybind11-2.12.0.dist-info/entry_points.txt,sha256=Q_kAwEJBDz8wHD0V50hY3AvchDk3Pfyeox2YHrAcWZ0,105 +pybind11-2.12.0.dist-info/top_level.txt,sha256=d1mqwSpUlmlZhXDQ9Y57eNlXc3dVDM1toKmfC1kJbvU,9 +pybind11/__init__.py,sha256=4-WhH9Ac6P8D_FqnflpOch8XlaZrkXbe95FspbMvwu0,429 +pybind11/__main__.py,sha256=ATLlhFlhBxDXxxXEfnf2F1RcRhuWN1ziMwbmrGuhif0,1544 +pybind11/__pycache__/__init__.cpython-310.pyc,, +pybind11/__pycache__/__main__.cpython-310.pyc,, +pybind11/__pycache__/_version.cpython-310.pyc,, +pybind11/__pycache__/commands.cpython-310.pyc,, +pybind11/__pycache__/setup_helpers.cpython-310.pyc,, +pybind11/_version.py,sha256=v3xaYVbYgxyXxYRPjYCaanioPokR2maNxXiY2U439Nk,228 +pybind11/commands.py,sha256=iJBFWhXHqlC_JMAgMjMIn6H_hizvorS572upGU1roGA,1207 +pybind11/include/pybind11/attr.h,sha256=QPjH7BfhL8QFwHHkrDak8gNOLMlb1itAO5fobjdoLp8,24334 +pybind11/include/pybind11/buffer_info.h,sha256=m_VE_hfWPKl-KgUZy9aVQdPg1xtoaDaBgkurIX7aGig,7750 +pybind11/include/pybind11/cast.h,sha256=j5UvHFBOE3o-8kB2UcBNumV-dv9pLWn2Gf1uh-fz7pY,71139 +pybind11/include/pybind11/chrono.h,sha256=A23naeloqn-1NKVAABOsJtHU9Vz8lfvrAICuLk-7qBM,8458 +pybind11/include/pybind11/common.h,sha256=ATg9Bt1pwF8qnNuI086fprM4CUTdrZdk_g2HXE1Sf6A,120 +pybind11/include/pybind11/complex.h,sha256=AaDZ-rEmK4tFaue-K9P5y3TxxnaQF6JwZ_6LAzkdLQI,2096 +pybind11/include/pybind11/detail/class.h,sha256=J3yQxEpB9cg68riM3WnR5W9mzxraCJxmgQyHvONPPSM,28563 +pybind11/include/pybind11/detail/common.h,sha256=ww8qY6xFAjDhwTN8R3z-f4KI9itmVRRwG4H5vxYEfk0,53771 +pybind11/include/pybind11/detail/descr.h,sha256=k1nvytx1zhMh8ERL2xS8Unbxcio5fa7eZIqnTsZ0orE,5962 +pybind11/include/pybind11/detail/init.h,sha256=xJ_nyNwZh1j_a0d8K9fCloZ0-MIfh4X_vHja4CFwVF0,17858 +pybind11/include/pybind11/detail/internals.h,sha256=j0CmJRrMvSLOHFxn5yeq5lqTqBcjSoA0kT0v_VvgmgM,29033 +pybind11/include/pybind11/detail/type_caster_base.h,sha256=9AmJNWNFnbAmlty11TZEj4dcIDBItN_5EbHz3beDenE,49892 +pybind11/include/pybind11/detail/typeid.h,sha256=jw5pr9m72vkDsloT8vxl9wj17VJGcEdXDyziBlt89Js,1625 +pybind11/include/pybind11/eigen.h,sha256=-HmSA1kgwCQ-GHUt7PHtTEc-vxqw9xARpF8PHWJip28,316 +pybind11/include/pybind11/eigen/common.h,sha256=dIeqmK7IzW5K4k2larPnA1A863rDp38U9YbNIwiIyYk,378 +pybind11/include/pybind11/eigen/matrix.h,sha256=CS8NpkZI8Y8ty0NFQC7GZcUlM5o8_1Abv1GbGltsbkA,32135 
+pybind11/include/pybind11/eigen/tensor.h,sha256=U7wM4vClaDAwWCKAqwmsCPiA2B3rAszIT3tV_yQusUw,18490 +pybind11/include/pybind11/embed.h,sha256=xD-oEg56PadTig9a8FOcMgbsL64jaie7hwG3y6DWPEI,13459 +pybind11/include/pybind11/eval.h,sha256=7re-O2Eor1yD0Q_KgFkHIjKD17ejzII687Yszl9_KfE,4731 +pybind11/include/pybind11/functional.h,sha256=XY1Rj5_x2nb9AT0OzB9skt6OMOn6klNSkT0uBrRIkLo,5051 +pybind11/include/pybind11/gil.h,sha256=IAR_w0RupvFS5bLfw66ZV91OE9WC1p1ztOFSaxHGvZo,8517 +pybind11/include/pybind11/gil_safe_call_once.h,sha256=tPoJICumDjCcfFsFkltDGLj7c42NbgdhSt0ERkrSGKQ,3876 +pybind11/include/pybind11/iostream.h,sha256=K5rPXoCYN325r1PptcJCIhPhgtRtTJQjMr7bvUIOwxk,8862 +pybind11/include/pybind11/numpy.h,sha256=iaVp3boyb4GkVgY2vgBXbFaLwoHPb6rmSlOM44-eFU4,84243 +pybind11/include/pybind11/operators.h,sha256=224RoAXcv1la4NNY9rQ3aD_AeC8S9ZKx3HVK1O8B4MU,9103 +pybind11/include/pybind11/options.h,sha256=qXvmnj--9fZSp56NYefnB3W5V17ppHlY1Srgo3DNBpw,2734 +pybind11/include/pybind11/pybind11.h,sha256=zwcJLUvVmiZPpzvkt0Lu9IysI5Xs1ptCw9Y7C689jJU,129569 +pybind11/include/pybind11/pytypes.h,sha256=ehwy0s9uSGkByshl2l90nd25D0Mop3RNY09JTRkHUME,98953 +pybind11/include/pybind11/stl.h,sha256=aMi1OCCw2Zb-IRLSlAtQEJJHtWsRJiLT9dKDMHST1Ic,15532 +pybind11/include/pybind11/stl/filesystem.h,sha256=refLexmdcqOM6Qjo9QMB6heA5bQ7GZrP6DCvVBv0R1M,4185 +pybind11/include/pybind11/stl_bind.h,sha256=TA3A3guojho4GWsaP8SQfqbphF_HJ62-Sj2M8-CnxVA,28472 +pybind11/include/pybind11/type_caster_pyobject_ptr.h,sha256=H7pKBYTvUlibiJQEcKmeAkygSQwoCkuIyukNSDmVq-U,1929 +pybind11/include/pybind11/typing.h,sha256=rnjXxUTOp6EKJ4bwGCNV5Jortun-gBezC5s4SH-o8Yw,3600 +pybind11/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pybind11/setup_helpers.py,sha256=DZfrJeCTrHZDUpYVui7BDntZYtIp65UUQiVg8__Xd3Q,17492 +pybind11/share/cmake/pybind11/FindPythonLibsNew.cmake,sha256=5yX3-6c9CpjqBM4NfgwqK91XgOI-0hOb5sPOMKMuigA,12183 +pybind11/share/cmake/pybind11/pybind11Common.cmake,sha256=BlxskJajbjfLdkGSX4jO05tFCH4m96W32TjHiiiwt4Q,15032 +pybind11/share/cmake/pybind11/pybind11Config.cmake,sha256=4leRiHT_QZBFTnZsZKCkh70sjOd-odOWzomPnep-5HE,7952 +pybind11/share/cmake/pybind11/pybind11ConfigVersion.cmake,sha256=saYag2OKwhcTsAhc0dpS3o88VFvY7GDyx8gGf8N0-ss,1403 +pybind11/share/cmake/pybind11/pybind11NewTools.cmake,sha256=j9cUliW9A46IzvMzLv9WtMgz5EblY4Hqda_1SY99INY,10970 +pybind11/share/cmake/pybind11/pybind11Targets.cmake,sha256=ymsjrVrcUP03W9M-HBX_mpIQSb8ZE4aDCGKDIoTXvd8,4271 +pybind11/share/cmake/pybind11/pybind11Tools.cmake,sha256=34Dh2KSsg2FqkTzfDjjxNKOBr7kMy8_Gg3fkP_DCO_E,8569 +pybind11/share/pkgconfig/pybind11.pc,sha256=R0BoqMlsJGZAxWTVSQZsCvJC3UcjW4cZzpvhKmSprDA,171 diff --git a/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..bab98d675883cc7567a79df485cd7b4f015e376f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.43.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/entry_points.txt b/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..8de5a647622cd4cbe433550be7a6b91be72e304a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] 
+pybind11-config = pybind11.__main__:main + +[pipx.run] +pybind11 = pybind11.__main__:main diff --git a/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..e47c59fd7ced4c11e813c3ef82d919443a5f1d33 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/top_level.txt @@ -0,0 +1 @@ +pybind11 diff --git a/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/LICENSE.txt b/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..5f1c11289f6a54cb07ebdbf31d02e8e81b18b07f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2003-2019 Stuart Bishop + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
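Aside (not part of any file in this diff): the pybind11 ``entry_points.txt`` above wires the ``pybind11-config`` console script to ``pybind11.__main__:main``, which reports where the bundled headers and CMake files live. A minimal sketch of the same lookup from Python, assuming the pybind11 2.12.0 package installed in this environment:

>>> import pybind11
>>> include_dir = pybind11.get_include()  # directory containing pybind11/pybind11.h
>>> # the console script performs the same kind of lookup, e.g.:
>>> #   pybind11-config --includes   (equivalent to: python -m pybind11 --includes)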
diff --git a/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..2cb10460745926764ee300e57e7789229bd98c91 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/METADATA @@ -0,0 +1,649 @@ +Metadata-Version: 2.1 +Name: pytz +Version: 2024.1 +Summary: World timezone definitions, modern and historical +Home-page: http://pythonhosted.org/pytz +Author: Stuart Bishop +Author-email: stuart@stuartbishop.net +Maintainer: Stuart Bishop +Maintainer-email: stuart@stuartbishop.net +License: MIT +Download-URL: https://pypi.org/project/pytz/ +Keywords: timezone,tzinfo,datetime,olson,time +Platform: Independent +Classifier: Development Status :: 6 - Mature +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Natural Language :: English +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.4 +Classifier: Programming Language :: Python :: 2.5 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.1 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +License-File: LICENSE.txt + +pytz - World Timezone Definitions for Python +============================================ + +:Author: Stuart Bishop + +Introduction +~~~~~~~~~~~~ + +pytz brings the Olson tz database into Python. This library allows +accurate and cross platform timezone calculations using Python 2.4 +or higher. It also solves the issue of ambiguous times at the end +of daylight saving time, which you can read more about in the Python +Library Reference (``datetime.tzinfo``). + +Almost all of the Olson timezones are supported. + +.. note:: + + Projects using Python 3.9 or later should be using the support + now included as part of the standard library, and third party + packages work with it such as `tzdata `_. + pytz offers no advantages beyond backwards compatibility with + code written for earlier versions of Python. + +.. note:: + + This library differs from the documented Python API for + tzinfo implementations; if you want to create local wallclock + times you need to use the ``localize()`` method documented in this + document. In addition, if you perform date arithmetic on local + times that cross DST boundaries, the result may be in an incorrect + timezone (ie. subtract 1 minute from 2002-10-27 1:00 EST and you get + 2002-10-27 0:59 EST instead of the correct 2002-10-27 1:59 EDT). A + ``normalize()`` method is provided to correct this. 
Unfortunately these + issues cannot be resolved without modifying the Python datetime + implementation (see PEP-431). + + +Installation +~~~~~~~~~~~~ + +This package can either be installed using ``pip`` or from a tarball using the +standard Python distutils. + +If you are installing using ``pip``, you don't need to download anything as the +latest version will be downloaded for you from PyPI:: + + pip install pytz + +If you are installing from a tarball, run the following command as an +administrative user:: + + python setup.py install + + +pytz for Enterprise +~~~~~~~~~~~~~~~~~~~ + +Available as part of the Tidelift Subscription. + +The maintainers of pytz and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source dependencies you use to build your applications. Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use. `Learn more. `_. + + +Example & Usage +~~~~~~~~~~~~~~~ + +Localized times and date arithmetic +----------------------------------- + +>>> from datetime import datetime, timedelta +>>> from pytz import timezone +>>> import pytz +>>> utc = pytz.utc +>>> utc.zone +'UTC' +>>> eastern = timezone('US/Eastern') +>>> eastern.zone +'US/Eastern' +>>> amsterdam = timezone('Europe/Amsterdam') +>>> fmt = '%Y-%m-%d %H:%M:%S %Z%z' + +This library only supports two ways of building a localized time. The +first is to use the ``localize()`` method provided by the pytz library. +This is used to localize a naive datetime (datetime with no timezone +information): + +>>> loc_dt = eastern.localize(datetime(2002, 10, 27, 6, 0, 0)) +>>> print(loc_dt.strftime(fmt)) +2002-10-27 06:00:00 EST-0500 + +The second way of building a localized time is by converting an existing +localized time using the standard ``astimezone()`` method: + +>>> ams_dt = loc_dt.astimezone(amsterdam) +>>> ams_dt.strftime(fmt) +'2002-10-27 12:00:00 CET+0100' + +Unfortunately using the tzinfo argument of the standard datetime +constructors ''does not work'' with pytz for many timezones. + +>>> datetime(2002, 10, 27, 12, 0, 0, tzinfo=amsterdam).strftime(fmt) # /!\ Does not work this way! +'2002-10-27 12:00:00 LMT+0018' + +It is safe for timezones without daylight saving transitions though, such +as UTC: + +>>> datetime(2002, 10, 27, 12, 0, 0, tzinfo=pytz.utc).strftime(fmt) # /!\ Not recommended except for UTC +'2002-10-27 12:00:00 UTC+0000' + +The preferred way of dealing with times is to always work in UTC, +converting to localtime only when generating output to be read +by humans. + +>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc) +>>> loc_dt = utc_dt.astimezone(eastern) +>>> loc_dt.strftime(fmt) +'2002-10-27 01:00:00 EST-0500' + +This library also allows you to do date arithmetic using local +times, although it is more complicated than working in UTC as you +need to use the ``normalize()`` method to handle daylight saving time +and other timezone transitions. In this example, ``loc_dt`` is set +to the instant when daylight saving time ends in the US/Eastern +timezone. + +>>> before = loc_dt - timedelta(minutes=10) +>>> before.strftime(fmt) +'2002-10-27 00:50:00 EST-0500' +>>> eastern.normalize(before).strftime(fmt) +'2002-10-27 01:50:00 EDT-0400' +>>> after = eastern.normalize(before + timedelta(minutes=20)) +>>> after.strftime(fmt) +'2002-10-27 01:10:00 EST-0500' + +Creating local times is also tricky, and the reason why working with +local times is not recommended. 
Unfortunately, you cannot just pass +a ``tzinfo`` argument when constructing a datetime (see the next +section for more details) + +>>> dt = datetime(2002, 10, 27, 1, 30, 0) +>>> dt1 = eastern.localize(dt, is_dst=True) +>>> dt1.strftime(fmt) +'2002-10-27 01:30:00 EDT-0400' +>>> dt2 = eastern.localize(dt, is_dst=False) +>>> dt2.strftime(fmt) +'2002-10-27 01:30:00 EST-0500' + +Converting between timezones is more easily done, using the +standard astimezone method. + +>>> utc_dt = datetime.fromtimestamp(1143408899, tz=utc) +>>> utc_dt.strftime(fmt) +'2006-03-26 21:34:59 UTC+0000' +>>> au_tz = timezone('Australia/Sydney') +>>> au_dt = utc_dt.astimezone(au_tz) +>>> au_dt.strftime(fmt) +'2006-03-27 08:34:59 AEDT+1100' +>>> utc_dt2 = au_dt.astimezone(utc) +>>> utc_dt2.strftime(fmt) +'2006-03-26 21:34:59 UTC+0000' +>>> utc_dt == utc_dt2 +True + +You can take shortcuts when dealing with the UTC side of timezone +conversions. ``normalize()`` and ``localize()`` are not really +necessary when there are no daylight saving time transitions to +deal with. + +>>> utc_dt = datetime.fromtimestamp(1143408899, tz=utc) +>>> utc_dt.strftime(fmt) +'2006-03-26 21:34:59 UTC+0000' +>>> au_tz = timezone('Australia/Sydney') +>>> au_dt = au_tz.normalize(utc_dt.astimezone(au_tz)) +>>> au_dt.strftime(fmt) +'2006-03-27 08:34:59 AEDT+1100' +>>> utc_dt2 = au_dt.astimezone(utc) +>>> utc_dt2.strftime(fmt) +'2006-03-26 21:34:59 UTC+0000' + + +``tzinfo`` API +-------------- + +The ``tzinfo`` instances returned by the ``timezone()`` function have +been extended to cope with ambiguous times by adding an ``is_dst`` +parameter to the ``utcoffset()``, ``dst()`` && ``tzname()`` methods. + +>>> tz = timezone('America/St_Johns') + +>>> normal = datetime(2009, 9, 1) +>>> ambiguous = datetime(2009, 10, 31, 23, 30) + +The ``is_dst`` parameter is ignored for most timestamps. It is only used +during DST transition ambiguous periods to resolve that ambiguity. + +>>> print(tz.utcoffset(normal, is_dst=True)) +-1 day, 21:30:00 +>>> print(tz.dst(normal, is_dst=True)) +1:00:00 +>>> tz.tzname(normal, is_dst=True) +'NDT' + +>>> print(tz.utcoffset(ambiguous, is_dst=True)) +-1 day, 21:30:00 +>>> print(tz.dst(ambiguous, is_dst=True)) +1:00:00 +>>> tz.tzname(ambiguous, is_dst=True) +'NDT' + +>>> print(tz.utcoffset(normal, is_dst=False)) +-1 day, 21:30:00 +>>> tz.dst(normal, is_dst=False).seconds +3600 +>>> tz.tzname(normal, is_dst=False) +'NDT' + +>>> print(tz.utcoffset(ambiguous, is_dst=False)) +-1 day, 20:30:00 +>>> tz.dst(ambiguous, is_dst=False) +datetime.timedelta(0) +>>> tz.tzname(ambiguous, is_dst=False) +'NST' + +If ``is_dst`` is not specified, ambiguous timestamps will raise +an ``pytz.exceptions.AmbiguousTimeError`` exception. + +>>> print(tz.utcoffset(normal)) +-1 day, 21:30:00 +>>> print(tz.dst(normal)) +1:00:00 +>>> tz.tzname(normal) +'NDT' + +>>> import pytz.exceptions +>>> try: +... tz.utcoffset(ambiguous) +... except pytz.exceptions.AmbiguousTimeError: +... print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous) +pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00 +>>> try: +... tz.dst(ambiguous) +... except pytz.exceptions.AmbiguousTimeError: +... print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous) +pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00 +>>> try: +... tz.tzname(ambiguous) +... except pytz.exceptions.AmbiguousTimeError: +... 
print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous) +pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00 + + +Problems with Localtime +~~~~~~~~~~~~~~~~~~~~~~~ + +The major problem we have to deal with is that certain datetimes +may occur twice in a year. For example, in the US/Eastern timezone +on the last Sunday morning in October, the following sequence +happens: + + - 01:00 EDT occurs + - 1 hour later, instead of 2:00am the clock is turned back 1 hour + and 01:00 happens again (this time 01:00 EST) + +In fact, every instant between 01:00 and 02:00 occurs twice. This means +that if you try and create a time in the 'US/Eastern' timezone +the standard datetime syntax, there is no way to specify if you meant +before of after the end-of-daylight-saving-time transition. Using the +pytz custom syntax, the best you can do is make an educated guess: + +>>> loc_dt = eastern.localize(datetime(2002, 10, 27, 1, 30, 00)) +>>> loc_dt.strftime(fmt) +'2002-10-27 01:30:00 EST-0500' + +As you can see, the system has chosen one for you and there is a 50% +chance of it being out by one hour. For some applications, this does +not matter. However, if you are trying to schedule meetings with people +in different timezones or analyze log files it is not acceptable. + +The best and simplest solution is to stick with using UTC. The pytz +package encourages using UTC for internal timezone representation by +including a special UTC implementation based on the standard Python +reference implementation in the Python documentation. + +The UTC timezone unpickles to be the same instance, and pickles to a +smaller size than other pytz tzinfo instances. The UTC implementation +can be obtained as pytz.utc, pytz.UTC, or pytz.timezone('UTC'). + +>>> import pickle, pytz +>>> dt = datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc) +>>> naive = dt.replace(tzinfo=None) +>>> p = pickle.dumps(dt, 1) +>>> naive_p = pickle.dumps(naive, 1) +>>> len(p) - len(naive_p) +17 +>>> new = pickle.loads(p) +>>> new == dt +True +>>> new is dt +False +>>> new.tzinfo is dt.tzinfo +True +>>> pytz.utc is pytz.UTC is pytz.timezone('UTC') +True + +Note that some other timezones are commonly thought of as the same (GMT, +Greenwich, Universal, etc.). The definition of UTC is distinct from these +other timezones, and they are not equivalent. For this reason, they will +not compare the same in Python. + +>>> utc == pytz.timezone('GMT') +False + +See the section `What is UTC`_, below. + +If you insist on working with local times, this library provides a +facility for constructing them unambiguously: + +>>> loc_dt = datetime(2002, 10, 27, 1, 30, 00) +>>> est_dt = eastern.localize(loc_dt, is_dst=True) +>>> edt_dt = eastern.localize(loc_dt, is_dst=False) +>>> print(est_dt.strftime(fmt) + ' / ' + edt_dt.strftime(fmt)) +2002-10-27 01:30:00 EDT-0400 / 2002-10-27 01:30:00 EST-0500 + +If you pass None as the is_dst flag to localize(), pytz will refuse to +guess and raise exceptions if you try to build ambiguous or non-existent +times. + +For example, 1:30am on 27th Oct 2002 happened twice in the US/Eastern +timezone when the clocks where put back at the end of Daylight Saving +Time: + +>>> dt = datetime(2002, 10, 27, 1, 30, 00) +>>> try: +... eastern.localize(dt, is_dst=None) +... except pytz.exceptions.AmbiguousTimeError: +... 
print('pytz.exceptions.AmbiguousTimeError: %s' % dt) +pytz.exceptions.AmbiguousTimeError: 2002-10-27 01:30:00 + +Similarly, 2:30am on 7th April 2002 never happened at all in the +US/Eastern timezone, as the clocks where put forward at 2:00am skipping +the entire hour: + +>>> dt = datetime(2002, 4, 7, 2, 30, 00) +>>> try: +... eastern.localize(dt, is_dst=None) +... except pytz.exceptions.NonExistentTimeError: +... print('pytz.exceptions.NonExistentTimeError: %s' % dt) +pytz.exceptions.NonExistentTimeError: 2002-04-07 02:30:00 + +Both of these exceptions share a common base class to make error handling +easier: + +>>> isinstance(pytz.AmbiguousTimeError(), pytz.InvalidTimeError) +True +>>> isinstance(pytz.NonExistentTimeError(), pytz.InvalidTimeError) +True + + +A special case is where countries change their timezone definitions +with no daylight savings time switch. For example, in 1915 Warsaw +switched from Warsaw time to Central European time with no daylight savings +transition. So at the stroke of midnight on August 5th 1915 the clocks +were wound back 24 minutes creating an ambiguous time period that cannot +be specified without referring to the timezone abbreviation or the +actual UTC offset. In this case midnight happened twice, neither time +during a daylight saving time period. pytz handles this transition by +treating the ambiguous period before the switch as daylight savings +time, and the ambiguous period after as standard time. + + +>>> warsaw = pytz.timezone('Europe/Warsaw') +>>> amb_dt1 = warsaw.localize(datetime(1915, 8, 4, 23, 59, 59), is_dst=True) +>>> amb_dt1.strftime(fmt) +'1915-08-04 23:59:59 WMT+0124' +>>> amb_dt2 = warsaw.localize(datetime(1915, 8, 4, 23, 59, 59), is_dst=False) +>>> amb_dt2.strftime(fmt) +'1915-08-04 23:59:59 CET+0100' +>>> switch_dt = warsaw.localize(datetime(1915, 8, 5, 00, 00, 00), is_dst=False) +>>> switch_dt.strftime(fmt) +'1915-08-05 00:00:00 CET+0100' +>>> str(switch_dt - amb_dt1) +'0:24:01' +>>> str(switch_dt - amb_dt2) +'0:00:01' + +The best way of creating a time during an ambiguous time period is +by converting from another timezone such as UTC: + +>>> utc_dt = datetime(1915, 8, 4, 22, 36, tzinfo=pytz.utc) +>>> utc_dt.astimezone(warsaw).strftime(fmt) +'1915-08-04 23:36:00 CET+0100' + +The standard Python way of handling all these ambiguities is not to +handle them, such as demonstrated in this example using the US/Eastern +timezone definition from the Python documentation (Note that this +implementation only works for dates between 1987 and 2006 - it is +included for tests only!): + +>>> from pytz.reference import Eastern # pytz.reference only for tests +>>> dt = datetime(2002, 10, 27, 0, 30, tzinfo=Eastern) +>>> str(dt) +'2002-10-27 00:30:00-04:00' +>>> str(dt + timedelta(hours=1)) +'2002-10-27 01:30:00-05:00' +>>> str(dt + timedelta(hours=2)) +'2002-10-27 02:30:00-05:00' +>>> str(dt + timedelta(hours=3)) +'2002-10-27 03:30:00-05:00' + +Notice the first two results? At first glance you might think they are +correct, but taking the UTC offset into account you find that they are +actually two hours appart instead of the 1 hour we asked for. + +>>> from pytz.reference import UTC # pytz.reference only for tests +>>> str(dt.astimezone(UTC)) +'2002-10-27 04:30:00+00:00' +>>> str((dt + timedelta(hours=1)).astimezone(UTC)) +'2002-10-27 06:30:00+00:00' + + +Country Information +~~~~~~~~~~~~~~~~~~~ + +A mechanism is provided to access the timezones commonly in use +for a particular country, looked up using the ISO 3166 country code. 
+It returns a list of strings that can be used to retrieve the relevant +tzinfo instance using ``pytz.timezone()``: + +>>> print(' '.join(pytz.country_timezones['nz'])) +Pacific/Auckland Pacific/Chatham + +The Olson database comes with a ISO 3166 country code to English country +name mapping that pytz exposes as a dictionary: + +>>> print(pytz.country_names['nz']) +New Zealand + + +What is UTC +~~~~~~~~~~~ + +'UTC' is `Coordinated Universal Time`_. It is a successor to, but distinct +from, Greenwich Mean Time (GMT) and the various definitions of Universal +Time. UTC is now the worldwide standard for regulating clocks and time +measurement. + +All other timezones are defined relative to UTC, and include offsets like +UTC+0800 - hours to add or subtract from UTC to derive the local time. No +daylight saving time occurs in UTC, making it a useful timezone to perform +date arithmetic without worrying about the confusion and ambiguities caused +by daylight saving time transitions, your country changing its timezone, or +mobile computers that roam through multiple timezones. + +.. _Coordinated Universal Time: https://en.wikipedia.org/wiki/Coordinated_Universal_Time + + +Helpers +~~~~~~~ + +There are two lists of timezones provided. + +``all_timezones`` is the exhaustive list of the timezone names that can +be used. + +>>> from pytz import all_timezones +>>> len(all_timezones) >= 500 +True +>>> 'Etc/Greenwich' in all_timezones +True + +``common_timezones`` is a list of useful, current timezones. It doesn't +contain deprecated zones or historical zones, except for a few I've +deemed in common usage, such as US/Eastern (open a bug report if you +think other timezones are deserving of being included here). It is also +a sequence of strings. + +>>> from pytz import common_timezones +>>> len(common_timezones) < len(all_timezones) +True +>>> 'Etc/Greenwich' in common_timezones +False +>>> 'Australia/Melbourne' in common_timezones +True +>>> 'US/Eastern' in common_timezones +True +>>> 'Canada/Eastern' in common_timezones +True +>>> 'Australia/Yancowinna' in all_timezones +True +>>> 'Australia/Yancowinna' in common_timezones +False + +Both ``common_timezones`` and ``all_timezones`` are alphabetically +sorted: + +>>> common_timezones_dupe = common_timezones[:] +>>> common_timezones_dupe.sort() +>>> common_timezones == common_timezones_dupe +True +>>> all_timezones_dupe = all_timezones[:] +>>> all_timezones_dupe.sort() +>>> all_timezones == all_timezones_dupe +True + +``all_timezones`` and ``common_timezones`` are also available as sets. + +>>> from pytz import all_timezones_set, common_timezones_set +>>> 'US/Eastern' in all_timezones_set +True +>>> 'US/Eastern' in common_timezones_set +True +>>> 'Australia/Victoria' in common_timezones_set +False + +You can also retrieve lists of timezones used by particular countries +using the ``country_timezones()`` function. It requires an ISO-3166 +two letter country code. + +>>> from pytz import country_timezones +>>> print(' '.join(country_timezones('ch'))) +Europe/Zurich +>>> print(' '.join(country_timezones('CH'))) +Europe/Zurich + + +Internationalization - i18n/l10n +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Pytz is an interface to the IANA database, which uses ASCII names. The `Unicode Consortium's Unicode Locales (CLDR) `_ +project provides translations. Python packages such as +`Babel `_ +and Thomas Khyn's `l18n `_ package can be used +to access these translations from Python. + + +License +~~~~~~~ + +MIT license. 
+ +This code is also available as part of Zope 3 under the Zope Public +License, Version 2.1 (ZPL). + +I'm happy to relicense this code if necessary for inclusion in other +open source projects. + + +Latest Versions +~~~~~~~~~~~~~~~ + +This package will be updated after releases of the Olson timezone +database. The latest version can be downloaded from the `Python Package +Index `_. The code that is used +to generate this distribution is hosted on Github and available +using git:: + + git clone https://github.com/stub42/pytz.git + +Announcements of new releases are made on +`Launchpad `_, and the +`Atom feed `_ +hosted there. + + +Bugs, Feature Requests & Patches +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Bugs should be reported on `Github `_. +Feature requests are unlikely to be considered, and efforts instead directed +to timezone support now built into Python or packages that work with it. + + +Security Issues +~~~~~~~~~~~~~~~ + +Reports about security issues can be made via `Tidelift `_. + + +Issues & Limitations +~~~~~~~~~~~~~~~~~~~~ + +- This project is in maintenance mode. Projects using Python 3.9 or later + are best served by using the timezone functionaly now included in core + Python and packages that work with it such as `tzdata `_. + +- Offsets from UTC are rounded to the nearest whole minute, so timezones + such as Europe/Amsterdam pre 1937 will be up to 30 seconds out. This + was a limitation of the Python datetime library. + +- If you think a timezone definition is incorrect, I probably can't fix + it. pytz is a direct translation of the Olson timezone database, and + changes to the timezone definitions need to be made to this source. + If you find errors they should be reported to the time zone mailing + list, linked from http://www.iana.org/time-zones. 
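+An illustrative sketch, not part of the upstream pytz documentation: on
+Python 3.9 or later the standard-library ``zoneinfo`` module (together with
+the system tz database or the ``tzdata`` package) covers the ambiguous-time
+handling shown above, with the ``fold`` attribute of ``datetime`` playing
+the role of pytz's ``is_dst`` flag. ``America/New_York`` is used here purely
+as an example key.
+
+>>> from datetime import datetime
+>>> from zoneinfo import ZoneInfo  # standard library on Python 3.9+
+>>> eastern_zi = ZoneInfo('America/New_York')
+>>> fmt = '%Y-%m-%d %H:%M:%S %Z%z'
+>>> # 01:30 on 2002-10-27 happened twice; fold picks which occurrence is meant.
+>>> first = datetime(2002, 10, 27, 1, 30, tzinfo=eastern_zi)
+>>> first.strftime(fmt)
+'2002-10-27 01:30:00 EDT-0400'
+>>> second = first.replace(fold=1)
+>>> second.strftime(fmt)
+'2002-10-27 01:30:00 EST-0500'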
+ + +Further Reading +~~~~~~~~~~~~~~~ + +More info than you want to know about timezones: +https://data.iana.org/time-zones/tz-link.html + + +Contact +~~~~~~~ + +Stuart Bishop + + diff --git a/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..3e8b5b44c170e83e95ff9ca97abf9ef133d5fa47 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/RECORD @@ -0,0 +1,622 @@ +pytz-2024.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pytz-2024.1.dist-info/LICENSE.txt,sha256=vosaN-vibFkqkPbA6zMQOn84POL010mMCvmlJpkKB7g,1088 +pytz-2024.1.dist-info/METADATA,sha256=2mOz3YzpRCJtu0iklrKsUm8a8BmJglIL_qqGhhduPJk,22325 +pytz-2024.1.dist-info/RECORD,, +pytz-2024.1.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110 +pytz-2024.1.dist-info/top_level.txt,sha256=6xRYlt934v1yHb1JIrXgHyGxn3cqACvd-yE8ski_kcc,5 +pytz-2024.1.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +pytz/__init__.py,sha256=RZJJJ1W2RyP9fllsMNO4w-yjJRpIazWJ9fvj5telYig,35101 +pytz/__pycache__/__init__.cpython-310.pyc,, +pytz/__pycache__/exceptions.cpython-310.pyc,, +pytz/__pycache__/lazy.cpython-310.pyc,, +pytz/__pycache__/reference.cpython-310.pyc,, +pytz/__pycache__/tzfile.cpython-310.pyc,, +pytz/__pycache__/tzinfo.cpython-310.pyc,, +pytz/exceptions.py,sha256=434ZcuLlpLQY9mWoGq7zJMV1TyiYvVgpKBU1qZkbDjM,1571 +pytz/lazy.py,sha256=toeR5uDWKBj6ezsUZ4elNP6CEMtK7CO2jS9A30nsFbo,5404 +pytz/reference.py,sha256=zUtCki7JFEmrzrjNsfMD7YL0lWDxynKc1Ubo4iXSs74,3778 +pytz/tzfile.py,sha256=K2y7pZs4vydpZVftrfAA_-hgw17y1Szc7z_QCse6udU,4723 +pytz/tzinfo.py,sha256=XfaVOoO3KsCvtUYaCd0fvgBXWZ8tgevGYUoBh_uiE60,19340 +pytz/zoneinfo/Africa/Abidjan,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Accra,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Addis_Ababa,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Africa/Algiers,sha256=vaFpjNVCwObnbfu82rOQzdJvN6nVgmpXpQ1aqzfzsqY,735 +pytz/zoneinfo/Africa/Asmara,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Africa/Asmera,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Africa/Bamako,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Bangui,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Banjul,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Bissau,sha256=IjuxDP6EZiDHFvl_bHS6NN7sdRxLKXllooBC829poak,194 +pytz/zoneinfo/Africa/Blantyre,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Brazzaville,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Bujumbura,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Cairo,sha256=Lft-GCLQhaSJm9VqUmsEFoHIS1Vhfa7pFJn9GZCpifs,2399 +pytz/zoneinfo/Africa/Casablanca,sha256=4RqVbw_F3ZucopIC2ivAJ8WDwj5wRODAB67tBpdXcgA,2429 +pytz/zoneinfo/Africa/Ceuta,sha256=Cw-2_nFDGbN8WqIsVpcauyZooWX8j3Kmx2PnC0fHut8,2052 +pytz/zoneinfo/Africa/Conakry,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Dakar,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Dar_es_Salaam,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Africa/Djibouti,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 
+pytz/zoneinfo/Africa/Douala,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/El_Aaiun,sha256=UWCCqQLJxd8qsTYw82kz9W1suwW5TRgnZw31sDWDz20,2295 +pytz/zoneinfo/Africa/Freetown,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Gaborone,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Harare,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Johannesburg,sha256=bBvMdSZo53WFowiuhUO9C8zY6BOGViboCb-U8_49l34,246 +pytz/zoneinfo/Africa/Juba,sha256=UVnIqEPJwHLTMC-r5qZQHNv9opoYVsKdq-ta_5XUw_Q,679 +pytz/zoneinfo/Africa/Kampala,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Africa/Khartoum,sha256=MYWDoJ3AcCItZdApoeOgtWWDDxquwTon5v5TOGP70-o,679 +pytz/zoneinfo/Africa/Kigali,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Kinshasa,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Lagos,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Libreville,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Lome,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Luanda,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Lubumbashi,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Lusaka,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Malabo,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Maputo,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 +pytz/zoneinfo/Africa/Maseru,sha256=bBvMdSZo53WFowiuhUO9C8zY6BOGViboCb-U8_49l34,246 +pytz/zoneinfo/Africa/Mbabane,sha256=bBvMdSZo53WFowiuhUO9C8zY6BOGViboCb-U8_49l34,246 +pytz/zoneinfo/Africa/Mogadishu,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Africa/Monrovia,sha256=-VsJW5cU4KdvfgYaQVv4lcuzmaKIVFMd42nO6RXOBdU,208 +pytz/zoneinfo/Africa/Nairobi,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Africa/Ndjamena,sha256=8T3A0Zm9Gj0Bvm6rd88t3GAXKiKdGUfHlIqYlkYI0KM,199 +pytz/zoneinfo/Africa/Niamey,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Nouakchott,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Ouagadougou,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Porto-Novo,sha256=z_6wKCzL1_ug5JP_hneh5abdUZeIUELkN_ladz-ESEY,235 +pytz/zoneinfo/Africa/Sao_Tome,sha256=MdjxpQ268uzJ7Zx1ZroFUtRUwqsJ6F_yY3AYV9FXw1I,254 +pytz/zoneinfo/Africa/Timbuktu,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Africa/Tripoli,sha256=W1dptGD70T7ppGoo0fczFQeDiIp0nultLNPV66MwB2c,625 +pytz/zoneinfo/Africa/Tunis,sha256=OFVMEM4eYT2Ez0beuhEUCTSIpcFldWxsV2uEoTZIUNI,689 +pytz/zoneinfo/Africa/Windhoek,sha256=xuhvudrMH4alnVmouSTQI8YL8F_HbgsF2EQ7AZKzuHs,955 +pytz/zoneinfo/America/Adak,sha256=IB1DhwJQAKbhPJ9jHLf8zW5Dad7HIkBS-dhv64E1OlM,2356 +pytz/zoneinfo/America/Anchorage,sha256=oZA1NSPS2BWdymYpnCHFO8BlYVS-ll5KLg2Ez9CbETs,2371 +pytz/zoneinfo/America/Anguilla,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Antigua,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Araguaina,sha256=G6v9wYFZ8EB4WQfIsqRbbiiKd2b27j7Zt5dFjBbzx2o,870 +pytz/zoneinfo/America/Argentina/Buenos_Aires,sha256=JmU8lBwmy29gR6OmeytvFdMRx6ObJKnYNHmLyMmXX2M,1062 +pytz/zoneinfo/America/Argentina/Catamarca,sha256=uMCJXXGYmNESHVvj5RYBZ0McrOdE14hwm17l25MgRW0,1062 
+pytz/zoneinfo/America/Argentina/ComodRivadavia,sha256=uMCJXXGYmNESHVvj5RYBZ0McrOdE14hwm17l25MgRW0,1062 +pytz/zoneinfo/America/Argentina/Cordoba,sha256=uniNihhMHnr4XK4WpwiPUnrAT0YPmvzqB6f0hRLtXvY,1062 +pytz/zoneinfo/America/Argentina/Jujuy,sha256=PGmAehypCxj0XCenCSWqylDIPbKLK0DlrwJK_24D590,1034 +pytz/zoneinfo/America/Argentina/La_Rioja,sha256=Um6XoVXhsr62ad1mWuebe6NY0ZHauBdR9tMGDgqCOHg,1076 +pytz/zoneinfo/America/Argentina/Mendoza,sha256=xcOVtvRyVYFAU90y2QYwpyQhpMLyAp7-Fxvku4kgl0c,1062 +pytz/zoneinfo/America/Argentina/Rio_Gallegos,sha256=F9ZKR4o8gLHX7QBuIjMapGIdmzJxpqwbouPgZ5MqDpY,1062 +pytz/zoneinfo/America/Argentina/Salta,sha256=h1KYrDNIapvDkYhi1PaB8WD1qWOe4vhhgDJWDCGV4jc,1034 +pytz/zoneinfo/America/Argentina/San_Juan,sha256=AI2GltA80mPNzhHxYycuEwIbO1ANXyIqBQZMpjqKqdQ,1076 +pytz/zoneinfo/America/Argentina/San_Luis,sha256=2ItGRcLVK2wx8MyJsHbIBBeAkU4B-MN5x1ZxNyZ7UJE,1088 +pytz/zoneinfo/America/Argentina/Tucuman,sha256=twO-FqtNJV8XOzWTvFQ-xnEcWCoDUHY3gpVIG0Mzbf8,1090 +pytz/zoneinfo/America/Argentina/Ushuaia,sha256=A6IbpVlY9IIPoSKMFRR9DMROdwXUSDc2HsASueOSnqo,1062 +pytz/zoneinfo/America/Aruba,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Asuncion,sha256=V8wwkUoNqyj0C-fUSADpU7HU8H3Qkr3jNPJ4SLsGUIc,2030 +pytz/zoneinfo/America/Atikokan,sha256=kayA_pdpMcSQ0FjIzotdcf-m1JYfbKE-qcFT8LC8zqA,182 +pytz/zoneinfo/America/Atka,sha256=IB1DhwJQAKbhPJ9jHLf8zW5Dad7HIkBS-dhv64E1OlM,2356 +pytz/zoneinfo/America/Bahia,sha256=qi7dA6FofDhLxVMmd2L8bK3HeaQnc9X-jiijwyfhs3g,1010 +pytz/zoneinfo/America/Bahia_Banderas,sha256=L6iHYbA1Us1pljllFLEIAHW4ZaZhFKoG2Zr8TT5aY38,1152 +pytz/zoneinfo/America/Barbados,sha256=ima-Qrrhazu4Qfvu2Z0-e6E-GTiYknuJBu6c2yVG9LE,436 +pytz/zoneinfo/America/Belem,sha256=aZMUgtFDdHNISpqyQRYbmS2IBD-BAS3CaJnhu6onLCY,562 +pytz/zoneinfo/America/Belize,sha256=pkfLY2KfPchbeJa1pWcXmWAwp4ZlRvxWLVezXnrbkws,1614 +pytz/zoneinfo/America/Blanc-Sablon,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Boa_Vista,sha256=dMtaG11kGlJrgJJgGWEDZZAmnO_HfT3L4X8pI72LLFY,618 +pytz/zoneinfo/America/Bogota,sha256=Z1ernZZGQxulE8KFWHYWcM3SV1jn2_QEc1Q0OJzHRak,232 +pytz/zoneinfo/America/Boise,sha256=7HQsNPJiUheQgFz5kVLvTnf5xhXAYaeANqDskxKz2Vs,2410 +pytz/zoneinfo/America/Buenos_Aires,sha256=JmU8lBwmy29gR6OmeytvFdMRx6ObJKnYNHmLyMmXX2M,1062 +pytz/zoneinfo/America/Cambridge_Bay,sha256=_4xRlX3WdVpEcqoT6myD7NeTCXnn9OYk_iH006bwULo,2254 +pytz/zoneinfo/America/Campo_Grande,sha256=gINiXg5i2e6Rh2Nbo2bFqhPAJL4F4cAqGnBankXTDXw,1430 +pytz/zoneinfo/America/Cancun,sha256=lI4ZtiBtxKqNHvU47vRSwc5-GDl8JOdC2A6oc9s8iIo,834 +pytz/zoneinfo/America/Caracas,sha256=mUNMFdDzZLav_ePA1ocBdmqVBierkeEszTIFpNCm5J0,250 +pytz/zoneinfo/America/Catamarca,sha256=uMCJXXGYmNESHVvj5RYBZ0McrOdE14hwm17l25MgRW0,1062 +pytz/zoneinfo/America/Cayenne,sha256=4k7Iv1woX4atqePKrcvMQD2Vk9Tmma7rW_AW_R62pCc,184 +pytz/zoneinfo/America/Cayman,sha256=kayA_pdpMcSQ0FjIzotdcf-m1JYfbKE-qcFT8LC8zqA,182 +pytz/zoneinfo/America/Chicago,sha256=_roybr6I6sIAF6cYdIxGxoRpoef153Fty48dQ6bm9oY,3592 +pytz/zoneinfo/America/Chihuahua,sha256=ZAlPSsUfT3VGp1VdibnHIf-QsdEIqHuzX15wu2P2YQk,1102 +pytz/zoneinfo/America/Ciudad_Juarez,sha256=OQstyPrMxx3nNEbzgDhq_W0mK49-ApNMK7_6p-6dJ64,1538 +pytz/zoneinfo/America/Coral_Harbour,sha256=kayA_pdpMcSQ0FjIzotdcf-m1JYfbKE-qcFT8LC8zqA,182 +pytz/zoneinfo/America/Cordoba,sha256=uniNihhMHnr4XK4WpwiPUnrAT0YPmvzqB6f0hRLtXvY,1062 +pytz/zoneinfo/America/Costa_Rica,sha256=74rYa6lrgIkyls9PkHo8SCYl9oOqiuG5S7MWdnJelP4,316 +pytz/zoneinfo/America/Creston,sha256=illz0sYuLL8lIPK0Tkou6dL0Vck_D0W_3rRTOvFYRmQ,360 
+pytz/zoneinfo/America/Cuiaba,sha256=GRJqkhRXNsOUcgjZddQxRIJdRYaw9pM_YLWbun88dkg,1402 +pytz/zoneinfo/America/Curacao,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Danmarkshavn,sha256=YRZAfUCoVtaL1L-MYMYMH1wyOaVQnfUo_gFnvMXSuzw,698 +pytz/zoneinfo/America/Dawson,sha256=rAHhyuMuyjf_eyA2SBG76MRBf_fj_xi5FAuiWVQgJhw,1614 +pytz/zoneinfo/America/Dawson_Creek,sha256=aJXCyP4j3ggE4wGCN-LrS9hpD_5zWHzQTeSAKTWEPUM,1050 +pytz/zoneinfo/America/Denver,sha256=MugZwApDs8NI9TnXANQlUE8guNBowWQY0m-ptpPndck,2460 +pytz/zoneinfo/America/Detroit,sha256=hecz8yqY2Cj5B61G3gLZdAVZvRgK9l0P90c_gN-uD5g,2230 +pytz/zoneinfo/America/Dominica,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Edmonton,sha256=-TkIfc3QlvaCf0p8COZ43Y1HRBAl-nARUi-JdXeK1vE,2332 +pytz/zoneinfo/America/Eirunepe,sha256=j5eExkjFaqtC-D8XK0rGzoF9yEgbSlTbPqVG9WKhEa8,642 +pytz/zoneinfo/America/El_Salvador,sha256=gvGN8Lkj-sGm2_rs8OUjAMf1oMtKp2Xes6UfWT0WqgU,224 +pytz/zoneinfo/America/Ensenada,sha256=57-Q9LSTNuTidz-lOTwDysmlCoeFUXSecvVVqNWburQ,2374 +pytz/zoneinfo/America/Fort_Nelson,sha256=erfODr3DrSpz65kAdO7Ts2dGbZxvddEP6gx4BX3y2J0,2240 +pytz/zoneinfo/America/Fort_Wayne,sha256=kNKy9Kj9ICsiYYfCCbAggzMA7exf-GpGPMxoXocHUyw,1682 +pytz/zoneinfo/America/Fortaleza,sha256=rjiSB0q1cBuMDOM9orW_uwe5UOLBwTlfjFotwOYe1mU,702 +pytz/zoneinfo/America/Glace_Bay,sha256=G8DGLGCapH_aYCF_OhaL5Qonf7FOAgAPwelO5htCWBc,2192 +pytz/zoneinfo/America/Godthab,sha256=KGXrMN-YkYpVCgLdpcfwMFQ77EsRAGsjUCG3yAUvVfw,1889 +pytz/zoneinfo/America/Goose_Bay,sha256=JgaLueghSvX2g725FOfIgpgvsqxZGykWOhAZWGpQZRY,3210 +pytz/zoneinfo/America/Grand_Turk,sha256=4YOFEPK60Bel2_fCsY6vSZxUcMJKjiKtyOf_Q0khEwU,1834 +pytz/zoneinfo/America/Grenada,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Guadeloupe,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Guatemala,sha256=dugUgCd6QY52yHkHuUP4jRWzo5x439IQigaYCvEF46Q,280 +pytz/zoneinfo/America/Guayaquil,sha256=j2UuIo-4RgSOlTNfu77mhZ92waNTeKFSvmoVemJooT0,232 +pytz/zoneinfo/America/Guyana,sha256=R0bOvCRDC8SRIexmhsduPdHbbRPwI2GviD9otExiUrk,248 +pytz/zoneinfo/America/Halifax,sha256=TZpmc5PwWoLfTfQoQ_b3U17BE2iVKSeNkR0Ho8mbTn8,3424 +pytz/zoneinfo/America/Havana,sha256=HUQeAuKBsEkI5SLZjqynXICOUVOajkKzKH5r-Ov5Odc,2416 +pytz/zoneinfo/America/Hermosillo,sha256=WnlVBpVBG8ONnz0wpxteXmuvSzOGwSlAisvDd1GtKYA,456 +pytz/zoneinfo/America/Indiana/Indianapolis,sha256=kNKy9Kj9ICsiYYfCCbAggzMA7exf-GpGPMxoXocHUyw,1682 +pytz/zoneinfo/America/Indiana/Knox,sha256=CsvZ5BKw2qVav3x_F8CU9taJdDk7jX41Cfsqms6jXV8,2444 +pytz/zoneinfo/America/Indiana/Marengo,sha256=f3tQ-lgMSUA7nvn64pXhKtJL7mWzGajoCega5MEJSbI,1738 +pytz/zoneinfo/America/Indiana/Petersburg,sha256=A88OHuM0Rg3iMLHjKgXq_d2jZCdVSytUQs-9W0KcFyQ,1920 +pytz/zoneinfo/America/Indiana/Tell_City,sha256=4dWqAr9Y2BXfL4pAQk-81c3gGl2cNdHXOD7_wJhhhn8,1700 +pytz/zoneinfo/America/Indiana/Vevay,sha256=H7VR2G-_sD_C5Rm4P3g1iRC1FWCPg4m0MGD3P1PLzsk,1430 +pytz/zoneinfo/America/Indiana/Vincennes,sha256=62mAxT7APFCaoygflnEzdOpe-fuW1yObI6m6EUUcS7A,1710 +pytz/zoneinfo/America/Indiana/Winamac,sha256=aZGM2jR8CH9BHSUq7XygiweDd6dorXLPXg246XsbR6s,1794 +pytz/zoneinfo/America/Indianapolis,sha256=kNKy9Kj9ICsiYYfCCbAggzMA7exf-GpGPMxoXocHUyw,1682 +pytz/zoneinfo/America/Inuvik,sha256=6J-mapDnrk9A1LtswoE34tqSy_ufedcEBNxixkrEjIo,2074 +pytz/zoneinfo/America/Iqaluit,sha256=feOnxAN0N0r-M1qlkrA4JMyawoc0tqae0iiBCPDAs4k,2202 +pytz/zoneinfo/America/Jamaica,sha256=wlagieUPRf5-beie-h7QsONbNzjGsm8vMs8uf28pw28,482 
+pytz/zoneinfo/America/Jujuy,sha256=PGmAehypCxj0XCenCSWqylDIPbKLK0DlrwJK_24D590,1034 +pytz/zoneinfo/America/Juneau,sha256=k7hxb0aGRnfnE-DBi3LkcjAzRPyAf0_Hw0vVFfjGeb0,2353 +pytz/zoneinfo/America/Kentucky/Louisville,sha256=tP072xV_n_vIQjxxcJ77AGeGj6yL1KPpn3fwids9g1U,2788 +pytz/zoneinfo/America/Kentucky/Monticello,sha256=LtdyCo85BrXQs6rlH61Ym-8KqWHH6PwAOjD0QxhIdzM,2368 +pytz/zoneinfo/America/Knox_IN,sha256=CsvZ5BKw2qVav3x_F8CU9taJdDk7jX41Cfsqms6jXV8,2444 +pytz/zoneinfo/America/Kralendijk,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/La_Paz,sha256=hqfD8LQHupdZhji2e93_9pOQAT-R7muzzjP0nyfbFXY,218 +pytz/zoneinfo/America/Lima,sha256=HHgTnDUnCZzibvL0MrG8qyOuvjmYYw3e3R5VbnxMZs8,392 +pytz/zoneinfo/America/Los_Angeles,sha256=aJd7ua1tGG_vxser02AQpm4wAI3LLTdgh6QcSYYecmg,2852 +pytz/zoneinfo/America/Louisville,sha256=tP072xV_n_vIQjxxcJ77AGeGj6yL1KPpn3fwids9g1U,2788 +pytz/zoneinfo/America/Lower_Princes,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Maceio,sha256=3R5DlSe32kQDmoSVIWpcyk2o7qohr-rliwqDSGFIMyQ,730 +pytz/zoneinfo/America/Managua,sha256=xBzF01AHn2E2fD8Qdy-DHFe36UqoeNpKPfChduBKWdk,430 +pytz/zoneinfo/America/Manaus,sha256=F6RLOOeOi9lymZiQmQ9pR8tFpPZ6EguNdPfOc6BhXDE,590 +pytz/zoneinfo/America/Marigot,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Martinique,sha256=fMs80kOU2YFvC0f9y2eje97JeAtTYBamXrnlTunNLzQ,232 +pytz/zoneinfo/America/Matamoros,sha256=fq-PqdmZrQ98UsFmHA9ivjBZv5GEBRTOuLQ5Cu5ajW8,1418 +pytz/zoneinfo/America/Mazatlan,sha256=RQQVwlEVHRp2X-c_0hJ46y54abTlqUuLkyrUUicyc5g,1128 +pytz/zoneinfo/America/Mendoza,sha256=xcOVtvRyVYFAU90y2QYwpyQhpMLyAp7-Fxvku4kgl0c,1062 +pytz/zoneinfo/America/Menominee,sha256=Arv9WLbfhNcpRsUjHDU757BEdwlp08Gt30AixG3gZ04,2274 +pytz/zoneinfo/America/Merida,sha256=ORJCGiO2mXG-kk5ZZGro1MNuKqRnJx6HJlvoezTMM90,1004 +pytz/zoneinfo/America/Metlakatla,sha256=twmieGTVY2V-U8nFxqvx7asYv8GVjeWdLtrOI7UApVI,1423 +pytz/zoneinfo/America/Mexico_City,sha256=A5MlfDUZ4O1-jMTRt0WPem7qqcW0Nrslls1hlc8C4-Q,1222 +pytz/zoneinfo/America/Miquelon,sha256=l5txBJYe9HTRZlILcbSL_HNDYrjUb0ouecNy7QEkg9c,1652 +pytz/zoneinfo/America/Moncton,sha256=Wmv-bk9aKKcWWzOpc1UFu67HOfwaIk2Wmh3LgqGctys,3154 +pytz/zoneinfo/America/Monterrey,sha256=vKBLVjG0bNVDI07M4WwOVv2KbrYJVNTLmc19iM2CvTU,980 +pytz/zoneinfo/America/Montevideo,sha256=dQEBE4mjZPtyRjKXK6Z-bMHJdFqpwhIzxDH4x04rKYk,1496 +pytz/zoneinfo/America/Montreal,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494 +pytz/zoneinfo/America/Montserrat,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Nassau,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494 +pytz/zoneinfo/America/New_York,sha256=6e0H177gx2qdRC0JHvHwFmj-58TyYBTAqGixn-bBipU,3552 +pytz/zoneinfo/America/Nipigon,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494 +pytz/zoneinfo/America/Nome,sha256=2izM3-P-PqJ9za6MdhzFfMvPFNq7Gim69tAvEwPeY2s,2367 +pytz/zoneinfo/America/Noronha,sha256=feeRAijQqKylZgqe84nKhsFLycT5zIBm7mLIvdyGw4w,702 +pytz/zoneinfo/America/North_Dakota/Beulah,sha256=qtgbqNu8M3AkHF2n-_oSps1pYT4SxgclbkkPKbXaBHs,2396 +pytz/zoneinfo/America/North_Dakota/Center,sha256=9ZWbK9YKkquULyBUFS3Lr_idxbt7V7y4W4EO0Kn20sw,2396 +pytz/zoneinfo/America/North_Dakota/New_Salem,sha256=DH_bsQfuUnK2obdb06KgisO4XLqht12BXdrgUsZZveg,2396 +pytz/zoneinfo/America/Nuuk,sha256=KGXrMN-YkYpVCgLdpcfwMFQ77EsRAGsjUCG3yAUvVfw,1889 +pytz/zoneinfo/America/Ojinaga,sha256=9catgEQ2SD7qfuvTMxs15Cdd9SKaUy-svEzPBFw2Q3Q,1524 +pytz/zoneinfo/America/Panama,sha256=kayA_pdpMcSQ0FjIzotdcf-m1JYfbKE-qcFT8LC8zqA,182 
+pytz/zoneinfo/America/Pangnirtung,sha256=feOnxAN0N0r-M1qlkrA4JMyawoc0tqae0iiBCPDAs4k,2202 +pytz/zoneinfo/America/Paramaribo,sha256=Z7UZvNlgd-qEUHjEPYXIkLNTgjMcCzk9EfUUEmUyd7M,248 +pytz/zoneinfo/America/Phoenix,sha256=illz0sYuLL8lIPK0Tkou6dL0Vck_D0W_3rRTOvFYRmQ,360 +pytz/zoneinfo/America/Port-au-Prince,sha256=09ZAJd4IOiMpfdpUuF1U44R_hRt6BvpAkFXOnYO9yOM,1434 +pytz/zoneinfo/America/Port_of_Spain,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Porto_Acre,sha256=0gpJUl46hQbp0P6Xj1S0NArIWeAryuuDXjsldvB5GHE,614 +pytz/zoneinfo/America/Porto_Velho,sha256=uSMV2hZWj-VyBhFBwC950wcThfN3jq6KlycESmQTLOA,562 +pytz/zoneinfo/America/Puerto_Rico,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Punta_Arenas,sha256=tR5uIf1351AWFqrqNtmXnhQWnKREmJaZqKBzaWRVMTQ,1902 +pytz/zoneinfo/America/Rainy_River,sha256=7P-_YQrneFcon7QKSTOnkiGjEppFDn3Z48MJ1qq8VBw,2868 +pytz/zoneinfo/America/Rankin_Inlet,sha256=nXgqjL3O2BV0em-Xk8qVRRZb_X0yQmHE6vmSSvI9Kzc,2066 +pytz/zoneinfo/America/Recife,sha256=bJ_HE0-JFio4-owpZ0pLO8U3ai0fiGu8QHL0DexLiLc,702 +pytz/zoneinfo/America/Regina,sha256=yjqT08pHbICYe83H8JmtaDBvCFqRv7Tfze3Y8xuXukw,980 +pytz/zoneinfo/America/Resolute,sha256=CnMU2dBI-63vt8-J0Q1Ropx-8b9pRCLjhvrycMIedGg,2066 +pytz/zoneinfo/America/Rio_Branco,sha256=0gpJUl46hQbp0P6Xj1S0NArIWeAryuuDXjsldvB5GHE,614 +pytz/zoneinfo/America/Rosario,sha256=uniNihhMHnr4XK4WpwiPUnrAT0YPmvzqB6f0hRLtXvY,1062 +pytz/zoneinfo/America/Santa_Isabel,sha256=57-Q9LSTNuTidz-lOTwDysmlCoeFUXSecvVVqNWburQ,2374 +pytz/zoneinfo/America/Santarem,sha256=VmZP9S5pPucFxyqAOV908EmWXQZvgCgWLmlJJTUl0LE,588 +pytz/zoneinfo/America/Santiago,sha256=0CDw13dCMUsoquMupoJgupkzAUNhDK6E0lVxURA7osA,2515 +pytz/zoneinfo/America/Santo_Domingo,sha256=DKtaEj8fQ92ybITTWU4Bm160S9pzJmUVbjaWRnenxU4,458 +pytz/zoneinfo/America/Sao_Paulo,sha256=BMBnRO4_4HjvO4t3njjrMGZr-ZPmegkvyvL8KPY6ZM4,1430 +pytz/zoneinfo/America/Scoresbysund,sha256=K-qkiMCCFgOe8ccPMABA-lDjc9vb6wpluBOCVfiBdLI,1935 +pytz/zoneinfo/America/Shiprock,sha256=MugZwApDs8NI9TnXANQlUE8guNBowWQY0m-ptpPndck,2460 +pytz/zoneinfo/America/Sitka,sha256=aiS7Fk37hZpzZ9VkeJQeF-BqTLRC1QOTCgMAJwT8UxA,2329 +pytz/zoneinfo/America/St_Barthelemy,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/St_Johns,sha256=r1-17uKv27eZ3JsVkw_DLZQbo6wvjuuVu7C2pDsmOgI,3655 +pytz/zoneinfo/America/St_Kitts,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/St_Lucia,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/St_Thomas,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/St_Vincent,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Swift_Current,sha256=RRKOF7vZC8VvYxD8PP4J1_hUPayKBP7Lu80avRkfPDY,560 +pytz/zoneinfo/America/Tegucigalpa,sha256=EzOz7ntTlreMq69JZ2CcAb8Ps98V9bUMN480tpPIyw4,252 +pytz/zoneinfo/America/Thule,sha256=8xuPRaZU8RgO5ECqFYHYmnHioc81sBOailkVu8Y02i8,1502 +pytz/zoneinfo/America/Thunder_Bay,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494 +pytz/zoneinfo/America/Tijuana,sha256=57-Q9LSTNuTidz-lOTwDysmlCoeFUXSecvVVqNWburQ,2374 +pytz/zoneinfo/America/Toronto,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494 +pytz/zoneinfo/America/Tortola,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 +pytz/zoneinfo/America/Vancouver,sha256=sknKH0jSPWam-DHfM35qXs8Nam7d5TFlkUI9Sgxryyg,2892 +pytz/zoneinfo/America/Virgin,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 
+pytz/zoneinfo/America/Whitehorse,sha256=TrR6PCnYG-mSClBMohqlP8qnYhXMUsydI-L-quXFxyM,1614 +pytz/zoneinfo/America/Winnipeg,sha256=7P-_YQrneFcon7QKSTOnkiGjEppFDn3Z48MJ1qq8VBw,2868 +pytz/zoneinfo/America/Yakutat,sha256=tFwnKbvwhyyn4LNTAn5ye_JWDdxjCerNDt7oOwUwO2M,2305 +pytz/zoneinfo/America/Yellowknife,sha256=-TkIfc3QlvaCf0p8COZ43Y1HRBAl-nARUi-JdXeK1vE,2332 +pytz/zoneinfo/Antarctica/Casey,sha256=VeaLOxTfDyjfGXq5Ul95JEIMXNWHSW-0N3yOoS7VK-c,423 +pytz/zoneinfo/Antarctica/Davis,sha256=XB12dEq0Q-3XkzBNTNC7G1fzH-WxxctIuZqI3zp8ypI,283 +pytz/zoneinfo/Antarctica/DumontDUrville,sha256=nB36HBWZTdh3TlP0DLFNz1KRQ0aHIfHbp7LC4Urp9fA,172 +pytz/zoneinfo/Antarctica/Macquarie,sha256=ie7RlaU8RHTorVVj-MX8StKMqx_oXf4UH2PUqpzcwe0,2260 +pytz/zoneinfo/Antarctica/Mawson,sha256=EjIFbqRdr2ZJBaI1XvoWRptnnW1LFrlhydxDDuIQjSI,185 +pytz/zoneinfo/Antarctica/McMurdo,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437 +pytz/zoneinfo/Antarctica/Palmer,sha256=HTZY0M8td7oUx5REPgRCHuqKg5V3fjJEi4lYBNL4Etg,1404 +pytz/zoneinfo/Antarctica/Rothera,sha256=_9NY-f8vkozQYrjbUHP5YjcICg0-LuyA9PnIeK123RU,150 +pytz/zoneinfo/Antarctica/South_Pole,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437 +pytz/zoneinfo/Antarctica/Syowa,sha256=oCKH7uafN8R1o-ijXGoT5U1JZxwvoLzJu_2Cqyi2hUM,151 +pytz/zoneinfo/Antarctica/Troll,sha256=fjcYppwr1FnjEssee-RLgGOANzoUyfjse-RGK46PR2E,1148 +pytz/zoneinfo/Antarctica/Vostok,sha256=KfftwdzK6PkMDz0d-D3z4HKIBgY9KqsqHnTnqsPMrUg,213 +pytz/zoneinfo/Arctic/Longyearbyen,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298 +pytz/zoneinfo/Asia/Aden,sha256=oCKH7uafN8R1o-ijXGoT5U1JZxwvoLzJu_2Cqyi2hUM,151 +pytz/zoneinfo/Asia/Almaty,sha256=lPLWXk2f1mWYRQZFkIrq_5HkhocsUBis0M-yhdDHcBQ,983 +pytz/zoneinfo/Asia/Amman,sha256=Qv4cXXw7KBQWE882cgj0kjQ3wh1vpV1orJ2v2Jjxr2U,1433 +pytz/zoneinfo/Asia/Anadyr,sha256=WqKnHo5IHSWZ08d2sS5ytHtv0MQMoczP3W9zbDDrbYU,1174 +pytz/zoneinfo/Asia/Aqtau,sha256=4n654FZtDssXSfhQszjZG5OmtbE2zo1KbiWcYrFJg00,969 +pytz/zoneinfo/Asia/Aqtobe,sha256=1oFHTb-ybcTqLXm0r1ZOVgdYMTHlGoNs-Pgvux50d3E,997 +pytz/zoneinfo/Asia/Ashgabat,sha256=-sfGnRumio7_Bs8w9YH4xRDWgjB3wBeW7c0C56Qqk64,605 +pytz/zoneinfo/Asia/Ashkhabad,sha256=-sfGnRumio7_Bs8w9YH4xRDWgjB3wBeW7c0C56Qqk64,605 +pytz/zoneinfo/Asia/Atyrau,sha256=_U8COUIE9nG_HKddZE1Q0sPuz3rMwfjwmfnVDY_vSmg,977 +pytz/zoneinfo/Asia/Baghdad,sha256=S-plKI4zCLqI0idGABEk3oRTazNyrIj2T98-EtWtZD8,969 +pytz/zoneinfo/Asia/Bahrain,sha256=wklGY3WPGp-z1OUwb_KOHzRTwBndt1RfDg9Uttt36G4,185 +pytz/zoneinfo/Asia/Baku,sha256=6_hq98SGG0j0JA8qYx96WcIMZSLW4w460QXh_OM_ccg,1213 +pytz/zoneinfo/Asia/Bangkok,sha256=hf_5PVegQcFSS60CjS80C7h-TGOrfQ4ncm83N8VmZkk,185 +pytz/zoneinfo/Asia/Barnaul,sha256=3zeUimLTMrIZE0vX6XHFvB3MoqExoVbE5CSm6GV0zf0,1207 +pytz/zoneinfo/Asia/Beirut,sha256=_Z_2ZAg_iL9vU51JDB8CB04uXBDrf1kLIis-JnXaS2o,2154 +pytz/zoneinfo/Asia/Bishkek,sha256=IOoUyjABILCkXu1rjCIqSwAufRYFklc5YAC4jdhVw6Q,969 +pytz/zoneinfo/Asia/Brunei,sha256=D5qtyWJ_SM8bTQeJJIYhqqojxlVKbrFC1EYMDU9GzXQ,469 +pytz/zoneinfo/Asia/Calcutta,sha256=6Qw0EDbLcgMgDik8s7UTJn4QSjmllPNeGVJU5rwKF88,285 +pytz/zoneinfo/Asia/Chita,sha256=LbSlS23swFkANUScg8zkNR0imANWNfOIaYd39HbLdIQ,1207 +pytz/zoneinfo/Asia/Choibalsan,sha256=atm7FmPwZGsftLM7vS1LltjcdaDC-DSg1cIdP2MF17I,935 +pytz/zoneinfo/Asia/Chongqing,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561 +pytz/zoneinfo/Asia/Chungking,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561 +pytz/zoneinfo/Asia/Colombo,sha256=w52L7bgT4m5hcgRuevIPY83xytfkBmkLhnKMwp16KsY,358 +pytz/zoneinfo/Asia/Dacca,sha256=-xulJ2KVhvKp6rlZLMydpw7oXVirk-riEH-181xPE54,323 
+pytz/zoneinfo/Asia/Damascus,sha256=EthGheaHWmy5IrLCc9NmM3jvASQFHt8TsBF07I1tgbg,1873 +pytz/zoneinfo/Asia/Dhaka,sha256=-xulJ2KVhvKp6rlZLMydpw7oXVirk-riEH-181xPE54,323 +pytz/zoneinfo/Asia/Dili,sha256=0mUs0Utk-uW9deZV3cBUTpfWMgFvl0DyN29JuKvKMyw,213 +pytz/zoneinfo/Asia/Dubai,sha256=pmdhPhaJRwKwONvxiZNGeFSICjlWzyY9JlFHv-H9upY,151 +pytz/zoneinfo/Asia/Dushanbe,sha256=koYnnYWuFsBXd1vJfZsGdpwnbFHEwvkGBmSrrx3KIss,577 +pytz/zoneinfo/Asia/Famagusta,sha256=CFrcygd8ude5x6OEtfM_Dw0KYHoxpPPzq46KoHVxjjc,2028 +pytz/zoneinfo/Asia/Gaza,sha256=t0YxcUQL53VNKnKbKijn0OE_MaryEynonabse-iTtzs,3844 +pytz/zoneinfo/Asia/Harbin,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561 +pytz/zoneinfo/Asia/Hebron,sha256=6Y0USHKx-xoCxCr_WpCuM3olP1vUGnzrcnGiyQFcqdQ,3872 +pytz/zoneinfo/Asia/Ho_Chi_Minh,sha256=Lnv1vpUNAXBo8v0b9d9AQpy-AEyO5Qa2Ig0PvDkjrmU,337 +pytz/zoneinfo/Asia/Hong_Kong,sha256=al_O4kPlq5JpgkLYjEaZzrcgiiLul9NC0R5B69JVWhc,1233 +pytz/zoneinfo/Asia/Hovd,sha256=Zn4PLGlD-URJDsbChor5bqWTzuAil2tbrGJW0j5TLbs,877 +pytz/zoneinfo/Asia/Irkutsk,sha256=IVuoXCwdeI-KIUfFkEt6yBjqYP3V9GTrF-_WLnffFzk,1229 +pytz/zoneinfo/Asia/Istanbul,sha256=Jk4wjndDta_uLWc8W1dWdjbavJJbsL5ROTmZboVnGKU,1933 +pytz/zoneinfo/Asia/Jakarta,sha256=TvEzBvSzfzFCdOsMAZ0QgR95JA5xf3kAZONhy5gEXRE,383 +pytz/zoneinfo/Asia/Jayapura,sha256=ihzUd-L8HUVqG-Na10MyPE-YYwjVFj-xerqjTN4EJZs,221 +pytz/zoneinfo/Asia/Jerusalem,sha256=JUuWQmW5Tha0pJjw61Q5aN7CX0z4D7ops9OOSnda6Dc,2388 +pytz/zoneinfo/Asia/Kabul,sha256=JZEbo8bSj_L7HnXUm2gAUlNlCvJlRJhFkSHCg5o3ggk,194 +pytz/zoneinfo/Asia/Kamchatka,sha256=KY1PlJvRSNkY_5hyJBxj5DDweeYVQaBK05ZgL3kdcCY,1152 +pytz/zoneinfo/Asia/Karachi,sha256=iB-mWMTXUyfBwAkZdz8_UmEw0xsgxIub-KNI7akzhkk,379 +pytz/zoneinfo/Asia/Kashgar,sha256=F1ZOdZZDsVHwDJinksR-hjcqPzqOljvdreZIWFulJxY,151 +pytz/zoneinfo/Asia/Kathmandu,sha256=_RsfeSWbCr8kM4YRJi7Xv6hAEiHW14IFhsXsfhbPjoM,198 +pytz/zoneinfo/Asia/Katmandu,sha256=_RsfeSWbCr8kM4YRJi7Xv6hAEiHW14IFhsXsfhbPjoM,198 +pytz/zoneinfo/Asia/Khandyga,sha256=bKfmw6k5qYDQsEHG3Mv-VYis3YhCeV7qijDxfxQNn_g,1257 +pytz/zoneinfo/Asia/Kolkata,sha256=6Qw0EDbLcgMgDik8s7UTJn4QSjmllPNeGVJU5rwKF88,285 +pytz/zoneinfo/Asia/Krasnoyarsk,sha256=D5KE_1wWSD2YdixDy8n3LBNaAlE1_y3TWXw6NrxFKKA,1193 +pytz/zoneinfo/Asia/Kuala_Lumpur,sha256=XmeVImeqcJ8hJzm7TjAti1nWJAxawOqq7jIzDnHX2hI,401 +pytz/zoneinfo/Asia/Kuching,sha256=D5qtyWJ_SM8bTQeJJIYhqqojxlVKbrFC1EYMDU9GzXQ,469 +pytz/zoneinfo/Asia/Kuwait,sha256=oCKH7uafN8R1o-ijXGoT5U1JZxwvoLzJu_2Cqyi2hUM,151 +pytz/zoneinfo/Asia/Macao,sha256=MvAkRyRsrA2r052ItlyF5bh2FheRjI0jPwg0uIiH2Yk,1227 +pytz/zoneinfo/Asia/Macau,sha256=MvAkRyRsrA2r052ItlyF5bh2FheRjI0jPwg0uIiH2Yk,1227 +pytz/zoneinfo/Asia/Magadan,sha256=HccEEXBQvMmLoC_JE-zP_MlLAZ1WmNLQLfM3tJt55M4,1208 +pytz/zoneinfo/Asia/Makassar,sha256=OhJtCqSTEU-u5n0opBVO5Bu-wQzcYPy9S_6aAhJXgOw,254 +pytz/zoneinfo/Asia/Manila,sha256=ujfq0kl1EhxcYSOrG-FS750aNaYUt1TT4bFuK4EcL_c,328 +pytz/zoneinfo/Asia/Muscat,sha256=pmdhPhaJRwKwONvxiZNGeFSICjlWzyY9JlFHv-H9upY,151 +pytz/zoneinfo/Asia/Nicosia,sha256=0Unm0IFT7HyGeQ7F3vTa_-klfysCgrulqFO6BD1plZU,2002 +pytz/zoneinfo/Asia/Novokuznetsk,sha256=pyxxtSUtYDeVmFk0Cg-F33laZS0iKtde9_GJnL9f0KM,1151 +pytz/zoneinfo/Asia/Novosibirsk,sha256=5K2-Gx15ThlHfolyW85S5zREtAcMjeHBYWK4E8x2LdY,1207 +pytz/zoneinfo/Asia/Omsk,sha256=HyXIWItJXBKVHUzWcQPi1Mmd6ZLmZk-QhRUo9Kv2XOI,1193 +pytz/zoneinfo/Asia/Oral,sha256=WQT4qRmC9RI_ll8zB9FvkAL8ezGb8qoqWd75GTlC7kQ,991 +pytz/zoneinfo/Asia/Phnom_Penh,sha256=hf_5PVegQcFSS60CjS80C7h-TGOrfQ4ncm83N8VmZkk,185 +pytz/zoneinfo/Asia/Pontianak,sha256=inOXwuKtjKv1z_eliPZSIqjSt6whtuxhPeG1YpjU_BQ,353 
+pytz/zoneinfo/Asia/Pyongyang,sha256=_-g3GnDAtfDX4XAktXH9jFouLUDmOovnjoOfvRpUDsE,237 +pytz/zoneinfo/Asia/Qatar,sha256=wklGY3WPGp-z1OUwb_KOHzRTwBndt1RfDg9Uttt36G4,185 +pytz/zoneinfo/Asia/Qostanay,sha256=HIjln8QIPNRU6MkWzyPi6vDrjlmVZ4XzFxcUHtXMi7s,1025 +pytz/zoneinfo/Asia/Qyzylorda,sha256=JZLNN6NuLkqaWEeVaCZiW_gL6BrIFL9lr65iK7myVPg,1011 +pytz/zoneinfo/Asia/Rangoon,sha256=_YHASq4Z5YcUILIdhEzg27CGLzarUHPDHs1Dj0QgNGM,254 +pytz/zoneinfo/Asia/Riyadh,sha256=oCKH7uafN8R1o-ijXGoT5U1JZxwvoLzJu_2Cqyi2hUM,151 +pytz/zoneinfo/Asia/Saigon,sha256=Lnv1vpUNAXBo8v0b9d9AQpy-AEyO5Qa2Ig0PvDkjrmU,337 +pytz/zoneinfo/Asia/Sakhalin,sha256=xzAor82ihAe-yXEwC6OWiMzo9b6Z-oQl39NIkU5Hhbs,1188 +pytz/zoneinfo/Asia/Samarkand,sha256=zJKSRt3lEvd6Qvg9b49QAyO4cTJyVnTKyPYcyudpHxk,563 +pytz/zoneinfo/Asia/Seoul,sha256=LI9LsV3XcJC0l-KoQf8zI-y7rk-du57erS-N2Ptdi7Q,617 +pytz/zoneinfo/Asia/Shanghai,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561 +pytz/zoneinfo/Asia/Singapore,sha256=XmeVImeqcJ8hJzm7TjAti1nWJAxawOqq7jIzDnHX2hI,401 +pytz/zoneinfo/Asia/Srednekolymsk,sha256=efaaT8iFHrcccp-VZKNMvtTuPLNjG5V9JH5KKHhH3SI,1194 +pytz/zoneinfo/Asia/Taipei,sha256=DMmQwOpPql25ue3Nf8vAKKT4em06D1Z9rHbLIitxixk,761 +pytz/zoneinfo/Asia/Tashkent,sha256=apRPy251fSRy_ixsg3BOZNmUbHdO86P5-PdgC1Xws7U,577 +pytz/zoneinfo/Asia/Tbilisi,sha256=zQ-2bVq5_USUSbwN6q0qvWjD-HXkKaH4ifMVq1lEeIM,1021 +pytz/zoneinfo/Asia/Tehran,sha256=LQMch2TMA4wI23SQzoIrlZh0_KceXQegurwxCZ5YDlY,1248 +pytz/zoneinfo/Asia/Tel_Aviv,sha256=JUuWQmW5Tha0pJjw61Q5aN7CX0z4D7ops9OOSnda6Dc,2388 +pytz/zoneinfo/Asia/Thimbu,sha256=G2nTQVEMmKlWt0B74_fUAL7KQ3YAu__J6HciiYs2IyU,189 +pytz/zoneinfo/Asia/Thimphu,sha256=G2nTQVEMmKlWt0B74_fUAL7KQ3YAu__J6HciiYs2IyU,189 +pytz/zoneinfo/Asia/Tokyo,sha256=oCueZgRNxcNcX3ZGdif9y6Su4cyVhga4XHdwlcrYLOs,309 +pytz/zoneinfo/Asia/Tomsk,sha256=cr0ULZgWBnQfzDiJeYmqpA7Xo5QRzurvrHsrbZsnhOQ,1207 +pytz/zoneinfo/Asia/Ujung_Pandang,sha256=OhJtCqSTEU-u5n0opBVO5Bu-wQzcYPy9S_6aAhJXgOw,254 +pytz/zoneinfo/Asia/Ulaanbaatar,sha256=qUkXRsTc_u7B90JxULSu7yzKbGtGfKcfEFIasGPC2ec,877 +pytz/zoneinfo/Asia/Ulan_Bator,sha256=qUkXRsTc_u7B90JxULSu7yzKbGtGfKcfEFIasGPC2ec,877 +pytz/zoneinfo/Asia/Urumqi,sha256=F1ZOdZZDsVHwDJinksR-hjcqPzqOljvdreZIWFulJxY,151 +pytz/zoneinfo/Asia/Ust-Nera,sha256=zsG8kgnw0Fcs5N2WwNTVmvWkTlpwf7Oo8y68HcXjYyw,1238 +pytz/zoneinfo/Asia/Vientiane,sha256=hf_5PVegQcFSS60CjS80C7h-TGOrfQ4ncm83N8VmZkk,185 +pytz/zoneinfo/Asia/Vladivostok,sha256=XMQLMh5SPbI6C4R3UO4KhbnG4hWVkHNedzCQeqxFk6A,1194 +pytz/zoneinfo/Asia/Yakutsk,sha256=PPNrRGgg9jefOUM-6M8XqaIm-ElfmRZSWAtSGKLzNXQ,1193 +pytz/zoneinfo/Asia/Yangon,sha256=_YHASq4Z5YcUILIdhEzg27CGLzarUHPDHs1Dj0QgNGM,254 +pytz/zoneinfo/Asia/Yekaterinburg,sha256=4NyEW6Xjr4UsWPh63HIPI4G6GT_tVG1Xkgc2xbwGjzA,1229 +pytz/zoneinfo/Asia/Yerevan,sha256=FM0pUA4NbTWBb_CsJ5KCLVrLoNmad7njBKqFrJBDoxE,1137 +pytz/zoneinfo/Atlantic/Azores,sha256=NyNrE2YIwL9yVddpECcYWwci5JzrfjxiIXP7RP0MrL8,3498 +pytz/zoneinfo/Atlantic/Bermuda,sha256=LNGKfMsnYvwImjTyzXrLhMOHHDu7qI67RbYNKvvI15I,2396 +pytz/zoneinfo/Atlantic/Canary,sha256=ymK9ufqphvNjDK3hzikN4GfkcR3QeCBiPKyVc6FjlbA,1897 +pytz/zoneinfo/Atlantic/Cape_Verde,sha256=o92pLdLFX_b9vUiq3rNpca4tupIO3dx9rNrnPcA8474,256 +pytz/zoneinfo/Atlantic/Faeroe,sha256=NibdZPZtapnYR_myIZnMdTaSKGsOBGgujj0_T2NvAzs,1815 +pytz/zoneinfo/Atlantic/Faroe,sha256=NibdZPZtapnYR_myIZnMdTaSKGsOBGgujj0_T2NvAzs,1815 +pytz/zoneinfo/Atlantic/Jan_Mayen,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298 +pytz/zoneinfo/Atlantic/Madeira,sha256=21Zcy0xRqDN3oY8jmjjO-LI7aC3G9mcS9ytaYg0g7ik,3503 
+pytz/zoneinfo/Atlantic/Reykjavik,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Atlantic/South_Georgia,sha256=I9SAcPPumy6Xf9P7dg2aE16oxwDIqyKFqinJTC-XsgM,150 +pytz/zoneinfo/Atlantic/St_Helena,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Atlantic/Stanley,sha256=siEjXTAuTum_4XGtS98MBE34XW_5xgXShEX5OMnSFjo,1200 +pytz/zoneinfo/Australia/ACT,sha256=QsOFdYWxbbL4_9R7oZ-qYPRzNA3o1P6TIOp76GFgWQY,2190 +pytz/zoneinfo/Australia/Adelaide,sha256=ld2EbxU75oVgmPe703z-I6aqLg0Kmv62ZcCGzkT5R20,2208 +pytz/zoneinfo/Australia/Brisbane,sha256=eW6Qzze2t0-speJmmvt1JMzbkSadIKdE84XHc7JUtGc,419 +pytz/zoneinfo/Australia/Broken_Hill,sha256=3k_3ljTvS5GSfo7Xh6w71UgR3aAwYPBsnCJ-mlEYCqQ,2229 +pytz/zoneinfo/Australia/Canberra,sha256=QsOFdYWxbbL4_9R7oZ-qYPRzNA3o1P6TIOp76GFgWQY,2190 +pytz/zoneinfo/Australia/Currie,sha256=GLQSzgIfsWxOvmKOrhpfofWqINQf6h36NYy3mcq6gcg,2358 +pytz/zoneinfo/Australia/Darwin,sha256=fn0IZhIW98FAnzLig-_GBtW5LA54jajdeeUzg4tCGvo,325 +pytz/zoneinfo/Australia/Eucla,sha256=i1-XGG8I6E0dXIdWGF4DlkfDLWhiAxJ_3gMpt-nm_u4,456 +pytz/zoneinfo/Australia/Hobart,sha256=GLQSzgIfsWxOvmKOrhpfofWqINQf6h36NYy3mcq6gcg,2358 +pytz/zoneinfo/Australia/LHI,sha256=oyPFQzmRqWPrSXt9pNHQmEi_PvX11k2clknziOS6ud8,1846 +pytz/zoneinfo/Australia/Lindeman,sha256=xM6Udx22oLNoLR1Y7GQhHOYov8nw3xQNqgc_NVQ2JK4,475 +pytz/zoneinfo/Australia/Lord_Howe,sha256=oyPFQzmRqWPrSXt9pNHQmEi_PvX11k2clknziOS6ud8,1846 +pytz/zoneinfo/Australia/Melbourne,sha256=lvx_MQcunMc6u2smIrl8X427bLsXvjkgpCSdjYCTNBM,2190 +pytz/zoneinfo/Australia/NSW,sha256=QsOFdYWxbbL4_9R7oZ-qYPRzNA3o1P6TIOp76GFgWQY,2190 +pytz/zoneinfo/Australia/North,sha256=fn0IZhIW98FAnzLig-_GBtW5LA54jajdeeUzg4tCGvo,325 +pytz/zoneinfo/Australia/Perth,sha256=Al1DOUh4U_ofMUQSeVlzSyD3x7SUjP9dchSaBUGmeWg,446 +pytz/zoneinfo/Australia/Queensland,sha256=eW6Qzze2t0-speJmmvt1JMzbkSadIKdE84XHc7JUtGc,419 +pytz/zoneinfo/Australia/South,sha256=ld2EbxU75oVgmPe703z-I6aqLg0Kmv62ZcCGzkT5R20,2208 +pytz/zoneinfo/Australia/Sydney,sha256=QsOFdYWxbbL4_9R7oZ-qYPRzNA3o1P6TIOp76GFgWQY,2190 +pytz/zoneinfo/Australia/Tasmania,sha256=GLQSzgIfsWxOvmKOrhpfofWqINQf6h36NYy3mcq6gcg,2358 +pytz/zoneinfo/Australia/Victoria,sha256=lvx_MQcunMc6u2smIrl8X427bLsXvjkgpCSdjYCTNBM,2190 +pytz/zoneinfo/Australia/West,sha256=Al1DOUh4U_ofMUQSeVlzSyD3x7SUjP9dchSaBUGmeWg,446 +pytz/zoneinfo/Australia/Yancowinna,sha256=3k_3ljTvS5GSfo7Xh6w71UgR3aAwYPBsnCJ-mlEYCqQ,2229 +pytz/zoneinfo/Brazil/Acre,sha256=0gpJUl46hQbp0P6Xj1S0NArIWeAryuuDXjsldvB5GHE,614 +pytz/zoneinfo/Brazil/DeNoronha,sha256=feeRAijQqKylZgqe84nKhsFLycT5zIBm7mLIvdyGw4w,702 +pytz/zoneinfo/Brazil/East,sha256=BMBnRO4_4HjvO4t3njjrMGZr-ZPmegkvyvL8KPY6ZM4,1430 +pytz/zoneinfo/Brazil/West,sha256=F6RLOOeOi9lymZiQmQ9pR8tFpPZ6EguNdPfOc6BhXDE,590 +pytz/zoneinfo/CET,sha256=o4omkrM_IsITxooUo8krM921XfBdvRs9JhwGXGd-Ypg,2094 +pytz/zoneinfo/CST6CDT,sha256=WGbtZ1FwjRX6Jeo_TCXKsfeDs4V9uhXGJfcnLJhk3s0,2310 +pytz/zoneinfo/Canada/Atlantic,sha256=TZpmc5PwWoLfTfQoQ_b3U17BE2iVKSeNkR0Ho8mbTn8,3424 +pytz/zoneinfo/Canada/Central,sha256=7P-_YQrneFcon7QKSTOnkiGjEppFDn3Z48MJ1qq8VBw,2868 +pytz/zoneinfo/Canada/Eastern,sha256=pYehoWB0Ofe6woPhgV8r26-5ZJpFPRjgbC5E5pltiI8,3494 +pytz/zoneinfo/Canada/Mountain,sha256=-TkIfc3QlvaCf0p8COZ43Y1HRBAl-nARUi-JdXeK1vE,2332 +pytz/zoneinfo/Canada/Newfoundland,sha256=r1-17uKv27eZ3JsVkw_DLZQbo6wvjuuVu7C2pDsmOgI,3655 +pytz/zoneinfo/Canada/Pacific,sha256=sknKH0jSPWam-DHfM35qXs8Nam7d5TFlkUI9Sgxryyg,2892 +pytz/zoneinfo/Canada/Saskatchewan,sha256=yjqT08pHbICYe83H8JmtaDBvCFqRv7Tfze3Y8xuXukw,980 
+pytz/zoneinfo/Canada/Yukon,sha256=TrR6PCnYG-mSClBMohqlP8qnYhXMUsydI-L-quXFxyM,1614 +pytz/zoneinfo/Chile/Continental,sha256=0CDw13dCMUsoquMupoJgupkzAUNhDK6E0lVxURA7osA,2515 +pytz/zoneinfo/Chile/EasterIsland,sha256=QbubBs_xQlvKweAnurhyHjIK4ji77Gh4G-usXul6XVM,2219 +pytz/zoneinfo/Cuba,sha256=HUQeAuKBsEkI5SLZjqynXICOUVOajkKzKH5r-Ov5Odc,2416 +pytz/zoneinfo/EET,sha256=gGVsW5-qnI7ty8vqVK1ADWhunrvAT8kUC79GUf-_7G8,1908 +pytz/zoneinfo/EST,sha256=uKE_VPKfxGyYEsyqV_DdE2MW55vs_qUioOdIn5Goobc,114 +pytz/zoneinfo/EST5EDT,sha256=fwzEMT1jgnY2dDjd0EqDl26_7LC-oF48Bd4ng5311H0,2310 +pytz/zoneinfo/Egypt,sha256=Lft-GCLQhaSJm9VqUmsEFoHIS1Vhfa7pFJn9GZCpifs,2399 +pytz/zoneinfo/Eire,sha256=QOjSocO1cihNo59vQkWxvIFPRSxE9apz0KARVx1czEM,3492 +pytz/zoneinfo/Etc/GMT,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/Etc/GMT+0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/Etc/GMT+1,sha256=1Qzl2X9rQ_RXEf11yH09wQZCr_ph6UdFP7E0yu9s-IQ,116 +pytz/zoneinfo/Etc/GMT+10,sha256=JEQyQyQlkC0o6ZTdeVjZhCIOh6cK5TF7H00Pkls-sUI,117 +pytz/zoneinfo/Etc/GMT+11,sha256=tWvcvYMFCaE60nJVvDrrov7stJvs1KQYOyrhl3dzcUs,117 +pytz/zoneinfo/Etc/GMT+12,sha256=b70HEhErq8IJmq8x7cOZy4eR__3fq5uHHpjvPBEHqMA,117 +pytz/zoneinfo/Etc/GMT+2,sha256=T6Ep5zhslBKbYaECFUB6gUKh3iTZPyMoW1kjhonxrUo,116 +pytz/zoneinfo/Etc/GMT+3,sha256=QGoYrE04bUJ-OzL37dt2MZT5FxWNLpJDPVXgJbstYZA,116 +pytz/zoneinfo/Etc/GMT+4,sha256=RWrkNki-wV7X-coe0VvufBe6LrWVpkPJgia5QQYEnBo,116 +pytz/zoneinfo/Etc/GMT+5,sha256=oRmeC41dgYXT-zzyZIRKXN9IvdL2Da5nTuwmG2_prIA,116 +pytz/zoneinfo/Etc/GMT+6,sha256=d6dAnwiejyFI2n7AzFlFW0aFAT6zYNEjBIEG0uu0sbQ,116 +pytz/zoneinfo/Etc/GMT+7,sha256=TqjYbzd0YHpx1wisFg08J19wTpg6ztJLLongZY_lozs,116 +pytz/zoneinfo/Etc/GMT+8,sha256=th_8bIMmYgRPCesBrbmBhRr0jQO7whd70LiY9HfwJyk,116 +pytz/zoneinfo/Etc/GMT+9,sha256=Qq5E6iUS7JMJIymT7YoqlI8MtqtVy0mr9t6zWFtWc9Y,116 +pytz/zoneinfo/Etc/GMT-0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/Etc/GMT-1,sha256=73F1eU8uAQGP3mcoB2q99CjfManGFHk3fefljp9pYC4,117 +pytz/zoneinfo/Etc/GMT-10,sha256=fKWWNwLBOp1OkKjtc1w9LIXJR1mTTD-JdvYflRy1IrU,118 +pytz/zoneinfo/Etc/GMT-11,sha256=D2S79n6psa9t9_2vj5wIrFpHH2OJLcCKP6vtwzFZINY,118 +pytz/zoneinfo/Etc/GMT-12,sha256=me4V6lmWI8gSr8H7N41WAD0Eww1anh_EF34Qr9UoSnI,118 +pytz/zoneinfo/Etc/GMT-13,sha256=xbmbG1BQA6Dlpa_iUwEGyJxW4a3t6lmawdPKAE8vbR8,118 +pytz/zoneinfo/Etc/GMT-14,sha256=PpXoREBh02qFpvxVMj2pV9IAzSQvBE7XPvnN9qSZ-Kc,118 +pytz/zoneinfo/Etc/GMT-2,sha256=ve6hWLdeuiLhqagaWLqMD6HNybS1chRwjudfTZ2bYBE,117 +pytz/zoneinfo/Etc/GMT-3,sha256=N77jILanuLDVkLsdujXZSu-dsHiwN5MIpwh7fMUifso,117 +pytz/zoneinfo/Etc/GMT-4,sha256=LSko5fVHqPl5zfwjGqkbMa_OFnvtpT6o_4xYxNz9n5o,117 +pytz/zoneinfo/Etc/GMT-5,sha256=uLaSR5Mb18HRTsAA5SveY9PAJ97dO8QzIWqNXe3wZb4,117 +pytz/zoneinfo/Etc/GMT-6,sha256=JSN-RUAphJ50fpIv7cYC6unrtrz9S1Wma-piDHlGe7c,117 +pytz/zoneinfo/Etc/GMT-7,sha256=vVAOF8xU9T9ESnw68c0SFXpcvkoopaiwTR0zbefHHSU,117 +pytz/zoneinfo/Etc/GMT-8,sha256=S7xFQbFMpiDZy4v5L4D9fCrjRIzzoLC5p8Se23xi7us,117 +pytz/zoneinfo/Etc/GMT-9,sha256=I5vHNmUK-Yyg_S1skFN44VGVzBgktjFgVQiDIKO4aMI,117 +pytz/zoneinfo/Etc/GMT0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/Etc/Greenwich,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/Etc/UCT,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 +pytz/zoneinfo/Etc/UTC,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 +pytz/zoneinfo/Etc/Universal,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 +pytz/zoneinfo/Etc/Zulu,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 
+pytz/zoneinfo/Europe/Amsterdam,sha256=gS9Vrrbozend9HhuFetCVrIegs9fXSjaG60X2UVwysA,2933 +pytz/zoneinfo/Europe/Andorra,sha256=gTB5jCQmvIw3JJi1_vAcOYuhtzPBR6RXUx9gVV6p6ug,1742 +pytz/zoneinfo/Europe/Astrakhan,sha256=ZeGDZjwVVRoeR-J642zEnN26BPL58ViTJLbwnk7pLXk,1151 +pytz/zoneinfo/Europe/Athens,sha256=XDY-FBUddRyQHN8GxQLZ4awjuOlWlzlUdjv7OdXFNzA,2262 +pytz/zoneinfo/Europe/Belfast,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664 +pytz/zoneinfo/Europe/Belgrade,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 +pytz/zoneinfo/Europe/Berlin,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298 +pytz/zoneinfo/Europe/Bratislava,sha256=G9fdhUXmzx651BnyZ6V7AOYIV9EV5aMJMm44eJaLLZw,2301 +pytz/zoneinfo/Europe/Brussels,sha256=gS9Vrrbozend9HhuFetCVrIegs9fXSjaG60X2UVwysA,2933 +pytz/zoneinfo/Europe/Bucharest,sha256=nfg6-bU2D6DMEWb9EMIBR5kxnNsbDSx0UKfHH_ZzqFc,2184 +pytz/zoneinfo/Europe/Budapest,sha256=lNwqxWciBvw9ei81VQwIKHbC_ZDJjpgHU6HFg4wCUkY,2368 +pytz/zoneinfo/Europe/Busingen,sha256=K5QY7Ujj2VUchKR4bhhb0hgdAJhmwED71ykXDQOGKe8,1909 +pytz/zoneinfo/Europe/Chisinau,sha256=p1J_rqFE13pL8cpBRrEFe-teCI8f0fKK4uTUy_4diF4,2390 +pytz/zoneinfo/Europe/Copenhagen,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298 +pytz/zoneinfo/Europe/Dublin,sha256=QOjSocO1cihNo59vQkWxvIFPRSxE9apz0KARVx1czEM,3492 +pytz/zoneinfo/Europe/Gibraltar,sha256=a87WpaBlvxI4gAU9OpQOkN8VUJbirVWYf-VfFLTIoS4,3068 +pytz/zoneinfo/Europe/Guernsey,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664 +pytz/zoneinfo/Europe/Helsinki,sha256=GEkB7LsVhmegt7YuuWheCDvDGC7b7Nw9bTdDGS9qkJc,1900 +pytz/zoneinfo/Europe/Isle_of_Man,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664 +pytz/zoneinfo/Europe/Istanbul,sha256=Jk4wjndDta_uLWc8W1dWdjbavJJbsL5ROTmZboVnGKU,1933 +pytz/zoneinfo/Europe/Jersey,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664 +pytz/zoneinfo/Europe/Kaliningrad,sha256=s7GXSe1YvMcs7AiUhHNTA6I4nAOQn_Kmz_ZqJYO-LMM,1493 +pytz/zoneinfo/Europe/Kiev,sha256=-wrpG9jPuIKFP1NgBVvnxsMRf9L_h5z3J6Q3jj1AwNM,2120 +pytz/zoneinfo/Europe/Kirov,sha256=P7T2Zf5Eo6o4L4Dbg_BfiFjUgTj0dQXlrwY-QZ1eBVk,1185 +pytz/zoneinfo/Europe/Kyiv,sha256=-wrpG9jPuIKFP1NgBVvnxsMRf9L_h5z3J6Q3jj1AwNM,2120 +pytz/zoneinfo/Europe/Lisbon,sha256=mpUpxGexMhbOBImDLSQs5-GAk7pm7tg4qYW044Kkle0,3497 +pytz/zoneinfo/Europe/Ljubljana,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 +pytz/zoneinfo/Europe/London,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664 +pytz/zoneinfo/Europe/Luxembourg,sha256=gS9Vrrbozend9HhuFetCVrIegs9fXSjaG60X2UVwysA,2933 +pytz/zoneinfo/Europe/Madrid,sha256=mkLX03rW3t0tmzKBIPe_noUvaFDErwC6_5ZPZZsWHOo,2614 +pytz/zoneinfo/Europe/Malta,sha256=EhKcbPL47765tWAiQ57cusaK2TaIQqZCgtJoEZs3Ud0,2620 +pytz/zoneinfo/Europe/Mariehamn,sha256=GEkB7LsVhmegt7YuuWheCDvDGC7b7Nw9bTdDGS9qkJc,1900 +pytz/zoneinfo/Europe/Minsk,sha256=KgPm0fHycntgd3xbTmmDl4O13Xh_9e2zUnd8XFSU29o,1307 +pytz/zoneinfo/Europe/Monaco,sha256=q3ehSIot1GZ6TyMHIjbg0oRf4ghAXuwbSDSYVim6evg,2962 +pytz/zoneinfo/Europe/Moscow,sha256=KmkofRcj6T8Ph28PJChm8JVp13uRvef6TZ0GuPzUiDw,1535 +pytz/zoneinfo/Europe/Nicosia,sha256=0Unm0IFT7HyGeQ7F3vTa_-klfysCgrulqFO6BD1plZU,2002 +pytz/zoneinfo/Europe/Oslo,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298 +pytz/zoneinfo/Europe/Paris,sha256=q3ehSIot1GZ6TyMHIjbg0oRf4ghAXuwbSDSYVim6evg,2962 +pytz/zoneinfo/Europe/Podgorica,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 +pytz/zoneinfo/Europe/Prague,sha256=G9fdhUXmzx651BnyZ6V7AOYIV9EV5aMJMm44eJaLLZw,2301 +pytz/zoneinfo/Europe/Riga,sha256=hJ2_0m1taW9IuA-hMyP5n-WX7YOrR0heKszJhgljRWk,2198 
+pytz/zoneinfo/Europe/Rome,sha256=1a3oLMSiMpSbh9QxV8hLLDVbZqash89iUO1urYC1AY8,2641 +pytz/zoneinfo/Europe/Samara,sha256=nXL0IxbT6qu10CNuaDHxx4W1OaAnaaKTtIJ9N9URMoU,1201 +pytz/zoneinfo/Europe/San_Marino,sha256=1a3oLMSiMpSbh9QxV8hLLDVbZqash89iUO1urYC1AY8,2641 +pytz/zoneinfo/Europe/Sarajevo,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 +pytz/zoneinfo/Europe/Saratov,sha256=ygwjvXN13TgaWxjg6ysWEnHWNxwrVtkEbrk8t9bzVVw,1169 +pytz/zoneinfo/Europe/Simferopol,sha256=tzl7xdNVSZprNCul4YE5LSpoR9JoujmOq8VbbB8wHic,1469 +pytz/zoneinfo/Europe/Skopje,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 +pytz/zoneinfo/Europe/Sofia,sha256=hCQKXfMNrnA5xHNw_uzTjKzVw4-Bvsq5oGO4yUCv5tY,2077 +pytz/zoneinfo/Europe/Stockholm,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298 +pytz/zoneinfo/Europe/Tallinn,sha256=4a6JC0aIpMzqIV7O35zoG0LLJwkQq5AoXZ2ivkic6-w,2148 +pytz/zoneinfo/Europe/Tirane,sha256=ztlZyCS9WCXeVW8nBun3Tyi5HUY0EtFbiBbEc1gucuw,2084 +pytz/zoneinfo/Europe/Tiraspol,sha256=p1J_rqFE13pL8cpBRrEFe-teCI8f0fKK4uTUy_4diF4,2390 +pytz/zoneinfo/Europe/Ulyanovsk,sha256=c8Ad5p7CKj_1cCA7lVRpcPqbQXGYaX83cuu6uIFx-Bg,1253 +pytz/zoneinfo/Europe/Uzhgorod,sha256=-wrpG9jPuIKFP1NgBVvnxsMRf9L_h5z3J6Q3jj1AwNM,2120 +pytz/zoneinfo/Europe/Vaduz,sha256=K5QY7Ujj2VUchKR4bhhb0hgdAJhmwED71ykXDQOGKe8,1909 +pytz/zoneinfo/Europe/Vatican,sha256=1a3oLMSiMpSbh9QxV8hLLDVbZqash89iUO1urYC1AY8,2641 +pytz/zoneinfo/Europe/Vienna,sha256=ZmI3kADE6bnrJEccqh73XXBY36L1G4DkpiTQImtNrUk,2200 +pytz/zoneinfo/Europe/Vilnius,sha256=UFzRX3orCTB8d9IzlxJPy5eUA2oBPuCu1UJl-2D7C3U,2162 +pytz/zoneinfo/Europe/Volgograd,sha256=RgFvt7mzZ-TtIKL9BVHmoNZLIeLIuiDdXeY10g2_vks,1193 +pytz/zoneinfo/Europe/Warsaw,sha256=TiLDPbeVF0ckgLVEkaSeDaKZ8wctdJDOl_HE_Wd5rKs,2654 +pytz/zoneinfo/Europe/Zagreb,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 +pytz/zoneinfo/Europe/Zaporozhye,sha256=-wrpG9jPuIKFP1NgBVvnxsMRf9L_h5z3J6Q3jj1AwNM,2120 +pytz/zoneinfo/Europe/Zurich,sha256=K5QY7Ujj2VUchKR4bhhb0hgdAJhmwED71ykXDQOGKe8,1909 +pytz/zoneinfo/Factory,sha256=aFFlKx93HXoJoF4SSuTlD8cZtJA-ne5oKzAa6eX2V4k,116 +pytz/zoneinfo/GB,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664 +pytz/zoneinfo/GB-Eire,sha256=yFSVBw3KQmh99qHD7ngKJ8vLgvGER1Dqb2QoM6RNKbQ,3664 +pytz/zoneinfo/GMT,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/GMT+0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/GMT-0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/GMT0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/Greenwich,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 +pytz/zoneinfo/HST,sha256=1YkCncvgL9Z5CmUo4Vk8VbQmgA7ZAQ0PtE37j1yOli8,115 +pytz/zoneinfo/Hongkong,sha256=al_O4kPlq5JpgkLYjEaZzrcgiiLul9NC0R5B69JVWhc,1233 +pytz/zoneinfo/Iceland,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 +pytz/zoneinfo/Indian/Antananarivo,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Indian/Chagos,sha256=2errXzKdFIcpU0L-XRhSHxhNabIzbI5lXV3Pq6lt40Y,185 +pytz/zoneinfo/Indian/Christmas,sha256=hf_5PVegQcFSS60CjS80C7h-TGOrfQ4ncm83N8VmZkk,185 +pytz/zoneinfo/Indian/Cocos,sha256=_YHASq4Z5YcUILIdhEzg27CGLzarUHPDHs1Dj0QgNGM,254 +pytz/zoneinfo/Indian/Comoro,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Indian/Kerguelen,sha256=F73ffVfBoUoHre0-DwsiQrYJcLpPOW-JJGk3n88lM5U,185 +pytz/zoneinfo/Indian/Mahe,sha256=pmdhPhaJRwKwONvxiZNGeFSICjlWzyY9JlFHv-H9upY,151 +pytz/zoneinfo/Indian/Maldives,sha256=F73ffVfBoUoHre0-DwsiQrYJcLpPOW-JJGk3n88lM5U,185 
+pytz/zoneinfo/Indian/Mauritius,sha256=Znqrc1chimlciJsYBOl0NvIHnrNdCxncGxWczq1PBeI,227 +pytz/zoneinfo/Indian/Mayotte,sha256=yJsuJTqJJqbOz37_NOS_zbf-JNr_IthHGMMN7sDqSWg,265 +pytz/zoneinfo/Indian/Reunion,sha256=pmdhPhaJRwKwONvxiZNGeFSICjlWzyY9JlFHv-H9upY,151 +pytz/zoneinfo/Iran,sha256=LQMch2TMA4wI23SQzoIrlZh0_KceXQegurwxCZ5YDlY,1248 +pytz/zoneinfo/Israel,sha256=JUuWQmW5Tha0pJjw61Q5aN7CX0z4D7ops9OOSnda6Dc,2388 +pytz/zoneinfo/Jamaica,sha256=wlagieUPRf5-beie-h7QsONbNzjGsm8vMs8uf28pw28,482 +pytz/zoneinfo/Japan,sha256=oCueZgRNxcNcX3ZGdif9y6Su4cyVhga4XHdwlcrYLOs,309 +pytz/zoneinfo/Kwajalein,sha256=TmZ_0f-ySQ-saBAlRXV0f49Itwne51VBXn6rWcrWqHQ,302 +pytz/zoneinfo/Libya,sha256=W1dptGD70T7ppGoo0fczFQeDiIp0nultLNPV66MwB2c,625 +pytz/zoneinfo/MET,sha256=i3CKSuP4N_PAj7o-Cbk8zPEdFs0CWWBCAfg2JXDx5V8,2094 +pytz/zoneinfo/MST,sha256=6IQwvtT12Bz1pTiqFuoVxNY-4ViS7ZrYHo5nPWwzKPw,114 +pytz/zoneinfo/MST7MDT,sha256=910Ek32FKoSyZWY_H19VHaVvqb-JsvnWTOOHvhrKsE0,2310 +pytz/zoneinfo/Mexico/BajaNorte,sha256=57-Q9LSTNuTidz-lOTwDysmlCoeFUXSecvVVqNWburQ,2374 +pytz/zoneinfo/Mexico/BajaSur,sha256=RQQVwlEVHRp2X-c_0hJ46y54abTlqUuLkyrUUicyc5g,1128 +pytz/zoneinfo/Mexico/General,sha256=A5MlfDUZ4O1-jMTRt0WPem7qqcW0Nrslls1hlc8C4-Q,1222 +pytz/zoneinfo/NZ,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437 +pytz/zoneinfo/NZ-CHAT,sha256=xhexVc5lfJ_qAv2d3HrII6lfRSxKZYBAjY2zpYkCGE8,2054 +pytz/zoneinfo/Navajo,sha256=MugZwApDs8NI9TnXANQlUE8guNBowWQY0m-ptpPndck,2460 +pytz/zoneinfo/PRC,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561 +pytz/zoneinfo/PST8PDT,sha256=Q7TCLkE69a6g7mPoPAkqhg-0dStyiAC0jVlM72KG_R8,2310 +pytz/zoneinfo/Pacific/Apia,sha256=M3QKsp75Q7H1X3aeE_9ZqQli9aEkNCCQctZQ5sEKu00,598 +pytz/zoneinfo/Pacific/Auckland,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437 +pytz/zoneinfo/Pacific/Bougainville,sha256=hWE86eXnNx-vABbp7-YSIqWyecHPMIWLftVloAoPhL8,254 +pytz/zoneinfo/Pacific/Chatham,sha256=xhexVc5lfJ_qAv2d3HrII6lfRSxKZYBAjY2zpYkCGE8,2054 +pytz/zoneinfo/Pacific/Chuuk,sha256=nB36HBWZTdh3TlP0DLFNz1KRQ0aHIfHbp7LC4Urp9fA,172 +pytz/zoneinfo/Pacific/Easter,sha256=QbubBs_xQlvKweAnurhyHjIK4ji77Gh4G-usXul6XVM,2219 +pytz/zoneinfo/Pacific/Efate,sha256=oSxNcQYx5-1FU2_yHzHI-hT-dMJcPxzy4XmdI1UxXAo,524 +pytz/zoneinfo/Pacific/Enderbury,sha256=HNTAKrsH_R2W3QRlKcmNld5KcXdP0ygXCjEovc1i-6Q,220 +pytz/zoneinfo/Pacific/Fakaofo,sha256=qOodpTMKjztvZIXVLe_f_kZ6WcHl9fCLE9ZsyvdFKLI,186 +pytz/zoneinfo/Pacific/Fiji,sha256=jB5FbOsCnHVQQ2ohPiWEQUPhG6JybB3Nog3qT6WJQ0I,564 +pytz/zoneinfo/Pacific/Funafuti,sha256=UyaKimsR8LjgL8Z2g65I0HTvr3tMZuA2wUeBB6_Zp9c,152 +pytz/zoneinfo/Pacific/Galapagos,sha256=_GJUYOjSiIjoNBO2qdq23isLMJ4NCVk3DKIRGeDc8BA,224 +pytz/zoneinfo/Pacific/Gambier,sha256=gAS7gr1HH_re0uYnL6eWo5KGJ-B5QaiM8mV2cY5mQxE,150 +pytz/zoneinfo/Pacific/Guadalcanal,sha256=M4kTWqaSQaV1AMhyLSvmwoBJF7X9icrILbvQJwp940g,152 +pytz/zoneinfo/Pacific/Guam,sha256=Ex9znmf6rNfGze6gNpZJCMr1TT4rkl2SnrhecrdJufI,494 +pytz/zoneinfo/Pacific/Honolulu,sha256=fwPRv1Jk56sCOi75uZfd_Iy2k2aSQHx3B2K5xUlSPzM,329 +pytz/zoneinfo/Pacific/Johnston,sha256=fwPRv1Jk56sCOi75uZfd_Iy2k2aSQHx3B2K5xUlSPzM,329 +pytz/zoneinfo/Pacific/Kanton,sha256=HNTAKrsH_R2W3QRlKcmNld5KcXdP0ygXCjEovc1i-6Q,220 +pytz/zoneinfo/Pacific/Kiritimati,sha256=hYk1Ooz-Lj1PuZCbNV2WJIvOLtCwSwq2u63cb1Z-3NQ,224 +pytz/zoneinfo/Pacific/Kosrae,sha256=Q0jrb4zeDrd61bU4V8TqjMc0Iep8rWZyZqJ0uqsunxs,337 +pytz/zoneinfo/Pacific/Kwajalein,sha256=TmZ_0f-ySQ-saBAlRXV0f49Itwne51VBXn6rWcrWqHQ,302 +pytz/zoneinfo/Pacific/Majuro,sha256=UyaKimsR8LjgL8Z2g65I0HTvr3tMZuA2wUeBB6_Zp9c,152 
+pytz/zoneinfo/Pacific/Marquesas,sha256=FTxPJTWtk48LVb3N2U64KLpLsmvu0DQBubTCg-dvyGM,159 +pytz/zoneinfo/Pacific/Midway,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175 +pytz/zoneinfo/Pacific/Nauru,sha256=9ASKgLHB-8nsTEK1ApzfTH0yQtbNAmGX-JI7uHZiqnA,238 +pytz/zoneinfo/Pacific/Niue,sha256=OllXxukncR7a-SMmdFox5az1xpIPMhbahQhtObmpuDM,189 +pytz/zoneinfo/Pacific/Norfolk,sha256=DMdX1Bm18lzNuiCWzwfeHUMRGXPS8v5AWnh-_EX_AZw,866 +pytz/zoneinfo/Pacific/Noumea,sha256=tkHxxnxsXTOqz3YzWi0mkhTCIONzg-W7EpSRMdPjKdQ,290 +pytz/zoneinfo/Pacific/Pago_Pago,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175 +pytz/zoneinfo/Pacific/Palau,sha256=aN2HbT0reqwKrtLKDK9M2zb0d0ikdNlTrrntVxdH66o,166 +pytz/zoneinfo/Pacific/Pitcairn,sha256=U4jAUuvsRNoy8XrPa16YpcXCcqHJY0u6JvCNgPEWO1c,188 +pytz/zoneinfo/Pacific/Pohnpei,sha256=M4kTWqaSQaV1AMhyLSvmwoBJF7X9icrILbvQJwp940g,152 +pytz/zoneinfo/Pacific/Ponape,sha256=M4kTWqaSQaV1AMhyLSvmwoBJF7X9icrILbvQJwp940g,152 +pytz/zoneinfo/Pacific/Port_Moresby,sha256=nB36HBWZTdh3TlP0DLFNz1KRQ0aHIfHbp7LC4Urp9fA,172 +pytz/zoneinfo/Pacific/Rarotonga,sha256=wPEsoXbyDnuhfzkgLvUqhSzrMx_FD42uAPluSPMh3Bc,589 +pytz/zoneinfo/Pacific/Saipan,sha256=Ex9znmf6rNfGze6gNpZJCMr1TT4rkl2SnrhecrdJufI,494 +pytz/zoneinfo/Pacific/Samoa,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175 +pytz/zoneinfo/Pacific/Tahiti,sha256=BRff9G3E-iWKhOWR1Wu02Z0iMgjrwDXV-XNrqItXdTY,151 +pytz/zoneinfo/Pacific/Tarawa,sha256=UyaKimsR8LjgL8Z2g65I0HTvr3tMZuA2wUeBB6_Zp9c,152 +pytz/zoneinfo/Pacific/Tongatapu,sha256=OppBZqTAZib9HY7U9AC-JavO7m6NxPGUtUfPQAl9oBY,358 +pytz/zoneinfo/Pacific/Truk,sha256=nB36HBWZTdh3TlP0DLFNz1KRQ0aHIfHbp7LC4Urp9fA,172 +pytz/zoneinfo/Pacific/Wake,sha256=UyaKimsR8LjgL8Z2g65I0HTvr3tMZuA2wUeBB6_Zp9c,152 +pytz/zoneinfo/Pacific/Wallis,sha256=UyaKimsR8LjgL8Z2g65I0HTvr3tMZuA2wUeBB6_Zp9c,152 +pytz/zoneinfo/Pacific/Yap,sha256=nB36HBWZTdh3TlP0DLFNz1KRQ0aHIfHbp7LC4Urp9fA,172 +pytz/zoneinfo/Poland,sha256=TiLDPbeVF0ckgLVEkaSeDaKZ8wctdJDOl_HE_Wd5rKs,2654 +pytz/zoneinfo/Portugal,sha256=mpUpxGexMhbOBImDLSQs5-GAk7pm7tg4qYW044Kkle0,3497 +pytz/zoneinfo/ROC,sha256=DMmQwOpPql25ue3Nf8vAKKT4em06D1Z9rHbLIitxixk,761 +pytz/zoneinfo/ROK,sha256=LI9LsV3XcJC0l-KoQf8zI-y7rk-du57erS-N2Ptdi7Q,617 +pytz/zoneinfo/Singapore,sha256=XmeVImeqcJ8hJzm7TjAti1nWJAxawOqq7jIzDnHX2hI,401 +pytz/zoneinfo/Turkey,sha256=Jk4wjndDta_uLWc8W1dWdjbavJJbsL5ROTmZboVnGKU,1933 +pytz/zoneinfo/UCT,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 +pytz/zoneinfo/US/Alaska,sha256=oZA1NSPS2BWdymYpnCHFO8BlYVS-ll5KLg2Ez9CbETs,2371 +pytz/zoneinfo/US/Aleutian,sha256=IB1DhwJQAKbhPJ9jHLf8zW5Dad7HIkBS-dhv64E1OlM,2356 +pytz/zoneinfo/US/Arizona,sha256=illz0sYuLL8lIPK0Tkou6dL0Vck_D0W_3rRTOvFYRmQ,360 +pytz/zoneinfo/US/Central,sha256=_roybr6I6sIAF6cYdIxGxoRpoef153Fty48dQ6bm9oY,3592 +pytz/zoneinfo/US/East-Indiana,sha256=kNKy9Kj9ICsiYYfCCbAggzMA7exf-GpGPMxoXocHUyw,1682 +pytz/zoneinfo/US/Eastern,sha256=6e0H177gx2qdRC0JHvHwFmj-58TyYBTAqGixn-bBipU,3552 +pytz/zoneinfo/US/Hawaii,sha256=fwPRv1Jk56sCOi75uZfd_Iy2k2aSQHx3B2K5xUlSPzM,329 +pytz/zoneinfo/US/Indiana-Starke,sha256=CsvZ5BKw2qVav3x_F8CU9taJdDk7jX41Cfsqms6jXV8,2444 +pytz/zoneinfo/US/Michigan,sha256=hecz8yqY2Cj5B61G3gLZdAVZvRgK9l0P90c_gN-uD5g,2230 +pytz/zoneinfo/US/Mountain,sha256=MugZwApDs8NI9TnXANQlUE8guNBowWQY0m-ptpPndck,2460 +pytz/zoneinfo/US/Pacific,sha256=aJd7ua1tGG_vxser02AQpm4wAI3LLTdgh6QcSYYecmg,2852 +pytz/zoneinfo/US/Samoa,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175 +pytz/zoneinfo/UTC,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 
+pytz/zoneinfo/Universal,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 +pytz/zoneinfo/W-SU,sha256=KmkofRcj6T8Ph28PJChm8JVp13uRvef6TZ0GuPzUiDw,1535 +pytz/zoneinfo/WET,sha256=Sc0l03EfVs_aIi17I4KyZJFkwiAHat5BgpjuuFDhgQ0,1905 +pytz/zoneinfo/Zulu,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 +pytz/zoneinfo/iso3166.tab,sha256=oBpdFY8x1GrY5vjMKgbGQYEGgqk5fUYDIPaNVCG2XnE,4791 +pytz/zoneinfo/leapseconds,sha256=fjC39Eu3wB6I4g7x_VL7HzvDVbiKbLUjfQAEgo7442I,3257 +pytz/zoneinfo/tzdata.zi,sha256=8PWtzwDNZfLJU8Wa6Ktci7tg9V5mpvh26Vb0P8jBU0w,109390 +pytz/zoneinfo/zone.tab,sha256=qSLfeCWE3tsCDIIQbr71DMkmCUXTIUEgNZgfN-60d-Y,18846 +pytz/zoneinfo/zone1970.tab,sha256=FJErvL9wggoFluO2WceYn8ZQ-nA9A073Lub1x2Pzg40,17582 +pytz/zoneinfo/zonenow.tab,sha256=YoPd7huhHsKlJliOO-eMIBE5-bHBKpbfjkSJQFAto6I,8311 diff --git a/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..0b18a281107a0448a9980396d9d324ea2aa7a7f8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..af44f198c687e245aada835efbab2f75ed2c9baf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/top_level.txt @@ -0,0 +1 @@ +pytz diff --git a/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/zip-safe b/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/zip-safe new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pytz-2024.1.dist-info/zip-safe @@ -0,0 +1 @@ +
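The listing above is the wheel RECORD manifest, where every `+` entry has the form `path,sha256=<urlsafe-base64 digest>,size`. As a rough illustration only (the helper name `verify_record_entry` and the site-packages path in the usage comment are assumptions, not part of pytz or this diff), one such line could be checked against the installed file roughly like this:

    import base64
    import hashlib
    import os.path

    def verify_record_entry(entry: str, root: str = ".") -> bool:
        """Check a 'path,sha256=<digest>,size' RECORD line against the file on disk."""
        path, digest_field, size = entry.rsplit(",", 2)
        algo, _, expected = digest_field.partition("=")
        with open(os.path.join(root, path), "rb") as f:
            data = f.read()
        # RECORD digests are urlsafe base64 with the trailing '=' padding stripped.
        actual = base64.urlsafe_b64encode(hashlib.new(algo, data).digest()).rstrip(b"=").decode("ascii")
        return actual == expected and len(data) == int(size)

    # Usage sketch (assumed install location):
    # verify_record_entry(
    #     "pytz/zoneinfo/UTC,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114",
    #     root="env-llmeval/lib/python3.10/site-packages",
    # )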