# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processing saving/loading class for common processors. """ import copy import inspect import json import os import warnings from pathlib import Path from typing import Any, Dict, Optional, Tuple, Union from .dynamic_module_utils import custom_object_save from .tokenization_utils_base import PreTrainedTokenizerBase from .utils import ( PROCESSOR_NAME, PushToHubMixin, add_model_info_to_auto_map, cached_file, copy_func, direct_transformers_import, download_url, is_offline_mode, is_remote_url, logging, ) logger = logging.get_logger(__name__) # Dynamically import the Transformers module to grab the attribute classes of the processor form their names. transformers_module = direct_transformers_import(Path(__file__).parent) AUTO_TO_BASE_CLASS_MAPPING = { "AutoTokenizer": "PreTrainedTokenizerBase", "AutoFeatureExtractor": "FeatureExtractionMixin", "AutoImageProcessor": "ImageProcessingMixin", } class ProcessorMixin(PushToHubMixin): """ This is a mixin used to provide saving/loading functionality for all processor classes. """ attributes = ["feature_extractor", "tokenizer"] # Names need to be attr_class for attr in attributes feature_extractor_class = None tokenizer_class = None _auto_class = None # args have to match the attributes class attribute def __init__(self, *args, **kwargs): # Sanitize args and kwargs for key in kwargs: if key not in self.attributes: raise TypeError(f"Unexpected keyword argument {key}.") for arg, attribute_name in zip(args, self.attributes): if attribute_name in kwargs: raise TypeError(f"Got multiple values for argument {attribute_name}.") else: kwargs[attribute_name] = arg if len(kwargs) != len(self.attributes): raise ValueError( f"This processor requires {len(self.attributes)} arguments: {', '.join(self.attributes)}. Got " f"{len(args)} arguments instead." ) # Check each arg is of the proper class (this will also catch a user initializing in the wrong order) for attribute_name, arg in kwargs.items(): class_name = getattr(self, f"{attribute_name}_class") # Nothing is ever going to be an instance of "AutoXxx", in that case we check the base class. class_name = AUTO_TO_BASE_CLASS_MAPPING.get(class_name, class_name) if isinstance(class_name, tuple): proper_class = tuple(getattr(transformers_module, n) for n in class_name if n is not None) else: proper_class = getattr(transformers_module, class_name) if not isinstance(arg, proper_class): raise ValueError( f"Received a {type(arg).__name__} for argument {attribute_name}, but a {class_name} was expected." ) setattr(self, attribute_name, arg) def to_dict(self) -> Dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this processor instance. """ output = copy.deepcopy(self.__dict__) # Get the kwargs in `__init__`. sig = inspect.signature(self.__init__) # Only save the attributes that are presented in the kwargs of `__init__`. attrs_to_save = sig.parameters # Don't save attributes like `tokenizer`, `image processor` etc. attrs_to_save = [x for x in attrs_to_save if x not in self.__class__.attributes] # extra attributes to be kept attrs_to_save += ["auto_map"] output = {k: v for k, v in output.items() if k in attrs_to_save} output["processor_class"] = self.__class__.__name__ if "tokenizer" in output: del output["tokenizer"] if "image_processor" in output: del output["image_processor"] if "feature_extractor" in output: del output["feature_extractor"] # Some attributes have different names but containing objects that are not simple strings output = { k: v for k, v in output.items() if not (isinstance(v, PushToHubMixin) or v.__class__.__name__ == "BeamSearchDecoderCTC") } return output def to_json_string(self) -> str: """ Serializes this instance to a JSON string. Returns: `str`: String containing all the attributes that make up this feature_extractor instance in JSON format. """ dictionary = self.to_dict() return json.dumps(dictionary, indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path: Union[str, os.PathLike]): """ Save this instance to a JSON file. Args: json_file_path (`str` or `os.PathLike`): Path to the JSON file in which this processor instance's parameters will be saved. """ with open(json_file_path, "w", encoding="utf-8") as writer: writer.write(self.to_json_string()) def __repr__(self): attributes_repr = [f"- {name}: {repr(getattr(self, name))}" for name in self.attributes] attributes_repr = "\n".join(attributes_repr) return f"{self.__class__.__name__}:\n{attributes_repr}\n\n{self.to_json_string()}" def save_pretrained(self, save_directory, push_to_hub: bool = False, **kwargs): """ Saves the attributes of this processor (feature extractor, tokenizer...) in the specified directory so that it can be reloaded using the [`~ProcessorMixin.from_pretrained`] method. This class method is simply calling [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] and [`~tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`]. Please refer to the docstrings of the methods above for more information. Args: save_directory (`str` or `os.PathLike`): Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist). push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if kwargs.get("token", None) is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) kwargs["token"] = use_auth_token os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = self._create_repo(repo_id, **kwargs) files_timestamps = self._get_files_timestamps(save_directory) # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be # loaded from the Hub. if self._auto_class is not None: attrs = [getattr(self, attribute_name) for attribute_name in self.attributes] configs = [(a.init_kwargs if isinstance(a, PreTrainedTokenizerBase) else a) for a in attrs] configs.append(self) custom_object_save(self, save_directory, config=configs) for attribute_name in self.attributes: attribute = getattr(self, attribute_name) # Include the processor class in the attribute config so this processor can then be reloaded with the # `AutoProcessor` API. if hasattr(attribute, "_set_processor_class"): attribute._set_processor_class(self.__class__.__name__) attribute.save_pretrained(save_directory) if self._auto_class is not None: # We added an attribute to the init_kwargs of the tokenizers, which needs to be cleaned up. for attribute_name in self.attributes: attribute = getattr(self, attribute_name) if isinstance(attribute, PreTrainedTokenizerBase): del attribute.init_kwargs["auto_map"] # If we save using the predefined names, we can load using `from_pretrained` output_processor_file = os.path.join(save_directory, PROCESSOR_NAME) # For now, let's not save to `processor_config.json` if the processor doesn't have extra attributes and # `auto_map` is not specified. if set(self.to_dict().keys()) != {"processor_class"}: self.to_json_file(output_processor_file) logger.info(f"processor saved in {output_processor_file}") if push_to_hub: self._upload_modified_files( save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get("token"), ) if set(self.to_dict().keys()) == {"processor_class"}: return [] return [output_processor_file] @classmethod def get_processor_dict( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ) -> Tuple[Dict[str, Any], Dict[str, Any]]: """ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a processor of type [`~processing_utils.ProcessingMixin`] using `from_args_and_dict`. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`): The identifier of the pre-trained checkpoint from which we want the dictionary of parameters. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. Returns: `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the processor object. """ cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) local_files_only = kwargs.pop("local_files_only", False) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", "") from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) user_agent = {"file_type": "processor", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): processor_file = os.path.join(pretrained_model_name_or_path, PROCESSOR_NAME) if os.path.isfile(pretrained_model_name_or_path): resolved_processor_file = pretrained_model_name_or_path is_local = True elif is_remote_url(pretrained_model_name_or_path): processor_file = pretrained_model_name_or_path resolved_processor_file = download_url(pretrained_model_name_or_path) else: processor_file = PROCESSOR_NAME try: # Load from local folder or from cache or download from model Hub and cache resolved_processor_file = cached_file( pretrained_model_name_or_path, processor_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, _raise_exceptions_for_missing_entries=False, ) except EnvironmentError: # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to # the original exception. raise except Exception: # For any other exception, we throw a generic error. raise EnvironmentError( f"Can't load processor for '{pretrained_model_name_or_path}'. If you were trying to load" " it from 'https://huggingface.co/models', make sure you don't have a local directory with the" f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" f" directory containing a {PROCESSOR_NAME} file" ) # Existing processors on the Hub created before #27761 being merged don't have `processor_config.json` (if not # updated afterward), and we need to keep `from_pretrained` work. So here it fallbacks to the empty dict. # (`cached_file` called using `_raise_exceptions_for_missing_entries=False` to avoid exception) # However, for models added in the future, we won't get the expected error if this file is missing. if resolved_processor_file is None: return {}, kwargs try: # Load processor dict with open(resolved_processor_file, "r", encoding="utf-8") as reader: text = reader.read() processor_dict = json.loads(text) except json.JSONDecodeError: raise EnvironmentError( f"It looks like the config file at '{resolved_processor_file}' is not a valid JSON file." ) if is_local: logger.info(f"loading configuration file {resolved_processor_file}") else: logger.info(f"loading configuration file {processor_file} from cache at {resolved_processor_file}") if "auto_map" in processor_dict and not is_local: processor_dict["auto_map"] = add_model_info_to_auto_map( processor_dict["auto_map"], pretrained_model_name_or_path ) return processor_dict, kwargs @classmethod def from_args_and_dict(cls, args, processor_dict: Dict[str, Any], **kwargs): """ Instantiates a type of [`~processing_utils.ProcessingMixin`] from a Python dictionary of parameters. Args: processor_dict (`Dict[str, Any]`): Dictionary that will be used to instantiate the processor object. Such a dictionary can be retrieved from a pretrained checkpoint by leveraging the [`~processing_utils.ProcessingMixin.to_dict`] method. kwargs (`Dict[str, Any]`): Additional parameters from which to initialize the processor object. Returns: [`~processing_utils.ProcessingMixin`]: The processor object instantiated from those parameters. """ processor_dict = processor_dict.copy() return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) # Unlike image processors or feature extractors whose `__init__` accept `kwargs`, processor don't have `kwargs`. # We have to pop up some unused (but specific) arguments to make it work. if "processor_class" in processor_dict: del processor_dict["processor_class"] if "auto_map" in processor_dict: del processor_dict["auto_map"] processor = cls(*args, **processor_dict) # Update processor with kwargs if needed for key in set(kwargs.keys()): if hasattr(processor, key): setattr(processor, key, kwargs.pop(key)) logger.info(f"Processor {processor}") if return_unused_kwargs: return processor, kwargs else: return processor @classmethod def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", **kwargs, ): r""" Instantiate a processor associated with a pretrained model. This class method is simply calling the feature extractor [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`], image processor [`~image_processing_utils.ImageProcessingMixin`] and the tokenizer [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`] methods. Please refer to the docstrings of the methods above for more information. Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on huggingface.co. - a path to a *directory* containing a feature extractor file saved using the [`~SequenceFeatureExtractor.save_pretrained`] method, e.g., `./my_model_directory/`. - a path or url to a saved feature extractor JSON *file*, e.g., `./my_model_directory/preprocessor_config.json`. **kwargs Additional keyword arguments passed along to both [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`] and [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`]. """ kwargs["cache_dir"] = cache_dir kwargs["force_download"] = force_download kwargs["local_files_only"] = local_files_only kwargs["revision"] = revision use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token if token is not None: kwargs["token"] = token args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs) processor_dict, kwargs = cls.get_processor_dict(pretrained_model_name_or_path, **kwargs) return cls.from_args_and_dict(args, processor_dict, **kwargs) @classmethod def register_for_auto_class(cls, auto_class="AutoProcessor"): """ Register this class with a given auto class. This should only be used for custom feature extractors as the ones in the library are already mapped with `AutoProcessor`. This API is experimental and may have some slight breaking changes in the next releases. Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoProcessor"`): The auto class to register this new feature extractor with. """ if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class @classmethod def _get_arguments_from_pretrained(cls, pretrained_model_name_or_path, **kwargs): args = [] for attribute_name in cls.attributes: class_name = getattr(cls, f"{attribute_name}_class") if isinstance(class_name, tuple): classes = tuple(getattr(transformers_module, n) if n is not None else None for n in class_name) use_fast = kwargs.get("use_fast", True) if use_fast and classes[1] is not None: attribute_class = classes[1] else: attribute_class = classes[0] else: attribute_class = getattr(transformers_module, class_name) args.append(attribute_class.from_pretrained(pretrained_model_name_or_path, **kwargs)) return args @property def model_input_names(self): first_attribute = getattr(self, self.attributes[0]) return getattr(first_attribute, "model_input_names", None) ProcessorMixin.push_to_hub = copy_func(ProcessorMixin.push_to_hub) if ProcessorMixin.push_to_hub.__doc__ is not None: ProcessorMixin.push_to_hub.__doc__ = ProcessorMixin.push_to_hub.__doc__.format( object="processor", object_class="AutoProcessor", object_files="processor files" )