"""Factory function to build auto-model classes."""

import copy
import importlib
import json
import os
import warnings
from collections import OrderedDict

from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...utils import (
    CONFIG_NAME,
    cached_file,
    copy_func,
    extract_commit_hash,
    find_adapter_config_file,
    is_peft_available,
    logging,
    requires_backends,
)
from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings


logger = logging.get_logger(__name__)


CLASS_DOCSTRING = """
    This is a generic model class that will be instantiated as one of the model classes of the library when created
    with the [`~BaseAutoModelClass.from_pretrained`] class method or the [`~BaseAutoModelClass.from_config`] class
    method.

    This class cannot be instantiated directly using `__init__()` (throws an error).
"""

FROM_CONFIG_DOCSTRING = """
        Instantiates one of the model classes of the library from a configuration.

        Note:
            Loading a model from its configuration file does **not** load the model weights. It only affects the
            model's configuration. Use [`~BaseAutoModelClass.from_pretrained`] to load the model weights.

        Args:
            config ([`PretrainedConfig`]):
                The model class to instantiate is selected based on the configuration class:

                List options
            attn_implementation (`str`, *optional*):
                The attention implementation to use in the model (if relevant). Can be any of `"eager"` (manual implementation of the attention), `"sdpa"` (using [`F.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html)), or `"flash_attention_2"` (using [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention)). By default, if available, SDPA will be used for torch>=2.1.1. The default is otherwise the manual `"eager"` implementation.

        Examples:

        ```python
        >>> from transformers import AutoConfig, BaseAutoModelClass

        >>> # Download configuration from huggingface.co and cache.
        >>> config = AutoConfig.from_pretrained("checkpoint_placeholder")
        >>> model = BaseAutoModelClass.from_config(config)
        ```
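
        If the selected checkpoint supports it, you can also request a specific attention implementation when
        instantiating from the configuration (a minimal sketch; `"sdpa"` is only honored when the model class and
        your installed backend actually support it):

        ```python
        >>> config = AutoConfig.from_pretrained("checkpoint_placeholder")
        >>> model = BaseAutoModelClass.from_config(config, attn_implementation="sdpa")
        ```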
"""

FROM_PRETRAINED_TORCH_DOCSTRING = """
        Instantiate one of the model classes of the library from a pretrained model.

        The model class to instantiate is selected based on the `model_type` property of the config object (either
        passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
        falling back to using pattern matching on `pretrained_model_name_or_path`:

        List options

        The model is set in evaluation mode by default using `model.eval()` (so for instance, dropout modules are
        deactivated). To train the model, you should first set it back in training mode with `model.train()`.

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *tensorflow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
                      this case, `from_tf` should be set to `True` and a configuration object should be provided as
                      `config` argument. This loading path is slower than converting the TensorFlow checkpoint to a
                      PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
            model_args (additional positional arguments, *optional*):
                Will be passed along to the underlying model `__init__()` method.
            config ([`PretrainedConfig`], *optional*):
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the *model id* string of a pretrained
                      model).
                    - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
                      save directory.
                    - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
                      configuration JSON file named *config.json* is found in the directory.
            state_dict (*Dict[str, torch.Tensor]*, *optional*):
                A state dictionary to use instead of a state dictionary loaded from saved weights file.

                This option can be used if you want to create a model from a pretrained configuration but load your own
                weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and
                [`~PreTrainedModel.from_pretrained`] is not a simpler option.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            from_tf (`bool`, *optional*, defaults to `False`):
                Load the model weights from a TensorFlow checkpoint save file (see docstring of
                `pretrained_model_name_or_path` argument).
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible.
                Will be removed in v5 of Transformers.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            trust_remote_code (`bool`, *optional*, defaults to `False`):
                Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
                should only be set to `True` for repositories you trust and in which you have read the code, as it will
                execute code present on the Hub on your local machine.
            code_revision (`str`, *optional*, defaults to `"main"`):
                The specific revision to use for the code on the Hub, if the code lives in a different repository than
                the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based
                system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier
                allowed by git.
            kwargs (additional keyword arguments, *optional*):
                Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
                `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
                automatically loaded:

                    - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
                      underlying model's `__init__` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, `kwargs` will be first passed to the configuration class
                      initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
                      corresponds to a configuration attribute will be used to override said attribute with the
                      supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
                      will be passed to the underlying model's `__init__` function.

        Examples:

        ```python
        >>> from transformers import AutoConfig, BaseAutoModelClass

        >>> # Download model and configuration from huggingface.co and cache.
        >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")

        >>> # Update configuration during loading
        >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
        >>> model.config.output_attentions
        True

        >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
        >>> config = AutoConfig.from_pretrained("./tf_model/shortcut_placeholder_tf_model_config.json")
        >>> model = BaseAutoModelClass.from_pretrained(
        ...     "./tf_model/shortcut_placeholder_tf_checkpoint.ckpt.index", from_tf=True, config=config
        ... )
        ```
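
        The `output_loading_info` flag documented above returns the loading diagnostics next to the model (a small
        sketch reusing the same placeholder checkpoint):

        ```python
        >>> model, loading_info = BaseAutoModelClass.from_pretrained(
        ...     "checkpoint_placeholder", output_loading_info=True
        ... )
        >>> # `loading_info` reports missing keys, unexpected keys and error messages
        ```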
"""

FROM_PRETRAINED_TF_DOCSTRING = """
        Instantiate one of the model classes of the library from a pretrained model.

        The model class to instantiate is selected based on the `model_type` property of the config object (either
        passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
        falling back to using pattern matching on `pretrained_model_name_or_path`:

        List options

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *PyTorch state_dict save file* (e.g., `./pt_model/pytorch_model.bin`). In this
                      case, `from_pt` should be set to `True` and a configuration object should be provided as `config`
                      argument. This loading path is slower than converting the PyTorch model to a TensorFlow model
                      using the provided conversion scripts and loading the TensorFlow model afterwards.
            model_args (additional positional arguments, *optional*):
                Will be passed along to the underlying model `__init__()` method.
            config ([`PretrainedConfig`], *optional*):
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the *model id* string of a pretrained
                      model).
                    - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
                      save directory.
                    - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
                      configuration JSON file named *config.json* is found in the directory.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            from_pt (`bool`, *optional*, defaults to `False`):
                Load the model weights from a PyTorch checkpoint save file (see docstring of
                `pretrained_model_name_or_path` argument).
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible.
                Will be removed in v5 of Transformers.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            trust_remote_code (`bool`, *optional*, defaults to `False`):
                Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
                should only be set to `True` for repositories you trust and in which you have read the code, as it will
                execute code present on the Hub on your local machine.
            code_revision (`str`, *optional*, defaults to `"main"`):
                The specific revision to use for the code on the Hub, if the code lives in a different repository than
                the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based
                system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier
                allowed by git.
            kwargs (additional keyword arguments, *optional*):
                Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
                `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
                automatically loaded:

                    - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
                      underlying model's `__init__` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, `kwargs` will be first passed to the configuration class
                      initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
                      corresponds to a configuration attribute will be used to override said attribute with the
                      supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
                      will be passed to the underlying model's `__init__` function.

        Examples:

        ```python
        >>> from transformers import AutoConfig, BaseAutoModelClass

        >>> # Download model and configuration from huggingface.co and cache.
        >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")

        >>> # Update configuration during loading
        >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
        >>> model.config.output_attentions
        True

        >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
        >>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json")
        >>> model = BaseAutoModelClass.from_pretrained(
        ...     "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config
        ... )
        ```
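
        You can also pin the exact model version to load with the `revision` argument documented above (shown here
        with the default `"main"` branch):

        ```python
        >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", revision="main")
        ```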
"""


def _get_model_class(config, model_mapping):
    supported_models = model_mapping[type(config)]
    if not isinstance(supported_models, (list, tuple)):
        return supported_models

    name_to_model = {model.__name__: model for model in supported_models}
    architectures = getattr(config, "architectures", [])
    for arch in architectures:
        if arch in name_to_model:
            return name_to_model[arch]
        elif f"TF{arch}" in name_to_model:
            return name_to_model[f"TF{arch}"]
        elif f"Flax{arch}" in name_to_model:
            return name_to_model[f"Flax{arch}"]

    # If no architecture in the config matches one of the supported models, the first element of the tuple is the
    # base class of the model.
    return supported_models[0]


class _BaseAutoModelClass:
    # Base class for auto models.
    _model_mapping = None

    def __init__(self, *args, **kwargs):
        raise EnvironmentError(
            f"{self.__class__.__name__} is designed to be instantiated "
            f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
            f"`{self.__class__.__name__}.from_config(config)` methods."
        )

    @classmethod
    def from_config(cls, config, **kwargs):
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map
        has_local_code = type(config) in cls._model_mapping.keys()
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, config._name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            class_ref = config.auto_map[cls.__name__]
            if "--" in class_ref:
                repo_id, class_ref = class_ref.split("--")
            else:
                repo_id = config.name_or_path
            model_class = get_class_from_dynamic_module(class_ref, repo_id, **kwargs)
            if os.path.isdir(config._name_or_path):
                model_class.register_for_auto_class(cls.__name__)
            else:
                cls.register(config.__class__, model_class, exist_ok=True)
            _ = kwargs.pop("code_revision", None)
            return model_class._from_config(config, **kwargs)
        elif type(config) in cls._model_mapping.keys():
            model_class = _get_model_class(config, cls._model_mapping)
            return model_class._from_config(config, **kwargs)

        raise ValueError(
            f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
            f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True
        hub_kwargs_names = [
            "cache_dir",
            "force_download",
            "local_files_only",
            "proxies",
            "resume_download",
            "revision",
            "subfolder",
            "use_auth_token",
            "token",
        ]
        hub_kwargs = {name: kwargs.pop(name) for name in hub_kwargs_names if name in kwargs}
        code_revision = kwargs.pop("code_revision", None)
        commit_hash = kwargs.pop("_commit_hash", None)
        adapter_kwargs = kwargs.pop("adapter_kwargs", None)

        token = hub_kwargs.pop("token", None)
        use_auth_token = hub_kwargs.pop("use_auth_token", None)
        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. "
                "Please use `token` instead.",
                FutureWarning,
            )
            if token is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            token = use_auth_token

        if token is not None:
            hub_kwargs["token"] = token

        if commit_hash is None:
            if not isinstance(config, PretrainedConfig):
                # We make a call to the config file first (which may be absent) to get the commit hash as soon as
                # possible.
                resolved_config_file = cached_file(
                    pretrained_model_name_or_path,
                    CONFIG_NAME,
                    _raise_exceptions_for_gated_repo=False,
                    _raise_exceptions_for_missing_entries=False,
                    _raise_exceptions_for_connection_errors=False,
                    **hub_kwargs,
                )
                commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
            else:
                commit_hash = getattr(config, "_commit_hash", None)

        if is_peft_available():
            if adapter_kwargs is None:
                adapter_kwargs = {}
                if token is not None:
                    adapter_kwargs["token"] = token

            maybe_adapter_path = find_adapter_config_file(
                pretrained_model_name_or_path, _commit_hash=commit_hash, **adapter_kwargs
            )

            if maybe_adapter_path is not None:
                with open(maybe_adapter_path, "r", encoding="utf-8") as f:
                    adapter_config = json.load(f)

                    adapter_kwargs["_adapter_model_path"] = pretrained_model_name_or_path
                    pretrained_model_name_or_path = adapter_config["base_model_name_or_path"]

        if not isinstance(config, PretrainedConfig):
            kwargs_orig = copy.deepcopy(kwargs)
            # Ensure not to pollute the config object with torch_dtype="auto" - it is meaningless in the context of
            # the config object; only torch.dtype values are acceptable there.
            if kwargs.get("torch_dtype", None) == "auto":
                _ = kwargs.pop("torch_dtype")
            # Do not overwrite the quantization_config if the config already has one.
            if kwargs.get("quantization_config", None) is not None:
                _ = kwargs.pop("quantization_config")

            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path,
                return_unused_kwargs=True,
                trust_remote_code=trust_remote_code,
                code_revision=code_revision,
                _commit_hash=commit_hash,
                **hub_kwargs,
                **kwargs,
            )

            # If torch_dtype="auto" was passed here, make sure it is passed on to the model.
            if kwargs_orig.get("torch_dtype", None) == "auto":
                kwargs["torch_dtype"] = "auto"
            if kwargs_orig.get("quantization_config", None) is not None:
                kwargs["quantization_config"] = kwargs_orig["quantization_config"]

        has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map
        has_local_code = type(config) in cls._model_mapping.keys()
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        # Pass the adapter kwargs along to the underlying model.
        kwargs["adapter_kwargs"] = adapter_kwargs

        if has_remote_code and trust_remote_code:
            class_ref = config.auto_map[cls.__name__]
            model_class = get_class_from_dynamic_module(
                class_ref, pretrained_model_name_or_path, code_revision=code_revision, **hub_kwargs, **kwargs
            )
            _ = hub_kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                model_class.register_for_auto_class(cls.__name__)
            else:
                cls.register(config.__class__, model_class, exist_ok=True)
            return model_class.from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
            )
        elif type(config) in cls._model_mapping.keys():
            model_class = _get_model_class(config, cls._model_mapping)
            return model_class.from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
            )
        raise ValueError(
            f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
            f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
        )

    @classmethod
    def register(cls, config_class, model_class, exist_ok=False):
        """
        Register a new model for this class.

        Args:
            config_class ([`PretrainedConfig`]):
                The configuration corresponding to the model to register.
            model_class ([`PreTrainedModel`]):
                The model to register.
        """
        if hasattr(model_class, "config_class") and str(model_class.config_class) != str(config_class):
            raise ValueError(
                "The model class you are passing has a `config_class` attribute that is not consistent with the "
                f"config class you passed (model has {model_class.config_class} and you passed {config_class}). Fix "
                "one of those so they match!"
            )
        cls._model_mapping.register(config_class, model_class, exist_ok=exist_ok)


class _BaseAutoBackboneClass(_BaseAutoModelClass):
    # Base class for auto backbone models.
    _model_mapping = None

    @classmethod
    def _load_timm_backbone_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        if kwargs.get("out_features", None) is not None:
            raise ValueError("Cannot specify `out_features` for timm backbones")

        if kwargs.get("output_loading_info", False):
            raise ValueError("Cannot specify `output_loading_info=True` when loading from timm")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super().from_config(config, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        use_timm_backbone = kwargs.pop("use_timm_backbone", False)
        if use_timm_backbone:
            return cls._load_timm_backbone_from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)

        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)


def insert_head_doc(docstring, head_doc=""):
    if len(head_doc) > 0:
        return docstring.replace(
            "one of the model classes of the library ",
            f"one of the model classes of the library (with a {head_doc} head) ",
        )
    return docstring.replace(
        "one of the model classes of the library ", "one of the base model classes of the library "
    )


def auto_class_update(cls, checkpoint_for_example="google-bert/bert-base-cased", head_doc=""):
    # Create a new class with the right name from the base class.
    model_mapping = cls._model_mapping
    name = cls.__name__
    class_docstring = insert_head_doc(CLASS_DOCSTRING, head_doc=head_doc)
    cls.__doc__ = class_docstring.replace("BaseAutoModelClass", name)

    # Now we need to copy and re-register `from_config` and `from_pretrained` as class methods, otherwise we can't
    # have specific docstrings for them.
    from_config = copy_func(_BaseAutoModelClass.from_config.__func__)
    from_config_docstring = insert_head_doc(FROM_CONFIG_DOCSTRING, head_doc=head_doc)
    from_config_docstring = from_config_docstring.replace("BaseAutoModelClass", name)
    from_config_docstring = from_config_docstring.replace("checkpoint_placeholder", checkpoint_for_example)
    from_config.__doc__ = from_config_docstring
    from_config = replace_list_option_in_docstrings(model_mapping._model_mapping, use_model_types=False)(from_config)
    cls.from_config = classmethod(from_config)

    if name.startswith("TF"):
        from_pretrained_docstring = FROM_PRETRAINED_TF_DOCSTRING
    elif name.startswith("Flax"):
        from_pretrained_docstring = FROM_PRETRAINED_FLAX_DOCSTRING
    else:
        from_pretrained_docstring = FROM_PRETRAINED_TORCH_DOCSTRING
    from_pretrained = copy_func(_BaseAutoModelClass.from_pretrained.__func__)
    from_pretrained_docstring = insert_head_doc(from_pretrained_docstring, head_doc=head_doc)
    from_pretrained_docstring = from_pretrained_docstring.replace("BaseAutoModelClass", name)
    from_pretrained_docstring = from_pretrained_docstring.replace("checkpoint_placeholder", checkpoint_for_example)
    shortcut = checkpoint_for_example.split("/")[-1].split("-")[0]
    from_pretrained_docstring = from_pretrained_docstring.replace("shortcut_placeholder", shortcut)
    from_pretrained.__doc__ = from_pretrained_docstring
    from_pretrained = replace_list_option_in_docstrings(model_mapping._model_mapping)(from_pretrained)
    cls.from_pretrained = classmethod(from_pretrained)
    return cls


def get_values(model_mapping):
    result = []
    for model in model_mapping.values():
        if isinstance(model, (list, tuple)):
            result += list(model)
        else:
            result.append(model)

    return result


def getattribute_from_module(module, attr):
    if attr is None:
        return None
    if isinstance(attr, tuple):
        return tuple(getattribute_from_module(module, a) for a in attr)
    if hasattr(module, attr):
        return getattr(module, attr)
    # Some of the mappings have entries model_type -> object of another model type. In that case we try to grab the
    # object at the top level.
    transformers_module = importlib.import_module("transformers")

    if module != transformers_module:
        try:
            return getattribute_from_module(transformers_module, attr)
        except ValueError:
            raise ValueError(f"Could not find {attr} neither in {module} nor in {transformers_module}!")
    else:
        raise ValueError(f"Could not find {attr} in {transformers_module}!")


class _LazyAutoMapping(OrderedDict):
    """
    A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed.

    Args:
        - config_mapping: The map model type to config class
        - model_mapping: The map model type to model (or tokenizer) class
    """

    def __init__(self, config_mapping, model_mapping):
        self._config_mapping = config_mapping
        self._reverse_config_mapping = {v: k for k, v in config_mapping.items()}
        self._model_mapping = model_mapping
        self._model_mapping._model_mapping = self
        self._extra_content = {}
        self._modules = {}

    def __len__(self):
        common_keys = set(self._config_mapping.keys()).intersection(self._model_mapping.keys())
        return len(common_keys) + len(self._extra_content)

    def __getitem__(self, key):
        if key in self._extra_content:
            return self._extra_content[key]
        model_type = self._reverse_config_mapping[key.__name__]
        if model_type in self._model_mapping:
            model_name = self._model_mapping[model_type]
            return self._load_attr_from_module(model_type, model_name)

        # Maybe there were several model types associated with this config.
        model_types = [k for k, v in self._config_mapping.items() if v == key.__name__]
        for mtype in model_types:
            if mtype in self._model_mapping:
                model_name = self._model_mapping[mtype]
                return self._load_attr_from_module(mtype, model_name)
        raise KeyError(key)

    def _load_attr_from_module(self, model_type, attr):
        module_name = model_type_to_module_name(model_type)
        if module_name not in self._modules:
            self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models")
        return getattribute_from_module(self._modules[module_name], attr)

    def keys(self):
        mapping_keys = [
            self._load_attr_from_module(key, name)
            for key, name in self._config_mapping.items()
            if key in self._model_mapping.keys()
        ]
        return mapping_keys + list(self._extra_content.keys())

    def get(self, key, default):
        try:
            return self.__getitem__(key)
        except KeyError:
            return default

    def __bool__(self):
        return bool(self.keys())

    def values(self):
        mapping_values = [
            self._load_attr_from_module(key, name)
            for key, name in self._model_mapping.items()
            if key in self._config_mapping.keys()
        ]
        return mapping_values + list(self._extra_content.values())

    def items(self):
        mapping_items = [
            (
                self._load_attr_from_module(key, self._config_mapping[key]),
                self._load_attr_from_module(key, self._model_mapping[key]),
            )
            for key in self._model_mapping.keys()
            if key in self._config_mapping.keys()
        ]
        return mapping_items + list(self._extra_content.items())

    def __iter__(self):
        return iter(self.keys())

    def __contains__(self, item):
        if item in self._extra_content:
            return True
        if not hasattr(item, "__name__") or item.__name__ not in self._reverse_config_mapping:
            return False
        model_type = self._reverse_config_mapping[item.__name__]
        return model_type in self._model_mapping

    def register(self, key, value, exist_ok=False):
        """
        Register a new model in this mapping.
        """
        if hasattr(key, "__name__") and key.__name__ in self._reverse_config_mapping:
            model_type = self._reverse_config_mapping[key.__name__]
            if model_type in self._model_mapping.keys() and not exist_ok:
                raise ValueError(f"'{key}' is already used by a Transformers model.")

        self._extra_content[key] = value
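

# How these helpers are typically wired together in the concrete auto modules (illustrative sketch only;
# `CONFIG_MAPPING_NAMES` and `MODEL_MAPPING_NAMES` stand in for the usual name-to-name OrderedDicts defined there):
#
#     MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_MAPPING_NAMES)
#
#     class AutoModel(_BaseAutoModelClass):
#         _model_mapping = MODEL_MAPPING
#
#     AutoModel = auto_class_update(AutoModel)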