"""DETA model configuration"""

from ....configuration_utils import PretrainedConfig
from ....utils import logging
from ...auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class DetaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DetaModel`]. It is used to instantiate a DETA
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the DETA
    [SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`):
            The configuration of the backbone model. When passed as a `dict`, it is converted to the matching
            config class using its `model_type` key (see the example below the argument list).
        backbone (`str`, *optional*):
            Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
            will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
            is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
        use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
            Whether to use pretrained weights for the backbone.
        use_timm_backbone (`bool`, *optional*, defaults to `False`):
            Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
            library.
        backbone_kwargs (`dict`, *optional*):
            Keyword arguments to be passed to AutoBackbone when loading from a checkpoint,
            e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
        num_queries (`int`, *optional*, defaults to 900):
            Number of object queries, i.e. detection slots. This is the maximal number of objects [`DetaModel`] can
            detect in a single image. In case `two_stage` is set to `True`, we use `two_stage_num_proposals` instead.
        d_model (`int`, *optional*, defaults to 256):
            Dimension of the layers.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in the decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in the encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        init_xavier_std (`float`, *optional*, defaults to 1):
            The scaling factor used for the Xavier initialization gain in the HM Attention map module.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        auxiliary_loss (`bool`, *optional*, defaults to `False`):
            Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
        position_embedding_type (`str`, *optional*, defaults to `"sine"`):
            Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
        class_cost (`float`, *optional*, defaults to 1):
            Relative weight of the classification error in the Hungarian matching cost.
        bbox_cost (`float`, *optional*, defaults to 5):
            Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
        giou_cost (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
        mask_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the Focal loss in the panoptic segmentation loss.
        dice_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
        bbox_loss_coefficient (`float`, *optional*, defaults to 5):
            Relative weight of the L1 bounding box loss in the object detection loss.
        giou_loss_coefficient (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss in the object detection loss.
        eos_coefficient (`float`, *optional*, defaults to 0.1):
            Relative classification weight of the 'no-object' class in the object detection loss.
        num_feature_levels (`int`, *optional*, defaults to 5):
            The number of input feature levels.
        encoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the encoder.
        decoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the decoder.
        two_stage (`bool`, *optional*, defaults to `True`):
            Whether to apply a two-stage deformable DETR, where the region proposals are generated by a variant of
            DETA and then fed into the decoder for iterative bounding box refinement.
        two_stage_num_proposals (`int`, *optional*, defaults to 300):
            The number of region proposals to be generated, in case `two_stage` is set to `True`.
        with_box_refine (`bool`, *optional*, defaults to `True`):
            Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
            based on the predictions from the previous layer.
        focal_alpha (`float`, *optional*, defaults to 0.25):
            Alpha parameter in the focal loss.
        assign_first_stage (`bool`, *optional*, defaults to `True`):
            Whether to assign each prediction to the highest-overlapping ground-truth object if the overlap is larger than a threshold of 0.7.
        assign_second_stage (`bool`, *optional*, defaults to `True`):
            Whether to apply a second assignment procedure in the second stage that closely follows the first-stage assignment procedure.
        disable_custom_kernels (`bool`, *optional*, defaults to `True`):
            Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom
            kernels are not supported by PyTorch ONNX export.
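
    As noted above, `backbone_config` can also be passed as a plain `dict` with a `"model_type"` key, in which case
    it is converted to the matching configuration class from `CONFIG_MAPPING`. A minimal sketch, assuming the
    default ResNet backbone:

    ```python
    >>> from transformers import DetaConfig

    >>> # Equivalent to passing a ResNetConfig instance
    >>> configuration = DetaConfig(
    ...     backbone_config={"model_type": "resnet", "out_features": ["stage2", "stage3", "stage4"]}
    ... )
    ```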

    Examples:

    ```python
    >>> from transformers import DetaConfig, DetaModel

    >>> # Initializing a DETA SenseTime/deformable-detr style configuration
    >>> configuration = DetaConfig()

    >>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration
    >>> model = DetaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
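
    >>> # Defaults can be overridden as well; an illustrative variant (hypothetical values, not a released checkpoint)
    >>> custom_configuration = DetaConfig(
    ...     num_queries=300,
    ...     two_stage=True,
    ...     with_box_refine=True,  # required whenever two_stage is True
    ... )
    >>> custom_model = DetaModel(custom_configuration)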
    ```"""

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        backbone=None,
        use_pretrained_backbone=False,
        use_timm_backbone=False,
        backbone_kwargs=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        assign_second_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=True,
        **kwargs,
    ):
        if use_pretrained_backbone:
            raise ValueError("Pretrained backbones are not supported yet.")

        if backbone_config is not None and backbone is not None:
            raise ValueError("You can't specify both `backbone` and `backbone_config`.")

        if backbone_config is None and backbone is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
            raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")

        self.backbone_config = backbone_config
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = use_timm_backbone
        self.backbone_kwargs = backbone_kwargs
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        self.assign_second_stage = assign_second_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model