import random
import warnings
from collections.abc import Mapping
from dataclasses import dataclass
from random import randint
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union

import numpy as np

from ..models.bert import BertTokenizer, BertTokenizerFast
from ..tokenization_utils_base import PreTrainedTokenizerBase
from ..utils import PaddingStrategy


InputDataClass = NewType("InputDataClass", Any)

DataCollator = NewType("DataCollator", Callable[[List[InputDataClass]], Dict[str, Any]])


class DataCollatorMixin:
    def __call__(self, features, return_tensors=None):
        if return_tensors is None:
            return_tensors = self.return_tensors
        if return_tensors == "tf":
            return self.tf_call(features)
        elif return_tensors == "pt":
            return self.torch_call(features)
        elif return_tensors == "np":
            return self.numpy_call(features)
        else:
            raise ValueError(f"Framework '{return_tensors}' not recognized!")
__module____qualname__r"   r    r    r    r!   r   &   s   r   c              	   O   sP   t | ds| j||S | jdd}d| jd< z| j||}W 5 || jd< X |S )zz
    Pads without triggering the warning about how using the pad function is sub-optimal when using a fast tokenizer.
    deprecation_warningszAsking-to-pad-a-fast-tokenizerFT)hasattrpadr&   get)	tokenizerZpad_argsZ
pad_kwargsZwarning_stateZpaddedr    r    r!   "pad_without_fast_tokenizer_warning4   s    

def default_data_collator(features: List[InputDataClass], return_tensors="pt") -> Dict[str, Any]:
    """
    Very simple data collator that simply collates batches of dict-like objects and performs special handling for
    potential keys named:

        - `label`: handles a single value (int or float) per object
        - `label_ids`: handles a list of values per object

    Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
    to the model. See glue and ner for example of how it's useful.
    """
    if return_tensors == "pt":
        return torch_default_data_collator(features)
    elif return_tensors == "tf":
        return tf_default_data_collator(features)
    elif return_tensors == "np":
        return numpy_default_data_collator(features)

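# Usage sketch (illustrative only, not part of the original module): collating two
# toy features with `default_data_collator`. The feature values below are made up;
# any dict-like objects whose fields have equal lengths within the batch will work.
#
#   features = [
#       {"input_ids": [101, 7592, 102], "label": 0},
#       {"input_ids": [101, 2088, 102], "label": 1},
#   ]
#   batch = default_data_collator(features, return_tensors="np")
#   # batch["input_ids"].shape == (2, 3) and batch["labels"].shape == (2,)
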
dS )	DefaultDataCollatora*  
    Very simple data collator that simply collates batches of dict-like objects and performs special handling for
    potential keys named:

        - `label`: handles a single value (int or float) per object
        - `label_ids`: handles a list of values per object

    Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
    to the model. See glue and ner for example of how it's useful.

    This is an object (like other data collators) rather than a pure function like default_data_collator. This can be
    helpful if you need to set a return_tensors value at initialization.

    Args:
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    r   r   Nr,   c                 C   s   |d kr| j }t||S N)r   r1   r   r    r    r!   r"   y   s    zDefaultDataCollator.__call__)N)r#   r$   r%   __doc__r   str__annotations__r   r   r   r"   r    r    r    r!   r2   c   s   
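# Usage sketch (illustrative only): the class-based collator is convenient when the
# `return_tensors` default has to be fixed once, e.g. for a NumPy-only pipeline.
#
#   collator = DefaultDataCollator(return_tensors="np")
#   batch = collator([{"input_ids": [1, 2, 3], "label": 0}])
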
def torch_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
    import torch

    if not isinstance(features[0], Mapping):
        features = [vars(f) for f in features]
    first = features[0]
    batch = {}

    # Special handling for labels: make sure the tensor is created with the correct dtype.
    if "label" in first and first["label"] is not None:
        label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
        dtype = torch.long if isinstance(label, int) else torch.float
        batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
    elif "label_ids" in first and first["label_ids"] is not None:
        if isinstance(first["label_ids"], torch.Tensor):
            batch["labels"] = torch.stack([f["label_ids"] for f in features])
        else:
            dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float
            batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)

    # Handling of all other possible keys, using the first element as a proxy for the batch.
    for k, v in first.items():
        if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
            if isinstance(v, torch.Tensor):
                batch[k] = torch.stack([f[k] for f in features])
            elif isinstance(v, np.ndarray):
                batch[k] = torch.tensor(np.stack([f[k] for f in features]))
            else:
                batch[k] = torch.tensor([f[k] for f in features])

    return batch


def tf_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
    import tensorflow as tf

    if not isinstance(features[0], Mapping):
        features = [vars(f) for f in features]
    first = features[0]
    batch = {}

    # Special handling for labels: pick the label column and an appropriate dtype.
    if "label" in first and first["label"] is not None:
        label_col_name = "label"
    elif "label_ids" in first and first["label_ids"] is not None:
        label_col_name = "label_ids"
    elif "labels" in first and first["labels"] is not None:
        label_col_name = "labels"
    else:
        label_col_name = None

    if label_col_name is not None:
        if isinstance(first[label_col_name], tf.Tensor):
            dtype = tf.int64 if first[label_col_name].dtype.is_integer else tf.float32
        elif isinstance(first[label_col_name], (np.ndarray, np.generic)):
            dtype = tf.int64 if np.issubdtype(first[label_col_name].dtype, np.integer) else tf.float32
        elif isinstance(first[label_col_name], (tuple, list)):
            dtype = tf.int64 if isinstance(first[label_col_name][0], int) else tf.float32
        else:
            dtype = tf.int64 if isinstance(first[label_col_name], int) else tf.float32
        batch["labels"] = tf.convert_to_tensor([f[label_col_name] for f in features], dtype=dtype)

    for k, v in first.items():
        if k not in ("label", "label_ids", "labels") and v is not None and not isinstance(v, str):
            if isinstance(v, (tf.Tensor, np.ndarray)):
                batch[k] = tf.stack([f[k] for f in features])
            else:
                batch[k] = tf.convert_to_tensor([f[k] for f in features])

    return batch


def numpy_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
    if not isinstance(features[0], Mapping):
        features = [vars(f) for f in features]
    first = features[0]
    batch = {}

    if "label" in first and first["label"] is not None:
        label = first["label"].item() if isinstance(first["label"], np.ndarray) else first["label"]
        dtype = np.int64 if isinstance(label, int) else np.float32
        batch["labels"] = np.array([f["label"] for f in features], dtype=dtype)
    elif "label_ids" in first and first["label_ids"] is not None:
        if isinstance(first["label_ids"], np.ndarray):
            batch["labels"] = np.stack([f["label_ids"] for f in features])
        else:
            dtype = np.int64 if isinstance(first["label_ids"][0], int) else np.float32
            batch["labels"] = np.array([f["label_ids"] for f in features], dtype=dtype)

    for k, v in first.items():
        if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
            if isinstance(v, np.ndarray):
                batch[k] = np.stack([f[k] for f in features])
            else:
                batch[k] = np.array([f[k] for f in features])

    return batch

f ed< dZee ed< dZee ed< dZe	ed	< eee	ef  ee	ef d
ddZdS )DataCollatorWithPaddinga  
    Data collator that will dynamically pad the inputs received.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:

            - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    r*   TpaddingN
max_lengthpad_to_multiple_ofr   r   r,   c                 C   sV   t | j|| j| j| j| jd}d|kr8|d |d< |d= d|krR|d |d< |d= |S )Nr`   ra   rb   r   r=   rA   rB   )r+   r*   r`   ra   rb   r   )r   r   rS   r    r    r!   r"     s    z DataCollatorWithPadding.__call__)r#   r$   r%   r4   r   r6   r`   r   boolr5   r   ra   r
   rK   rb   r   r   r   r   r"   r    r    r    r!   r_      s   
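# Usage sketch (illustrative only): dynamic padding of tokenized examples. The
# checkpoint name below is an assumption made for the example.
#
#   from transformers import AutoTokenizer
#
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#   collator = DataCollatorWithPadding(tok, pad_to_multiple_of=8)
#   batch = collator([tok("short text"), tok("a somewhat longer example text")])
#   # every tensor in `batch` is padded to the same, multiple-of-8 length
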
@dataclass
class DataCollatorForTokenClassification(DataCollatorMixin):
    """
    Data collator that will dynamically pad the inputs received, as well as the labels.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:

            - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
        label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None

        no_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]

        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            no_labels_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        if labels is None:
            return batch

        sequence_length = batch["input_ids"].shape[1]
        padding_side = self.tokenizer.padding_side

        def to_list(tensor_or_iterable):
            if isinstance(tensor_or_iterable, torch.Tensor):
                return tensor_or_iterable.tolist()
            return list(tensor_or_iterable)

        if padding_side == "right":
            batch[label_name] = [
                to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + to_list(label) for label in labels
            ]

        batch[label_name] = torch.tensor(batch[label_name], dtype=torch.int64)
        return batch

    def tf_call(self, features):
        import tensorflow as tf

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="tf" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = tf.convert_to_tensor(batch["input_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch["labels"] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch["labels"] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        batch = {k: tf.convert_to_tensor(v, dtype=tf.int64) for k, v in batch.items()}
        return batch

    def numpy_call(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="np" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = np.array(batch["input_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch["labels"] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch["labels"] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        batch = {k: np.array(v, dtype=np.int64) for k, v in batch.items()}
        return batch

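# Usage sketch (illustrative only): token-level labels are padded with
# `label_pad_token_id` so they line up with the padded `input_ids`. `tok` is
# assumed to be a tokenizer instance, as in the sketch above.
#
#   collator = DataCollatorForTokenClassification(tok, label_pad_token_id=-100)
#   features = [
#       {"input_ids": [101, 5, 6, 102], "labels": [-100, 1, 2, -100]},
#       {"input_ids": [101, 7, 102], "labels": [-100, 3, -100]},
#   ]
#   batch = collator(features)  # labels padded to length 4 with -100
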
( re   rb   c                    s:  ddl t| d tttjfr0fdd| D } | d d t fdd| D }|rz|dksl | dkrzj| ddS |j	dkrt
d|jj d	td
d | D }|dk	r|| dkr|| d | }| d t| |g|j}t| D ]D\}}|jdkr|||d|jd f< q||||jd  df< q|S )_Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary.r   Nc                    s   g | ]} j | jd qS r   )rN   rL   r:   erq   r    r!   r<     s     z(_torch_collate_batch.<locals>.<listcomp>c                 3   s   | ]}| d  kV  qdS r   Nsizer:   xlength_of_firstr    r!   	<genexpr>  s     z'_torch_collate_batch.<locals>.<genexpr>)ZdimCYou are attempting to pad samples but the tokenizer you are using () does not have a pad token.c                 s   s   | ]}| d V  qdS r   r   r   r    r    r!   r     s     ro   rs   )rG   rH   r\   r[   r   rQ   r   allrO   
_pad_tokenr   	__class__r#   maxZnew_fullru   pad_token_id	enumerater{   rz   examplesr*   rb   are_tensors_same_lengthra   resultiexampler    )r   rG   r!   _torch_collate_batch  s(    
r   c           	         sL  dd l t| d ttfr,fdd| D } t| d  t fdd| D }|rt|d ksf | dkrtj| ddS |jd krtd|j	j
 dtd	d | D }|d k	r|| dkr|| d
 | }g }| d }tj|dftjd}| D ]J}|jdkr|t| |d< n|t| |d< |j|||jd qj|ddS )Nr   c                    s   g | ]} j | jd qS r   r   r   r   r    r!   r<     s     z%_tf_collate_batch.<locals>.<listcomp>c                 3   s   | ]}t | kV  qd S r3   ru   r   r   r    r!   r     s     z$_tf_collate_batch.<locals>.<genexpr>Zaxisr   r   c                 s   s   | ]}t |V  qd S r3   r   r   r    r    r!   r     s     ro   r   r?   rs   )r   ro   )r   r   )Zconstant_values)rV   rH   r\   r[   ru   r   rO   r   r   r   r#   r   rankr   zerosZint32r{   appendr(   r   )	r   r*   rb   r   ra   r   r   Zpaddingsr   r    )r   r   r!   _tf_collate_batch  s.    
r   c                    s.  t | d ttfr dd | D } t| d  t fdd| D }|rh|dksZ | dkrhtj| ddS |jdkrtd|j	j
 d	td
d | D }|dk	r|| dkr|| d | }tjt| |f|j| d jd}t| D ]D\}}|jdkr|||d|jd f< q||||jd  df< q|S )r   r   c                 S   s   g | ]}t j|t jd qS r   r   r   r    r    r!   r<     s     z(_numpy_collate_batch.<locals>.<listcomp>c                 3   s   | ]}t | kV  qd S r3   r   r   r   r    r!   r     s     z'_numpy_collate_batch.<locals>.<genexpr>Nr   r   r   c                 s   s   | ]}t |V  qd S r3   r   r   r    r    r!   r   
  s     ro   )rz   Z
fill_valuer@   rs   )rH   r\   r[   ru   r   r   rO   r   r   r   r#   r   fullr   r@   r   r{   rz   r   r    r   r!   _numpy_collate_batch  s&    
 r   c                 C   s(   t | tr| S t| dr |  } |  S )Nnumpy)rH   r\   r'   r   rp   )r   r    r    r!   rp     s
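# Behaviour sketch (illustrative only) for the private collate helpers above:
# examples of unequal length are padded with `tokenizer.pad_token_id`, on the
# right or on the left depending on `tokenizer.padding_side`. `tok` is assumed
# to be a tokenizer instance with a pad token.
#
#   batch = _numpy_collate_batch([[1, 2, 3, 4], [5, 6]], tok)
#   # -> shape (2, 4); the second row ends with two pad ids when padding_side == "right"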
    

@dataclass
class DataCollatorForSeq2Seq:
    """
    Data collator that will dynamically pad the inputs received, as well as the labels.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        model ([`PreTrainedModel`], *optional*):
            The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to
            prepare the *decoder_input_ids*

            This is useful when using *label_smoothing* to avoid calculating loss twice.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:

            - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
        label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """

    tokenizer: PreTrainedTokenizerBase
    model: Optional[Any] = None
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def __call__(self, features, return_tensors=None):
        if return_tensors is None:
            return_tensors = self.return_tensors

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        # Reconvert list[None] to None if necessary; this may occur when we pass {..., "labels": None}.
        if labels is not None and all(label is None for label in labels):
            labels = None
        non_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]

        # Run the features through the tokenizer without the labels to ensure no side effects.
        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            non_labels_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors=return_tensors,
        )

        # Pad the labels manually, since `tokenizer.pad` cannot be relied on for them and they
        # have to share one length before being converted to tensors.
        no_padding = self.padding is False or self.padding == PaddingStrategy.DO_NOT_PAD
        if labels is not None:
            if no_padding:
                if isinstance(features[0][label_name], list):
                    batch["labels"] = list(labels)
                else:
                    batch["labels"] = [np.concatenate([label, []]) for label in labels]
            else:
                max_padding = self.padding == PaddingStrategy.MAX_LENGTH and self.max_length is not None
                max_label_length = max(len(l) for l in labels) if not max_padding else self.max_length
                if self.pad_to_multiple_of is not None:
                    max_label_length = (
                        (max_label_length + self.pad_to_multiple_of - 1)
                        // self.pad_to_multiple_of
                        * self.pad_to_multiple_of
                    )

                padding_side = self.tokenizer.padding_side
                padded_labels = []
                for label in labels:
                    remainder = [self.label_pad_token_id] * (max_label_length - len(label))
                    if isinstance(label, list):
                        padded = label + remainder if padding_side == "right" else remainder + label
                    elif padding_side == "right":
                        padded = np.concatenate([label, np.array(remainder, dtype=np.int64)])
                    else:
                        padded = np.concatenate([np.array(remainder, dtype=np.int64), label])
                    padded_labels.append(padded)
                batch["labels"] = padded_labels

        # Reintroduce the datatype matching the requested `return_tensors`.
        if batch.get("labels", None) is not None:
            if return_tensors == "pt":
                import torch

                batch["labels"] = torch.tensor(batch["labels"], dtype=torch.int64)
            elif return_tensors == "tf":
                import tensorflow as tf

                batch["labels"] = tf.constant(batch["labels"], dtype=tf.int64)
            else:
                batch["labels"] = np.array(batch["labels"], dtype=np.int64)
        else:
            batch["labels"] = None

        # Prepare decoder_input_ids.
        if (
            labels is not None
            and self.model is not None
            and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
        ):
            decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=batch["labels"])
            batch["decoder_input_ids"] = decoder_input_ids

        return batch

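# Usage sketch (illustrative only): with a seq2seq model the collator can also
# produce `decoder_input_ids` from the padded labels. The checkpoint names below
# are assumptions made for the example.
#
#   from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#
#   tok = AutoTokenizer.from_pretrained("t5-small")
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#   collator = DataCollatorForSeq2Seq(tok, model=model, padding="longest")
#   features = [{"input_ids": [100, 200, 1], "labels": [300, 1]}]
#   batch = collator(features)  # input_ids, attention_mask, labels, decoder_input_ids
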
@dataclass
class DataCollatorForLanguageModeling(DataCollatorMixin):
    """
    Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
    are not all of the same length.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        mlm (`bool`, *optional*, defaults to `True`):
            Whether or not to use masked language modeling. If set to `False`, the labels are the same as the inputs
            with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked
            tokens and the value to predict for the masked token.
        mlm_probability (`float`, *optional*, defaults to 0.15):
            The probability with which to (randomly) mask tokens in the input, when `mlm` is set to `True`.
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value.
        return_tensors (`str`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".

    <Tip>

    For best performance, this data collator should be used with a dataset having items that are dictionaries or
    BatchEncoding, with the `"special_tokens_mask"` key, as returned by a [`PreTrainedTokenizer`] or a
    [`PreTrainedTokenizerFast`] with the argument `return_special_tokens_mask=True`.

    </Tip>
    """

    tokenizer: PreTrainedTokenizerBase
    mlm: bool = True
    mlm_probability: float = 0.15
    pad_to_multiple_of: Optional[int] = None
    tf_experimental_compile: bool = False
    return_tensors: str = "pt"

    def __post_init__(self):
        if self.mlm and self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. "
                "You should pass `mlm=False` to train on causal language modeling instead."
            )
        if self.tf_experimental_compile:
            import tensorflow as tf

            self.tf_mask_tokens = tf.function(self.tf_mask_tokens, jit_compile=True)

    @staticmethod
    def tf_bernoulli(shape, probability):
        import tensorflow as tf

        prob_matrix = tf.fill(shape, probability)
        return tf.cast(prob_matrix - tf.random.uniform(shape, 0, 1) >= 0, tf.bool)

    def tf_mask_tokens(self, inputs, vocab_size, mask_token_id, special_tokens_mask):
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        """
        import tensorflow as tf

        mask_token_id = tf.cast(mask_token_id, inputs.dtype)

        input_shape = tf.shape(inputs)
        # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`).
        masked_indices = self.tf_bernoulli(input_shape, self.mlm_probability) & ~special_tokens_mask
        # Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens.
        labels = tf.where(masked_indices, inputs, -100)

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]).
        indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
        inputs = tf.where(indices_replaced, mask_token_id, inputs)

        # 10% of the time, we replace masked input tokens with a random word.
        indices_random = self.tf_bernoulli(input_shape, 0.5) & masked_indices & ~indices_replaced
        random_words = tf.random.uniform(input_shape, maxval=vocab_size, dtype=inputs.dtype)
        inputs = tf.where(indices_random, random_words, inputs)

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged.
        return inputs, labels

    def tf_call(self, examples):
        import tensorflow as tf

        # Handle dicts or lists with proper padding and conversion to tensors.
        if isinstance(examples[0], Mapping):
            batch = pad_without_fast_tokenizer_warning(
                self.tokenizer, examples, return_tensors="tf", pad_to_multiple_of=self.pad_to_multiple_of
            )
        else:
            batch = {
                "input_ids": _tf_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
            }

        # If a special token mask has been preprocessed, pop it from the dict.
        special_tokens_mask = batch.pop("special_tokens_mask", None)
        if self.mlm:
            if special_tokens_mask is None:
                special_tokens_mask = [
                    self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
                    for val in batch["input_ids"].numpy().tolist()
                ]
                special_tokens_mask = tf.cast(tf.convert_to_tensor(special_tokens_mask, dtype=tf.int64), tf.bool)
            else:
                special_tokens_mask = tf.cast(special_tokens_mask, tf.bool)
            batch["input_ids"], batch["labels"] = self.tf_mask_tokens(
                tf.cast(batch["input_ids"], tf.int64),
                special_tokens_mask=special_tokens_mask,
                mask_token_id=self.tokenizer.mask_token_id,
                vocab_size=len(self.tokenizer),
            )
        else:
            labels = batch["input_ids"]
            if self.tokenizer.pad_token_id is not None:
                # Replace the pad token id with -100.
                labels = tf.where(labels == self.tokenizer.pad_token_id, -100, labels)
            else:
                labels = tf.identity(labels)  # Makes a copy, just in case.
            batch["labels"] = labels
        return batch

    def torch_call(self, examples):
        # Handle dicts or lists with proper padding and conversion to tensors.
        if isinstance(examples[0], Mapping):
            batch = pad_without_fast_tokenizer_warning(
                self.tokenizer, examples, return_tensors="pt", pad_to_multiple_of=self.pad_to_multiple_of
            )
        else:
            batch = {
                "input_ids": _torch_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
            }

        # If a special token mask has been preprocessed, pop it from the dict.
        special_tokens_mask = batch.pop("special_tokens_mask", None)
        if self.mlm:
            batch["input_ids"], batch["labels"] = self.torch_mask_tokens(
                batch["input_ids"], special_tokens_mask=special_tokens_mask
            )
        else:
            labels = batch["input_ids"].clone()
            if self.tokenizer.pad_token_id is not None:
                labels[labels == self.tokenizer.pad_token_id] = -100
            batch["labels"] = labels
        return batch

    def torch_mask_tokens(self, inputs, special_tokens_mask=None):
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        """
        import torch

        labels = inputs.clone()
        # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`).
        probability_matrix = torch.full(labels.shape, self.mlm_probability)
        if special_tokens_mask is None:
            special_tokens_mask = [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
            ]
            special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
        else:
            special_tokens_mask = special_tokens_mask.bool()

        probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
        masked_indices = torch.bernoulli(probability_matrix).bool()
        labels[~masked_indices] = -100  # We only compute loss on masked tokens.

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]).
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

        # 10% of the time, we replace masked input tokens with a random word.
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged.
        return inputs, labels

    def numpy_call(self, examples):
        # Handle dicts or lists with proper padding and conversion to arrays.
        if isinstance(examples[0], Mapping):
            batch = pad_without_fast_tokenizer_warning(
                self.tokenizer, examples, return_tensors="np", pad_to_multiple_of=self.pad_to_multiple_of
            )
        else:
            batch = {
                "input_ids": _numpy_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
            }

        # If a special token mask has been preprocessed, pop it from the dict.
        special_tokens_mask = batch.pop("special_tokens_mask", None)
        if self.mlm:
            batch["input_ids"], batch["labels"] = self.numpy_mask_tokens(
                batch["input_ids"], special_tokens_mask=special_tokens_mask
            )
        else:
            labels = np.copy(batch["input_ids"])
            if self.tokenizer.pad_token_id is not None:
                labels[labels == self.tokenizer.pad_token_id] = -100
            batch["labels"] = labels
        return batch

    def numpy_mask_tokens(self, inputs, special_tokens_mask=None):
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        """
        labels = np.copy(inputs)
        # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`).
        probability_matrix = np.full(labels.shape, self.mlm_probability)
        if special_tokens_mask is None:
            special_tokens_mask = [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
            ]
            special_tokens_mask = np.array(special_tokens_mask, dtype=bool)
        else:
            special_tokens_mask = special_tokens_mask.astype(bool)

        probability_matrix[special_tokens_mask] = 0
        # NumPy has no bernoulli, so a binomial with one trial is used instead.
        masked_indices = np.random.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool)
        labels[~masked_indices] = -100  # We only compute loss on masked tokens.

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]).
        indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
        inputs[indices_replaced] = self.tokenizer.mask_token_id

        # 10% of the time, we replace masked input tokens with a random word.
        indices_random = (
            np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
        )
        random_words = np.random.randint(
            low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64
        )
        inputs[indices_random] = random_words

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged.
        return inputs, labels
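# Usage sketch (illustrative only): the same collator covers masked and causal
# language modeling. `tok` is assumed to be a tokenizer with a mask token.
#
#   mlm_collator = DataCollatorForLanguageModeling(tok, mlm=True, mlm_probability=0.15)
#   clm_collator = DataCollatorForLanguageModeling(tok, mlm=False)
#   batch = mlm_collator([tok("some text"), tok("some other text")])
#   # batch["labels"] is -100 everywhere except at the ~15% of masked positions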

  
@dataclass
class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
    """
    Data collator used for language modeling that masks entire words.

    - collates batches of tensors, honoring their tokenizer's pad_token
    - preprocesses batches for masked language modeling

    <Tip>

    This collator relies on details of the implementation of subword tokenization by [`BertTokenizer`], specifically
    that subword tokens are prefixed with *##*. For tokenizers that do not adhere to this scheme, this collator will
    produce an output that is roughly equivalent to [`.DataCollatorForLanguageModeling`].

    </Tip>
    """

    def torch_call(self, examples):
        if isinstance(examples[0], Mapping):
            input_ids = [e["input_ids"] for e in examples]
        else:
            input_ids = examples
            examples = [{"input_ids": e} for e in examples]

        batch_input = _torch_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)

        mask_labels = []
        for e in examples:
            ref_tokens = []
            for id in tolist(e["input_ids"]):
                token = self.tokenizer._convert_id_to_token(id)
                ref_tokens.append(token)

            # For Chinese tokens, `chinese_ref` marks sub-word positions that must be prefixed with ##.
            if "chinese_ref" in e:
                ref_pos = tolist(e["chinese_ref"])
                len_seq = len(e["input_ids"])
                for i in range(len_seq):
                    if i in ref_pos:
                        ref_tokens[i] = "##" + ref_tokens[i]
            mask_labels.append(self._whole_word_mask(ref_tokens))
        batch_mask = _torch_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
        inputs, labels = self.torch_mask_tokens(batch_input, batch_mask)
        return {"input_ids": inputs, "labels": labels}

    def tf_call(self, examples):
        import tensorflow as tf

        if isinstance(examples[0], Mapping):
            input_ids = [e["input_ids"] for e in examples]
        else:
            input_ids = examples
            examples = [{"input_ids": e} for e in examples]

        batch_input = _tf_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)

        mask_labels = []
        for e in examples:
            ref_tokens = [self.tokenizer._convert_id_to_token(id) for id in tolist(e["input_ids"])]
            if "chinese_ref" in e:
                ref_pos = tolist(e["chinese_ref"])
                for i in range(len(e["input_ids"])):
                    if i in ref_pos:
                        ref_tokens[i] = "##" + ref_tokens[i]
            mask_labels.append(self._whole_word_mask(ref_tokens))
        batch_mask = _tf_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
        inputs, labels = self.tf_mask_tokens(tf.cast(batch_input, tf.int64), batch_mask)
        return {"input_ids": inputs, "labels": labels}

    def numpy_call(self, examples):
        if isinstance(examples[0], Mapping):
            input_ids = [e["input_ids"] for e in examples]
        else:
            input_ids = examples
            examples = [{"input_ids": e} for e in examples]

        batch_input = _numpy_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)

        mask_labels = []
        for e in examples:
            ref_tokens = [self.tokenizer._convert_id_to_token(id) for id in tolist(e["input_ids"])]
            if "chinese_ref" in e:
                ref_pos = tolist(e["chinese_ref"])
                for i in range(len(e["input_ids"])):
                    if i in ref_pos:
                        ref_tokens[i] = "##" + ref_tokens[i]
            mask_labels.append(self._whole_word_mask(ref_tokens))
        batch_mask = _numpy_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
        inputs, labels = self.numpy_mask_tokens(batch_input, batch_mask)
        return {"input_ids": inputs, "labels": labels}

    def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
        """
        Get 0/1 labels for masked tokens with whole word mask proxy
        """
        if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
            warnings.warn(
                "DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. "
                "Please refer to the documentation for more information."
            )

        cand_indexes = []
        for i, token in enumerate(input_tokens):
            if token == "[CLS]" or token == "[SEP]":
                continue

            if len(cand_indexes) >= 1 and token.startswith("##"):
                cand_indexes[-1].append(i)
            else:
                cand_indexes.append([i])

        random.shuffle(cand_indexes)
        num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
        masked_lms = []
        covered_indexes = set()
        for index_set in cand_indexes:
            if len(masked_lms) >= num_to_predict:
                break
            # Skip this candidate if masking the whole word would exceed the prediction budget.
            if len(masked_lms) + len(index_set) > num_to_predict:
                continue
            is_any_index_covered = False
            for index in index_set:
                if index in covered_indexes:
                    is_any_index_covered = True
                    break
            if is_any_index_covered:
                continue
            for index in index_set:
                covered_indexes.add(index)
                masked_lms.append(index)

        if len(covered_indexes) != len(masked_lms):
            raise ValueError("Length of covered_indexes is not equal to length of masked_lms.")
        mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
        return mask_labels

    def torch_mask_tokens(self, inputs, mask_labels):
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set
        'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to its ref.
        """
        import torch

        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove"
                " the --mlm flag if you want to use this tokenizer."
            )
        labels = inputs.clone()

        probability_matrix = mask_labels

        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
        ]
        probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            probability_matrix.masked_fill_(padding_mask, value=0.0)

        masked_indices = probability_matrix.bool()
        labels[~masked_indices] = -100  # We only compute loss on masked tokens.

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]).
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

        # 10% of the time, we replace masked input tokens with a random word.
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged.
        return inputs, labels

    def tf_mask_tokens(self, inputs, mask_labels):
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set
        'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to its ref.
        """
        import tensorflow as tf

        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove"
                " the --mlm flag if you want to use this tokenizer."
            )
        input_shape = tf.shape(inputs)
        labels = tf.identity(inputs)

        masked_indices = tf.cast(mask_labels, tf.bool)

        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
            for val in labels.numpy().tolist()
        ]
        masked_indices = masked_indices & ~tf.cast(special_tokens_mask, dtype=tf.bool)
        if self.tokenizer._pad_token is not None:
            padding_mask = inputs == self.tokenizer.pad_token_id
            masked_indices = masked_indices & ~padding_mask

        labels = tf.where(masked_indices, inputs, -100)  # We only compute loss on masked tokens.

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]).
        indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
        inputs = tf.where(indices_replaced, self.tokenizer.mask_token_id, inputs)

        # 10% of the time, we replace masked input tokens with a random word.
        indices_random = self.tf_bernoulli(input_shape, 0.5) & masked_indices & ~indices_replaced
        random_words = tf.random.uniform(input_shape, maxval=len(self.tokenizer), dtype=tf.int64)
        inputs = tf.where(indices_random, random_words, inputs)

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged.
        return inputs, labels

    def numpy_mask_tokens(self, inputs, mask_labels):
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set
        'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to its ref.
        """
        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove"
                " the --mlm flag if you want to use this tokenizer."
            )
        labels = np.copy(inputs)

        masked_indices = mask_labels.astype(bool)

        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
        ]
        masked_indices[np.array(special_tokens_mask, dtype=bool)] = 0
        if self.tokenizer._pad_token is not None:
            padding_mask = labels == self.tokenizer.pad_token_id
            masked_indices[padding_mask] = 0

        labels[~masked_indices] = -100  # We only compute loss on masked tokens.

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]).
        indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

        # 10% of the time, we replace masked input tokens with a random word.
        indices_random = (
            np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
        )
        random_words = np.random.randint(low=0, high=len(self.tokenizer), size=labels.shape, dtype=np.int64)
        inputs[indices_random] = random_words[indices_random]

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged.
        return inputs, labels


@dataclass
class DataCollatorForSOP(DataCollatorForLanguageModeling):
    """
    Data collator used for sentence order prediction task.

    - collates batches of tensors, honoring their tokenizer's pad_token
    - preprocesses batches for both masked language modeling and sentence order prediction
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "DataCollatorForSOP is deprecated and will be removed in a future version, you can now use "
            "DataCollatorForLanguageModeling instead.",
            FutureWarning,
        )

    def __call__(self, examples: List[Dict[str, Any]]) -> Dict[str, Any]:
        import torch
        from torch.nn.utils.rnn import pad_sequence

        input_ids = [example["input_ids"] for example in examples]
        input_ids = _torch_collate_batch(input_ids, self.tokenizer)
        input_ids, labels, attention_mask = self.mask_tokens(input_ids)

        token_type_ids = [example["token_type_ids"] for example in examples]
        # The size of segment_ids varies because of randomness; pad to the end as in the original implementation.
        token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)

        sop_label_list = [example["sentence_order_label"] for example in examples]
        sentence_order_label = torch.stack(sop_label_list)

        return {
            "input_ids": input_ids,
            "labels": labels,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
            "sentence_order_label": sentence_order_label,
        }

    def mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any]:
        """
        Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
        original. N-gram not applied yet.
        """
        import torch

        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove"
                " the --mlm flag if you want to use this tokenizer."
            )

        labels = inputs.clone()
        probability_matrix = torch.full(labels.shape, self.mlm_probability)
        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
        ]
        probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            probability_matrix.masked_fill_(padding_mask, value=0.0)
        masked_indices = torch.bernoulli(probability_matrix).bool()
        # The probability is `1` for masked positions, but in ALBERT the attention mask uses `0` for
        # masked positions, so the value is inverted.
        attention_mask = (~masked_indices).float()
        if self.tokenizer._pad_token is not None:
            attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
            attention_mask.masked_fill_(attention_padding_mask, value=1.0)
        labels[~masked_indices] = -100  # We only compute loss on masked tokens; -100 is the default ignore index.

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]).
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

        # 10% of the time, we replace masked input tokens with a random word.
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged.
        return inputs, labels, attention_mask


@dataclass
class DataCollatorForPermutationLanguageModeling(DataCollatorMixin):
    """
    Data collator used for permutation language modeling.

    - collates batches of tensors, honoring their tokenizer's pad_token
    - preprocesses batches for permutation language modeling with procedures specific to XLNet
    """

    tokenizer: PreTrainedTokenizerBase
    plm_probability: float = 1 / 6
    max_span_length: int = 5  # maximum length of a span of masked tokens
    return_tensors: str = "pt"

    def torch_call(self, examples):
        if isinstance(examples[0], Mapping):
            examples = [e["input_ids"] for e in examples]
        batch = _torch_collate_batch(examples, self.tokenizer)
        inputs, perm_mask, target_mapping, labels = self.torch_mask_tokens(batch)
        return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}

    def tf_call(self, examples):
        if isinstance(examples[0], Mapping):
            examples = [e["input_ids"] for e in examples]
        batch = _tf_collate_batch(examples, self.tokenizer)
        inputs, perm_mask, target_mapping, labels = self.tf_mask_tokens(batch)
        return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}

    def numpy_call(self, examples):
        if isinstance(examples[0], Mapping):
            examples = [e["input_ids"] for e in examples]
        batch = _numpy_collate_batch(examples, self.tokenizer)
        inputs, perm_mask, target_mapping, labels = self.numpy_mask_tokens(batch)
        return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}

    def torch_mask_tokens(self, inputs):
        """
        The masked tokens to be predicted for a particular sequence are determined by the following algorithm:

            0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
            1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
            2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
               masked
            3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
               span_length]` and mask tokens `start_index:start_index + span_length`
            4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
               sequence to be processed), repeat from Step 1.
        """
        import torch

        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for permutation language modeling."
                " Please add a mask token if you want to use this tokenizer."
            )

        if inputs.size(1) % 2 != 0:
            raise ValueError(
                "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
                " relevant comments in source code for details."
            )

        labels = inputs.clone()
        # Creating the mask and target_mapping tensors.
        masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
        target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)

        for i in range(labels.size(0)):
            cur_len = 0
            max_len = labels.size(1)

            while cur_len < max_len:
                # Sample a `span_length` in `[1, max_span_length]`, reserve a context of
                # `span_length / plm_probability`, and mask a span starting inside that context.
                span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()
                context_length = int(span_length / self.plm_probability)
                start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()
                masked_indices[i, start_index : start_index + span_length] = 1
                cur_len += context_length

            # Since non-masked tokens are set to -100 in the labels rather than skipped,
            # the i-th prediction corresponds to the i-th token.
            target_mapping[i] = torch.eye(labels.size(1))

        special_tokens_mask = torch.tensor(
            [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
            dtype=torch.bool,
        )
        masked_indices.masked_fill_(special_tokens_mask, value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            masked_indices.masked_fill_(padding_mask, value=0.0)

        # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
        non_func_mask = ~(padding_mask | special_tokens_mask)

        inputs[masked_indices] = self.tokenizer.mask_token_id
        labels[~masked_indices] = -100  # We only compute loss on masked tokens.

        perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)

        for i in range(labels.size(0)):
            # Sample a random factorisation order for the sequence. The sequence is split into two
            # halves that are permuted without crossing over, so that reused memory (assumed to be
            # half the sequence length) cannot leak information; this requires an even length.
            perm_index = torch.arange(labels.size(1))
            perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)
            perm_index = perm_index[torch.randperm(labels.size(1) // 2)]
            perm_index = torch.flatten(perm_index.transpose(0, 1))
            # Non-masked, non-functional tokens get the smallest index (-1) so every position can see
            # them, while masked positions stay hidden.
            perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)
            # Position i cannot attend to position j when perm_index[i] <= perm_index[j] and j is masked.
            perm_mask[i] = (
                perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))
            ) & masked_indices[i]

        return inputs.long(), perm_mask, target_mapping, labels.long()

    def tf_mask_tokens(self, inputs):
        """
        The masked tokens to be predicted for a particular sequence are determined by the same algorithm as in
        `torch_mask_tokens` (see its docstring for the step-by-step description).
        """
        import tensorflow as tf

        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for permutation language modeling."
                " Please add a mask token if you want to use this tokenizer."
            )

        if tf.shape(inputs)[1] % 2 != 0:
            raise ValueError(
                "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
                " relevant comments in source code for details."
            )

        labels = tf.identity(inputs)
        # The span masks and the target mapping are built with NumPy and converted to tensors afterwards.
        labels_shape = labels.shape.as_list()
        masked_indices = np.full(labels_shape, 0, dtype=bool)
        target_mapping = np.zeros((labels_shape[0], labels_shape[1], labels_shape[1]), dtype=np.float32)

        for i in range(labels_shape[0]):
            cur_len = 0
            max_len = labels_shape[1]
            while cur_len < max_len:
                span_length = randint(1, self.max_span_length)
                context_length = int(span_length / self.plm_probability)
                start_index = cur_len + randint(0, context_length - span_length)
                masked_indices[i, start_index : start_index + span_length] = 1
                cur_len += context_length
            target_mapping[i] = np.eye(labels_shape[1])

        masked_indices = tf.cast(tf.convert_to_tensor(masked_indices), tf.bool)
        target_mapping = tf.convert_to_tensor(target_mapping)
        special_tokens_mask = tf.convert_to_tensor(
            [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
                for val in labels.numpy().tolist()
            ]
        )
        special_tokens_mask = tf.cast(special_tokens_mask, dtype=tf.bool)
        masked_indices = masked_indices & ~special_tokens_mask
        if self.tokenizer._pad_token is not None:
            padding_mask = labels == self.tokenizer.pad_token_id
            masked_indices = masked_indices & ~padding_mask

        # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
        non_func_mask = ~(padding_mask | special_tokens_mask)

        inputs = tf.where(masked_indices, self.tokenizer.mask_token_id, inputs)
        labels = tf.where(masked_indices, labels, -100)  # We only compute loss on masked tokens.

        perm_mask = []
        for i in range(labels_shape[0]):
            # Sample a random factorisation order, keeping the two halves separate so reused memory
            # cannot leak information (see the torch implementation above).
            perm_index = tf.range(labels_shape[1])
            perm_index = tf.transpose(tf.reshape(perm_index, (-1, labels_shape[1] // 2)))
            perm_index = tf.random.shuffle(perm_index)  # Shuffles along the first dimension.
            perm_index = tf.reshape(tf.transpose(perm_index), (-1,))
            # Non-masked, non-functional tokens get the smallest index (-1) so every position can see them.
            perm_index = tf.where(~masked_indices[i] & non_func_mask[i], -1, perm_index)
            perm_mask.append(
                (tf.reshape(perm_index, (labels_shape[1], 1)) <= tf.reshape(perm_index, (1, labels_shape[1])))
                & masked_indices[i]
            )
        perm_mask = tf.stack(perm_mask, axis=0)

        return tf.cast(inputs, tf.int64), tf.cast(perm_mask, tf.float32), target_mapping, tf.cast(labels, tf.int64)

    def numpy_mask_tokens(self, inputs):
        """
        The masked tokens to be predicted for a particular sequence are determined by the same algorithm as in
        `torch_mask_tokens` (see its docstring for the step-by-step description).
        """
        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for permutation language modeling."
                " Please add a mask token if you want to use this tokenizer."
            )

        if inputs.shape[1] % 2 != 0:
            raise ValueError(
                "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
                " relevant comments in source code for details."
            )

        labels = np.copy(inputs)
        # Creating the mask and target_mapping arrays.
        masked_indices = np.full(labels.shape, 0, dtype=bool)
        target_mapping = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)

        for i in range(labels.shape[0]):
            cur_len = 0
            max_len = labels.shape[1]
            while cur_len < max_len:
                span_length = randint(1, self.max_span_length)
                context_length = int(span_length / self.plm_probability)
                start_index = cur_len + randint(0, context_length - span_length)
                masked_indices[i, start_index : start_index + span_length] = 1
                cur_len += context_length
            target_mapping[i] = np.eye(labels.shape[1])

        special_tokens_mask = np.array(
            [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
            dtype=bool,
        )
        masked_indices[special_tokens_mask] = 0
        if self.tokenizer._pad_token is not None:
            padding_mask = labels == self.tokenizer.pad_token_id
            masked_indices[padding_mask] = 0

        # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
        non_func_mask = ~(padding_mask | special_tokens_mask)

        inputs[masked_indices] = self.tokenizer.mask_token_id
        labels[~masked_indices] = -100  # We only compute loss on masked tokens.

        perm_mask = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)

        for i in range(labels.shape[0]):
            # Sample a random factorisation order, keeping the two halves separate (see torch version).
            perm_index = np.arange(labels.shape[1])
            perm_index = perm_index.reshape((-1, labels.shape[1] // 2)).T
            np.random.shuffle(perm_index)
            perm_index = perm_index.T.flatten()
            # Non-masked, non-functional tokens get the smallest index (-1) so every position can see them.
            perm_index[~masked_indices[i] & non_func_mask[i]] = -1
            perm_mask[i] = (
                perm_index.reshape((labels.shape[1], 1)) <= perm_index.reshape((1, labels.shape[1]))
            ) & masked_indices[i]

        return inputs.astype(np.int64), perm_mask, target_mapping, labels.astype(np.int64)

@dataclass
class DataCollatorWithFlattening(DefaultDataCollator):
    """
    Data collator used for padding free approach. Does the following:

    - concatenate the entire mini batch into single long sequence [1, total_tokens]
    - no padding will be added, returns `input_ids`, `labels` and `position_ids`
    """

    def __init__(self, *args, return_position_ids=True, **kwargs):
        super().__init__(*args, **kwargs)
        self.return_position_ids = return_position_ids
        warnings.warn(
            "Using `DataCollatorWithFlattening` will flatten the entire mini batch into single long sequence."
            "Make sure your attention computation is able to handle it!"
        )

    def __call__(self, features, return_tensors=None):
        if return_tensors is None:
            return_tensors = self.return_tensors
        is_labels_provided = "labels" in features[0]
        ret = {"input_ids": [], "labels": []}
        if self.return_position_ids:
            ret.update({"position_ids": []})
        for idx in range(0, len(features)):
            ret["input_ids"] += features[idx]["input_ids"]
            if is_labels_provided:
                ret["labels"] += [-100] + features[idx]["labels"][1:]
            else:
                ret["labels"] += [-100] + features[idx]["input_ids"][1:]
            if self.return_position_ids:
                ret["position_ids"] += list(range(len(features[idx]["input_ids"])))
        return default_data_collator([ret], return_tensors)