U
    5Afh                     @   s  d Z ddlmZ ddlmZmZmZ ddlZddlZddlm	Z	 ddl
mZmZ ddlmZmZmZmZ d	d
lmZ eeZdZdZeG dd deZG dd de	jZG dd de	jZG dd de	jZG dd de	jZdZdZ edeG dd deZ!dS )zPyTorch UnivNetModel model.    )	dataclass)OptionalTupleUnionN)nn   )ModelOutputPreTrainedModel)add_start_docstrings%add_start_docstrings_to_model_forwardloggingreplace_return_docstrings   )UnivNetConfigr   zdg845/univnet-devc                   @   s.   e Zd ZU dZdZejed< dZejed< dS )UnivNetModelOutputa  
    Output class for the [`UnivNetModel`], which includes the generated audio waveforms and the original unpadded
    lengths of those waveforms (so that the padding can be removed by [`UnivNetModel.batch_decode`]).

    Args:
        waveforms (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Batched 1D (mono-channel) output audio waveforms.
        waveform_lengths (`torch.FloatTensor` of shape `(batch_size,)`):
            The batched length in samples of each unpadded waveform in `waveforms`.
    N	waveformswaveform_lengths)	__name__
__module____qualname____doc__r   torchFloatTensor__annotations__r    r   r   P/tmp/pip-unpacked-wheel-zw5xktn0/transformers/models/univnet/modeling_univnet.pyr   $   s   
r   c                       sF   e Zd ZdZed fddZejdddZdd	 Z	d
d Z
  ZS )#UnivNetKernelPredictorResidualBlockz
    Implementation of the residual block for the kernel predictor network inside each location variable convolution
    block (LVCBlock).

    Parameters:
        config: (`UnivNetConfig`):
            Config for the `UnivNetModel` model.
    configc                    s   t    |j| _|j| _|j| _|j| _| jd d }t	
| j| _t	j| j| j| j|dd| _t	j| j| j| j|dd| _d S )Nr      Tpaddingbias)super__init__model_in_channelsZchannelskernel_predictor_conv_sizekernel_sizeZkernel_predictor_dropoutZdropout_probleaky_relu_sloper   ZDropoutdropoutConv1dconv1conv2)selfr   r!   	__class__r   r   r$   ?   s    
z,UnivNetKernelPredictorResidualBlock.__init__)hidden_statesc                 C   sJ   |}|  |}| |}tj|| j}| |}tj|| j}|| S N)r)   r+   r   
functional
leaky_relur(   r,   )r-   r0   residualr   r   r   forwardO   s    


z+UnivNetKernelPredictorResidualBlock.forwardc                 C   s    t j| j t j| j d S r1   )r   utilsweight_normr+   r,   r-   r   r   r   apply_weight_normY   s    z5UnivNetKernelPredictorResidualBlock.apply_weight_normc                 C   s    t j| j t j| j d S r1   )r   r6   remove_weight_normr+   r,   r8   r   r   r   r:   ]   s    z6UnivNetKernelPredictorResidualBlock.remove_weight_norm)r   r   r   r   r   r$   r   r   r5   r9   r:   __classcell__r   r   r.   r   r   5   s   
r   c                       sL   e Zd ZdZdeeed fddZejddd	Z	d
d Z
dd Z  ZS )UnivNetKernelPredictora  
    Implementation of the kernel predictor network which supplies the kernel and bias for the location variable
    convolutional layers (LVCs) in each UnivNet LVCBlock.

    Based on the KernelPredictor implementation in
    [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L7).

    Parameters:
        config: (`UnivNetConfig`):
            Config for the `UnivNetModel` model.
        conv_kernel_size (`int`, *optional*, defaults to 3):
            The kernel size for the location variable convolutional layer kernels (convolutional weight tensor).
        conv_layers (`int`, *optional*, defaults to 4):
            The number of location variable convolutional layers to output kernels and biases for.
    r      )r   conv_kernel_sizeconv_layersc                    s   t     j| _d j | _|| _|| _| j| j | j | j | _| j| j | _ j	| _
 j| _ j| _ j| _ j| _| jd d }tj| j
| jdddd| _t fddt| jD | _tj| j| j| j|dd| _tj| j| j| j|dd| _d S )Nr   r      Tr    c                    s   g | ]}t  qS r   )r   ).0_r   r   r   
<listcomp>   s     z3UnivNetKernelPredictor.__init__.<locals>.<listcomp>)r#   r$   model_hidden_channelsconv_in_channelsconv_out_channelsr>   r?   Zkernel_channelsZbias_channelsZnum_mel_binsZresnet_in_channelsZ kernel_predictor_hidden_channelsZresnet_hidden_channelsr&   Zresnet_kernel_sizeZkernel_predictor_num_blocks
num_blocksr(   r   r*   
input_conv
ModuleListrange	resblockskernel_conv	bias_conv)r-   r   r>   r?   r!   r.   r   r   r$   s   s<    
         zUnivNetKernelPredictor.__init__)spectrogramc                 C   s   |j \}}}| |}tj|| j}| jD ]}||}q,| |}| |}|	|| j
| j| j| j| }	|	|| j
| j| }
|	|
fS )a  
        Maps a conditioning log-mel spectrogram to a tensor of convolutional kernels and biases, for use in location
        variable convolutional layers. Note that the input spectrogram should have shape (batch_size, input_channels,
        seq_length).

        Args:
            spectrogram (`torch.FloatTensor` of shape `(batch_size, input_channels, seq_length)`):
                Tensor containing the log-mel spectrograms.

        Returns:
            Tuple[`torch.FloatTensor, `torch.FloatTensor`]: tuple of tensors where the first element is the tensor of
            location variable convolution kernels of shape `(batch_size, self.conv_layers, self.conv_in_channels,
            self.conv_out_channels, self.conv_kernel_size, seq_length)` and the second element is the tensor of
            location variable convolution biases of shape `(batch_size, self.conv_layers. self.conv_out_channels,
            seq_length)`.
        )shaperH   r   r2   r3   r(   rK   rL   rM   viewr?   rE   rF   r>   
contiguous)r-   rN   Z
batch_sizerB   Z
seq_lengthr0   resblockZkernel_hidden_statesZbias_hidden_stateskernelsbiasesr   r   r   r5      s,    




zUnivNetKernelPredictor.forwardc                 C   sB   t j| j | jD ]}|  qt j| j t j| j d S r1   )r   r6   r7   rH   rK   r9   rL   rM   r-   Zlayerr   r   r   r9      s
    

z(UnivNetKernelPredictor.apply_weight_normc                 C   sB   t j| j | jD ]}|  qt j| j t j| j d S r1   )r   r6   r:   rH   rK   rL   rM   rU   r   r   r   r:      s
    

z)UnivNetKernelPredictor.remove_weight_norm)r   r=   r   r   r   r   r   intr$   r   r   r5   r9   r:   r;   r   r   r.   r   r<   b   s     &.r<   c                       sb   e Zd ZdZeeed fddZdddZdej	ej	ej	eed	d
dZ
dd Zdd Z  ZS )UnivNetLvcResidualBlocka  
    Implementation of the location variable convolution (LVC) residual block for the UnivNet residual network.

    Parameters:
        config: (`UnivNetConfig`):
            Config for the `UnivNetModel` model.
        kernel_size (`int`):
            The kernel size for the dilated 1D convolutional layer.
        dilation (`int`):
            The dilation for the dilated 1D convolutional layer.
    )r   r'   dilationc                    s\   t    |j| _|| _|| _|j| _| j| jd  d }tj| j| j| j|| jd| _	d S )Nr   r   )r!   rY   )
r#   r$   rD   hidden_channelsr'   rY   r(   r   r*   conv)r-   r   r'   rY   r!   r.   r   r   r$      s    
z UnivNetLvcResidualBlock.__init__   c                 C   s   |}t j|| j}| |}t j|| j}| j||||d}t|d d d | jd d f t	|d d | jd d d f  }|| }|S N)hop_size)
r   r2   r3   r(   r[   location_variable_convolutionr   ZsigmoidrZ   tanh)r-   r0   kernelr"   r^   r4   r   r   r   r5      s    
$zUnivNetLvcResidualBlock.forwardr   )r0   ra   r"   rY   r^   c                 C   sB  |j \}}}|j \}}}	}
}||| krBtd||  d| d|t|
d d  }tj|||fdd}|d|d|  |}||k rtj|d|fdd}|d||}|d	d	d	d	d	d	d	d	d	|f }|dd
}|d
|
d}t	d||}|j
tjd}|ddj
tjd}|| }| ||	d}|S )u  
        Performs location-variable convolution operation on the input sequence (hidden_states) using the local
        convolution kernel. This was introduced in [LVCNet: Efficient Condition-Dependent Modeling Network for Waveform
        Generation](https://arxiv.org/abs/2102.10815) by Zhen Zheng, Jianzong Wang, Ning Cheng, and Jing Xiao.

        Time: 414 μs ± 309 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each), test on NVIDIA V100.

        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch_size, in_channels, in_length)`):
                The input sequence of shape (batch, in_channels, in_length).
            kernel (`torch.FloatTensor` of shape `(batch_size, in_channels, out_channels, kernel_size, kernel_length)`):
                The local convolution kernel of shape (batch, in_channels, out_channels, kernel_size, kernel_length).
            bias (`torch.FloatTensor` of shape `(batch_size, out_channels, kernel_length)`):
                The bias for the local convolution of shape (batch, out_channels, kernel_length).
            dilation (`int`, *optional*, defaults to 1):
                The dilation of convolution.
            hop_size (`int`, *optional*, defaults to 256):
                The hop_size of the conditioning sequence.
        Returns:
            `torch.FloatTensor`: the output sequence after performing local convolution with shape (batch_size,
            out_channels, in_length).
        z#Dim 2 of `hidden_states` should be z
) but got zX. Please check `hidden_states` or `kernel` and `hop_size` to make sure they are correct.r   r   Zconstantr   r   Nr=   zbildsk,biokl->bolsd)Zmemory_format)rO   
ValueErrorrW   r   r2   padZunfold	transposer   ZeinsumtoZchannels_last_3d	unsqueezerQ   rP   )r-   r0   ra   r"   rY   r^   batchrB   Z	in_lengthZout_channelsr'   Zkernel_lengthr!   Zoutput_hidden_statesr   r   r   r_   	  s*    &z5UnivNetLvcResidualBlock.location_variable_convolutionc                 C   s   t j| j d S r1   )r   r6   r7   r[   r8   r   r   r   r9   J  s    z)UnivNetLvcResidualBlock.apply_weight_normc                 C   s   t j| j d S r1   )r   r6   r:   r[   r8   r   r   r   r:   M  s    z*UnivNetLvcResidualBlock.remove_weight_norm)r\   )r   r\   )r   r   r   r   r   rW   r$   r5   r   r   r_   r9   r:   r;   r   r   r.   r   rX      s"   
  ArX   c                       sP   e Zd ZdZdeeed fddZejejdddZ	d	d
 Z
dd Z  ZS )UnivNetLvcBlocka#  
    Implementation of the location variable convolution (LVC) residual block of the UnivNet residual block. Includes a
    `UnivNetKernelPredictor` inside to predict the kernels and biases of the LVC layers.

    Based on LVCBlock in
    [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L98)

    Parameters:
        config (`UnivNetConfig`):
            Config for the `UnivNetModel` model.
        layer_id (`int`):
            An integer corresponding to the index of the current LVC resnet block layer. This should be between 0 and
            `len(config.resblock_stride_sizes) - 1)` inclusive.
        lvc_hop_size (`int`, *optional*, defaults to 256):
            The hop size for the location variable convolutional layers.
    r\   )r   layer_idlvc_hop_sizec                    s   t     j_ j| _ j| _ j| _	|_
 j_tj	_tjjjdj jjd jd  jd d_t jj_t fddtjD _d S )Nr   )strider!   Zoutput_paddingc                    s    g | ]}t  jj| qS r   )rX   r'   	dilationsrA   ir   r-   r   r   rC   ~  s     z,UnivNetLvcBlock.__init__.<locals>.<listcomp>)r#   r$   rD   rZ   resblock_kernel_sizesr'   resblock_stride_sizesrl   Zresblock_dilation_sizesrm   cond_hop_lengthr(   lenrG   r   ConvTranspose1d	convt_prer<   kernel_predictorrI   rJ   rK   )r-   r   rj   rk   r.   rp   r   r$   c  s(    
	zUnivNetLvcBlock.__init__)r0   rN   c           	   	   C   s   t j|| j}| |}| |\}}t| jD ]^\}}|d d |d d d d d d d d f }|d d |d d d d f }||||| jd}q2|S r]   )	r   r2   r3   r(   rv   rw   	enumeraterK   rs   )	r-   r0   rN   rS   rT   ro   rR   ra   r"   r   r   r   r5     s    
(zUnivNetLvcBlock.forwardc                 C   s0   t j| j | j  | jD ]}|  qd S r1   )r   r6   r7   rv   rw   r9   rK   rU   r   r   r   r9     s    

z!UnivNetLvcBlock.apply_weight_normc                 C   s0   t j| j | j  | jD ]}|  qd S r1   )r   r6   r:   rv   rw   rK   rU   r   r   r   r:     s    

z"UnivNetLvcBlock.remove_weight_norm)r\   rV   r   r   r.   r   ri   Q  s    ri   aL  
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`UnivNetConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
aR  
    Converts a noise waveform and a conditioning spectrogram to a speech waveform. Passing a batch of log-mel
    spectrograms returns a batch of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a
    single, un-batched speech waveform.

    Args:
        input_features (`torch.FloatTensor`):
            Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
            config.num_mel_channels)`, or un-batched and of shape `(sequence_length, config.num_mel_channels)`.
        noise_sequence (`torch.FloatTensor`, *optional*):
            Tensor containing a noise sequence of standard Gaussian noise. Can be batched and of shape `(batch_size,
            sequence_length, config.model_in_channels)`, or un-batched and of shape (sequence_length,
            config.model_in_channels)`. If not supplied, will be randomly generated.
        padding_mask (`torch.BoolTensor`, *optional*):
            Mask indicating which parts of each sequence are padded. Mask values are selected in `[0, 1]`:

            - 1 for tokens that are **not masked**
            - 0 for tokens that are **masked**

            The mask can be batched and of shape `(batch_size, sequence_length)` or un-batched and of shape
            `(sequence_length,)`.
        generator (`torch.Generator`, *optional*):
            A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
            deterministic.
        return_dict:
            Whether to return a [`~utils.ModelOutput`] subclass instead of a plain tuple.
zUnivNet GAN vocoder.c                       s   e Zd ZeZdZed fddZeee	e
eddejeej eej eej ee eeej e
f ddd	Zd
d Zdd Zdd Z  ZS )UnivNetModelinput_featuresr   c                    s   t    t j| _ j| _tj j j	ddddd| _
t j}d}g  jD ]}|| }| qTt fddt|D | _tj j	ddddd| _|   d S )	N   r   r   Zreflect)r'   rl   r!   padding_modec                    s   g | ]}t  || d qS ))rj   rk   )ri   rn   r   Zhop_lengthsr   r   rC     s   z)UnivNetModel.__init__.<locals>.<listcomp>)r!   r|   )r#   r$   rt   rq   Znum_kernelsr(   r   r*   r%   rD   conv_prerr   appendrI   rJ   rK   	conv_postZ	post_init)r-   r   Z
num_layersZ
hop_lengthrl   r.   r}   r   r$     s0    


zUnivNetModel.__init__)output_typeconfig_classN)rz   noise_sequencepadding_mask	generatorreturn_dictreturnc                 C   s  |dk	r|n| j j}| dk}|s.|d}|j\}}}	|dk	r^| dk}
|
s|d}n$||| j jf}tj|||j|j	d}|jd }|dkr|dkr|
|dd}n|dkr|dkr|
|dd}||krtd| d| d|dk	r4| dkr
|d}|jd }||kr4td	| d| d|d
d}|d
d}| |}| jD ]}|||}q\tj|| j}| |}t|}|d}d}|dk	rtj|dd}|s||f}|S t||dS )a  
        Returns:

        Example:

         ```python
         >>> from transformers import UnivNetFeatureExtractor, UnivNetModel
         >>> from datasets import load_dataset, Audio

         >>> model = UnivNetModel.from_pretrained("dg845/univnet-dev")
         >>> feature_extractor = UnivNetFeatureExtractor.from_pretrained("dg845/univnet-dev")

         >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
         >>> # Resample the audio to the feature extractor's sampling rate.
         >>> ds = ds.cast_column("audio", Audio(sampling_rate=feature_extractor.sampling_rate))
         >>> inputs = feature_extractor(
         ...     ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt"
         ... )
         >>> audio = model(**inputs).waveforms
         >>> list(audio.shape)
         [1, 140288]
         ```
        Nr   r   )r   dtypedevicer   z&The batch size of `noise_sequence` is z+ and the batch size of `input_features` is z', but the two are expected to be equal.z$The batch size of `padding_mask` is r   )dim)r   r   )r   Zuse_return_dictr   rg   rO   r%   r   Zrandnr   r   repeatrc   re   r~   rK   r   r2   r3   r(   r   r`   Zsqueezesumr   )r-   rz   r   r   r   r   Zspectrogram_batchedZspectrogram_batch_sizeZspectrogram_lengthrB   Znoise_sequence_batchedZnoise_sequence_shapeZnoise_sequence_batch_sizeZpadding_mask_batch_sizer0   rR   Zwaveformr   outputsr   r   r   r5     sh    !
   










zUnivNetModel.forwardc                 C   sF   t |tjtjtjfrB|jjjd| jj	d |j
dk	rB|j
j  dS )zInitialize the weights.g        )ZmeanZstdN)
isinstancer   ZLinearr*   ru   ZweightdataZnormal_r   Zinitializer_ranger"   Zzero_)r-   moduler   r   r   _init_weightsf  s    
zUnivNetModel._init_weightsc                 C   s4   t j| j | jD ]}|  qt j| j d S r1   )r   r6   r7   r~   rK   r9   r   rU   r   r   r   r9   m  s    

zUnivNetModel.apply_weight_normc                 C   s4   t j| j | jD ]}|  qt j| j d S r1   )r   r6   r:   r~   rK   r   rU   r   r   r   r:   s  s    

zUnivNetModel.remove_weight_norm)NNNN)r   r   r   r   r   Zmain_input_namer$   r   UNIVNET_INPUTS_DOCSTRINGr   r   _CONFIG_FOR_DOCr   r   r   	Generatorboolr   r   r5   r   r9   r:   r;   r   r   r.   r   ry     s(   '
    jry   )"r   Zdataclassesr   typingr   r   r   r   Ztorch.utils.checkpointr   Zmodeling_utilsr   r	   r6   r
   r   r   r   Zconfiguration_univnetr   Z
get_loggerr   loggerr   Z_CHECKPOINT_FOR_DOCr   Moduler   r<   rX   ri   ZUNIVNET_START_DOCSTRINGr   ry   r   r   r   r   <module>   s0   
-t{L