"""Image processor class for Flava."""

import math
import random
from functools import lru_cache
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import resize, to_channel_dimension_format
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
    validate_preprocess_arguments,
)
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


# The main image tower reuses the CLIP statistics; the codebook branch uses its own constants.
FLAVA_IMAGE_MEAN = OPENAI_CLIP_MEAN
FLAVA_IMAGE_STD = OPENAI_CLIP_STD
FLAVA_CODEBOOK_MEAN = [0.0, 0.0, 0.0]
FLAVA_CODEBOOK_STD = [1.0, 1.0, 1.0]
LOGIT_LAPLACE_EPS: float = 0.1


class FlavaMaskingGenerator:
    def __init__(
        self,
        input_size: Union[int, Tuple[int, int]] = 14,
        total_mask_patches: int = 75,
        mask_group_max_patches: Optional[int] = None,
        mask_group_min_patches: int = 16,
        mask_group_min_aspect_ratio: Optional[float] = 0.3,
        mask_group_max_aspect_ratio: Optional[float] = None,
    ):
        if not isinstance(input_size, tuple):
            input_size = (input_size,) * 2
        self.height, self.width = input_size

        self.num_patches = self.height * self.width
        self.total_mask_patches = total_mask_patches

        self.mask_group_min_patches = mask_group_min_patches
        self.mask_group_max_patches = total_mask_patches if mask_group_max_patches is None else mask_group_max_patches

        mask_group_max_aspect_ratio = mask_group_max_aspect_ratio or 1 / mask_group_min_aspect_ratio
        self.log_aspect_ratio = (math.log(mask_group_min_aspect_ratio), math.log(mask_group_max_aspect_ratio))

    def __repr__(self):
        repr_str = "MaskingGenerator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)" % (
            self.height,
            self.width,
            self.mask_group_min_patches,
            self.mask_group_max_patches,
            self.total_mask_patches,
            self.log_aspect_ratio[0],
            self.log_aspect_ratio[1],
        )
        return repr_str

    def get_shape(self):
        return self.height, self.width

    def _mask(self, mask, max_mask_patches):
        delta = 0
        for _attempt in range(10):
            # Sample a rectangular block of patches with a random area and aspect ratio.
            target_area = random.uniform(self.mask_group_min_patches, max_mask_patches)
            aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
            height = int(round(math.sqrt(target_area * aspect_ratio)))
            width = int(round(math.sqrt(target_area / aspect_ratio)))
            if width < self.width and height < self.height:
                top = random.randint(0, self.height - height)
                left = random.randint(0, self.width - width)

                num_masked = mask[top : top + height, left : left + width].sum()
                # Only accept the block if it adds new masked patches without exceeding the remaining budget.
                if 0 < height * width - num_masked <= max_mask_patches:
                    for i in range(top, top + height):
                        for j in range(left, left + width):
                            if mask[i, j] == 0:
                                mask[i, j] = 1
                                delta += 1

                if delta > 0:
                    break
        return delta

    def __call__(self):
        mask = np.zeros(shape=self.get_shape(), dtype=int)
        mask_count = 0
        while mask_count < self.total_mask_patches:
            max_mask_patches = self.total_mask_patches - mask_count
            max_mask_patches = min(max_mask_patches, self.mask_group_max_patches)

            delta = self._mask(mask, max_mask_patches)
            if delta == 0:
                break
            else:
                mask_count += delta

        return mask


class FlavaImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Flava image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
            `do_resize` parameter in `preprocess`.
        size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
            Size of the image after resizing. Can be overridden by the `size` parameter in `preprocess`.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in
            `preprocess`.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to center crop the images. Can be overridden by the `do_center_crop` parameter in `preprocess`.
        crop_size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
            Size of image after the center crop `(crop_size["height"], crop_size["width"])`. Can be overridden by the
            `crop_size` parameter in `preprocess`.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
            parameter in `preprocess`.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in
            `preprocess`.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in `preprocess`.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
        return_image_mask (`bool`, *optional*, defaults to `False`):
            Whether to return the image mask. Can be overridden by the `return_image_mask` parameter in `preprocess`.
        input_size_patches (`int`, *optional*, defaults to 14):
            Number of patches in the image in height and width direction. 14x14 = 196 total patches. Can be overridden
            by the `input_size_patches` parameter in `preprocess`.
        total_mask_patches (`int`, *optional*, defaults to 75):
            Total number of patches that should be masked. Can be overridden by the `total_mask_patches` parameter in
            `preprocess`.
        mask_group_min_patches (`int`, *optional*, defaults to 16):
            Minimum number of patches that should be masked. Can be overridden by the `mask_group_min_patches`
            parameter in `preprocess`.
        mask_group_max_patches (`int`, *optional*):
            Maximum number of patches that should be masked. Can be overridden by the `mask_group_max_patches`
            parameter in `preprocess`.
        mask_group_min_aspect_ratio (`float`, *optional*, defaults to 0.3):
            Minimum aspect ratio of the mask window. Can be overridden by the `mask_group_min_aspect_ratio` parameter
            in `preprocess`.
        mask_group_max_aspect_ratio (`float`, *optional*):
            Maximum aspect ratio of the mask window. Can be overridden by the `mask_group_max_aspect_ratio` parameter
            in `preprocess`.
        codebook_do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the input for codebook to a certain. Can be overridden by the `codebook_do_resize`
            parameter in `preprocess`. `codebook_size`.
        codebook_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
            Resize the input for codebook to the given size. Can be overridden by the `codebook_size` parameter in
            `preprocess`.
        codebook_resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
            Resampling filter to use if resizing the codebook image. Can be overridden by the `codebook_resample`
            parameter in `preprocess`.
        codebook_do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to crop the input for codebook at the center. If the input size is smaller than
            `codebook_crop_size` along any edge, the image is padded with 0's and then center cropped. Can be
            overridden by the `codebook_do_center_crop` parameter in `preprocess`.
        codebook_crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
            Desired output size for codebook input when applying center-cropping. Can be overridden by the
            `codebook_crop_size` parameter in `preprocess`.
        codebook_do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the input for codebook by the specified scale `codebook_rescale_factor`. Can be
            overridden by the `codebook_do_rescale` parameter in `preprocess`.
        codebook_rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Defines the scale factor to use if rescaling the codebook image. Can be overridden by the
            `codebook_rescale_factor` parameter in `preprocess`.
        codebook_do_map_pixels (`bool`, *optional*, defaults to `True`):
            Whether to map the pixel values of the codebook input to (1 - 2e)x + e. Can be overridden by the
            `codebook_do_map_pixels` parameter in `preprocess`.
        codebook_do_normalize (`bool`, *optional*, defaults to `True`):
            Whether or not to normalize the input for codebook with `codebook_image_mean` and `codebook_image_std`. Can
            be overridden by the `codebook_do_normalize` parameter in `preprocess`.
        codebook_image_mean (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[0, 0, 0]`):
            The sequence of means for each channel, to be used when normalizing images for codebook. Can be overridden
            by the `codebook_image_mean` parameter in `preprocess`.
        codebook_image_std (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
            The sequence of standard deviations for each channel, to be used when normalizing images for codebook. Can
            be overridden by the `codebook_image_std` parameter in `preprocess`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        # Mask-related parameters
        return_image_mask: bool = False,
        input_size_patches: int = 14,
        total_mask_patches: int = 75,
        mask_group_min_patches: int = 16,
        mask_group_max_patches: Optional[int] = None,
        mask_group_min_aspect_ratio: float = 0.3,
        mask_group_max_aspect_ratio: Optional[float] = None,
        # Codebook-related parameters
        return_codebook_pixels: bool = False,
        codebook_do_resize: bool = True,
        codebook_size: Dict[str, int] = None,
        codebook_resample: PILImageResampling = PILImageResampling.LANCZOS,
        codebook_do_center_crop: bool = True,
        codebook_crop_size: Dict[str, int] = None,
        codebook_do_rescale: bool = True,
        codebook_rescale_factor: Union[int, float] = 1 / 255,
        codebook_do_map_pixels: bool = True,
        codebook_do_normalize: bool = True,
        codebook_image_mean: Optional[Union[float, Iterable[float]]] = None,
        codebook_image_std: Optional[Union[float, Iterable[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        codebook_size = codebook_size if codebook_size is not None else {"height": 112, "width": 112}
        codebook_size = get_size_dict(codebook_size, param_name="codebook_size")
        codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {"height": 112, "width": 112}
        codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else FLAVA_IMAGE_MEAN
        self.image_std = image_std if image_std is not None else FLAVA_IMAGE_STD

        self.return_image_mask = return_image_mask
        self.input_size_patches = input_size_patches
        self.total_mask_patches = total_mask_patches
        self.mask_group_min_patches = mask_group_min_patches
        self.mask_group_max_patches = mask_group_max_patches
        self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio
        self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio

        self.return_codebook_pixels = return_codebook_pixels
        self.codebook_do_resize = codebook_do_resize
        self.codebook_size = codebook_size
        self.codebook_resample = codebook_resample
        self.codebook_do_center_crop = codebook_do_center_crop
        self.codebook_crop_size = codebook_crop_size
        self.codebook_do_rescale = codebook_do_rescale
        self.codebook_rescale_factor = codebook_rescale_factor
        self.codebook_do_map_pixels = codebook_do_map_pixels
        self.codebook_do_normalize = codebook_do_normalize
        self.codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else FLAVA_CODEBOOK_MEAN
        self.codebook_image_std = codebook_image_std if codebook_image_std is not None else FLAVA_CODEBOOK_STD

    @classmethod
    def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
        """
        Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
        created using from_dict and kwargs e.g. `FlavaImageProcessor.from_pretrained(checkpoint, codebook_size=600)`
        """
        image_processor_dict = image_processor_dict.copy()
        if "codebook_size" in kwargs:
            image_processor_dict["codebook_size"] = kwargs.pop("codebook_size")
        if "codebook_crop_size" in kwargs:
            image_processor_dict["codebook_crop_size"] = kwargs.pop("codebook_crop_size")
        return super().from_dict(image_processor_dict, **kwargs)

    @lru_cache()
    def masking_generator(
        self,
        input_size_patches,
        total_mask_patches,
        mask_group_min_patches,
        mask_group_max_patches,
        mask_group_min_aspect_ratio,
        mask_group_max_aspect_ratio,
    ) -> FlavaMaskingGenerator:
        return FlavaMaskingGenerator(
            input_size=input_size_patches,
            total_mask_patches=total_mask_patches,
            mask_group_min_patches=mask_group_min_patches,
            mask_group_max_patches=mask_group_max_patches,
            mask_group_min_aspect_ratio=mask_group_min_aspect_ratio,
            mask_group_max_aspect_ratio=mask_group_max_aspect_ratio,
        )

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image to `(size["height"], size["width"])`.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

        Returns:
            `np.ndarray`: The resized image.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def map_pixels(self, image: np.ndarray) -> np.ndarray:
        # Map pixel values from [0, 1] to [eps, 1 - 2*eps + eps], i.e. (1 - 2e)x + e, as expected by the codebook.
        return (1 - 2 * LOGIT_LAPLACE_EPS) * image + LOGIT_LAPLACE_EPS

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_map_pixels: bool = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[ChannelDimension] = None,
    ) -> np.ndarray:
        """Preprocesses a single image."""
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if is_scaled_image(image) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

        if do_center_crop:
            image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)

        if do_map_pixels:
            image = self.map_pixels(image)

        if data_format is not None:
            image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
        return image

    @filter_out_non_signature_kwargs()
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        # Mask-related parameters
        return_image_mask: Optional[bool] = None,
        input_size_patches: Optional[int] = None,
        total_mask_patches: Optional[int] = None,
        mask_group_min_patches: Optional[int] = None,
        mask_group_max_patches: Optional[int] = None,
        mask_group_min_aspect_ratio: Optional[float] = None,
        mask_group_max_aspect_ratio: Optional[float] = None,
        # Codebook-related parameters
        return_codebook_pixels: Optional[bool] = None,
        codebook_do_resize: Optional[bool] = None,
        codebook_size: Optional[Dict[str, int]] = None,
        codebook_resample: Optional[int] = None,
        codebook_do_center_crop: Optional[bool] = None,
        codebook_crop_size: Optional[Dict[str, int]] = None,
        codebook_do_rescale: Optional[bool] = None,
        codebook_rescale_factor: Optional[float] = None,
        codebook_do_map_pixels: Optional[bool] = None,
        codebook_do_normalize: Optional[bool] = None,
        codebook_image_mean: Optional[Iterable[float]] = None,
        codebook_image_std: Optional[Iterable[float]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> PIL.Image.Image:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
                has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation.
            return_image_mask (`bool`, *optional*, defaults to `self.return_image_mask`):
                Whether to return the image mask.
            input_size_patches (`int`, *optional*, defaults to `self.input_size_patches`):
                Size of the patches to extract from the image.
            total_mask_patches (`int`, *optional*, defaults to `self.total_mask_patches`):
                Total number of patches to extract from the image.
            mask_group_min_patches (`int`, *optional*, defaults to `self.mask_group_min_patches`):
                Minimum number of patches to extract from the image.
            mask_group_max_patches (`int`, *optional*, defaults to `self.mask_group_max_patches`):
                Maximum number of patches to extract from the image.
            mask_group_min_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_min_aspect_ratio`):
                Minimum aspect ratio of the patches to extract from the image.
            mask_group_max_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_max_aspect_ratio`):
                Maximum aspect ratio of the patches to extract from the image.
            return_codebook_pixels (`bool`, *optional*, defaults to `self.return_codebook_pixels`):
                Whether to return the codebook pixels.
            codebook_do_resize (`bool`, *optional*, defaults to `self.codebook_do_resize`):
                Whether to resize the codebook pixels.
            codebook_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_size`):
                Size of the codebook pixels.
            codebook_resample (`int`, *optional*, defaults to `self.codebook_resample`):
                Resampling filter to use if resizing the codebook pixels. This can be one of the enum
                `PILImageResampling`, Only has an effect if `codebook_do_resize` is set to `True`.
            codebook_do_center_crop (`bool`, *optional*, defaults to `self.codebook_do_center_crop`):
                Whether to center crop the codebook pixels.
            codebook_crop_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_crop_size`):
                Size of the center crop of the codebook pixels. Only has an effect if `codebook_do_center_crop` is set
                to `True`.
            codebook_do_rescale (`bool`, *optional*, defaults to `self.codebook_do_rescale`):
                Whether to rescale the codebook pixels values between [0 - 1].
            codebook_rescale_factor (`float`, *optional*, defaults to `self.codebook_rescale_factor`):
                Rescale factor to rescale the codebook pixels by if `codebook_do_rescale` is set to `True`.
            codebook_do_map_pixels (`bool`, *optional*, defaults to `self.codebook_do_map_pixels`):
                Whether to map the codebook pixels values.
            codebook_do_normalize (`bool`, *optional*, defaults to `self.codebook_do_normalize`):
                Whether to normalize the codebook pixels.
            codebook_image_mean (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_mean`):
                Codebook pixels mean to normalize the codebook pixels by if `codebook_do_normalize` is set to `True`.
            codebook_image_std (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_std`):
                Codebook pixels standard deviation to normalize the codebook pixels by if `codebook_do_normalize` is
                set to `True`.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                    - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                    - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        return_image_mask = return_image_mask if return_image_mask is not None else self.return_image_mask
        input_size_patches = input_size_patches if input_size_patches is not None else self.input_size_patches
        total_mask_patches = total_mask_patches if total_mask_patches is not None else self.total_mask_patches
        mask_group_min_patches = (
            mask_group_min_patches if mask_group_min_patches is not None else self.mask_group_min_patches
        )
        mask_group_max_patches = (
            mask_group_max_patches if mask_group_max_patches is not None else self.mask_group_max_patches
        )
        mask_group_min_aspect_ratio = (
            mask_group_min_aspect_ratio
            if mask_group_min_aspect_ratio is not None
            else self.mask_group_min_aspect_ratio
        )
        mask_group_max_aspect_ratio = (
            mask_group_max_aspect_ratio
            if mask_group_max_aspect_ratio is not None
            else self.mask_group_max_aspect_ratio
        )

        return_codebook_pixels = (
            return_codebook_pixels if return_codebook_pixels is not None else self.return_codebook_pixels
        )
        codebook_do_resize = codebook_do_resize if codebook_do_resize is not None else self.codebook_do_resize
        codebook_size = codebook_size if codebook_size is not None else self.codebook_size
        codebook_size = get_size_dict(codebook_size, param_name="codebook_size")
        codebook_resample = codebook_resample if codebook_resample is not None else self.codebook_resample
        codebook_do_rescale = codebook_do_rescale if codebook_do_rescale is not None else self.codebook_do_rescale
        codebook_rescale_factor = (
            codebook_rescale_factor if codebook_rescale_factor is not None else self.codebook_rescale_factor
        )
        codebook_do_center_crop = (
            codebook_do_center_crop if codebook_do_center_crop is not None else self.codebook_do_center_crop
        )
        codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else self.codebook_crop_size
        codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size")
        codebook_do_map_pixels = (
            codebook_do_map_pixels if codebook_do_map_pixels is not None else self.codebook_do_map_pixels
        )
        codebook_do_normalize = (
            codebook_do_normalize if codebook_do_normalize is not None else self.codebook_do_normalize
        )
        codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else self.codebook_image_mean
        codebook_image_std = codebook_image_std if codebook_image_std is not None else self.codebook_image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        processed_images = [
            self._preprocess_image(
                image=img,
                do_resize=do_resize,
                size=size,
                resample=resample,
                do_center_crop=do_center_crop,
                crop_size=crop_size,
                do_rescale=do_rescale,
                rescale_factor=rescale_factor,
                do_normalize=do_normalize,
                image_mean=image_mean,
                image_std=image_std,
                do_map_pixels=False,
                data_format=data_format,
                input_data_format=input_data_format,
            )
            for img in images
        ]
        data = {"pixel_values": processed_images}

        if return_codebook_pixels:
            codebook_images = [
                self._preprocess_image(
                    image=img,
                    do_resize=codebook_do_resize,
                    size=codebook_size,
                    resample=codebook_resample,
                    do_center_crop=codebook_do_center_crop,
                    crop_size=codebook_crop_size,
                    do_rescale=codebook_do_rescale,
                    rescale_factor=codebook_rescale_factor,
                    do_normalize=codebook_do_normalize,
                    image_mean=codebook_image_mean,
                    image_std=codebook_image_std,
                    do_map_pixels=codebook_do_map_pixels,
                    data_format=data_format,
                    input_data_format=input_data_format,
                )
                for img in images
            ]
            data["codebook_pixel_values"] = codebook_images

        if return_image_mask:
            mask_generator = self.masking_generator(
                input_size_patches=input_size_patches,
                total_mask_patches=total_mask_patches,
                mask_group_min_patches=mask_group_min_patches,
                mask_group_max_patches=mask_group_max_patches,
                mask_group_min_aspect_ratio=mask_group_min_aspect_ratio,
                mask_group_max_aspect_ratio=mask_group_max_aspect_ratio,
            )
            masks = [mask_generator() for _ in images]
            data["bool_masked_pos"] = masks

        return BatchFeature(data=data, tensor_type=return_tensors)
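# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): shows how the
# processor and masking generator defined above can be exercised end to end.
# The dummy image, the variable names and the printed shapes below are
# assumptions based on the default configuration (224x224 crops, 112x112
# codebook inputs, a 14x14 patch mask).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Build a fake RGB image with integer pixel values in [0, 255].
    dummy_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

    processor = FlavaImageProcessor()
    outputs = processor(
        dummy_image,
        return_image_mask=True,  # adds "bool_masked_pos" for masked-image modeling
        return_codebook_pixels=True,  # adds "codebook_pixel_values" for the codebook branch
        return_tensors="np",
    )

    print(outputs["pixel_values"].shape)  # expected: (1, 3, 224, 224)
    print(outputs["codebook_pixel_values"].shape)  # expected: (1, 3, 112, 112)
    print(outputs["bool_masked_pos"].shape)  # expected: (1, 14, 14)

    # The masking generator can also be used on its own; it masks roughly
    # `total_mask_patches` of the 14x14 patch grid in rectangular groups.
    generator = FlavaMaskingGenerator()
    patch_mask = generator()
    print(patch_mask.sum())  # approximately 75 masked patches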