U
    5Afl%                     @   s   d dl mZmZmZmZ ddlmZmZmZm	Z	m
Z
 ddlmZmZ e rbd dlmZ ddlmZ e rd dlZd d	lmZ dd
lmZ e	eZeeddG dd deZdS )    )AnyDictListUnion   )add_end_docstringsis_torch_availableis_vision_availableloggingrequires_backends   )ChunkPipelinebuild_pipeline_init_args)Image)
load_imageN)BaseModelOutput)2MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMEST)Zhas_image_processorc                       s   e Zd ZdZ fddZdeedeeee	f  f eeee f d fddZ
d	d
 ZdddZdd ZdddZdeeef dddZ  ZS )ZeroShotObjectDetectionPipelinea  
    Zero shot object detection pipeline using `OwlViTForObjectDetection`. This pipeline predicts bounding boxes of
    objects when you provide an image and a set of `candidate_labels`.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
    >>> detector(
    ...     "http://images.cocodataset.org/val2017/000000039769.jpg",
    ...     candidate_labels=["cat", "couch"],
    ... )
    [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]

    >>> detector(
    ...     "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    ...     candidate_labels=["head", "bird"],
    ... )
    [{'score': 0.119, 'label': 'bird', 'box': {'xmin': 71, 'ymin': 170, 'xmax': 410, 'ymax': 508}}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"zero-shot-object-detection"`.

    See the list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=zero-shot-object-detection).
    c                    sB   t  jf | | jdkr*td| j dt| d | t d S )NtfzThe z is only available in PyTorch.Zvision)super__init__	framework
ValueError	__class__r   Zcheck_model_typer   )selfkwargsr    U/tmp/pip-unpacked-wheel-zw5xktn0/transformers/pipelines/zero_shot_object_detection.pyr   8   s
    

z(ZeroShotObjectDetectionPipeline.__init__NzImage.Imageimagecandidate_labelsc                    sF   d|kr| d}t|ttjfr.||d}n|}t j|f|}|S )a|  
        Detect objects (bounding boxes & classes) in the image(s) passed as inputs.

        Args:
            image (`str`, `PIL.Image` or `List[Dict[str, Any]]`):
                The pipeline handles three types of images:

                - A string containing an http url pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                You can use this parameter to send directly a list of images, or a dataset or a generator like so:

                ```python
                >>> from transformers import pipeline

                >>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
                >>> detector(
                ...     [
                ...         {
                ...             "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                ...             "candidate_labels": ["cat", "couch"],
                ...         },
                ...         {
                ...             "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                ...             "candidate_labels": ["cat", "couch"],
                ...         },
                ...     ]
                ... )
                [[{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.25, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}], [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]]
                ```


            candidate_labels (`str` or `List[str]` or `List[List[str]]`):
                What the model should recognize in the image.

            threshold (`float`, *optional*, defaults to 0.1):
                The probability necessary to make a prediction.

            top_k (`int`, *optional*, defaults to None):
                The number of top predictions that will be returned by the pipeline. If the provided number is `None`
                or higher than the number of predictions available, it will default to the number of predictions.

            timeout (`float`, *optional*, defaults to None):
                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
                the call may block forever.


        Return:
            A list of lists containing prediction results, one list per input image. Each list contains dictionaries
            with the following keys:

            - **label** (`str`) -- Text query corresponding to the found object.
            - **score** (`float`) -- Score corresponding to the object (between 0 and 1).
            - **box** (`Dict[str,int]`) -- Bounding box of the detected object in image's original size. It is a
              dictionary with `x_min`, `x_max`, `y_min`, `y_max` keys.
        Ztext_queriesr   )pop
isinstancestrr   r   __call__)r   r    r!   r   inputsresultsr   r   r   r%   A   s    ?
z(ZeroShotObjectDetectionPipeline.__call__c                 K   sN   i }d|kr|d |d< i }d|kr0|d |d< d|krD|d |d< |i |fS )Ntimeout	thresholdtop_kr   )r   r   Zpreprocess_paramsZpostprocess_paramsr   r   r   _sanitize_parameters   s    z4ZeroShotObjectDetectionPipeline._sanitize_parametersc           
      c   s   t |d |d}|d }t|tr,|d}tj|j|jggtjd}t	|D ]^\}}| j
|| jd}| j|| jd}	| jdkr|	| j}	|t|d k||d	||	V  qNd S )
Nr    )r(   r!   ,)Zdtype)Zreturn_tensorsptr   )is_lasttarget_sizecandidate_label)r   r#   r$   splittorchZtensorheightwidthZint32	enumerate	tokenizerr   image_processortoZtorch_dtypelen)
r   r&   r(   r    r!   r/   ir0   Ztext_inputsZimage_featuresr   r   r   
preprocess   s"    


z*ZeroShotObjectDetectionPipeline.preprocessc                 C   s>   | d}| d}| d}| jf |}|||d|}|S )Nr/   r0   r.   )r/   r0   r.   )r"   model)r   Zmodel_inputsr/   r0   r.   outputsmodel_outputsr   r   r   _forward   s    


z(ZeroShotObjectDetectionPipeline._forward皙?c                 C   s   g }|D ]|}|d }t |}| jj|||d dd }|d  D ]@}|d |  }	| |d | d }
|	||
d}|| qBqt|dd	 d
d}|r|d | }|S )Nr0   r/   )r=   r)   Ztarget_sizesr   ZscoresZboxes)scorelabelboxc                 S   s   | d S )NrA   r   )xr   r   r   <lambda>       z=ZeroShotObjectDetectionPipeline.postprocess.<locals>.<lambda>T)keyreverse)r   r7   Zpost_process_object_detectionZnonzeroitem_get_bounding_boxappendsorted)r   r>   r)   r*   r'   Zmodel_outputrB   r=   indexrA   rC   resultr   r   r   postprocess   s(      z+ZeroShotObjectDetectionPipeline.postprocessztorch.Tensor)rC   returnc                 C   s8   | j dkrtd|  \}}}}||||d}|S )a%  
        Turns list [xmin, xmax, ymin, ymax] into dict { "xmin": xmin, ... }

        Args:
            box (`torch.Tensor`): Tensor containing the coordinates in corners format.

        Returns:
            bbox (`Dict[str, int]`): Dict containing the coordinates in corners format.
        r-   zAThe ZeroShotObjectDetectionPipeline is only available in PyTorch.)xminyminxmaxymax)r   r   inttolist)r   rC   rQ   rR   rS   rT   Zbboxr   r   r   rJ      s    

z1ZeroShotObjectDetectionPipeline._get_bounding_box)N)N)r@   N)__name__
__module____qualname____doc__r   r   r$   r   r   r   r%   r+   r;   r?   rO   rU   rJ   __classcell__r   r   r   r   r      s     I


r   )typingr   r   r   r   utilsr   r   r	   r
   r   baser   r   ZPILr   Zimage_utilsr   r2   Ztransformers.modeling_outputsr   Zmodels.auto.modeling_autor   Z
get_loggerrW   loggerr   r   r   r   r   <module>   s   
