from dataclasses import dataclass, field
from typing import Tuple

from ..utils import (
    cached_property,
    is_torch_available,
    is_torch_xla_available,
    is_torch_xpu_available,
    logging,
    requires_backends,
)
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_xla_available():
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted
        """
        # Translate deprecated "no_*" flags into their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at"
                " https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        # Resolve the benchmark device in priority order: explicit CPU,
        # then XLA/TPU, then XPU, then CUDA (falling back to CPU).
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_xla_available():
            device = xm.xla_device()
            n_gpu = 0
        elif is_torch_xpu_available():
            device = torch.device("xpu")
            n_gpu = torch.xpu.device_count()
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_xla_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # Only single-device benchmarking is supported, so the current CUDA
        # device index is returned directly.
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
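
# A minimal usage sketch (an assumption for illustration, not part of this
# module): these arguments are typically consumed by transformers'
# PyTorchBenchmark. The model id, batch size, and sequence length below are
# illustrative values, not defaults taken from this file.
#
#     from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#
#     args = PyTorchBenchmarkArguments(
#         models=["bert-base-uncased"],  # any model id on the Hub
#         batch_sizes=[8],
#         sequence_lengths=[128],
#         torchscript=True,              # field defined above
#     )
#     results = PyTorchBenchmark(args).run()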