from abc import ABC
from typing import Callable, Optional

import torch
from torch.ao.quantization.backend_config import (
    BackendConfig,
    DTypeConfig,
    ObservationType,
)
from torch.ao.quantization.utils import NodePattern, Pattern, QuantizerCls
from torch.fx.graph import Node

from .utils import all_node_args_have_no_tensors

__all__ = [
    "QuantizeHandler",
    "BinaryOpQuantizeHandler",
    "CatQuantizeHandler",
    "ConvReluQuantizeHandler",
    "LinearReLUQuantizeHandler",
    "BatchNormQuantizeHandler",
    "EmbeddingQuantizeHandler",
    "RNNDynamicQuantizeHandler",
    "DefaultNodeQuantizeHandler",
    "FixedQParamsOpQuantizeHandler",
    "CopyNodeQuantizeHandler",
    "GeneralTensorShapeOpQuantizeHandler",
    "CustomModuleQuantizeHandler",
    "StandaloneModuleQuantizeHandler",
]


def _default_root_node_getter(node_pattern):
    # A node pattern is either a Node or a (possibly nested) tuple whose last
    # element is the root node, e.g. (torch.nn.ReLU, torch.nn.Conv2d).
    if node_pattern is None:
        return node_pattern
    while not isinstance(node_pattern, Node):
        node_pattern = node_pattern[-1]
    return node_pattern


class QuantizeHandler(ABC):
    """Base handler class for the quantizer patterns"""

    def __init__(
        self,
        node_pattern: NodePattern,
        modules: dict[str, torch.nn.Module],
        root_node_getter: Optional[Callable] = None,
        is_custom_module=False,
        is_standalone_module=False,
    ):
        """Records pattern information in __init__, which will be used
        in convert
        """
        self.node_pattern = node_pattern
        self.modules = modules
        if root_node_getter is None:
            root_node_getter = _default_root_node_getter
        self.root_node = root_node_getter(node_pattern)
        self.is_custom_module_ = is_custom_module
        self.is_standalone_module_ = is_standalone_module
        # Count how many args of the root node are Tensors; some ops are
        # quantized differently depending on this (e.g. add(x, y) vs add(x, 2)).
        self.num_tensor_args = 0
        if isinstance(self.root_node, Node):
            cache_for_no_tensor_check: dict[Node, bool] = {}
            for arg_idx in range(len(self.root_node.args)):
                arg = self.root_node.args[arg_idx]
                if isinstance(arg, Node) and (
                    not all_node_args_have_no_tensors(
                        arg, self.modules, cache_for_no_tensor_check
                    )
                ):
                    self.num_tensor_args += 1

    def is_general_tensor_value_op(self) -> bool:
        """
        Returns True if the operator works for both floating point and
        quantized input, and either computes on the input Tensor or only
        re-arranges its values / queries some metadata about it.

        For such operators we insert an observer/fake_quant for the output
        that is the same instance as the input's observer: the distribution
        of values can differ between input and output Tensors (which matters
        for HistogramObserver) even though they must share the same
        quantization parameters.

        Example operators: avgpool2d, reshape, transpose, maxpool2d
        Example observed pattern:
            observer_0 - avgpool2d - observer_0 (same observer instance as input)
        """
        return False

    def is_custom_module(self):
        return self.is_custom_module_

    def is_standalone_module(self):
        return self.is_standalone_module_


def _get_quantize_handler_cls(
    observation_type: ObservationType,
    dtype_configs: list[DTypeConfig],
    num_tensor_args_to_observation_type: dict[int, ObservationType],
) -> type:
    """
    Return a configurable QuantizeHandler that matches the given specifications
    from the backend.
    """

    class ConfigurableQuantizeHandler(QuantizeHandler):
        def __init__(
            self,
            node_pattern: NodePattern,
            modules: dict[str, torch.nn.Module],
            root_node_getter: Optional[Callable] = None,
        ):
            super().__init__(node_pattern, modules, root_node_getter)
            if num_tensor_args_to_observation_type:
                assert self.num_tensor_args in num_tensor_args_to_observation_type, (
                    f"Must provide observation_type config for tensor number "
                    f"{self.num_tensor_args} in num_tensor_args_to_observation_type "
                    f"for {node_pattern}"
                )
                self.observation_type = num_tensor_args_to_observation_type[
                    self.num_tensor_args
                ]
            else:
                self.observation_type = observation_type
            self.dtype_configs = dtype_configs

        def is_general_tensor_value_op(self) -> bool:
            return (
                self.observation_type
                == ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
            )

    return ConfigurableQuantizeHandler


def _get_pattern_to_quantize_handlers(
    backend_config: BackendConfig,
) -> dict[Pattern, QuantizerCls]:
    """
    Note: the quantize handler is just a holder for some check methods, e.g.
    should_insert_observer_for_output; maybe this can be an enum as well.
    We can refactor this after the fbgemm/qnnpack path is fully converted to
    the new path. This is not exposed to backend developers.
    """
    pattern_to_quantize_handlers = {}
    for pattern, config in backend_config._pattern_complex_format_to_config.items():
        observation_type = config.observation_type
        dtype_configs = config.dtype_configs
        num_tensor_args_to_observation_type = (
            config._num_tensor_args_to_observation_type
        )
        pattern_to_quantize_handlers[pattern] = _get_quantize_handler_cls(
            observation_type,
            dtype_configs,
            num_tensor_args_to_observation_type,
        )
    return pattern_to_quantize_handlers


# Pattern-specific handler subclasses exported in __all__ for backward
# compatibility; they add no behavior of their own.
class BinaryOpQuantizeHandler(QuantizeHandler):
    pass


class CatQuantizeHandler(QuantizeHandler):
    pass


class ConvReluQuantizeHandler(QuantizeHandler):
    pass


class LinearReLUQuantizeHandler(QuantizeHandler):
    pass


class BatchNormQuantizeHandler(QuantizeHandler):
    pass


class EmbeddingQuantizeHandler(QuantizeHandler):
    pass


class RNNDynamicQuantizeHandler(QuantizeHandler):
    pass


class DefaultNodeQuantizeHandler(QuantizeHandler):
    """Common quantized op, first input and first output will be quantized"""


class FixedQParamsOpQuantizeHandler(QuantizeHandler):
    pass


class CopyNodeQuantizeHandler(QuantizeHandler):
    pass


class GeneralTensorShapeOpQuantizeHandler(QuantizeHandler):
    pass


class CustomModuleQuantizeHandler(QuantizeHandler):
    pass


class StandaloneModuleQuantizeHandler(QuantizeHandler):
    pass
   r   torch.fx.graphr   utilsr   __all__r"   r   listrH   inttyper_   rh   r   r   r   r   r   r   r   r   r   r   r   r   r   r:   r#   r!   <module>r      s;    %  
 K J  0$8*c 8*v#'%#'$#' *.c?.B)C#' 
/	#'L(!(	'<
 (0	o 		 	
	o 	
	 	
	 	
	 	
	 	
N N
	O 	
	o 	
	/ 	
	/ 	
	o 	r#   