
# mypy: allow-untyped-defs
r"""Quantized convolution modules."""

from typing import ClassVar, Optional

import torch
import torch.ao.nn.intrinsic as nni
import torch.ao.nn.intrinsic.qat as nniqat
import torch.nn as nn
import torch.nn.functional as F
from torch._ops import ops
from torch.nn.common_types import _size_1_t
from torch.nn.modules.utils import _pair, _single, _triple
from torch.nn.utils import fuse_conv_bn_weights

from .utils import _quantize_weight, WeightedQuantizedModule


__all__ = [
    "Conv1d",
    "Conv2d",
    "Conv3d",
    "ConvTranspose1d",
    "ConvTranspose2d",
    "ConvTranspose3d",
]

_SUPPORTED_PADDING = {"zeros", "reflect"}


def _reverse_repeat_padding(padding: list[int]) -> list[int]:
    _reversed_padding_repeated_twice: list[int] = []
    N = len(padding)
    for idx in range(N):
        _reversed_padding_repeated_twice.extend(padding[N - idx - 1] for _ in range(2))
    return _reversed_padding_repeated_twice


class _ConvNd(WeightedQuantizedModule):
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=True,
        padding_mode="zeros",
        device=None,
        dtype=None,
    ):
        # All subclasses share this signature; the base class itself is never
        # instantiated directly.
        raise NotImplementedError

    def _init(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        transposed,
        output_padding,
        groups,
        bias,
        padding_mode="zeros",
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()

        if in_channels % groups != 0:
            raise ValueError("in_channels must be divisible by groups")
        if out_channels % groups != 0:
            raise ValueError("out_channels must be divisible by groups")
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        if padding_mode not in _SUPPORTED_PADDING:
            raise ValueError(
                f"'padding_mode' {padding_mode} is not supported by quantized convolution"
            )
        self.padding_mode = padding_mode
        # Initialize as NCHW; set_weight_bias packs (and internally transposes)
        # the weight for the backend.
        if self.transposed:
            weight_shape = [in_channels, out_channels // self.groups]
        else:
            weight_shape = [out_channels, in_channels // self.groups]
        qweight = torch._empty_affine_quantized(
            weight_shape + list(kernel_size),
            scale=1,
            zero_point=0,
            dtype=torch.qint8,
            **{k: v for k, v in factory_kwargs.items() if k != "dtype"},
        )
        bias_float = (
            torch.zeros(
                out_channels,
                dtype=torch.float,
                **{k: v for k, v in factory_kwargs.items() if k != "dtype"},
            )
            if bias
            else None
        )

        self.set_weight_bias(qweight, bias_float)
        self.scale = 1.0
        self.zero_point = 0

    def set_weight_bias(self, qweight, bias_float):
        raise NotImplementedError

    def bias(self):
        raise NotImplementedError

    def _weight_bias(self):
        raise NotImplementedError

    def extra_repr(self):
        s = (
            "{in_channels}, {out_channels}, kernel_size={kernel_size}"
            ", stride={stride}, scale={scale}, zero_point={zero_point}"
        )
        if self.padding != (0,) * len(self.padding):
            s += ", padding={padding}"
        if self.dilation != (1,) * len(self.dilation):
            s += ", dilation={dilation}"
        if self.output_padding != (0,) * len(self.output_padding):
            s += ", output_padding={output_padding}"
        if self.groups != 1:
            s += ", groups={groups}"
        if self.bias() is None:
            s += ", bias=False"
        return s.format(**self.__dict__)

    # ===== Serialization methods =====
    # The special consideration here is that we have to unpack the weights into
    # their regular QTensor form for serialization. Packed weights should not
    # live outside the process in which they were created; rather, they should
    # be derived from the QTensor weight.
    #   self
    #   |--- weight : Tensor
    #   |--- bias : Tensor
    def _save_to_state_dict(self, destination, prefix, keep_vars):
        super()._save_to_state_dict(destination, prefix, keep_vars)
        (w, b) = self._weight_bias()
        destination[prefix + "weight"] = w
        destination[prefix + "bias"] = b
        destination[prefix + "scale"] = torch.tensor(self.scale)
        destination[prefix + "zero_point"] = torch.tensor(self.zero_point)

    @torch.jit.export
    def __getstate__(self):
        (w, b) = self._weight_bias()
        return (
            self.in_channels,
            self.out_channels,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
            self.transposed,
            self.output_padding,
            self.groups,
            self.padding_mode,
            w,
            b,
            self.scale,
            self.zero_point,
            self.training,
        )

    # ===== Deserialization methods =====
    # Counterpart to the serialization methods: we must pack the serialized
    # QTensor weight into its packed format for use by the quantized ops.
    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        self.set_weight_bias(state_dict[prefix + "weight"], state_dict[prefix + "bias"])
        state_dict.pop(prefix + "weight")
        state_dict.pop(prefix + "bias")
        self.scale = float(state_dict[prefix + "scale"])
        state_dict.pop(prefix + "scale")
        self.zero_point = int(state_dict[prefix + "zero_point"])
        state_dict.pop(prefix + "zero_point")
        super()._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            False,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )

    @torch.jit.export
    def __setstate__(self, state):
        self.in_channels = state[0]
        self.out_channels = state[1]
        self.kernel_size = state[2]
        self.stride = state[3]
        self.padding = state[4]
        self.dilation = state[5]
        self.transposed = state[6]
        self.output_padding = state[7]
        self.groups = state[8]
        self.padding_mode = state[9]
        self.set_weight_bias(state[10], state[11])
        self.scale = state[12]
        self.zero_point = state[13]
        self.training = state[14]

    def __deepcopy__(self, memo):
        new_instance = type(self).__new__(type(self))
        torch.nn.Module.__init__(new_instance)
        state = self.__getstate__()
        new_instance.__setstate__(state)
        return new_instance

    def __copy__(self):
        return self.__deepcopy__({})

    @classmethod
    def get_qconv(cls, mod, activation_post_process, weight_post_process=None):
        r"""Creates a qconv object and returns it."""
        if weight_post_process is None:
            weight_post_process = mod.qconfig.weight()
        weight_post_process(mod.weight)
        assert (
            weight_post_process.dtype == torch.qint8
        ), "Weight observer must have a dtype of qint8"
        qweight = _quantize_weight(mod.weight.float(), weight_post_process)
        # The __init__ call used here is the one from the derived classes, not
        # the one from _ConvNd.
        qconv = cls(
            mod.in_channels,
            mod.out_channels,
            mod.kernel_size,
            mod.stride,
            mod.padding,
            mod.dilation,
            mod.groups,
            mod.bias is not None,
            mod.padding_mode,
        )
        qconv.set_weight_bias(qweight, mod.bias)
        if (
            activation_post_process is None
            or activation_post_process.dtype == torch.float
        ):
            return qconv  # dynamic quantization doesn't need scale/zero_point
        else:
            act_scale, act_zp = activation_post_process.calculate_qparams()
            qconv.scale = float(act_scale)
            qconv.zero_point = int(act_zp)
            return qconv

    @staticmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        if hasattr(mod, "weight_fake_quant"):
            if type(mod) == cls._NNIQAT_CONV_BN_MODULE:
                mod.weight, mod.bias = fuse_conv_bn_weights(
                    mod.weight,
                    mod.bias,
                    mod.bn.running_mean,
                    mod.bn.running_var,
                    mod.bn.eps,
                    mod.bn.weight,
                    mod.bn.bias,
                )
            assert hasattr(
                mod, "activation_post_process"
            ), "Input QAT module must have observer attached"
            weight_post_process = mod.weight_fake_quant
            activation_post_process = mod.activation_post_process
        else:
            assert type(mod) == cls._FLOAT_MODULE, (
                " nnq."
                + cls.__name__
                + ".from_float only works for "
                + cls._FLOAT_MODULE.__name__
                + " but got:"
                + str(type(mod))
            )
            assert hasattr(
                mod, "qconfig"
            ), "Input float module must have qconfig defined."
            activation_post_process = (
                None
                if not hasattr(mod, "activation_post_process")
                else mod.activation_post_process
            )
            if type(mod) in [
                cls._NNI_CONV_RELU_MODULE,
                cls._NNI_CONV_ADD_MODULE,
                cls._NNI_CONV_ADD_RELU_MODULE,
            ]:
                mod = mod[0]
            weight_post_process = mod.qconfig.weight()
        return cls.get_qconv(mod, activation_post_process, weight_post_process)

    @classmethod
    def from_reference(cls, ref_qconv, output_scale, output_zero_point):
        r"""Create a (fbgemm/qnnpack) quantized module from a reference quantized module

        Args:
            ref_qconv (Module): a reference quantized module, either produced by
                torch.ao.quantization utilities or provided by the user
            output_scale (float): scale for output Tensor
            output_zero_point (int): zero point for output Tensor
        """
        qconv = cls(
            ref_qconv.in_channels,
            ref_qconv.out_channels,
            ref_qconv.kernel_size,
            ref_qconv.stride,
            ref_qconv.padding,
            ref_qconv.dilation,
            ref_qconv.groups,
            ref_qconv.bias is not None,
            ref_qconv.padding_mode,
            device=ref_qconv.weight.device,
            dtype=ref_qconv.weight.dtype,
        )
        qweight = ref_qconv.get_quantized_weight()
        qconv.set_weight_bias(qweight, ref_qconv.bias)
        qconv.scale = float(output_scale)
        qconv.zero_point = int(output_zero_point)
        return qconv


class Conv1d(_ConvNd):
    r"""Applies a 1D convolution over a quantized input signal composed of
    several quantized input planes.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.Conv1d`.

    .. note::
        Only `zeros` is supported for the :attr:`padding_mode` argument.

    .. note::
        Only `torch.quint8` is supported for the input data type.

    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point

    See :class:`~torch.nn.Conv1d` for other attributes.

    Examples::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> m = nn.quantized.Conv1d(16, 33, 3, stride=2)
        >>> input = torch.randn(20, 16, 100)
        >>> # quantize input to quint8
        >>> # xdoctest: +SKIP
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0,
        ...                                     dtype=torch.quint8)
        >>> output = m(q_input)

    """

    _FLOAT_MODULE: ClassVar[type[nn.Conv1d]] = nn.Conv1d
    _NNIQAT_CONV_BN_MODULE: ClassVar[Optional[type[nn.Module]]] = nniqat.ConvBn1d
    _NNI_CONV_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = nni.ConvReLU1d
    _NNI_CONV_ADD_MODULE: ClassVar[Optional[type[nn.Module]]] = None
    _NNI_CONV_ADD_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = None

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_1_t,
        stride: _size_1_t = 1,
        padding: _size_1_t = 0,
        dilation: _size_1_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = "zeros",
        device=None,
        dtype=None,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        kernel_size = _single(kernel_size)
        stride = _single(stride)
        padding = padding if isinstance(padding, str) else _single(padding)
        dilation = _single(dilation)

        # Subclasses of _ConvNd need to call _init rather than __init__. See
        # discussion on PR #49702
        super()._init(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            False,
            _single(0),
            groups,
            bias,
            padding_mode,
            **factory_kwargs,
        )

    def _get_name(self):
        return "QuantizedConv1d"

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        if self.padding_mode == "zeros":
            self._packed_params = torch.ops.quantized.conv1d_prepack(
                w, b, self.stride, self.padding, self.dilation, self.groups
            )
        else:
            self._packed_params = torch.ops.quantized.conv1d_prepack(
                w, b, self.stride, _pair(0), self.dilation, self.groups
            )

    def _weight_bias(self):
        w, b = torch.ops.quantized.conv1d_unpack(self._packed_params)
        return w, b

    def weight(self):
        return self._weight_bias()[0]

    def bias(self):
        return self._weight_bias()[1]

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 3:
            raise ValueError("Input shape must be `(N, C, L)`!")
        if self.padding_mode != "zeros":
            # Padding in Conv1d is stored as (p, p); we need (p,)
            _reversed_padding_repeated_twice = _reverse_repeat_padding(
                self.padding[:1]
            )
            input = F.pad(
                input, _reversed_padding_repeated_twice, mode=self.padding_mode
            )
        return ops.quantized.conv1d(
            input, self._packed_params, self.scale, self.zero_point
        )

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        r"""Creates a quantized module from a float module or qparams_dict.

        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
              utilities or provided by the user
        """
        return _ConvNd.from_float(
            cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant
        )


class Conv2d(_ConvNd):
    r"""Applies a 2D convolution over a quantized input signal composed of
    several quantized input planes.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.Conv2d`.

    .. note::
        Only `zeros` is supported for the :attr:`padding_mode` argument.

    .. note::
        Only `torch.quint8` is supported for the input data type.

    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point

    See :class:`~torch.nn.Conv2d` for other attributes.

    Examples::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> # With square kernels and equal stride
        >>> m = nn.quantized.Conv2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.quantized.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> # non-square kernels and unequal stride and with padding and dilation
        >>> m = nn.quantized.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
        >>> input = torch.randn(20, 16, 50, 100)
        >>> # quantize input to quint8
        >>> # xdoctest: +SKIP
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(q_input)

    """

    _FLOAT_MODULE: ClassVar[type[nn.Conv2d]] = nn.Conv2d
    _NNIQAT_CONV_BN_MODULE: ClassVar[Optional[type[nn.Module]]] = nniqat.ConvBn2d
    _NNI_CONV_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = nni.ConvReLU2d
    _NNI_CONV_ADD_MODULE: ClassVar[type[nni.ConvAdd2d]] = nni.ConvAdd2d
    _NNI_CONV_ADD_RELU_MODULE: ClassVar[type[nni.ConvAddReLU2d]] = nni.ConvAddReLU2d

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=True,
        padding_mode="zeros",
        device=None,
        dtype=None,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        # Subclasses of _ConvNd need to call _init rather than __init__. See
        # discussion on PR #49702
        super()._init(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            False,
            _pair(0),
            groups,
            bias,
            padding_mode,
            **factory_kwargs,
        )

    def _get_name(self):
        return "QuantizedConv2d"

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        if self.padding_mode == "zeros":
            self._packed_params = torch.ops.quantized.conv2d_prepack(
                w, b, self.stride, self.padding, self.dilation, self.groups
            )
        else:
            self._packed_params = torch.ops.quantized.conv2d_prepack(
                w, b, self.stride, _pair(0), self.dilation, self.groups
            )

    def _weight_bias(self):
        return self._packed_params.unpack()

    def weight(self):
        return self._weight_bias()[0]

    def bias(self):
        return self._weight_bias()[1]

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")
        if self.padding_mode != "zeros":
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
            input = F.pad(
                input, _reversed_padding_repeated_twice, mode=self.padding_mode
            )
        return ops.quantized.conv2d(
            input, self._packed_params, self.scale, self.zero_point
        )

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        r"""Creates a quantized module from a float module or qparams_dict.

        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
              utilities or provided by the user
        """
        return _ConvNd.from_float(
            cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant
        )


class Conv3d(_ConvNd):
    r"""Applies a 3D convolution over a quantized input signal composed of
    several quantized input planes.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.Conv3d`.

    .. note::
        Only `zeros` is supported for the :attr:`padding_mode` argument.

    .. note::
        Only `torch.quint8` is supported for the input data type.

    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point

    See :class:`~torch.nn.Conv3d` for other attributes.

    Examples::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> # With square kernels and equal stride
        >>> m = nn.quantized.Conv3d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.quantized.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2))
        >>> # non-square kernels and unequal stride and with padding and dilation
        >>> m = nn.quantized.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), dilation=(1, 2, 2))
        >>> input = torch.randn(20, 16, 56, 56, 56)
        >>> # quantize input to quint8
        >>> # xdoctest: +SKIP
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(q_input)

    """

    _FLOAT_MODULE: ClassVar[type[nn.Conv3d]] = nn.Conv3d
    _NNIQAT_CONV_BN_MODULE: ClassVar[Optional[type[nn.Module]]] = nniqat.ConvBn3d
    _NNI_CONV_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = nni.ConvReLU3d
    _NNI_CONV_ADD_MODULE: ClassVar[Optional[type[nn.Module]]] = None
    _NNI_CONV_ADD_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = None

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=True,
        padding_mode="zeros",
        device=None,
        dtype=None,
    ):
        assert padding_mode != "reflect", "Conv3d does not support reflection padding"
        factory_kwargs = {"device": device, "dtype": dtype}
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)
        dilation = _triple(dilation)
        # Subclasses of _ConvNd need to call _init rather than __init__. See
        # discussion on PR #49702
        super()._init(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            False,
            _triple(0),
            groups,
            bias,
            padding_mode,
            **factory_kwargs,
        )

    def _get_name(self):
        return "QuantizedConv3d"

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        if self.padding_mode == "zeros":
            self._packed_params = torch.ops.quantized.conv3d_prepack(
                w, b, self.stride, self.padding, self.dilation, self.groups
            )
        else:
            self._packed_params = torch.ops.quantized.conv3d_prepack(
                w, b, self.stride, _triple(0), self.dilation, self.groups
            )

    def _weight_bias(self):
        return self._packed_params.unpack()

    def weight(self):
        return self._weight_bias()[0]

    def bias(self):
        return self._weight_bias()[1]

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 5:
            raise ValueError("Input shape must be `(N, C, D, H, W)`!")
        if self.padding_mode != "zeros":
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
            input = F.pad(
                input, _reversed_padding_repeated_twice, mode=self.padding_mode
            )
        return ops.quantized.conv3d(
            input, self._packed_params, self.scale, self.zero_point
        )

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        r"""Creates a quantized module from a float module or qparams_dict.

        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
              utilities or provided by the user
        """
        return _ConvNd.from_float(
            cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant
        )


# === Transposed Convolutions ===


class _ConvTransposeNd(_ConvNd):
    _FLOAT_MODULE: ClassVar[type[nn.modules.conv._ConvNd]]

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        transposed,
        output_padding,
        groups,
        bias,
        padding_mode,
        device=None,
        dtype=None,
    ):
        if padding_mode != "zeros":
            raise ValueError(
                f'Only "zeros" padding mode is supported for {self.__class__.__name__}'
            )
        factory_kwargs = {"device": device, "dtype": dtype}
        # Subclasses of _ConvNd need to call _init rather than __init__. See
        # discussion on PR #49702
        super()._init(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            transposed,
            output_padding,
            groups,
            bias,
            padding_mode,
            **factory_kwargs,
        )

    def _input_padding(
        self, kernel_size: list[int], dilation: list[int], padding: list[int]
    ) -> list[int]:
        res = torch.jit.annotate(list[int], [])
        for kdx in range(len(kernel_size)):
            pad = dilation[kdx] * (kernel_size[kdx] - 1) - padding[kdx]
            res.append(pad)
        return res

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        r"""Creates a quantized module from a float module or qparams_dict.

        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
              utilities or provided by the user
        """
        # derived classes override cls._FLOAT_MODULE attribute
        msg = (
            " nnq."
            + cls.__name__
            + ".from_float only works for "
            + cls._FLOAT_MODULE.__name__
        )
        assert type(mod) == cls._FLOAT_MODULE, msg
        assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined."
        weight_post_process = mod.qconfig.weight()
        weight_post_process(mod.weight)
        assert (
            weight_post_process.dtype == torch.qint8
        ), "Weight observer must have a dtype of qint8"
        qweight = _quantize_weight(mod.weight.float(), weight_post_process)
        # The __init__ call used here is the one from the derived classes, not
        # the one from _ConvTransposeNd.
        qconv = cls(
            mod.in_channels,
            mod.out_channels,
            mod.kernel_size,
            mod.stride,
            mod.padding,
            mod.output_padding,
            mod.groups,
            mod.bias is not None,
            mod.dilation,
            mod.padding_mode,
        )
        qconv.set_weight_bias(qweight, mod.bias)
        if (
            not hasattr(mod, "activation_post_process")
            or mod.activation_post_process.dtype == torch.float
        ):
            return qconv  # dynamic quantization doesn't need scale/zero_point
        else:
            act_scale, act_zp = mod.activation_post_process.calculate_qparams()
            qconv.scale = float(act_scale)
            qconv.zero_point = int(act_zp)
            return qconv

    @staticmethod
    def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
        r"""Create a (fbgemm/qnnpack) quantized module from a reference quantized module

        Args:
            ref_qconvt (Module): a reference quantized module, either produced by
                torch.ao.quantization utilities or provided by the user
            output_scale (float): scale for output Tensor
            output_zero_point (int): zero point for output Tensor
        """
        qconv = cls(
            ref_qconvt.in_channels,
            ref_qconvt.out_channels,
            ref_qconvt.kernel_size,
            ref_qconvt.stride,
            ref_qconvt.padding,
            ref_qconvt.output_padding,
            ref_qconvt.groups,
            ref_qconvt.bias is not None,
            ref_qconvt.dilation,
            ref_qconvt.padding_mode,
            device=ref_qconvt.weight.device,
            dtype=ref_qconvt.weight.dtype,
        )
        qweight = ref_qconvt.get_quantized_weight()
        qconv.set_weight_bias(qweight, ref_qconvt.bias)
        qconv.scale = float(output_scale)
        qconv.zero_point = int(output_zero_point)
        return qconv


class ConvTranspose1d(_ConvTransposeNd):
    r"""Applies a 1D transposed convolution operator over an input image
    composed of several input planes.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.ConvTranspose1d`.

    .. note:: Currently only the QNNPACK engine is implemented.
        Please set `torch.backends.quantized.engine = 'qnnpack'`.

    For special notes, please see :class:`~torch.ao.nn.quantized.Conv1d`

    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point

    See :class:`~torch.nn.ConvTranspose1d` for other attributes.

    Examples::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> torch.backends.quantized.engine = 'qnnpack'
        >>> from torch.ao.nn import quantized as nnq
        >>> # With square kernels and equal stride
        >>> m = nnq.ConvTranspose1d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nnq.ConvTranspose1d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> input = torch.randn(20, 16, 50)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(q_input)
        >>> # exact output size can also be specified as an argument
        >>> input = torch.randn(1, 16, 12)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> downsample = nnq.Conv1d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nnq.ConvTranspose1d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(q_input)
        >>> h.size()
        torch.Size([1, 16, 6])
        >>> # xdoctest: +SKIP("FIXME: output_size is not a parameter")
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12])
    """

    _FLOAT_MODULE: ClassVar[type[nn.ConvTranspose1d]] = nn.ConvTranspose1d

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        output_padding=0,
        groups=1,
        bias=True,
        dilation=1,
        padding_mode="zeros",
        device=None,
        dtype=None,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        kernel_size = _single(kernel_size)
        stride = _single(stride)
        padding = _single(padding)
        dilation = _single(dilation)
        output_padding = _single(output_padding)
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            True,
            output_padding,
            groups,
            bias,
            padding_mode,
            **factory_kwargs,
        )

    def _get_name(self):
        return "QuantizedConvTranspose1d"

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        self._packed_params = torch.ops.quantized.conv_transpose1d_prepack(
            w,
            b,
            self.stride,
            self.padding,
            self.output_padding,
            self.dilation,
            self.groups,
        )

    def _weight_bias(self):
        w, b = torch.ops.quantized.conv_transpose1d_unpack(self._packed_params)
        return w, b

    def weight(self):
        (w, _) = self._weight_bias()
        return w

    def bias(self):
        (_, b) = self._weight_bias()
        return b

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 3:
            raise ValueError("Input shape must be `(N, C, L)`!")
        return torch.ops.quantized.conv_transpose1d(
            input, self._packed_params, self.scale, self.zero_point
        )

    @classmethod
    def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
        return _ConvTransposeNd.from_reference(
            cls, ref_qconvt, output_scale, output_zero_point
        )


class ConvTranspose2d(_ConvTransposeNd):
    r"""Applies a 2D transposed convolution operator over an input image
    composed of several input planes.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.ConvTranspose2d`.

    For special notes, please see :class:`~torch.ao.nn.quantized.Conv2d`

    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point

    See :class:`~torch.nn.ConvTranspose2d` for other attributes.

    Examples::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> # QNNPACK or FBGEMM as backend
        >>> torch.backends.quantized.engine = 'qnnpack'
        >>> # With square kernels and equal stride
        >>> import torch.ao.nn.quantized as nnq
        >>> m = nnq.ConvTranspose2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nnq.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> input = torch.randn(20, 16, 50, 100)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(q_input)
        >>> # exact output size can also be specified as an argument
        >>> input = torch.randn(1, 16, 12, 12)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> downsample = nnq.Conv2d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nnq.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(q_input)
        >>> h.size()
        torch.Size([1, 16, 6, 6])
        >>> # xdoctest: +SKIP("FIXME: output_size is not a parameter")
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12, 12])
    """

    _FLOAT_MODULE: ClassVar[type[nn.ConvTranspose2d]] = nn.ConvTranspose2d

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        output_padding=0,
        groups=1,
        bias=True,
        dilation=1,
        padding_mode="zeros",
        device=None,
        dtype=None,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        output_padding = _pair(output_padding)
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            True,
            output_padding,
            groups,
            bias,
            padding_mode,
            **factory_kwargs,
        )

    def _get_name(self):
        return "QuantizedConvTranspose2d"

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        self._packed_params = torch.ops.quantized.conv_transpose2d_prepack(
            w,
            b,
            self.stride,
            self.padding,
            self.output_padding,
            self.dilation,
            self.groups,
        )

    def _weight_bias(self):
        w, b = torch.ops.quantized.conv2d_unpack(self._packed_params)
        return w, b

    def weight(self):
        (w, _) = self._weight_bias()
        return w

    def bias(self):
        (_, b) = self._weight_bias()
        return b

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")
        return ops.quantized.conv_transpose2d(
            input, self._packed_params, self.scale, self.zero_point
        )

    @classmethod
    def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
        return _ConvTransposeNd.from_reference(
            cls, ref_qconvt, output_scale, output_zero_point
        )


class ConvTranspose3d(_ConvTransposeNd):
    r"""Applies a 3D transposed convolution operator over an input image
    composed of several input planes.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.ConvTranspose3d`.

    .. note:: Currently only the FBGEMM engine is implemented.
        Please set `torch.backends.quantized.engine = 'fbgemm'`.

    For special notes, please see :class:`~torch.ao.nn.quantized.Conv3d`

    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point

    See :class:`~torch.nn.ConvTranspose3d` for other attributes.

    Examples::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> torch.backends.quantized.engine = 'fbgemm'
        >>> from torch.ao.nn import quantized as nnq
        >>> # With cubic kernels and equal stride
        >>> m = nnq.ConvTranspose3d(16, 33, 3, stride=2)
        >>> # non-cubic kernels and unequal stride and with padding
        >>> m = nnq.ConvTranspose3d(16, 33, (3, 3, 5), stride=(2, 1, 1), padding=(4, 2, 2))
        >>> input = torch.randn(20, 16, 50, 100, 100)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(q_input)
        >>> # exact output size can also be specified as an argument
        >>> input = torch.randn(1, 16, 12, 12, 12)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> downsample = nnq.Conv3d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nnq.ConvTranspose3d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(q_input)
        >>> h.size()
        torch.Size([1, 16, 6, 6, 6])
        >>> # xdoctest: +SKIP("FIXME: output_size is not a parameter")
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12, 12, 12])
    """

    _FLOAT_MODULE: ClassVar[type[nn.ConvTranspose3d]] = nn.ConvTranspose3d

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        output_padding=0,
        groups=1,
        bias=True,
        dilation=1,
        padding_mode="zeros",
        device=None,
        dtype=None,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)
        dilation = _triple(dilation)
        output_padding = _triple(output_padding)
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            True,
            output_padding,
            groups,
            bias,
            padding_mode,
            **factory_kwargs,
        )

    def _get_name(self):
        return "QuantizedConvTranspose3d"

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        self._packed_params = torch.ops.quantized.conv_transpose3d_prepack(
            w,
            b,
            self.stride,
            self.padding,
            self.output_padding,
            self.dilation,
            self.groups,
        )

    def _weight_bias(self):
        w, b = torch.ops.quantized.conv3d_unpack(self._packed_params)
        return w, b

    def weight(self):
        (w, _) = self._weight_bias()
        return w

    def bias(self):
        (_, b) = self._weight_bias()
        return b

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 5:
            raise ValueError("Input shape must be `(N, C, T, H, W)`!")
        return ops.quantized.conv_transpose3d(
            input, self._packed_params, self.scale, self.zero_point
        )

    @classmethod
    def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
        return _ConvTransposeNd.from_reference(
            cls, ref_qconvt, output_scale, output_zero_point
        )
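
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the upstream module): a minimal smoke
# test of the quantized Conv2d above.  It assumes the active quantized engine
# (fbgemm on x86, qnnpack on ARM) implements the quantized conv ops; the
# freshly constructed module carries an uninitialized packed weight, so only
# shapes and dtypes are meaningful in the first block.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Direct use: quantized modules consume quint8 tensors, not float tensors.
    m = Conv2d(16, 33, kernel_size=3, stride=2)
    x = torch.randn(4, 16, 32, 32)
    qx = torch.quantize_per_tensor(x, scale=1.0, zero_point=0, dtype=torch.quint8)
    out = m(qx)
    print(out.shape)  # torch.Size([4, 33, 15, 15])
    print(out.dtype)  # torch.quint8

    # Float-to-quantized conversion path: from_float() expects a qconfig and a
    # calibrated activation observer on the float module, then packs the
    # observed weight and copies the activation qparams onto the new module.
    float_m = nn.Conv2d(16, 33, 3, stride=2)
    float_m.qconfig = torch.ao.quantization.default_qconfig
    float_m.activation_post_process = float_m.qconfig.activation()
    float_m.activation_post_process(float_m(x))  # one calibration batch
    qm = Conv2d.from_float(float_m)
    print(qm)  # QuantizedConv2d(16, 33, kernel_size=(3, 3), stride=(2, 2), ...)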