"""
:mod:`torch.distributed.optim` exposes DistributedOptimizer, which takes a list
of remote parameters (:class:`~torch.distributed.rpc.RRef`) and runs the
optimizer locally on the workers where the parameters live.  The distributed
optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to
apply the gradients on each worker.
"""
import warnings

import torch
from torch import optim

from .apply_optimizer_in_backward import (
    _apply_optimizer_in_backward,
    _get_in_backward_optimizers,
)
from .functional_adadelta import _FunctionalAdadelta
from .functional_adagrad import _FunctionalAdagrad
from .functional_adam import _FunctionalAdam
from .functional_adamax import _FunctionalAdamax
from .functional_adamw import _FunctionalAdamW
from .functional_rmsprop import _FunctionalRMSprop
from .functional_rprop import _FunctionalRprop
from .functional_sgd import _FunctionalSGD
from .named_optimizer import _NamedOptimizer
from .utils import as_functional_optim


# DistributedOptimizer depends on torch.distributed.rpc, so only expose it when
# the build has RPC support (indicated by torch._C._rpc_init being present).
if hasattr(torch._C, "_rpc_init"):
    from .optimizer import DistributedOptimizer

from .post_localSGD_optimizer import PostLocalSGDOptimizer
from .zero_redundancy_optimizer import ZeroRedundancyOptimizer

__all__ = [
    "as_functional_optim",
    "DistributedOptimizer",
    "PostLocalSGDOptimizer",
    "ZeroRedundancyOptimizer",
]