Path: blob/main/apex/apex/optimizers/fused_lamb.py
import torch
from apex.multi_tensor_apply import multi_tensor_applier


class FusedLAMB(torch.optim.Optimizer):

    """Implements LAMB algorithm.

    Currently GPU-only. Requires Apex to be installed via
    ``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.

    This version of fused LAMB implements two fusions.

      * Fusion of the LAMB update's elementwise operations
      * A multi-tensor apply launch that batches the elementwise updates applied to
        all the model's parameters into one or a few kernel launches.

    :class:`apex.optimizers.FusedLAMB`'s usage is identical to any ordinary PyTorch optimizer::

        opt = apex.optimizers.FusedLAMB(model.parameters(), lr = ....)
        ...
        opt.step()

    :class:`apex.optimizers.FusedLAMB` may be used with or without Amp. If you wish to use
    :class:`FusedLAMB` with Amp, you may choose any ``opt_level``::

        opt = apex.optimizers.FusedLAMB(model.parameters(), lr = ....)
        model, opt = amp.initialize(model, opt, opt_level="O0" or "O1" or "O2")
        ...
        opt.step()

    In general, ``opt_level="O1"`` is recommended.

    LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of the gradient and its norm. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-6)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0.01)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_.
            NOT SUPPORTED now! (default: False)
        adam_w_mode (boolean, optional): whether to apply decoupled weight decay
            (also known as AdamW) instead of L2 regularization. (default: True)
        grad_averaging (bool, optional): whether to apply (1-beta1) to the gradient
            when calculating the running average of the gradient. (default: True)
        set_grad_none (bool, optional): whether to set grads to None when the
            zero_grad() method is called. (default: True)
        max_grad_norm (float, optional): value used to clip the global grad norm
            (default: 1.0)
        use_nvlamb (boolean, optional): whether to apply the adaptive learning rate
            to parameters with 0.0 weight decay. (default: False)

    .. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, bias_correction=True,
                 betas=(0.9, 0.999), eps=1e-6, weight_decay=0.01,
                 amsgrad=False, adam_w_mode=True,
                 grad_averaging=True, set_grad_none=True,
                 max_grad_norm=1.0, use_nvlamb=False):
        if amsgrad:
            raise RuntimeError('FusedLAMB does not support the AMSGrad variant.')
        defaults = dict(lr=lr, bias_correction=bias_correction,
                        betas=betas, eps=eps, weight_decay=weight_decay,
                        grad_averaging=grad_averaging,
                        max_grad_norm=max_grad_norm)
        super(FusedLAMB, self).__init__(params, defaults)
        if multi_tensor_applier.available:
            import amp_C
            self.multi_tensor_l2norm = amp_C.multi_tensor_l2norm
            # Skip buffer
            self._dummy_overflow_buf = torch.tensor([0], dtype=torch.int,
                                                    device=self.param_groups[0]["params"][0].device)
            self.multi_tensor_lamb = amp_C.multi_tensor_lamb
        else:
            raise RuntimeError('apex.optimizers.FusedLAMB requires cuda extensions')

        self.adam_w_mode = 1 if adam_w_mode else 0
        self.set_grad_none = set_grad_none
        self.use_nvlamb = use_nvlamb

    def zero_grad(self):
        if self.set_grad_none:
            for group in self.param_groups:
                for p in group['params']:
                    p.grad = None
        else:
            super(FusedLAMB, self).zero_grad()
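
    # Overview of step() below: (1) gradients are gathered into separate fp16
    # and fp32 lists across all parameter groups, (2) the L2 norm of each list
    # is computed with multi_tensor_l2norm and the two partial norms are
    # blended into one global gradient norm, and (3) for each parameter group,
    # multi_tensor_lamb is launched once per dtype to apply the complete LAMB
    # update (moment updates, trust ratio, weight decay, and clipping against
    # max_grad_norm) in a few batched kernels.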
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        # create separate grad lists for fp32 and fp16 params
        g_all_32, g_all_16 = [], []
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                if p.dtype == torch.float32:
                    g_all_32.append(p.grad.data)
                elif p.dtype == torch.float16:
                    g_all_16.append(p.grad.data)
                else:
                    raise RuntimeError('FusedLAMB only supports fp16 and fp32.')

        device = self.param_groups[0]["params"][0].device
        g_norm_32, g_norm_16 = torch.zeros(1, device=device), torch.zeros(1, device=device)
        # compute the grad norm for the two lists
        if len(g_all_32) > 0:
            g_norm_32 = multi_tensor_applier(self.multi_tensor_l2norm,
                                             self._dummy_overflow_buf,
                                             [g_all_32], False)[0]
        if len(g_all_16) > 0:
            g_norm_16 = multi_tensor_applier(self.multi_tensor_l2norm,
                                             self._dummy_overflow_buf,
                                             [g_all_16], False)[0]

        # blend the two grad norms to get the global grad norm
        global_grad_norm = multi_tensor_applier(self.multi_tensor_l2norm,
                                                self._dummy_overflow_buf,
                                                [[g_norm_32, g_norm_16]],
                                                False)[0]
        max_grad_norm = self.defaults['max_grad_norm']

        for group in self.param_groups:
            bias_correction = 1 if group['bias_correction'] else 0
            beta1, beta2 = group['betas']
            grad_averaging = 1 if group['grad_averaging'] else 0

            # assume the same step across the group for now to simplify things;
            # a per-parameter step can easily be supported by making it a tensor,
            # or by passing a list into the kernel
            if 'step' in group:
                group['step'] += 1
            else:
                group['step'] = 1

            # create lists for multi-tensor apply
            g_16, p_16, m_16, v_16 = [], [], [], []
            g_32, p_32, m_32, v_32 = [], [], [], []

            for p in group['params']:
                if p.grad is None:
                    continue
                if p.grad.data.is_sparse:
                    raise RuntimeError('FusedLAMB does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                if p.dtype == torch.float16:
                    g_16.append(p.grad.data)
                    p_16.append(p.data)
                    m_16.append(state['exp_avg'])
                    v_16.append(state['exp_avg_sq'])
                elif p.dtype == torch.float32:
                    g_32.append(p.grad.data)
                    p_32.append(p.data)
                    m_32.append(state['exp_avg'])
                    v_32.append(state['exp_avg_sq'])
                else:
                    raise RuntimeError('FusedLAMB only supports fp16 and fp32.')

            if len(g_16) > 0:
                multi_tensor_applier(self.multi_tensor_lamb,
                                     self._dummy_overflow_buf,
                                     [g_16, p_16, m_16, v_16],
                                     group['lr'],
                                     beta1,
                                     beta2,
                                     group['eps'],
                                     group['step'],
                                     bias_correction,
                                     group['weight_decay'],
                                     grad_averaging,
                                     self.adam_w_mode,
                                     global_grad_norm,
                                     max_grad_norm,
                                     self.use_nvlamb)
            if len(g_32) > 0:
                multi_tensor_applier(self.multi_tensor_lamb,
                                     self._dummy_overflow_buf,
                                     [g_32, p_32, m_32, v_32],
                                     group['lr'],
                                     beta1,
                                     beta2,
                                     group['eps'],
                                     group['step'],
                                     bias_correction,
                                     group['weight_decay'],
                                     grad_averaging,
                                     self.adam_w_mode,
                                     global_grad_norm,
                                     max_grad_norm,
                                     self.use_nvlamb)

        return loss
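
For reference, a minimal usage sketch follows. It is not part of the file above: it assumes Apex was built with the --cpp_ext/--cuda_ext extensions described in the docstring and that a CUDA device is available; the model, batch shapes, and hyperparameter values are purely illustrative.

import torch
import torch.nn.functional as F
from apex.optimizers import FusedLAMB

model = torch.nn.Linear(128, 10).cuda()              # illustrative model
optimizer = FusedLAMB(model.parameters(), lr=1e-3,
                      weight_decay=0.01, max_grad_norm=1.0)

inputs = torch.randn(32, 128, device="cuda")         # illustrative batch
targets = torch.randint(0, 10, (32,), device="cuda")

optimizer.zero_grad()                                # sets grads to None (set_grad_none=True)
loss = F.cross_entropy(model(inputs), targets)
loss.backward()
optimizer.step()                                     # one fused LAMB update over all parameters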