GitHub Repository: ai-forever/sber-swap
Path: blob/main/apex/apex/optimizers/fused_lamb.py
import torch
from apex.multi_tensor_apply import multi_tensor_applier


class FusedLAMB(torch.optim.Optimizer):

    """Implements the LAMB algorithm.

    Currently GPU-only. Requires Apex to be installed via
    ``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.

    This version of fused LAMB implements 2 fusions:

    * Fusion of the LAMB update's elementwise operations
    * A multi-tensor apply launch that batches the elementwise updates applied to
      all the model's parameters into one or a few kernel launches.

    :class:`apex.optimizers.FusedLAMB`'s usage is identical to any ordinary PyTorch optimizer::

        opt = apex.optimizers.FusedLAMB(model.parameters(), lr = ....)
        ...
        opt.step()

    :class:`apex.optimizers.FusedLAMB` may be used with or without Amp. If you wish to use
    :class:`FusedLAMB` with Amp, you may choose any ``opt_level``::

        opt = apex.optimizers.FusedLAMB(model.parameters(), lr = ....)
        model, opt = amp.initialize(model, opt, opt_level="O0" or "O1" or "O2")
        ...
        opt.step()

    In general, ``opt_level="O1"`` is recommended.

    LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        bias_correction (bool, optional): whether to apply bias correction to the
            moment estimates, as in Adam. (default: True)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of the gradient and its square. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-6)
        weight_decay (float, optional): weight decay (L2 penalty). (default: 0.01)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_.
            NOT SUPPORTED in FusedLAMB! (default: False)
        adam_w_mode (boolean, optional): if True, apply decoupled weight decay
            (also known as AdamW); if False, apply classic L2 regularization.
            (default: True)
        grad_averaging (bool, optional): whether to apply (1 - beta1) to the gradient
            when calculating the running average of the gradient. (default: True)
        set_grad_none (bool, optional): whether to set gradients to None when the
            ``zero_grad()`` method is called. (default: True)
        max_grad_norm (float, optional): value used to clip the global gradient norm.
            (default: 1.0)
        use_nvlamb (boolean, optional): apply the adaptive learning rate to parameters
            with 0.0 weight decay as well. (default: False)

    .. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, bias_correction=True,
                 betas=(0.9, 0.999), eps=1e-6, weight_decay=0.01,
                 amsgrad=False, adam_w_mode=True,
                 grad_averaging=True, set_grad_none=True,
                 max_grad_norm=1.0, use_nvlamb=False):
        if amsgrad:
            raise RuntimeError('FusedLAMB does not support the AMSGrad variant.')
        defaults = dict(lr=lr, bias_correction=bias_correction,
                        betas=betas, eps=eps, weight_decay=weight_decay,
                        grad_averaging=grad_averaging,
                        max_grad_norm=max_grad_norm)
        super(FusedLAMB, self).__init__(params, defaults)
        if multi_tensor_applier.available:
            import amp_C
            self.multi_tensor_l2norm = amp_C.multi_tensor_l2norm
            # Skip buffer: overflow flag consumed by multi_tensor_applier
            self._dummy_overflow_buf = torch.tensor([0], dtype=torch.int, device=self.param_groups[0]["params"][0].device)
            self.multi_tensor_lamb = amp_C.multi_tensor_lamb
        else:
            raise RuntimeError('apex.optimizers.FusedLAMB requires CUDA extensions')

        self.adam_w_mode = 1 if adam_w_mode else 0
        self.set_grad_none = set_grad_none
        self.use_nvlamb = use_nvlamb

    def zero_grad(self):
        # Setting grads to None (instead of zeroing in place) avoids an extra memset
        # and lets the next backward pass allocate fresh gradient tensors.
        if self.set_grad_none:
            for group in self.param_groups:
                for p in group['params']:
                    p.grad = None
        else:
            super(FusedLAMB, self).zero_grad()

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        # create separate grad lists for fp32 and fp16 params
        g_all_32, g_all_16 = [], []
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                if p.dtype == torch.float32:
                    g_all_32.append(p.grad.data)
                elif p.dtype == torch.float16:
                    g_all_16.append(p.grad.data)
                else:
                    raise RuntimeError('FusedLAMB only supports fp16 and fp32.')

        device = self.param_groups[0]["params"][0].device
        g_norm_32, g_norm_16 = torch.zeros(1, device=device), torch.zeros(1, device=device)
        # compute the grad norm for the two lists
        if len(g_all_32) > 0:
            g_norm_32 = multi_tensor_applier(self.multi_tensor_l2norm,
                                             self._dummy_overflow_buf,
                                             [g_all_32], False)[0]
        if len(g_all_16) > 0:
            g_norm_16 = multi_tensor_applier(self.multi_tensor_l2norm,
                                             self._dummy_overflow_buf,
                                             [g_all_16], False)[0]

        # blend the two grad norms to get the global grad norm
        global_grad_norm = multi_tensor_applier(self.multi_tensor_l2norm,
                                                self._dummy_overflow_buf,
                                                [[g_norm_32, g_norm_16]],
                                                False)[0]
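        # Editorial note (not in the original file): taking the L2 norm of the two
        # per-dtype norms above is equivalent to sqrt(g_norm_32**2 + g_norm_16**2),
        # i.e. the L2 norm over all gradients together; the fused kernel uses this
        # scalar to clip against max_grad_norm.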
        max_grad_norm = self.defaults['max_grad_norm']

        for group in self.param_groups:
            bias_correction = 1 if group['bias_correction'] else 0
            beta1, beta2 = group['betas']
            grad_averaging = 1 if group['grad_averaging'] else 0

            # assume the same step across the group for now to simplify things;
            # a per-parameter step could easily be supported by making it a tensor,
            # or by passing a list into the kernel
            if 'step' in group:
                group['step'] += 1
            else:
                group['step'] = 1

            # create lists for multi-tensor apply
            g_16, p_16, m_16, v_16 = [], [], [], []
            g_32, p_32, m_32, v_32 = [], [], [], []

            for p in group['params']:
                if p.grad is None:
                    continue
                if p.grad.data.is_sparse:
                    raise RuntimeError('FusedLAMB does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                if p.dtype == torch.float16:
                    g_16.append(p.grad.data)
                    p_16.append(p.data)
                    m_16.append(state['exp_avg'])
                    v_16.append(state['exp_avg_sq'])
                elif p.dtype == torch.float32:
                    g_32.append(p.grad.data)
                    p_32.append(p.data)
                    m_32.append(state['exp_avg'])
                    v_32.append(state['exp_avg_sq'])
                else:
                    raise RuntimeError('FusedLAMB only supports fp16 and fp32.')

            if len(g_16) > 0:
                multi_tensor_applier(self.multi_tensor_lamb,
                                     self._dummy_overflow_buf,
                                     [g_16, p_16, m_16, v_16],
                                     group['lr'],
                                     beta1,
                                     beta2,
                                     group['eps'],
                                     group['step'],
                                     bias_correction,
                                     group['weight_decay'],
                                     grad_averaging,
                                     self.adam_w_mode,
                                     global_grad_norm,
                                     max_grad_norm,
                                     self.use_nvlamb)
            if len(g_32) > 0:
                multi_tensor_applier(self.multi_tensor_lamb,
                                     self._dummy_overflow_buf,
                                     [g_32, p_32, m_32, v_32],
                                     group['lr'],
                                     beta1,
                                     beta2,
                                     group['eps'],
                                     group['step'],
                                     bias_correction,
                                     group['weight_decay'],
                                     grad_averaging,
                                     self.adam_w_mode,
                                     global_grad_norm,
                                     max_grad_norm,
                                     self.use_nvlamb)

        return loss
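

# -----------------------------------------------------------------------------
# Editorial usage sketch (not part of the original apex source): a minimal
# training loop showing how FusedLAMB is typically constructed and stepped.
# It assumes a CUDA device and an Apex build with the C++/CUDA extensions;
# the tiny model and random data below are placeholders, not from this repo.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    model = torch.nn.Linear(16, 16).cuda()
    optimizer = FusedLAMB(model.parameters(), lr=1e-3, weight_decay=0.01,
                          max_grad_norm=1.0)
    for _ in range(3):
        inputs = torch.randn(8, 16, device="cuda")
        loss = model(inputs).pow(2).mean()
        loss.backward()
        optimizer.step()       # one fused LAMB update over all parameters
        optimizer.zero_grad()  # sets grads to None (set_grad_none=True)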