# This function implements both warmup and lr_decay.
def warmup_linear(x, warmup=0.002):
    """Linear warmup followed by linear decay (the schedule used by BERT).

    Args:
        x: training progress in [0, 1], i.e. current_step / total_steps.
        warmup: fraction of total training spent warming up.

    Returns:
        A multiplier for the base learning rate: ramps linearly from 0 to 1
        during warmup, then decays linearly from 1 toward 0.
    """
    if x < warmup:
        return x / warmup
    # Clamp at 0 so the learning rate never goes negative when training
    # runs past the expected number of steps (x > 1.0).
    return max(0.0, 1.0 - x)

# Number of optimizer updates already performed when resuming from start_epoch.
global_step_th = int(len(train_examples) / batch_size / gradient_accumulation_steps * start_epoch)

for epoch in range(start_epoch, total_train_epoch):
    train_loss = 0
    train_start = time.time()
    model.train()
    for step, batch in enumerate(train_dataloader):
        # NOTE(review): placeholder in the original notes — build the model
        # inputs/outputs from `batch` and compute `object_loss` here.

        # Scale the loss so gradients accumulated over several mini-batches
        # average to one effective batch.
        if gradient_accumulation_steps > 1:
            object_loss = object_loss / gradient_accumulation_steps

        # Backpropagation: accumulate gradients into the parameters.
        object_loss.backward()
        train_loss = train_loss + object_loss.item()
        if (step + 1) % gradient_accumulation_steps == 0:
            # Manually update the learning rate with the warmup schedule BERT uses.
            lr_this_step = learning_rate * warmup_linear(global_step_th / total_train_steps)
            # Write the new learning rate into every optimizer parameter group.
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_this_step
            optimizer.step()
            # Reset accumulated gradients after each optimizer update
            # (missing in the original notes; without it gradients grow forever).
            optimizer.zero_grad()
            global_step_th = global_step_th + 1

https://blog.csdn.net/Princeicon/article/details/108058822

https://www.cnblogs.com/lart/p/11628696.html

### 二）调节学习率（学习率的预热与衰减）

（１）学习率的调节步骤一般是先预热，后衰减。

（２）学习率的调节方式包括手动调节和库函数调节。二者除了在引入方法上不一样之外，库函数调节不需要显式地更换模型中的学习率，而手动调节需要遍历 optimizer.param_groups（它是一个由 dict 组成的 list），对每个 param_group 执行 param_group['lr'] = updated_lr 来进行调节。

①　Warmup（学习率预热）

https://blog.csdn.net/Xiao_CangTian/article/details/109269555

https://blog.csdn.net/sinat_36618660/article/details/99650804

https://blog.csdn.net/shanglianlm/article/details/85143614

https://blog.csdn.net/Guo_Python/article/details/106019396

https://zhuanlan.zhihu.com/p/136183319

②　lr_decay（学习率衰减）

https://cloud.tencent.com/developer/article/1539729

https://blog.csdn.net/qq_40367479/article/details/82530324

https://blog.csdn.net/dou3516/article/details/105329103

https://zhuanlan.zhihu.com/p/392350994

https://www.cnblogs.com/wuliytTaotao/p/11101652.html

# This function implements both warmup and lr_decay.
def warmup_linear(x, warmup=0.002):
    """Linear warmup then linear decay of the learning-rate multiplier.

    Args:
        x: training progress in [0, 1] (current_step / total_steps).
        warmup: fraction of total training used for warmup.

    Returns:
        Multiplier for the base learning rate: x/warmup while warming up,
        then 1-x decaying toward 0 (clamped so it never goes negative).
    """
    if x < warmup:
        return x / warmup
    # Clamp at 0 so the lr cannot become negative past the planned steps.
    return max(0.0, 1.0 - x)

for epoch in range(start_epoch, total_train_epoch):
    train_loss = 0
    train_start = time.time()
    model.train()
    for step, batch in enumerate(train_dataloader):
        # NOTE(review): placeholder in the original notes — build the model
        # inputs/outputs from `batch` and compute `object_loss` here.

        # Scale the loss so gradients accumulated over several mini-batches
        # average to one effective batch.
        if gradient_accumulation_steps > 1:
            object_loss = object_loss / gradient_accumulation_steps

        # Backpropagation: accumulate gradients into the parameters.
        object_loss.backward()
        train_loss = train_loss + object_loss.item()
        if (step + 1) % gradient_accumulation_steps == 0:
            # Manually update the learning rate with the warmup schedule BERT uses.
            lr_this_step = learning_rate * warmup_linear(global_step_th / total_train_steps)
            # Write the new learning rate into every optimizer parameter group.
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_this_step
            optimizer.step()
            # Reset accumulated gradients after each optimizer update
            # (missing in the original notes; without it gradients grow forever).
            optimizer.zero_grad()
            global_step_th = global_step_th + 1

import numpy as np

warmup_steps = 2500
init_lr = 0.1
# Simulate 15000 training steps.
max_steps = 15000
for train_steps in range(max_steps):
    # Warmup phase: ramp the lr linearly from 0 up to init_lr.
    if warmup_steps and train_steps < warmup_steps:
        warmup_percent_done = train_steps / warmup_steps
        warmup_learning_rate = init_lr * warmup_percent_done  # gradual warmup lr
        learning_rate = warmup_learning_rate
    # Decay phase after warmup.
    else:
        # learning_rate = np.sin(learning_rate)  # alternative: sin-shaped decay
        learning_rate = learning_rate ** 1.0001  # approximately exponential decay

# Reference list of PyTorch's built-in LR scheduler constructors.
# StepLR: multiply the lr by `gamma` every `step_size` epochs.
torch.optim.lr_scheduler.StepLR(optimizer, step_size, gamma=0.1, last_epoch=-1)
# MultiStepLR: multiply the lr by `gamma` at each epoch listed in `milestones`.
torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones, gamma=0.1, last_epoch=-1)
# ExponentialLR: multiply the lr by `gamma` every epoch.
torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma, last_epoch=-1)
# CosineAnnealingLR: cosine-shaped decay from the base lr down to `eta_min` over `T_max` epochs.
torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max, eta_min=0, last_epoch=-1)
# ReduceLROnPlateau: scale the lr by `factor` when the monitored metric stops improving for `patience` epochs.
torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08)
# LambdaLR: lr = base_lr * lr_lambda(epoch), with a user-supplied function.
torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1)

EXAMPLE.1(该代码仅实现了lr_decay)
# EXAMPLE.1: lr_decay only, via StepLR.
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)  # initial lr 0.1
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)  # halve the lr every 10 iterations
arr = []  # collected lr values (undefined in the original notes)
for i in range(1, 100):
    scheduler.step()  # advance the scheduler's step count by 1
    arr.append(optimizer.state_dict()['param_groups'][0]['lr'])
    # arr.append(scheduler.get_lr())  # equivalent way to read the current lr

EXAMPLE.2(该代码仅实现了lr_decay)
# EXAMPLE.2: lr_decay only, via LambdaLR; lr = base_lr * lambdaf(step_count).
lambdaf = lambda epoch: 0.05 + (epoch) / 100
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambdaf)
for i in range(1, 100):
    scheduler.step()  # advances the step count by 1 and passes it to lambdaf

EXAMPLE.3(该代码仅实现了warmup)
def warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor):
    """Build a LambdaLR that linearly warms the lr multiplier up to 1.

    Args:
        optimizer: the optimizer whose lr is scheduled.
        warmup_iters: number of steps the warmup lasts.
        warmup_factor: starting multiplier at step 0 (e.g. 1/1000).

    Returns:
        A ``torch.optim.lr_scheduler.LambdaLR`` instance.
    """
    import torch  # local import keeps this snippet self-contained

    def f(x):
        # x is the current step count.
        if x >= warmup_iters:
            return 1
        alpha = float(x) / warmup_iters  # warmup progress in [0, 1)
        # Interpolate linearly from warmup_factor up to 1.
        return warmup_factor * (1 - alpha) + alpha

    # The original notes omitted this return statement; without it the
    # caller's `lr_scheduler = warmup_lr_scheduler(...)` would get None.
    return torch.optim.lr_scheduler.LambdaLR(optimizer, f)

# EXAMPLE.3 driver: construct and advance the warmup scheduler.
# NOTE(review): `warmup`, `train_loader` and `optimizer` are defined elsewhere
# in the original notes; the loop nesting is a best guess, since the source
# had no indentation — confirm against the original example.
if warmup:
    warmup_factor = 1. / 1000
    warmup_iters = min(1000, len(train_loader) - 1)
    lr_scheduler = warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)
    for i in range(1, 100):
        lr_scheduler.step()  # advance the step count passed to f

EXAMPLE.4(该代码实现warmup和lr_decay)
# EXAMPLE.4: warmup for the first warmup_epoch epochs, cosine decay afterwards.
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9, weight_decay=5e-4)
warmup_epoch = 5
# Cosine decay spread over the remaining (100 - warmup_epoch) epochs.
scheduler = CosineAnnealingLR(optimizer, 100 - warmup_epoch)
warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * warmup_epoch)
for epoch in range(1, max_epoch + 1):
    # lr_decay phase
    if epoch >= warmup_epoch:
        scheduler.step()
        learn_rate = scheduler.get_lr()[0]
        print("Learn_rate:%s" % learn_rate)
    # warmup phase
    else:
        warmup_scheduler.step()
        warm_lr = warmup_scheduler.get_lr()
        print("warm_lr:%s" % warm_lr)

EXAMPLE.5(transformers.get_linear_schedule_with_warmup(),实现学习率预热和学习率下降)
# EXAMPLE.5: transformers.get_linear_schedule_with_warmup — warmup then linear decay.
total_steps = len(train_dataloader) * num_epoch
optimizer = AdamW(optimizer_grouped_parameters, lr=3e-5, eps=1e-8)
lr_scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_rate * total_steps, num_training_steps=total_steps)
for __ in trange(num_epoch, desc='Epoch'):
    model.train()
    total_loss = 0
    # ... (forward pass elided in the original notes; `outputs` comes from the model) ...
    loss = outputs.loss
    loss.backward()
    total_loss = total_loss + loss.cpu().item()  # item() returns the tensor's Python value
    # update the parameters
    optimizer.step()
    # update the learning rate
    lr_scheduler.step()

lr_decay = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, ...)　（学习率衰减方法之一；构造时必须传入 optimizer）

factor是学习率变换时乘的因子；

patience是指，当连续patience个epoch而指标(loss或accuracy)没有变化时，我们对学习率进行更改；

threshold指，当性能变化小于该值时，即认为没有性能变化；

min_lr模型所允许的最小学习率，即学习率不能小于该值；

eps，学习率变化的最小值。当学习率变化小于该值时，忽略掉；

lr_decay.step(val_loss/accuracy)，学习率更新的指标依据。当某指标loss(或accuracy)在最近几个epoch中都没有变化下降（或升高）超过给定阈值时，调整学习率。如当验证集的loss不再下降时，调整学习率；或监测验证集的accuracy不再升高时，调整学习率。

https://blog.csdn.net/weixin_40100431/article/details/84311430

https://zhuanlan.zhihu.com/p/69411064

https://www.jianshu.com/p/26a7dbc15246

https://www.cnblogs.com/xym4869/p/11654611.html

https://blog.csdn.net/qyhaill/article/details/103043637

https://blog.csdn.net/emperinter/article/details/108917935

https://www.emperinter.info/2020/08/05/change-leaning-rate-by-reducelronplateau-in-pytorch/

# Zero all parameter (weight) gradients before the backward pass.
# NOTE(review): the original notes had this comment but the call itself
# was missing — without it, gradients accumulate across iterations.
optimizer.zero_grad()
# Compute the model's output from the input.
outputs = net(inputs)
# Compute the loss.
loss = criterion(outputs, labels)
# Backpropagate the loss to compute weight gradients.
loss.backward()
# Update the weights.
optimizer.step()

### （四）optimizer和scheduler

optimizer的作用在于根据选用的优化器以及设置相应的lr、momentum等(超参数)对模型参数(优化器中的参数)进行更新，更新的方法是optimizer.step()。

scheduler的作用在于对optimizer中的学习率进行更新、调整，更新的方法是scheduler.step()。

注：scheduler.step()函数用来完成optimizer实例中学习率的更新，如果没有scheduler中的step方法，也就无法对optimizer中的学习率进行调整 。

https://zhuanlan.zhihu.com/p/367999849

https://blog.csdn.net/qq_40178291/article/details/99963586

https://www.jianshu.com/p/1db8581edd4f