"""
@contact: sherlockliao01@gmail.com
"""
import math
from bisect import bisect_right
from typing import List

import torch
from torch.optim.lr_scheduler import _LRScheduler

__all__ = ["WarmupMultiStepLR", "WarmupCosineAnnealingLR"]

class WarmupMultiStepLR(_LRScheduler):
    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        milestones: List[int],
        gamma: float = 0.1,
        warmup_factor: float = 0.001,
        warmup_iters: int = 1000,
        warmup_method: str = "linear",
        last_epoch: int = -1,
        **kwargs,
    ):
        if not list(milestones) == sorted(milestones):
            raise ValueError(
                "Milestones should be a list of increasing integers. Got {}".format(milestones)
            )
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super().__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        warmup_factor = _get_warmup_factor_at_iter(
            self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
        )
        # Scale each base lr by the warmup factor and by gamma once per milestone passed.
        return [
            base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch)
            for base_lr in self.base_lrs
        ]

    def _compute_values(self) -> List[float]:
        # Compute the per-group values by deferring to get_lr().
        return self.get_lr()
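
# Usage sketch (illustrative only, not part of the original module): driving
# WarmupMultiStepLR once per training iteration. The model, milestone values and
# hyper-parameters below are made-up placeholders.
def _example_warmup_multistep_lr():
    import torch.nn as nn

    model = nn.Linear(10, 2)  # placeholder model
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    scheduler = WarmupMultiStepLR(
        optimizer,
        milestones=[3000, 6000],  # iterations at which lr is multiplied by gamma
        gamma=0.1,
        warmup_factor=0.001,
        warmup_iters=1000,
        warmup_method="linear",
    )
    for _ in range(9000):
        optimizer.step()   # forward/backward omitted for brevity
        scheduler.step()   # advance the schedule by one iteration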
55 r"""Set the learning rate of each parameter group using a cosine annealing
56 schedule, where :math:`\eta_{max}` is set to the initial lr and
57 :math:`T_{cur}` is the number of epochs since the last restart in SGDR:
60 \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 +
61 \cos(\frac{T_{cur}}{T_{max}}\pi))
63 When last_epoch=-1, sets initial lr as lr.
65 It has been proposed in
66 `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only
67 implements the cosine annealing part of SGDR, and not the restarts.
70 optimizer (Optimizer): Wrapped optimizer.
71 T_max (int): Maximum number of iterations.
72 eta_min (float): Minimum learning rate. Default: 0.
73 last_epoch (int): The index of last epoch. Default: -1.
75 .. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
76 https://arxiv.org/abs/1608.03983

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        max_iters: int,
        delay_iters: int = 0,
        eta_min_lr: float = 0,
        warmup_factor: float = 0.001,
        warmup_iters: int = 1000,
        warmup_method: str = "linear",
        last_epoch: int = -1,
        **kwargs,
    ):
        self.max_iters = max_iters
        self.delay_iters = delay_iters
        self.eta_min_lr = eta_min_lr
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        assert self.delay_iters >= self.warmup_iters, \
            "Scheduler delay iters must be larger than warmup iters"
        super().__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        if self.last_epoch <= self.warmup_iters:
            # Warmup phase: scale each base lr by the current warmup factor.
            warmup_factor = _get_warmup_factor_at_iter(
                self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
            )
            return [
                base_lr * warmup_factor
                for base_lr in self.base_lrs
            ]
        elif self.last_epoch <= self.delay_iters:
            # Hold phase: keep the base lr until the cosine decay starts.
            return self.base_lrs
        else:
            # Cosine annealing from the base lr down to eta_min_lr.
            return [
                self.eta_min_lr + (base_lr - self.eta_min_lr) *
                (1 + math.cos(math.pi * (self.last_epoch - self.delay_iters) /
                              (self.max_iters - self.delay_iters))) / 2
                for base_lr in self.base_lrs
            ]
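
# Usage sketch (illustrative only, not part of the original module): a typical
# warmup -> hold -> cosine-decay run. max_iters, delay_iters and the base lr are
# made-up placeholders; delay_iters is chosen to cover the warmup period.
def _example_warmup_cosine_annealing_lr():
    import torch.nn as nn

    model = nn.Linear(10, 2)  # placeholder model
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = WarmupCosineAnnealingLR(
        optimizer,
        max_iters=10000,    # total number of scheduled iterations
        delay_iters=2000,   # hold the base lr until this iteration
        eta_min_lr=1e-5,    # floor of the cosine decay
        warmup_factor=0.001,
        warmup_iters=1000,
    )
    for _ in range(10000):
        optimizer.step()    # forward/backward omitted for brevity
        scheduler.step()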

def _get_warmup_factor_at_iter(
    method: str, iter: int, warmup_iters: int, warmup_factor: float
) -> float:
    """
    Return the learning rate warmup factor at a specific iteration.
    See https://arxiv.org/abs/1706.02677 for more details.

    Args:
        method (str): warmup method; either "constant" or "linear".
        iter (int): iteration at which to calculate the warmup factor.
        warmup_iters (int): the number of warmup iterations.
        warmup_factor (float): the base warmup factor (the meaning changes according
            to the method used).

    Returns:
        float: the effective warmup factor at the given iteration.
    """
    # Warmup is over; run the underlying schedule at full strength.
    if iter >= warmup_iters:
        return 1.0

    if method == "constant":
        return warmup_factor
    elif method == "linear":
        # Interpolate linearly from warmup_factor at iter 0 up to 1.0 at warmup_iters.
        alpha = iter / warmup_iters
        return warmup_factor * (1 - alpha) + alpha
    else:
        raise ValueError("Unknown warmup method: {}".format(method))