allennlp.training.learning_rate_schedulers.learning_rate_scheduler#

ExponentialLearningRateScheduler#

ExponentialLearningRateScheduler(
    self,
    optimizer: allennlp.training.optimizers.Optimizer,
    gamma: float = 0.1,
    last_epoch: int = -1,
) -> None

Registered as a LearningRateScheduler with name "exponential".
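
This class is a thin wrapper around PyTorch's torch.optim.lr_scheduler.ExponentialLR, which multiplies the learning rate by gamma once per epoch. Below is a minimal usage sketch; the tiny Linear model, the AdamOptimizer, and the gamma value of 0.9 are illustrative assumptions, not requirements of this class.

import torch
from allennlp.training.optimizers import AdamOptimizer
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import (
    ExponentialLearningRateScheduler,
)

# Any module with trainable parameters works; a tiny Linear layer keeps the sketch short.
model = torch.nn.Linear(10, 2)
parameters = [(name, p) for name, p in model.named_parameters() if p.requires_grad]

optimizer = AdamOptimizer(parameters, lr=0.01)
scheduler = ExponentialLearningRateScheduler(optimizer, gamma=0.9)

for epoch in range(5):
    # ... one epoch of training goes here ...
    scheduler.step()  # learning rate becomes 0.01 * 0.9 ** (epoch + 1)

When training from a configuration file, the same scheduler is typically selected by its registered name, e.g. "learning_rate_scheduler": {"type": "exponential", "gamma": 0.9} inside the trainer section.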

MultiStepLearningRateScheduler#

MultiStepLearningRateScheduler(
    self,
    optimizer: allennlp.training.optimizers.Optimizer,
    milestones: List[int],
    gamma: float = 0.1,
    last_epoch: int = -1,
) -> None

Registered as a LearningRateScheduler with name "multi_step".
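
This class wraps torch.optim.lr_scheduler.MultiStepLR: the learning rate is multiplied by gamma at each epoch listed in milestones. A small sketch follows; the milestones, the Linear model, and the AdamOptimizer setup are illustrative assumptions.

import torch
from allennlp.training.optimizers import AdamOptimizer
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import (
    MultiStepLearningRateScheduler,
)

model = torch.nn.Linear(10, 2)  # illustrative stand-in for a real model
parameters = [(name, p) for name, p in model.named_parameters() if p.requires_grad]

optimizer = AdamOptimizer(parameters, lr=0.01)
# Decay at epochs 10 and 20: lr is 0.01 for the first 10 epochs,
# 0.001 for the next 10, and 0.0001 afterwards.
scheduler = MultiStepLearningRateScheduler(optimizer, milestones=[10, 20], gamma=0.1)

for epoch in range(30):
    # ... one epoch of training goes here ...
    scheduler.step()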

ReduceOnPlateauLearningRateScheduler#

ReduceOnPlateauLearningRateScheduler(
    self,
    optimizer: allennlp.training.optimizers.Optimizer,
    mode: str = 'min',
    factor: float = 0.1,
    patience: int = 10,
    verbose: bool = False,
    threshold_mode: str = 'rel',
    threshold: float = 0.0001,
    cooldown: int = 0,
    min_lr: Union[float, List[float]] = 0,
    eps: float = 1e-08,
) -> None

Registered as a LearningRateScheduler with name "reduce_on_plateau".
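
This class wraps torch.optim.lr_scheduler.ReduceLROnPlateau, so step() must be given the metric being monitored (for example, validation loss when mode is "min"); when configured in a trainer, the validation metric is passed in at the end of each epoch. The sketch below uses a placeholder validation_loss value and an assumed AdamOptimizer setup, both purely illustrative.

import torch
from allennlp.training.optimizers import AdamOptimizer
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import (
    ReduceOnPlateauLearningRateScheduler,
)

model = torch.nn.Linear(10, 2)  # illustrative stand-in for a real model
parameters = [(name, p) for name, p in model.named_parameters() if p.requires_grad]

optimizer = AdamOptimizer(parameters, lr=0.01)
# Halve the learning rate if the monitored metric has not improved for 3 epochs.
scheduler = ReduceOnPlateauLearningRateScheduler(optimizer, factor=0.5, patience=3)

for epoch in range(10):
    # ... train, then evaluate to obtain a validation metric ...
    validation_loss = 1.0 / (epoch + 1)  # placeholder value for the sketch
    scheduler.step(validation_loss)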

StepLearningRateScheduler#

StepLearningRateScheduler(
    self,
    optimizer: allennlp.training.optimizers.Optimizer,
    step_size: int,
    gamma: float = 0.1,
    last_epoch: int = -1,
) -> None

Registered as a LearningRateScheduler with name "step".
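
This class wraps torch.optim.lr_scheduler.StepLR: the learning rate is multiplied by gamma every step_size epochs. A minimal sketch follows; the step_size and gamma values, the Linear model, and the AdamOptimizer setup are illustrative assumptions.

import torch
from allennlp.training.optimizers import AdamOptimizer
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import (
    StepLearningRateScheduler,
)

model = torch.nn.Linear(10, 2)  # illustrative stand-in for a real model
parameters = [(name, p) for name, p in model.named_parameters() if p.requires_grad]

optimizer = AdamOptimizer(parameters, lr=0.01)
# Multiply the learning rate by 0.5 every 5 epochs.
scheduler = StepLearningRateScheduler(optimizer, step_size=5, gamma=0.5)

for epoch in range(20):
    # ... one epoch of training goes here ...
    scheduler.step()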