log_writer

allennlp.training.callbacks.log_writer

LogWriterCallback

class LogWriterCallback(TrainerCallback):
 | def __init__(
 |     self,
 |     serialization_dir: str,
 |     summary_interval: int = 100,
 |     distribution_interval: Optional[int] = None,
 |     batch_size_interval: Optional[int] = None,
 |     should_log_parameter_statistics: bool = True,
 |     should_log_learning_rate: bool = False,
 |     batch_loss_moving_average_count: int = 100
 | ) -> None

An abstract base class for callbacks that log training statistics and metrics. Examples of concrete implementations are the TensorBoardCallback and WandBCallback.

Parameters

  • serialization_dir : str
    The training serialization directory.

    In a typical AllenNLP configuration file, this parameter does not get an entry in the file; it gets passed in separately.

  • summary_interval : int, optional (default = 100)
    Most statistics are written out only once every this many batches.

  • distribution_interval : int, optional (default = None)
    When this parameter is specified, the following additional logging is enabled once every this many batches:

    * Distributions of model parameters
    * The ratio of parameter update norm to parameter norm
    * Distribution of layer activations
    

    The layer activations are logged for any modules in the Model that have the attribute should_log_activations set to True (see the construction sketch after this parameter list).

    Logging distributions requires a number of GPU-CPU copies during training and is typically slow, so we recommend logging distributions relatively infrequently.

    Note

    Only Modules that return tensors, tuples of tensors, or dicts with tensors as values currently support activation logging.

  • batch_size_interval : int, optional (default = None)
    If defined, how often to log the average batch size.

  • should_log_parameter_statistics : bool, optional (default = True)
    Whether to log parameter statistics (mean and standard deviation of parameters and gradients). If True, parameter stats are logged every summary_interval batches.

  • should_log_learning_rate : bool, optional (default = False)
    Whether to log (parameter-specific) learning rate. If True, learning rates are logged every summary_interval batches.

  • batch_loss_moving_average_count : int, optional (default = 100)
    The length of the moving average for batch loss.
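
For orientation, the following is a minimal sketch of constructing one of the concrete implementations in Python rather than through a configuration file. It assumes that TensorBoardCallback accepts the same constructor parameters as this base class and that the model has an encoder submodule; the paths and names here are illustrative only.

from allennlp.training.callbacks import TensorBoardCallback

# Assumption: TensorBoardCallback forwards these keyword arguments to
# LogWriterCallback.__init__.
callback = TensorBoardCallback(
    serialization_dir="/tmp/my_run",   # normally passed in by the training machinery
    summary_interval=100,              # scalar statistics every 100 batches
    distribution_interval=500,         # distributions every 500 batches (slow, keep infrequent)
    batch_size_interval=100,
    should_log_parameter_statistics=True,
    should_log_learning_rate=True,
)

# To enable activation logging for a particular submodule, mark it explicitly
# (hypothetical `encoder` attribute on the model):
# model.encoder.should_log_activations = True

# The callback is then handed to the trainer, e.g.
# GradientDescentTrainer(..., callbacks=[callback], ...).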

log_scalars

class LogWriterCallback(TrainerCallback):
 | ...
 | def log_scalars(
 |     self,
 |     scalars: Dict[str, Union[int, float]],
 |     log_prefix: str = "",
 |     epoch: Optional[int] = None
 | ) -> None

Required to be implemented by subclasses.

Defines how batch or epoch scalar metrics are logged.

log_tensors

class LogWriterCallback(TrainerCallback):
 | ...
 | def log_tensors(
 |     self,
 |     tensors: Dict[str, torch.Tensor],
 |     log_prefix: str = "",
 |     epoch: Optional[int] = None
 | ) -> None

Required to be implemented by subclasses.

Defines how batch or epoch tensor metrics are logged.
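
To make the contract concrete, here is a minimal sketch of a subclass that implements both required methods, log_scalars and log_tensors, by printing summaries to stdout. The class name ConsoleLogWriter and the output format are illustrative, not part of the library.

from typing import Dict, Optional, Union

import torch

from allennlp.training.callbacks.log_writer import LogWriterCallback


class ConsoleLogWriter(LogWriterCallback):
    """A toy log writer that prints metrics instead of sending them to a backend."""

    def log_scalars(
        self,
        scalars: Dict[str, Union[int, float]],
        log_prefix: str = "",
        epoch: Optional[int] = None,
    ) -> None:
        for name, value in scalars.items():
            key = f"{log_prefix}/{name}" if log_prefix else name
            print(f"[epoch={epoch}] {key} = {value}")

    def log_tensors(
        self,
        tensors: Dict[str, torch.Tensor],
        log_prefix: str = "",
        epoch: Optional[int] = None,
    ) -> None:
        # Summarize each tensor rather than dumping its full contents.
        for name, tensor in tensors.items():
            key = f"{log_prefix}/{name}" if log_prefix else name
            t = tensor.detach().float()
            print(f"[epoch={epoch}] {key}: mean={t.mean():.4f} std={t.std():.4f}")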

log_inputs

class LogWriterCallback(TrainerCallback):
 | ...
 | def log_inputs(
 |     self,
 |     inputs: List[TensorDict],
 |     log_prefix: str = ""
 | ) -> None

Can be optionally implemented by subclasses.

Defines how batch inputs are logged. This is called once at the start of each epoch.
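
Continuing the sketch above, an optional log_inputs override might record only the field names and tensor shapes of each batch. This assumes TensorDict behaves like a flat mapping from field names to tensors; nested fields are summarized by type name.

from typing import List

from allennlp.data.data_loaders import TensorDict


class ConsoleLogWriter(LogWriterCallback):
    # ... log_scalars and log_tensors as in the sketch above ...

    def log_inputs(self, inputs: List[TensorDict], log_prefix: str = "") -> None:
        # The full tensors can be large, so log only names and shapes.
        for i, batch in enumerate(inputs):
            shapes = {
                name: tuple(value.shape) if hasattr(value, "shape") else type(value).__name__
                for name, value in batch.items()
            }
            print(f"{log_prefix or 'inputs'} batch {i}: {shapes}")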

close

class LogWriterCallback(TrainerCallback):
 | ...
 | def close(self) -> None

Called at the end of training to remove any module hooks and close out any other logging resources.
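
A subclass that owns additional resources (an open file, a remote client, etc.) would typically release them in close and defer to the base class for hook removal. A minimal sketch, assuming the base-class close performs the hook cleanup described above and that the hypothetical _log_file attribute was opened by the subclass:

class FileLogWriter(LogWriterCallback):
    # ... required log_scalars / log_tensors omitted for brevity ...

    def close(self) -> None:
        # Let the base class remove any module hooks it registered.
        super().close()
        # Then release resources owned by this subclass (hypothetical attribute).
        if getattr(self, "_log_file", None) is not None:
            self._log_file.close()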

on_start

class LogWriterCallback(TrainerCallback):
 | ...
 | def on_start(
 |     self,
 |     trainer: "GradientDescentTrainer",
 |     is_primary: bool = True,
 |     **kwargs
 | ) -> None

on_batch

class LogWriterCallback(TrainerCallback):
 | ...
 | def on_batch(
 |     self,
 |     trainer: "GradientDescentTrainer",
 |     batch_inputs: List[TensorDict],
 |     batch_outputs: List[Dict[str, Any]],
 |     batch_metrics: Dict[str, Any],
 |     epoch: int,
 |     batch_number: int,
 |     is_training: bool,
 |     is_primary: bool = True,
 |     batch_grad_norm: Optional[float] = None,
 |     **kwargs
 | ) -> None

on_epoch

class LogWriterCallback(TrainerCallback):
 | ...
 | def on_epoch(
 |     self,
 |     trainer: "GradientDescentTrainer",
 |     metrics: Dict[str, Any],
 |     epoch: int,
 |     is_primary: bool = True,
 |     **kwargs
 | ) -> None

on_end

class LogWriterCallback(TrainerCallback):
 | ...
 | def on_end(
 |     self,
 |     trainer: "GradientDescentTrainer",
 |     metrics: Dict[str, Any] = None,
 |     epoch: int = None,
 |     is_primary: bool = True,
 |     **kwargs
 | ) -> None

log_batch

class LogWriterCallback(TrainerCallback):
 | ...
 | def log_batch(
 |     self,
 |     batch_grad_norm: Optional[float],
 |     metrics: Dict[str, float],
 |     batch_group: List[TensorDict],
 |     param_updates: Optional[Dict[str, torch.Tensor]],
 |     batch_number: int
 | ) -> None

Called every batch to perform all of the logging that is due.

log_epoch

class LogWriterCallback(TrainerCallback):
 | ...
 | def log_epoch(
 |     self,
 |     train_metrics: Dict[str, Any],
 |     val_metrics: Dict[str, Any],
 |     epoch: int
 | ) -> None

Called at the end of every epoch to log training and validation metrics.