allennlp.training.tensorboard_writer

TensorboardWriter

class TensorboardWriter(FromParams):
 | def __init__(
 |     self,
 |     serialization_dir: Optional[str] = None,
 |     summary_interval: int = 100,
 |     histogram_interval: Optional[int] = None,
 |     batch_size_interval: Optional[int] = None,
 |     should_log_parameter_statistics: bool = True,
 |     should_log_learning_rate: bool = False,
 |     get_batch_num_total: Optional[Callable[[], int]] = None
 | ) -> None

Class that handles Tensorboard (and other) logging.

Parameters

  • serialization_dir : str, optional (default = None)
    If provided, this is where the Tensorboard logs will be written.

    In a typical AllenNLP configuration file, this parameter does not get an entry under "tensorboard_writer"; it gets passed in separately.

  • summary_interval : int, optional (default = 100)
    Most statistics will be written out only every this many batches.

  • histogram_interval : int, optional (default = None)
    If provided, activation histograms will be written out every this many batches. If None, activation histograms will not be written out. When this parameter is specified, the following additional logging is enabled:

      • Histograms of model parameters
      • The ratio of parameter update norm to parameter norm
      • Histograms of layer activations

    We log histograms of the parameters returned by model.get_parameters_for_histogram_tensorboard_logging. The layer activations are logged for any modules in the Model that have the attribute should_log_activations set to True. Logging histograms requires a number of GPU-CPU copies during training and is typically slow, so we recommend logging histograms relatively infrequently. Note: only Modules that return tensors, tuples of tensors, or dicts with tensors as values currently support activation logging.

  • batch_size_interval : int, optional (default = None)
    If defined, how often to log the average batch size.

  • should_log_parameter_statistics : bool, optional (default = True)
    Whether to log parameter statistics (mean and standard deviation of parameters and gradients).

  • should_log_learning_rate : bool, optional (default = False)
    Whether to log (parameter-specific) learning rates.

  • get_batch_num_total : Callable[[], int], optional (default = None)
    A thunk that returns the number of batches so far. Most likely this will be a closure around an instance variable in your Trainer class. Because of circular dependencies in constructing this object and the Trainer, this is typically None when you construct the object, but it gets set inside the constructor of our Trainer.
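
As a point of orientation, here is a minimal sketch of constructing a TensorboardWriter by hand. The serialization directory is illustrative, and the get_batch_num_total closure stands in for the counter that the Trainer normally wires up for you.

    from allennlp.training.tensorboard_writer import TensorboardWriter

    batch_num_total = 0  # in practice this counter lives on the Trainer

    writer = TensorboardWriter(
        serialization_dir="/tmp/my_model",   # illustrative path
        summary_interval=100,                # most statistics every 100 batches
        histogram_interval=1000,             # histograms are slow; log them rarely
        should_log_parameter_statistics=True,
        should_log_learning_rate=True,
        get_batch_num_total=lambda: batch_num_total,
    )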

log_memory_usage

class TensorboardWriter(FromParams):
 | ...
 | def log_memory_usage(
 |     self,
 |     cpu_memory_usage: Dict[int, int],
 |     gpu_memory_usage: Dict[int, int]
 | )
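
A hedged sketch of calling this directly; each dict maps a worker or GPU id to a memory figure (the Trainer obtains these from AllenNLP's memory utilities), and the numbers here are made up:

    # Hypothetical values: keys are process/GPU ids, values memory usage (e.g. MB).
    writer.log_memory_usage(
        cpu_memory_usage={0: 4200},
        gpu_memory_usage={0: 9800},
    )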

log_batch

class TensorboardWriter(FromParams):
 | ...
 | def log_batch(
 |     self,
 |     model: Model,
 |     optimizer: Optimizer,
 |     batch_grad_norm: Optional[float],
 |     metrics: Dict[str, float],
 |     batch_group: List[List[TensorDict]],
 |     param_updates: Optional[Dict[str, torch.Tensor]]
 | ) -> None
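
log_batch is the per-batch entry point that the Trainer calls after each forward/backward pass. The sketch below shows the shape of such a call with placeholder values; batch_group is a list of lists of TensorDicts (the batches that made up this step), and param_updates is only needed when histogram logging is enabled.

    # Sketch of the Trainer's per-batch call (placeholder values throughout).
    writer.log_batch(
        model,                     # the Model being trained
        optimizer,                 # its torch Optimizer
        batch_grad_norm=2.5,       # gradient norm for this batch, if computed
        metrics={"loss": 0.73},    # running training metrics
        batch_group=[[batch]],     # `batch` is a TensorDict from the data loader
        param_updates=None,        # parameter deltas; see log_gradient_updates below
    )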

reset_epoch

class TensorboardWriter(FromParams):
 | ...
 | def reset_epoch(self) -> None

should_log_this_batch

class TensorboardWriter(FromParams):
 | ...
 | def should_log_this_batch(self) -> bool

should_log_histograms_this_batch

class TensorboardWriter(FromParams):
 | ...
 | def should_log_histograms_this_batch(self) -> bool
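
Both predicates compare the current value of get_batch_num_total() against the configured summary_interval and histogram_interval, which gives the usual gating pattern (a sketch):

    if writer.should_log_this_batch():
        writer.add_train_scalar("loss", 0.73)

    if writer.should_log_histograms_this_batch():
        writer.log_histograms(model)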

add_train_scalar

class TensorboardWriter(FromParams):
 | ...
 | def add_train_scalar(
 |     self,
 |     name: str,
 |     value: float,
 |     timestep: Optional[int] = None
 | ) -> None

add_train_histogram

class TensorboardWriter(FromParams):
 | ...
 | def add_train_histogram(self, name: str, values: torch.Tensor) -> None

add_validation_scalar

class TensorboardWriter(FromParams):
 | ...
 | def add_validation_scalar(
 |     self,
 |     name: str,
 |     value: float,
 |     timestep: Optional[int] = None
 | ) -> None
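
The add_* methods are the low-level primitives that the higher-level log_* methods build on; when timestep is omitted, the current value of get_batch_num_total() is used. An illustrative sketch:

    import torch

    writer.add_train_scalar("gradient_norm", 2.5)        # timestep defaults to the batch counter
    writer.add_train_scalar("loss", 0.73, timestep=500)  # or pass an explicit step
    writer.add_train_histogram("token_embedder_weight", torch.randn(100))
    writer.add_validation_scalar("accuracy", 0.91, timestep=500)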

log_parameter_and_gradient_statistics

class TensorboardWriter(FromParams):
 | ...
 | def log_parameter_and_gradient_statistics(
 |     self,
 |     model: Model,
 |     batch_grad_norm: Optional[float] = None
 | ) -> None

Sends the mean and standard deviation of all parameters and gradients to Tensorboard, and also logs the average gradient norm.

log_learning_rates

class TensorboardWriter(FromParams):
 | ...
 | def log_learning_rates(self, model: Model, optimizer: Optimizer)

Sends the current parameter-specific learning rates to Tensorboard.

log_histograms

class TensorboardWriter(FromParams):
 | ...
 | def log_histograms(self, model: Model) -> None

Sends histograms of model parameters to Tensorboard.

log_gradient_updates

class TensorboardWriter(FromParams):
 | ...
 | def log_gradient_updates(
 |     self,
 |     model: Model,
 |     param_updates: Dict[str, torch.Tensor]
 | ) -> None
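
param_updates maps each parameter name to the change that parameter underwent during the optimizer step; the method logs the ratio of each update's norm to the parameter's norm. The Trainer builds this dict by snapshotting parameters before stepping, a pattern sketched (not quoted) below:

    # Snapshot parameters, step the optimizer, then compute the deltas
    # (the sign is irrelevant, since only the norm of each delta is used).
    param_updates = {
        name: param.detach().cpu().clone()
        for name, param in model.named_parameters()
    }
    optimizer.step()
    for name, param in model.named_parameters():
        param_updates[name].sub_(param.detach().cpu())
    writer.log_gradient_updates(model, param_updates)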

log_metrics

class TensorboardWriter(FromParams):
 | ...
 | def log_metrics(
 |     self,
 |     train_metrics: dict,
 |     val_metrics: Optional[dict] = None,
 |     epoch: Optional[int] = None,
 |     log_to_console: bool = False
 | ) -> None

Sends all of the train metrics (and validation metrics, if provided) to Tensorboard.
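
A sketch of the typical end-of-epoch call, with made-up metric values:

    writer.log_metrics(
        train_metrics={"loss": 0.42, "accuracy": 0.88},
        val_metrics={"loss": 0.51, "accuracy": 0.85},
        epoch=3,
        log_to_console=True,  # also echo the metrics to the logger
    )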

enable_activation_logging

class TensorboardWriter(FromParams):
 | ...
 | def enable_activation_logging(self, model: Model) -> None
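
Activation logging is opt-in per module: as described under histogram_interval above, a submodule's outputs are histogram-logged only if its should_log_activations attribute is True. A hedged sketch of opting a (hypothetical) encoder submodule in before training starts:

    # `encoder` is a hypothetical submodule name on your Model.
    model.encoder.should_log_activations = True
    writer.enable_activation_logging(model)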

log_activation_histogram

class TensorboardWriter(FromParams):
 | ...
 | def log_activation_histogram(self, outputs, log_prefix: str) -> None

close

class TensorboardWriter(FromParams):
 | ...
 | def close(self) -> None

Calls the close method of the SummaryWriters, which makes sure that pending scalars are flushed to disk and the Tensorboard event files are closed properly.
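
Because the event files are only guaranteed to be complete after close is called, it is worth closing the writer even when training exits with an error; a sketch:

    try:
        ...  # training loop: log_batch, log_metrics, and so on
    finally:
        writer.close()  # flush pending scalars and close the event files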