Skip to content

Reference for ultralytics/utils/callbacks/tensorboard.py

Note

This file is available at https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/tensorboard.py. If you spot a problem please help fix it by contributing a Pull Request 🛠️. Thank you 🙏!


ultralytics.utils.callbacks.tensorboard._log_scalars

_log_scalars(scalars, step=0)

Logs scalar values to TensorBoard.

Source code in ultralytics/utils/callbacks/tensorboard.py
def _log_scalars(scalars, step=0):
    """Write each scalar in `scalars` (a tag -> value mapping) to TensorBoard at the given step."""
    if not WRITER:  # no SummaryWriter was initialized for this run
        return
    for tag, value in scalars.items():
        WRITER.add_scalar(tag, value, step)





ultralytics.utils.callbacks.tensorboard._log_tensorboard_graph

_log_tensorboard_graph(trainer)

Log model graph to TensorBoard.

Source code in ultralytics/utils/callbacks/tensorboard.py
def _log_tensorboard_graph(trainer):
    """
    Log the model graph to TensorBoard.

    Builds a zero-filled dummy input sized from ``trainer.args.imgsz`` on the model's
    device/dtype, then attempts a simple ``torch.jit.trace`` of the de-parallelized
    model (works for YOLO). If that fails, falls back to tracing a fused copy of the
    model with export flags set (needed for RTDETR-style heads).

    Args:
        trainer: Trainer exposing ``args.imgsz`` and ``model``; errors are logged, not raised.
    """
    # Input image: must be zeros, not torch.empty — uninitialized data can break tracing
    imgsz = trainer.args.imgsz
    imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz
    p = next(trainer.model.parameters())  # for device, type
    im = torch.zeros((1, 3, *imgsz), device=p.device, dtype=p.dtype)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)  # suppress jit trace warning
        warnings.simplefilter("ignore", category=torch.jit.TracerWarning)  # suppress jit trace warning

        # Try simple method first (YOLO)
        try:
            trainer.model.eval()  # place in .eval() mode to avoid BatchNorm statistics changes
            WRITER.add_graph(torch.jit.trace(de_parallel(trainer.model), im, strict=False), [])
            LOGGER.info(f"{PREFIX}model graph visualization added ✅")
            return

        except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt/SystemExit
            # Fallback to TorchScript export steps (RTDETR)
            try:
                model = deepcopy(de_parallel(trainer.model))  # copy so export flags don't mutate the training model
                model.eval()
                model = model.fuse(verbose=False)
                for m in model.modules():
                    if hasattr(m, "export"):  # Detect, RTDETRDecoder (Segment and Pose use Detect base class)
                        m.export = True
                        m.format = "torchscript"
                model(im)  # dry run
                WRITER.add_graph(torch.jit.trace(model, im, strict=False), [])
                LOGGER.info(f"{PREFIX}model graph visualization added ✅")
            except Exception as e:
                LOGGER.warning(f"{PREFIX}WARNING ⚠️ TensorBoard graph visualization failure {e}")





ultralytics.utils.callbacks.tensorboard.on_pretrain_routine_start

on_pretrain_routine_start(trainer)

Initialize TensorBoard logging with SummaryWriter.

Source code in ultralytics/utils/callbacks/tensorboard.py
def on_pretrain_routine_start(trainer):
    """Create the module-level SummaryWriter pointed at the run's save directory."""
    if not SummaryWriter:  # tensorboard is unavailable; skip logging entirely
        return
    try:
        global WRITER
        WRITER = SummaryWriter(str(trainer.save_dir))
        LOGGER.info(f"{PREFIX}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/")
    except Exception as e:
        LOGGER.warning(f"{PREFIX}WARNING ⚠️ TensorBoard not initialized correctly, not logging this run. {e}")





ultralytics.utils.callbacks.tensorboard.on_train_start

on_train_start(trainer)

Log TensorBoard graph.

Source code in ultralytics/utils/callbacks/tensorboard.py
def on_train_start(trainer):
    """Add the model graph to TensorBoard once training starts, if a writer exists."""
    if not WRITER:
        return
    _log_tensorboard_graph(trainer)





ultralytics.utils.callbacks.tensorboard.on_train_epoch_end

on_train_epoch_end(trainer)

Logs scalar statistics at the end of a training epoch.

Source code in ultralytics/utils/callbacks/tensorboard.py
def on_train_epoch_end(trainer):
    """Log training losses and learning rates for the just-finished epoch."""
    step = trainer.epoch + 1  # epochs are 0-indexed; log steps as 1-indexed
    _log_scalars(trainer.label_loss_items(trainer.tloss, prefix="train"), step)
    _log_scalars(trainer.lr, step)





ultralytics.utils.callbacks.tensorboard.on_fit_epoch_end

on_fit_epoch_end(trainer)

Logs epoch metrics at end of training epoch.

Source code in ultralytics/utils/callbacks/tensorboard.py
def on_fit_epoch_end(trainer):
    """Log the trainer's fit/validation metrics at the end of the epoch."""
    _log_scalars(trainer.metrics, step=trainer.epoch + 1)




📅 Created 11 months ago ✏️ Updated 1 month ago