
zamba.models.slowfast_models

Classes

SlowFast

Bases: ZambaVideoClassificationLightningModule

Pretrained SlowFast model for fine-tuning with the following architecture:

Input -> SlowFast Base (including trainable Backbone) -> Res Basic Head -> Output

Attributes:

backbone (torch.nn.Module)
    The trainable part of the base; when scheduling the backbone to train with the
    BackboneFinetune callback, this is the module that gets frozen and unfrozen
    (see the sketch below).

base (torch.nn.Module)
    The entire model prior to the head.

head (torch.nn.Module)
    The trainable head.

_backbone_output_dim (int)
    Dimensionality of the backbone output (and head input).
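
The backbone attribute exists so that PyTorch Lightning's BackboneFinetuning callback can keep the backbone frozen for the first epochs and then unfreeze it on a schedule. A minimal sketch, assuming the base ZambaVideoClassificationLightningModule accepts a species keyword argument (as the instance attributes further down suggest); the callback values and species names are illustrative:

import pytorch_lightning as pl
from pytorch_lightning.callbacks import BackboneFinetuning

from zamba.models.slowfast_models import SlowFast

# BackboneFinetuning looks for a `backbone` attribute on the LightningModule,
# keeps it frozen initially, and unfreezes it at the given epoch.
finetune_callback = BackboneFinetuning(
    unfreeze_backbone_at_epoch=3,       # illustrative epoch
    backbone_initial_ratio_lr=0.01,     # backbone starts at 1% of the head learning rate
)

model = SlowFast(species=["blank", "elephant", "leopard"])  # assumed base-class kwarg

trainer = pl.Trainer(max_epochs=10, callbacks=[finetune_callback])
# trainer.fit(model, datamodule=...)    # data module omitted from this sketch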

Source code in zamba/models/slowfast_models.py
@register_model
class SlowFast(ZambaVideoClassificationLightningModule):
    """Pretrained SlowFast model for fine-tuning with the following architecture:

    Input -> SlowFast Base (including trainable Backbone) -> Res Basic Head -> Output

    Attributes:
        backbone (torch.nn.Module): When scheduling the backbone to train with the
            `BackboneFinetune` callback, this indicates the trainable part of the base.
        base (torch.nn.Module): The entire model prior to the head.
        head (torch.nn.Module): The trainable head.
        _backbone_output_dim (int): Dimensionality of the backbone output (and head input).
    """

    _default_model_name = "slowfast"  # used to look up default configuration for checkpoints

    def __init__(
        self,
        backbone_mode: str = "train",
        post_backbone_dropout: Optional[float] = None,
        output_with_global_average: bool = True,
        head_dropout_rate: Optional[float] = None,
        head_hidden_layer_sizes: Optional[Tuple[int]] = None,
        finetune_from: Optional[Union[os.PathLike, str]] = None,
        **kwargs,
    ):
        """Initializes the SlowFast model.

        Args:
            backbone_mode (str): If "eval", treat the backbone as a feature extractor
                and set to evaluation mode in all forward passes.
            post_backbone_dropout (float, optional): Dropout that operates on the output of the
                backbone + pool (before the fully-connected layer in the head).
            output_with_global_average (bool): If True, apply an adaptive average pooling
                operation after the fully-connected layer in the head.
            head_dropout_rate (float, optional): Optional dropout rate applied after backbone and
                between projection layers in the head.
            head_hidden_layer_sizes (tuple of int): If not None, the size of hidden layers in the
                head multilayer perceptron.
            finetune_from (pathlike or str, optional): If not None, load an existing model from
                the path and resume training from an existing model.
        """
        super().__init__(**kwargs)

        if finetune_from is None:
            self.initialize_from_torchub()
        else:
            model = self.from_disk(finetune_from)
            self._backbone_output_dim = model.head.proj.in_features
            self.backbone = model.backbone
            self.base = model.base

        for param in self.base.parameters():
            param.requires_grad = False

        head = ResNetBasicHead(
            proj=build_multilayer_perceptron(
                self._backbone_output_dim,
                head_hidden_layer_sizes,
                self.num_classes,
                activation=torch.nn.ReLU,
                dropout=head_dropout_rate,
                output_activation=None,
            ),
            activation=None,
            pool=None,
            dropout=None
            if post_backbone_dropout is None
            else torch.nn.Dropout(post_backbone_dropout),
            output_pool=torch.nn.AdaptiveAvgPool3d(1),
        )

        self.backbone_mode = backbone_mode
        self.head = head

        self.save_hyperparameters(
            "backbone_mode",
            "head_dropout_rate",
            "head_hidden_layer_sizes",
            "output_with_global_average",
            "post_backbone_dropout",
        )

    def initialize_from_torchub(self):
        """Loads SlowFast model from torchhub and prepares ZambaVideoClassificationLightningModule
        by removing the head and setting the backbone and base."""

        # workaround for pytorch bug
        torch.hub._validate_not_a_forked_repo = lambda a, b, c: True
        base = torch.hub.load(
            "facebookresearch/pytorchvideo:0.1.3", model="slowfast_r50", pretrained=True
        )
        self._backbone_output_dim = base.blocks[-1].proj.in_features

        base.blocks = base.blocks[:-1]  # Remove the pre-trained head

        # self.backbone attribute lets `BackboneFinetune` freeze and unfreeze that module
        self.backbone = base.blocks[-2:]
        self.base = base

    def forward(self, x, *args, **kwargs):
        if self.backbone_mode == "eval":
            self.base.eval()

        x = self.base(x)
        return self.head(x)

Attributes

backbone = model.backbone instance-attribute
backbone_mode = backbone_mode instance-attribute
base = model.base instance-attribute
head = head instance-attribute
lr = lr instance-attribute
model_class = type(self).__name__ instance-attribute
num_classes = len(species) instance-attribute
scheduler = torch.optim.lr_scheduler.__dict__[scheduler] instance-attribute
scheduler_params = scheduler_params instance-attribute
species = species instance-attribute
test_step_outputs = [] instance-attribute
training_step_outputs = [] instance-attribute
validation_step_outputs = [] instance-attribute

Functions

__init__(backbone_mode: str = 'train', post_backbone_dropout: Optional[float] = None, output_with_global_average: bool = True, head_dropout_rate: Optional[float] = None, head_hidden_layer_sizes: Optional[Tuple[int]] = None, finetune_from: Optional[Union[os.PathLike, str]] = None, **kwargs)

Initializes the SlowFast model.

Parameters:

backbone_mode (str, default 'train')
    If "eval", treat the backbone as a feature extractor and set it to evaluation
    mode in all forward passes.

post_backbone_dropout (float, optional, default None)
    Dropout applied to the output of the backbone + pool (before the
    fully-connected layer in the head).

output_with_global_average (bool, default True)
    If True, apply an adaptive average pooling operation after the
    fully-connected layer in the head.

head_dropout_rate (float, optional, default None)
    Dropout rate applied after the backbone and between projection layers in the head.

head_hidden_layer_sizes (tuple of int, optional, default None)
    If not None, the sizes of the hidden layers in the head multilayer perceptron.

finetune_from (path-like or str, optional, default None)
    If not None, load an existing model from this path and resume training from it.
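
A minimal construction sketch, again assuming that species (and other keyword arguments) are forwarded to the base ZambaVideoClassificationLightningModule; the species list and checkpoint path are placeholders:

from zamba.models.slowfast_models import SlowFast

# Fresh model: SlowFast R50 weights come from torch.hub and a new MLP head
# is built on top of the backbone output.
model = SlowFast(
    species=["blank", "elephant", "leopard"],  # assumed base-class kwarg
    head_hidden_layer_sizes=(256,),            # one hidden layer in the head MLP
    head_dropout_rate=0.2,
    post_backbone_dropout=0.1,
)

# Or resume from an existing zamba checkpoint instead of the torch.hub weights.
resumed = SlowFast(finetune_from="my_slowfast.ckpt")  # placeholder path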
Source code in zamba/models/slowfast_models.py
def __init__(
    self,
    backbone_mode: str = "train",
    post_backbone_dropout: Optional[float] = None,
    output_with_global_average: bool = True,
    head_dropout_rate: Optional[float] = None,
    head_hidden_layer_sizes: Optional[Tuple[int]] = None,
    finetune_from: Optional[Union[os.PathLike, str]] = None,
    **kwargs,
):
    """Initializes the SlowFast model.

    Args:
        backbone_mode (str): If "eval", treat the backbone as a feature extractor
            and set to evaluation mode in all forward passes.
        post_backbone_dropout (float, optional): Dropout that operates on the output of the
            backbone + pool (before the fully-connected layer in the head).
        output_with_global_average (bool): If True, apply an adaptive average pooling
            operation after the fully-connected layer in the head.
        head_dropout_rate (float, optional): Optional dropout rate applied after backbone and
            between projection layers in the head.
        head_hidden_layer_sizes (tuple of int): If not None, the size of hidden layers in the
            head multilayer perceptron.
        finetune_from (pathlike or str, optional): If not None, load an existing model from
            the path and resume training from an existing model.
    """
    super().__init__(**kwargs)

    if finetune_from is None:
        self.initialize_from_torchub()
    else:
        model = self.from_disk(finetune_from)
        self._backbone_output_dim = model.head.proj.in_features
        self.backbone = model.backbone
        self.base = model.base

    for param in self.base.parameters():
        param.requires_grad = False

    head = ResNetBasicHead(
        proj=build_multilayer_perceptron(
            self._backbone_output_dim,
            head_hidden_layer_sizes,
            self.num_classes,
            activation=torch.nn.ReLU,
            dropout=head_dropout_rate,
            output_activation=None,
        ),
        activation=None,
        pool=None,
        dropout=None
        if post_backbone_dropout is None
        else torch.nn.Dropout(post_backbone_dropout),
        output_pool=torch.nn.AdaptiveAvgPool3d(1),
    )

    self.backbone_mode = backbone_mode
    self.head = head

    self.save_hyperparameters(
        "backbone_mode",
        "head_dropout_rate",
        "head_hidden_layer_sizes",
        "output_with_global_average",
        "post_backbone_dropout",
    )
aggregate_step_outputs(outputs: Dict[str, np.ndarray]) -> Tuple[np.ndarray, np.ndarray, np.ndarray] staticmethod
Source code in zamba/pytorch_lightning/utils.py
@staticmethod
def aggregate_step_outputs(
    outputs: Dict[str, np.ndarray]
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    y_true = np.vstack([output["y_true"] for output in outputs])
    y_pred = np.vstack([output["y_pred"] for output in outputs])
    y_proba = np.vstack([output["y_proba"] for output in outputs])

    return y_true, y_pred, y_proba
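
A tiny illustration of what this staticmethod does with the per-step output dictionaries (shapes and values are made up):

import numpy as np
from zamba.models.slowfast_models import SlowFast

outputs = [
    {"y_true": np.array([[1, 0]]), "y_pred": np.array([[1, 0]]), "y_proba": np.array([[0.9, 0.4]])},
    {"y_true": np.array([[0, 1]]), "y_pred": np.array([[0, 1]]), "y_proba": np.array([[0.2, 0.8]])},
]

y_true, y_pred, y_proba = SlowFast.aggregate_step_outputs(outputs)
y_true.shape  # (2, 2): one row per step output, stacked with np.vstack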
compute_and_log_metrics(y_true: np.ndarray, y_pred: np.ndarray, y_proba: np.ndarray, subset: str)
Source code in zamba/pytorch_lightning/utils.py
def compute_and_log_metrics(
    self, y_true: np.ndarray, y_pred: np.ndarray, y_proba: np.ndarray, subset: str
):
    self.log(
        f"{subset}_macro_f1",
        f1_score(y_true, y_pred, average="macro", zero_division=0),
    )

    # if only two classes, skip top_k accuracy since not enough classes
    if self.num_classes > 2:
        for k in DEFAULT_TOP_K:
            if k < self.num_classes:
                self.log(
                    f"{subset}_top_{k}_accuracy",
                    top_k_accuracy_score(
                        y_true.argmax(
                            axis=1
                        ),  # top k accuracy only supports single label case
                        y_proba,
                        labels=np.arange(y_proba.shape[1]),
                        k=k,
                    ),
                )
    else:
        self.log(f"{subset}_accuracy", accuracy_score(y_true, y_pred))

    for metric_name, label, metric in compute_species_specific_metrics(
        y_true, y_pred, self.species
    ):
        self.log(f"species/{subset}_{metric_name}/{label}", metric)
configure_optimizers()

Set up the Adam optimizer. Note that this function can also return a learning rate scheduler, which is often useful when training video models.

Source code in zamba/pytorch_lightning/utils.py
def configure_optimizers(self):
    """
    Setup the Adam optimizer. Note, that this function also can return a lr scheduler, which is
    usually useful for training video models.
    """
    optim = self._get_optimizer()

    if self.scheduler is None:
        return optim
    else:
        return {
            "optimizer": optim,
            "lr_scheduler": self.scheduler(
                optim, **({} if self.scheduler_params is None else self.scheduler_params)
            ),
        }
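
The scheduler listed in the instance attributes is resolved by name from torch.optim.lr_scheduler, so a hedged sketch of configuring one (assuming scheduler, scheduler_params, lr, and species are base-class keyword arguments, as those attributes suggest):

from zamba.models.slowfast_models import SlowFast

model = SlowFast(
    species=["blank", "elephant", "leopard"],               # assumed base-class kwarg
    lr=1e-3,                                                # assumed base-class kwarg
    scheduler="MultiStepLR",                                # any class name in torch.optim.lr_scheduler
    scheduler_params={"milestones": [3, 6], "gamma": 0.5},
)

# configure_optimizers() then returns
# {"optimizer": Adam(...), "lr_scheduler": MultiStepLR(optimizer, milestones=[3, 6], gamma=0.5)}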
forward(x, *args, **kwargs)
Source code in zamba/models/slowfast_models.py
def forward(self, x, *args, **kwargs):
    if self.backbone_mode == "eval":
        self.base.eval()

    x = self.base(x)
    return self.head(x)
from_disk(path: os.PathLike, **kwargs) classmethod
Source code in zamba/pytorch_lightning/utils.py
@classmethod
def from_disk(cls, path: os.PathLike, **kwargs):
    # note: we always load models onto CPU; moving to GPU is handled by `devices` in pl.Trainer
    return cls.load_from_checkpoint(path, map_location="cpu", **kwargs)
initialize_from_torchub()

Loads SlowFast model from torchhub and prepares ZambaVideoClassificationLightningModule by removing the head and setting the backbone and base.

Source code in zamba/models/slowfast_models.py
def initialize_from_torchub(self):
    """Loads SlowFast model from torchhub and prepares ZambaVideoClassificationLightningModule
    by removing the head and setting the backbone and base."""

    # workaround for pytorch bug
    torch.hub._validate_not_a_forked_repo = lambda a, b, c: True
    base = torch.hub.load(
        "facebookresearch/pytorchvideo:0.1.3", model="slowfast_r50", pretrained=True
    )
    self._backbone_output_dim = base.blocks[-1].proj.in_features

    base.blocks = base.blocks[:-1]  # Remove the pre-trained head

    # self.backbone attribute lets `BackboneFinetune` freeze and unfreeze that module
    self.backbone = base.blocks[-2:]
    self.base = base
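
To see what this method slices, you can load the same torch.hub model and inspect its blocks (this downloads the pretrained weights; the printed values are what slowfast_r50 is expected to report):

import torch

net = torch.hub.load(
    "facebookresearch/pytorchvideo:0.1.3", model="slowfast_r50", pretrained=True
)

# The last block is the pretrained classification head; its projection input size
# is what becomes _backbone_output_dim for the new zamba head.
print(type(net.blocks[-1]).__name__)    # ResNetBasicHead
print(net.blocks[-1].proj.in_features)  # typically 2304 for slowfast_r50

# zamba drops that head and keeps the last two remaining blocks as `backbone`.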
on_test_epoch_end()
Source code in zamba/pytorch_lightning/utils.py
def on_test_epoch_end(self):
    y_true, y_pred, y_proba = self.aggregate_step_outputs(self.test_step_outputs)
    self.compute_and_log_metrics(y_true, y_pred, y_proba, subset="test")
    self.test_step_outputs.clear()  # free memory
on_train_start()
Source code in zamba/pytorch_lightning/utils.py
def on_train_start(self):
    metrics = {"val_macro_f1": {}}

    if self.num_classes > 2:
        metrics.update(
            {f"val_top_{k}_accuracy": {} for k in DEFAULT_TOP_K if k < self.num_classes}
        )
    else:
        metrics.update({"val_accuracy": {}})

    # write hparams to hparams.yaml file, log metrics to tb hparams tab
    self.logger.log_hyperparams(self.hparams, metrics)
on_validation_epoch_end()

Aggregates validation_step outputs to compute and log the validation macro F1 and top K metrics.

Parameters:

outputs (List[dict], required)
    List of output dictionaries from each validation step containing y_pred and y_true.
    (As the source below shows, these are now read from self.validation_step_outputs
    rather than passed in as an argument.)
Source code in zamba/pytorch_lightning/utils.py
def on_validation_epoch_end(self):
    """Aggregates validation_step outputs to compute and log the validation macro F1 and top K
    metrics.

    Args:
        outputs (List[dict]): list of output dictionaries from each validation step
            containing y_pred and y_true.
    """
    y_true, y_pred, y_proba = self.aggregate_step_outputs(self.validation_step_outputs)
    self.compute_and_log_metrics(y_true, y_pred, y_proba, subset="val")
    self.validation_step_outputs.clear()  # free memory
predict_step(batch, batch_idx, dataloader_idx: Optional[int] = None)
Source code in zamba/pytorch_lightning/utils.py
def predict_step(self, batch, batch_idx, dataloader_idx: Optional[int] = None):
    x, y = batch
    y_hat = self(x)
    pred = torch.sigmoid(y_hat).cpu().numpy()
    return pred
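
Because predict_step applies a sigmoid per class (matching the multi-label training loss below), each species gets an independent probability rather than a softmax distribution. A small post-processing sketch with made-up values:

import numpy as np

pred = np.array([[0.91, 0.07, 0.33]])   # one row per video, one column per species

labels = (pred > 0.5).astype(int)       # multi-hot labels via a threshold -> [[1, 0, 0]]
top_index = pred.argmax(axis=1)         # or the single most likely species -> [0]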
test_step(batch, batch_idx)
Source code in zamba/pytorch_lightning/utils.py
def test_step(self, batch, batch_idx):
    output = self._val_step(batch, batch_idx)
    self.test_step_outputs.append(output)
    return output
to_disk(path: os.PathLike)

Save out model weights to a checkpoint file on disk.

Note: this does not include callbacks, optimizer_states, or lr_schedulers. To include those, use Trainer.save_checkpoint() instead.

Source code in zamba/pytorch_lightning/utils.py
def to_disk(self, path: os.PathLike):
    """Save out model weights to a checkpoint file on disk.

    Note: this does not include callbacks, optimizer_states, or lr_schedulers.
    To include those, use `Trainer.save_checkpoint()` instead.
    """

    checkpoint = {
        "state_dict": self.state_dict(),
        "hyper_parameters": self.hparams,
        "global_step": self.global_step,
        "pytorch-lightning_version": pl.__version__,
    }
    torch.save(checkpoint, path)
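
A short save/reload sketch using the two methods documented on this page (the species list and file name are placeholders):

from zamba.models.slowfast_models import SlowFast

model = SlowFast(species=["blank", "elephant", "leopard"])  # assumed base-class kwarg

# Writes state_dict + hyperparameters, but not optimizer state or lr schedulers.
model.to_disk("slowfast_finetuned.ckpt")

# Reloads on CPU; moving to GPU is handled by `devices` in pl.Trainer.
reloaded = SlowFast.from_disk("slowfast_finetuned.ckpt")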
training_step(batch, batch_idx)
Source code in zamba/pytorch_lightning/utils.py
def training_step(self, batch, batch_idx):
    x, y = batch
    y_hat = self(x)
    loss = F.binary_cross_entropy_with_logits(y_hat, y)
    self.log("train_loss", loss.detach())
    self.training_step_outputs.append(loss)
    return loss
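
The loss treats each species as an independent binary target (multi-hot labels rather than a single class index). A minimal illustration with made-up logits:

import torch
import torch.nn.functional as F

y_hat = torch.tensor([[2.0, -1.5, 0.3]])  # raw logits for one video, three species
y = torch.tensor([[1.0, 0.0, 1.0]])       # multi-hot float labels: species 0 and 2 are present

loss = F.binary_cross_entropy_with_logits(y_hat, y)  # scalar, averaged over all entries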
validation_step(batch, batch_idx)
Source code in zamba/pytorch_lightning/utils.py
def validation_step(self, batch, batch_idx):
    output = self._val_step(batch, batch_idx)
    self.validation_step_outputs.append(output)
    return output
