
zamba.pytorch_lightning.utils

ZambaVideoClassificationLightningModule

Bases: LightningModule

Source code in zamba/pytorch_lightning/utils.py
class ZambaVideoClassificationLightningModule(LightningModule):
    def __init__(
        self,
        species: List[str],
        lr: float = 1e-3,
        scheduler: Optional[str] = None,
        scheduler_params: Optional[dict] = None,
        **kwargs,
    ):
        super().__init__()
        if (scheduler is None) and (scheduler_params is not None):
            warnings.warn(
                "scheduler_params provided without scheduler. scheduler_params will have no effect."
            )

        self.lr = lr
        self.species = species
        self.num_classes = len(species)

        if scheduler is not None:
            self.scheduler = torch.optim.lr_scheduler.__dict__[scheduler]
        else:
            self.scheduler = scheduler

        self.scheduler_params = scheduler_params
        self.model_class = type(self).__name__

        self.save_hyperparameters("lr", "scheduler", "scheduler_params", "species")
        self.hparams["model_class"] = self.model_class

        self.training_step_outputs = []
        self.validation_step_outputs = []
        self.test_step_outputs = []

    def forward(self, x):
        return self.model(x)

    def on_train_start(self):
        metrics = {"val_macro_f1": {}}

        if self.num_classes > 2:
            metrics.update(
                {f"val_top_{k}_accuracy": {} for k in DEFAULT_TOP_K if k < self.num_classes}
            )
        else:
            metrics.update({"val_accuracy": {}})

        # write hparams to hparams.yaml file, log metrics to tb hparams tab
        self.logger.log_hyperparams(self.hparams, metrics)

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.binary_cross_entropy_with_logits(y_hat, y)
        self.log("train_loss", loss.detach())
        self.training_step_outputs.append(loss)
        return loss

    def _val_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.binary_cross_entropy_with_logits(y_hat, y)
        self.log("val_loss", loss.detach())

        y_proba = torch.sigmoid(y_hat.cpu()).numpy()
        return {
            "y_true": y.cpu().numpy().astype(int),
            "y_pred": y_proba.round().astype(int),
            "y_proba": y_proba,
        }

    def validation_step(self, batch, batch_idx):
        output = self._val_step(batch, batch_idx)
        self.validation_step_outputs.append(output)
        return output

    def test_step(self, batch, batch_idx):
        output = self._val_step(batch, batch_idx)
        self.test_step_outputs.append(output)
        return output

    @staticmethod
    def aggregate_step_outputs(
        outputs: Dict[str, np.ndarray]
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        y_true = np.vstack([output["y_true"] for output in outputs])
        y_pred = np.vstack([output["y_pred"] for output in outputs])
        y_proba = np.vstack([output["y_proba"] for output in outputs])

        return y_true, y_pred, y_proba

    def compute_and_log_metrics(
        self, y_true: np.ndarray, y_pred: np.ndarray, y_proba: np.ndarray, subset: str
    ):
        self.log(
            f"{subset}_macro_f1",
            f1_score(y_true, y_pred, average="macro", zero_division=0),
        )

        # if only two classes, skip top_k accuracy since not enough classes
        if self.num_classes > 2:
            for k in DEFAULT_TOP_K:
                if k < self.num_classes:
                    self.log(
                        f"{subset}_top_{k}_accuracy",
                        top_k_accuracy_score(
                            y_true.argmax(
                                axis=1
                            ),  # top k accuracy only supports single label case
                            y_proba,
                            labels=np.arange(y_proba.shape[1]),
                            k=k,
                        ),
                    )
        else:
            self.log(f"{subset}_accuracy", accuracy_score(y_true, y_pred))

        for metric_name, label, metric in compute_species_specific_metrics(
            y_true, y_pred, self.species
        ):
            self.log(f"species/{subset}_{metric_name}/{label}", metric)

    def on_validation_epoch_end(self):
        """Aggregates validation_step outputs to compute and log the validation macro F1 and top K
        metrics.

        Args:
            outputs (List[dict]): list of output dictionaries from each validation step
                containing y_pred and y_true.
        """
        y_true, y_pred, y_proba = self.aggregate_step_outputs(self.validation_step_outputs)
        self.compute_and_log_metrics(y_true, y_pred, y_proba, subset="val")
        self.validation_step_outputs.clear()  # free memory

    def on_test_epoch_end(self):
        y_true, y_pred, y_proba = self.aggregate_step_outputs(self.test_step_outputs)
        self.compute_and_log_metrics(y_true, y_pred, y_proba, subset="test")
        self.test_step_outputs.clear()  # free memory

    def predict_step(self, batch, batch_idx, dataloader_idx: Optional[int] = None):
        x, y = batch
        y_hat = self(x)
        pred = torch.sigmoid(y_hat).cpu().numpy()
        return pred

    def _get_optimizer(self):
        return torch.optim.Adam(filter(lambda p: p.requires_grad, self.parameters()), lr=self.lr)

    def configure_optimizers(self):
        """
        Setup the Adam optimizer. Note, that this function also can return a lr scheduler, which is
        usually useful for training video models.
        """
        optim = self._get_optimizer()

        if self.scheduler is None:
            return optim
        else:
            return {
                "optimizer": optim,
                "lr_scheduler": self.scheduler(
                    optim, **({} if self.scheduler_params is None else self.scheduler_params)
                ),
            }

    def to_disk(self, path: os.PathLike):
        """Save out model weights to a checkpoint file on disk.

        Note: this does not include callbacks, optimizer_states, or lr_schedulers.
        To include those, use `Trainer.save_checkpoint()` instead.
        """

        checkpoint = {
            "state_dict": self.state_dict(),
            "hyper_parameters": self.hparams,
            "global_step": self.global_step,
            "pytorch-lightning_version": pl.__version__,
        }
        torch.save(checkpoint, path)

    @classmethod
    def from_disk(cls, path: os.PathLike, **kwargs):
        # note: we always load models onto CPU; moving to GPU is handled by `devices` in pl.Trainer
        return cls.load_from_checkpoint(path, map_location="cpu", **kwargs)
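
The module expects subclasses to assign a self.model whose output has one logit per species, since forward simply calls self.model(x). Below is a minimal sketch of that pattern; the ToyVideoModule name, the stand-in backbone, and the (batch, channels, frames, height, width) input shape are illustrative assumptions, not part of zamba.

import torch
from torch import nn

from zamba.pytorch_lightning.utils import ZambaVideoClassificationLightningModule


class ToyVideoModule(ZambaVideoClassificationLightningModule):
    def __init__(self, species, **kwargs):
        super().__init__(species=species, **kwargs)
        # stand-in backbone: average-pool the video, then one logit per species
        self.model = nn.Sequential(
            nn.AdaptiveAvgPool3d(1),
            nn.Flatten(),
            nn.LazyLinear(self.num_classes),
        )


module = ToyVideoModule(species=["elephant", "leopard", "blank"], lr=1e-4)
logits = module(torch.randn(2, 3, 16, 224, 224))  # assumed (batch, channels, frames, h, w)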

configure_optimizers()

Set up the Adam optimizer. Note that this function can also return an lr scheduler, which is often useful when training video models.

Source code in zamba/pytorch_lightning/utils.py
def configure_optimizers(self):
    """
    Setup the Adam optimizer. Note, that this function also can return a lr scheduler, which is
    usually useful for training video models.
    """
    optim = self._get_optimizer()

    if self.scheduler is None:
        return optim
    else:
        return {
            "optimizer": optim,
            "lr_scheduler": self.scheduler(
                optim, **({} if self.scheduler_params is None else self.scheduler_params)
            ),
        }
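
Because the constructor resolves the scheduler by name via torch.optim.lr_scheduler.__dict__[scheduler], any class name from torch.optim.lr_scheduler can be passed as a string together with its keyword arguments in scheduler_params. The following is a minimal sketch of the dictionary this method returns when a scheduler is configured; the MultiStepLR choice and its parameters are illustrative assumptions.

import torch

# resolve the scheduler class by name, exactly as __init__ does
scheduler_cls = torch.optim.lr_scheduler.__dict__["MultiStepLR"]

params = [torch.nn.Parameter(torch.zeros(1))]
optim = torch.optim.Adam(params, lr=1e-3)

# mirrors the return value of configure_optimizers when self.scheduler is set
config = {
    "optimizer": optim,
    "lr_scheduler": scheduler_cls(optim, milestones=[3, 6], gamma=0.5),
}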

on_validation_epoch_end()

Aggregates validation_step outputs to compute and log the validation macro F1 and top K metrics.

Outputs are read from self.validation_step_outputs, a list of dictionaries from each validation step containing y_true, y_pred, and y_proba.
Source code in zamba/pytorch_lightning/utils.py
def on_validation_epoch_end(self):
    """Aggregates validation_step outputs to compute and log the validation macro F1 and top K
    metrics.

    Args:
        outputs (List[dict]): list of output dictionaries from each validation step
            containing y_pred and y_true.
    """
    y_true, y_pred, y_proba = self.aggregate_step_outputs(self.validation_step_outputs)
    self.compute_and_log_metrics(y_true, y_pred, y_proba, subset="val")
    self.validation_step_outputs.clear()  # free memory
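
Each validation_step appends a dictionary of (batch_size, num_classes) arrays, and aggregate_step_outputs stacks them along the batch dimension before metrics are computed. A small illustrative sketch with three species:

import numpy as np

from zamba.pytorch_lightning.utils import ZambaVideoClassificationLightningModule

outputs = [
    {
        "y_true": np.array([[1, 0, 0], [0, 1, 0]]),
        "y_pred": np.array([[1, 0, 0], [0, 0, 1]]),
        "y_proba": np.array([[0.9, 0.05, 0.05], [0.2, 0.3, 0.5]]),
    },
    {
        "y_true": np.array([[0, 0, 1]]),
        "y_pred": np.array([[0, 0, 1]]),
        "y_proba": np.array([[0.1, 0.2, 0.7]]),
    },
]

# aggregate_step_outputs is a staticmethod, so it can be called on the class
y_true, y_pred, y_proba = ZambaVideoClassificationLightningModule.aggregate_step_outputs(outputs)
assert y_true.shape == y_pred.shape == y_proba.shape == (3, 3)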

to_disk(path)

Save out model weights to a checkpoint file on disk.

Note: this does not include callbacks, optimizer_states, or lr_schedulers. To include those, use Trainer.save_checkpoint() instead.

Source code in zamba/pytorch_lightning/utils.py
def to_disk(self, path: os.PathLike):
    """Save out model weights to a checkpoint file on disk.

    Note: this does not include callbacks, optimizer_states, or lr_schedulers.
    To include those, use `Trainer.save_checkpoint()` instead.
    """

    checkpoint = {
        "state_dict": self.state_dict(),
        "hyper_parameters": self.hparams,
        "global_step": self.global_step,
        "pytorch-lightning_version": pl.__version__,
    }
    torch.save(checkpoint, path)
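
A sketch of a save/load round trip; module is assumed to be an instantiated subclass such as the one sketched earlier. from_disk wraps load_from_checkpoint with map_location="cpu", and the saved hyper_parameters (species, lr, etc.) are used to rebuild the module.

# saves state_dict, hparams, global_step, and the pytorch-lightning version,
# but no optimizer, scheduler, or callback state
module.to_disk("weights.ckpt")

# always loads onto CPU; move to GPU via the `devices` argument of pl.Trainer
restored = type(module).from_disk("weights.ckpt")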