Scalable AI Models with PyTorch Lightning
Sergiy Tkachuk
Director, GenAI Productivity
def validation_step(self, batch, batch_idx):
    """Compute and log cross-entropy loss for one validation batch.

    Args:
        batch: tuple ``(x, y)`` of inputs and integer class targets.
        batch_idx: index of the batch within the epoch (unused here).

    Returns:
        dict with key ``'loss'`` so that ``validation_epoch_end`` can
        aggregate per-batch losses from its ``outputs`` argument.
    """
    x, y = batch
    preds = self(x)
    loss = F.cross_entropy(preds, y)
    self.log('val_loss', loss)
    # Bug fix: the original returned None, but the companion
    # validation_epoch_end hook stacks x['loss'] over outputs —
    # without this return it would raise TypeError on None.
    return {'loss': loss}
def validation_epoch_end(self, outputs):
    """Aggregate per-batch validation losses into an epoch-level mean.

    Args:
        outputs: list of per-batch dicts, each carrying a ``'loss'`` tensor
            as returned by ``validation_step``.
    """
    batch_losses = []
    for step_output in outputs:
        batch_losses.append(step_output['loss'])
    avg_loss = torch.mean(torch.stack(batch_losses))
    self.log('avg_val_loss', avg_loss)
def test_step(self, batch, batch_idx):
    """Compute and log cross-entropy loss for one test batch.

    Args:
        batch: tuple ``(x, y)`` of inputs and integer class targets.
        batch_idx: index of the batch within the epoch (unused here).

    Returns:
        dict with key ``'loss'`` so that ``test_epoch_end`` can
        aggregate per-batch losses from its ``outputs`` argument.
    """
    x, y = batch
    y_hat = self(x)
    loss = F.cross_entropy(y_hat, y)
    self.log('test_loss', loss)
    # Bug fix: the original returned None, but test_epoch_end stacks
    # x['loss'] over outputs — it needs each step to return this dict.
    return {'loss': loss}
def test_epoch_end(self, outputs):
    """Aggregate per-batch test losses into an epoch-level mean.

    Args:
        outputs: list of per-batch dicts, each carrying a ``'loss'`` tensor
            as returned by ``test_step``.
    """
    collected = [entry['loss'] for entry in outputs]
    avg_loss = torch.mean(torch.stack(collected))
    self.log('avg_test_loss', avg_loss)
$$
from torchmetrics import Accuracy

class BaseModel(pl.LightningModule):
    """LightningModule base that owns a torchmetrics Accuracy instance,
    used by validation_step to log batch accuracy."""

    def __init__(self):
        super().__init__()
        # NOTE(review): recent torchmetrics releases require a task argument,
        # e.g. Accuracy(task="multiclass", num_classes=...); the bare call
        # here only works on older versions — confirm the pinned version.
        self.accuracy = Accuracy()
def validation_step(self, batch, batch_idx):
    """Compute and log batch-level validation accuracy via the module's
    ``self.accuracy`` metric.

    Args:
        batch: tuple ``(x, y)`` of inputs and targets.
        batch_idx: index of the batch within the epoch (unused here).
    """
    inputs, targets = batch
    logits = self(inputs)
    batch_acc = self.accuracy(logits, targets)
    self.log('val_acc', batch_acc)
Data logic centralized in DataModule
Consistent train/val/test data splits
Automatic validation metric logging
Reproducible pipeline from prep to reporting
Scalable AI Models with PyTorch Lightning