Chapter 12: Examples

A few examples created using babygrad.

12.1 Simple MNIST

The data/ folder contains the four gzipped MNIST files (a download sketch follows the list):

  • t10k-images-idx3-ubyte.gz
  • t10k-labels-idx1-ubyte.gz
  • train-images-idx3-ubyte.gz
  • train-labels-idx1-ubyte.gz
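
If these files are not present yet, a short script along the following lines can fetch them. This is only a sketch: the mirror URL here is an assumption, so substitute whichever MNIST mirror you prefer.

import os
import urllib.request

# Assumed mirror URL; any mirror hosting the four .gz files works.
MIRROR = "https://ossci-datasets.s3.amazonaws.com/mnist/"
FILES = [
    "train-images-idx3-ubyte.gz",
    "train-labels-idx1-ubyte.gz",
    "t10k-images-idx3-ubyte.gz",
    "t10k-labels-idx1-ubyte.gz",
]

os.makedirs("data", exist_ok=True)
for name in FILES:
    path = os.path.join("data", name)
    if not os.path.exists(path):
        urllib.request.urlretrieve(MIRROR + name, path)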

File : examples/simple_mnist.py


"""
A simple example of training a two-layer neural network on MNIST
"""
import struct
import gzip
import numpy as np
from babygrad import Tensor, ops


def parse_mnist(image_filename, label_filename):
    """
    Args:
        image_filename (str): Path to the gzipped image file.
        label_filename (str): Path to the gzipped label file.

    Returns:
        Tuple (X, y):
            X (np.ndarray): Images as a (num_examples, 784) array.
            y (np.ndarray): Labels as a (num_examples,) array.
    """
    with gzip.open(image_filename, 'rb') as f:
        magic, num_images, rows, cols = struct.unpack('>IIII', f.read(16))
        image_data = np.frombuffer(f.read(), dtype=np.uint8)
        images = image_data.reshape(num_images, rows * cols)

    with gzip.open(label_filename, "rb") as f:
        magic, num_labels = struct.unpack('>II', f.read(8))
        labels = np.frombuffer(f.read(), dtype=np.uint8)

    normalized_images = images.astype(np.float32) / 255.0
    return normalized_images, labels


class SimpleNN:
    def __init__(self, input_size, hidden_size, num_classes):
        self.W1 = Tensor(np.random.randn(input_size, hidden_size)
                    .astype(np.float32) / np.sqrt(hidden_size))
        self.W2 = Tensor(np.random.randn(hidden_size, num_classes)
                    .astype(np.float32) / np.sqrt(num_classes))

    def forward(self, x: Tensor) -> Tensor:
        z1 = x @ self.W1 # (8,784) @ (784, 100) -> (8,100)
        a1 = ops.relu(z1)
        logits = a1 @ self.W2 #  (8,100) @ (100,10) -> (8,10)
        return logits

    def parameters(self):
        return [self.W1, self.W2]

def softmax_loss(logits: Tensor, y_true: Tensor) -> Tensor:
    """
    Args:
        logits (Tensor): The raw output scores from the model.
        y_true (Tensor): The one-hot encoded true labels.
    Returns:
        A scalar tensor representing the average loss.
    """
    batch_size = logits.shape[0]
    # log-sum-exp computed directly on the logits; see the note after
    # this example for a numerically stable variant.
    log_sum_exp = ops.log(ops.exp(logits).sum(axes=1))
    z_y = (logits * y_true).sum(axes=1)
    loss = log_sum_exp - z_y
    return loss.sum() / batch_size


def train_epoch(model: SimpleNN, X_train: np.ndarray, y_train: np.ndarray,
                 lr: float, batch_size: int):
    num_examples = X_train.shape[0]
    for i in range(0, num_examples, batch_size):
        # Get a batch of data
        x_batch_np = X_train[i:i+batch_size]
        y_batch_np = y_train[i:i+batch_size]

        x_batch = Tensor(x_batch_np)

        logits = model.forward(x_batch)

        num_classes = logits.shape[1]
        y_one_hot_np = np.zeros((y_batch_np.shape[0], num_classes),
                         dtype=np.float32)
        y_one_hot_np[np.arange(y_batch_np.shape[0]), y_batch_np] = 1
        y_one_hot = Tensor(y_one_hot_np)

        loss = softmax_loss(logits, y_one_hot)

        # Zero gradients, backpropagate, then take a plain SGD step.
        for p in model.parameters():
            p.grad = None
        loss.backward()
        for p in model.parameters():
            p.data = p.data - lr * p.grad
        preds = logits.data.argmax(axis=1)
        acc = np.mean(preds == y_batch_np)

        print(f"Batch {i//batch_size+1:3d}: Loss = {loss.data:.4f},
                 Accuracy = {acc*100:.2f}%")
if __name__ == "__main__":
    EPOCHS = 3
    LEARNING_RATE = 0.1
    BATCH_SIZE = 8
    INPUT_SIZE = 784
    HIDDEN_SIZE = 100
    NUM_CLASSES = 10

    print("Loading MNIST data...")
    X_train, y_train = parse_mnist("data/train-images-idx3-ubyte.gz",
                         "data/train-labels-idx1-ubyte.gz")
    X_test, y_test = parse_mnist("data/t10k-images-idx3-ubyte.gz",
                     "data/t10k-labels-idx1-ubyte.gz")
    print("Data loaded.\n")

    np.random.seed(42)
    model = SimpleNN(INPUT_SIZE, HIDDEN_SIZE, NUM_CLASSES)

    for epoch in range(EPOCHS):
        print(f"--- Epoch {epoch+1}/{EPOCHS} ---")
        train_epoch(model, X_train, y_train, LEARNING_RATE, BATCH_SIZE)
        print("-" * 20)

Sample output (truncated):

  Batch  13: Loss = 0.2163, Accuracy = 96.09%
  Batch  14: Loss = 0.1742, Accuracy = 96.09%
  Batch  15: Loss = 0.1630, Accuracy = 96.88%
  Batch  16: Loss = 0.1862, Accuracy = 95.31%
  Batch  17: Loss = 0.1637, Accuracy = 96.09%
  Batch  18: Loss = 0.1812, Accuracy = 95.31%
  Batch  19: Loss = 0.2156, Accuracy = 94.53%
  Batch  20: Loss = 0.1259, Accuracy = 99.22%
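
The loss above exponentiates the raw logits directly, which can overflow float32 when logits grow large; the standard fix subtracts the per-row max before exponentiating (the shift cancels out of the loss). As a numerically stable reference to sanity-check softmax_loss against, here is a plain NumPy version (a sketch, not part of babygrad):

import numpy as np

def softmax_loss_ref(logits: np.ndarray, y: np.ndarray) -> float:
    """Average cross-entropy for logits (N, C) and integer labels y (N,)."""
    shifted = logits - logits.max(axis=1, keepdims=True)  # stability shift
    log_sum_exp = np.log(np.exp(shifted).sum(axis=1))
    z_y = shifted[np.arange(len(y)), y]
    return float((log_sum_exp - z_y).mean())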

12.2 Decent MNIST

This version swaps the hand-rolled layers for the library's nn modules, the Adam optimizer, and the Dataset/DataLoader utilities.

import numpy as np
from baby import Tensor
from baby.optim import Adam
from baby.nn import (Dropout, Module, Linear, ReLU, Flatten,
                     Sequential, SoftmaxLoss, LayerNorm1d, BatchNorm1d)
from baby.data import MNISTDataset, DataLoader

def train_epoch(model: Module, loss_fn: Module, optimizer: Adam,
             train_loader: DataLoader):
    for batch_idx, (x_batch, y_batch) in enumerate(train_loader):
        logits = model(x_batch)
        loss = loss_fn(logits, y_batch.data)

        optimizer.reset_grad()
        loss.backward()
        optimizer.step()

        preds = logits.data.argmax(axis=1)
        acc = np.mean(preds == y_batch.data)

        print(f"Batch {batch_idx+1:3d}: Loss = {loss.data:.4f},
                 Accuracy = {acc*100:.2f}%")

def evaluate(model: Module, test_loader: DataLoader):
    model.eval()
    total_correct = 0
    total_count = 0
    for x_batch, y_batch in test_loader:
        logits = model(x_batch)
        preds = logits.data.argmax(axis=1)
        total_correct += np.sum(preds == y_batch.data)
        total_count += y_batch.data.shape[0]
    return total_correct / total_count


if __name__ == "__main__":
    EPOCHS = 5
    LEARNING_RATE = 0.001
    BATCH_SIZE = 128
    INPUT_SIZE = 784
    HIDDEN_SIZE = 100
    NUM_CLASSES = 10

    print("Loading MNIST data via Dataset...")
    train_dataset = MNISTDataset("data/train-images-idx3-ubyte.gz",
                     "data/train-labels-idx1-ubyte.gz")
    test_dataset = MNISTDataset("data/t10k-images-idx3-ubyte.gz",
                     "data/t10k-labels-idx1-ubyte.gz")

    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,
                     shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE,
                 shuffle=False)
    print("Data loaded.\n")

    np.random.seed(42)

    model = Sequential(
        Flatten(),
        Linear(INPUT_SIZE, HIDDEN_SIZE),
        BatchNorm1d(HIDDEN_SIZE),
        ReLU(),
        Dropout(),
        Linear(HIDDEN_SIZE, NUM_CLASSES)
    )

    loss_fn = SoftmaxLoss()
    optimizer = Adam(params=model.parameters(), lr=LEARNING_RATE)

    for epoch in range(EPOCHS):
        model.train()
        print(f"\n--- Epoch {epoch+1}/{EPOCHS} ---")
        train_epoch(model, loss_fn, optimizer, train_loader)
        test_acc = evaluate(model, test_loader)
        print(f"--- Test Accuracy = {test_acc*100:.2f}% ---")
        print("-" * 40)

12.3 Better MNIST

The Trainer class wraps the training and evaluation loops, so the script reduces to setup plus a single trainer.fit() call.

import numpy as np
from baby import Tensor
from baby.optim import Adam
from baby.nn import (Dropout, Flatten, Linear, ReLU,
                     Sequential, SoftmaxLoss, BatchNorm1d)
from baby.data import MNISTDataset, DataLoader
from baby.trainer import Trainer

if __name__ == "__main__":
    EPOCHS = 5
    LEARNING_RATE = 0.001
    BATCH_SIZE = 128
    INPUT_SIZE = 784
    HIDDEN_SIZE = 100
    NUM_CLASSES = 10

    print("Loading MNIST data via Dataset...")
    train_dataset = MNISTDataset("data/train-images-idx3-ubyte.gz",
                 "data/train-labels-idx1-ubyte.gz")
    test_dataset = MNISTDataset("data/t10k-images-idx3-ubyte.gz",
                 "data/t10k-labels-idx1-ubyte.gz")

    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,
                    shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE,
                     shuffle=False)
    print("Data loaded.\n")

    np.random.seed(42)
    model = Sequential(
        Flatten(),
        Linear(INPUT_SIZE, HIDDEN_SIZE),
        BatchNorm1d(HIDDEN_SIZE),
        ReLU(),
        Dropout(),
        Linear(HIDDEN_SIZE, NUM_CLASSES)
    )

    loss_fn = SoftmaxLoss()
    optimizer = Adam(params=model.parameters(), lr=LEARNING_RATE)

    trainer = Trainer(model, optimizer, loss_fn, train_loader,
             val_loader=test_loader)
    trainer.fit(EPOCHS)
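
Trainer simply packages the train/evaluate loops from the previous example. Here is a minimal sketch of what fit() might look like, assuming the interfaces used above (reset_grad(), step(), model.train()/eval()); the real Trainer may differ in details such as logging:

class Trainer:
    def __init__(self, model, optimizer, loss_fn, train_loader,
                 val_loader=None):
        self.model, self.optimizer, self.loss_fn = model, optimizer, loss_fn
        self.train_loader, self.val_loader = train_loader, val_loader

    def fit(self, epochs):
        for epoch in range(epochs):
            self.model.train()
            for x_batch, y_batch in self.train_loader:
                loss = self.loss_fn(self.model(x_batch), y_batch.data)
                self.optimizer.reset_grad()
                loss.backward()
                self.optimizer.step()
            if self.val_loader is not None:
                acc = self.evaluate(self.val_loader)
                print(f"Epoch {epoch+1}: Val Acc = {acc*100:.2f}%")

    def evaluate(self, loader):
        self.model.eval()
        correct, total = 0, 0
        for x_batch, y_batch in loader:
            preds = self.model(x_batch).data.argmax(axis=1)
            correct += int((preds == y_batch.data).sum())
            total += y_batch.data.shape[0]
        return correct / total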

12.4 Simple CNN

In nn.Conv I mentioned that most datasets use the NCHW format, so we transpose before calling op.Conv.

But MNISTDataset returns the NHWC format. That means we need to transpose NHWC to NCHW before feeding the data into our model.

CIFAR10Dataset will return NCHW directly.
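
The transpose is a pure axis permutation; a quick NumPy illustration of the shape change, which the model below performs inside forward():

import numpy as np

batch = np.zeros((128, 28, 28, 1), dtype=np.float32)   # NHWC
nchw = batch.transpose(0, 3, 1, 2)                     # -> (128, 1, 28, 28)
print(nchw.shape)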

import numpy as np
from baby import Tensor
from baby.optim import Adam
from baby.nn import (Module, Conv, Linear, ReLU,
                     Sequential, Dropout, SoftmaxLoss, Flatten, BatchNorm1d)
from baby.data import MNISTDataset, DataLoader
from baby.trainer import Trainer


class SimpleCNN(Module):
    """
    Conv -> ReLU -> Flatten -> Linear -> BatchNorm -> ReLU -> Dropout -> Linear
    """
    def __init__(self):
        super().__init__()
        self.model = Sequential(
            Conv(in_channels=1, out_channels=16, kernel_size=3, stride=1,
                 bias=True),
            ReLU(),
            Flatten(),
            # 16 channels * 28 * 28 = 12,544 inputs to the Linear layer
            Linear(16 * 28 * 28, 128),
            BatchNorm1d(128),
            ReLU(),
            Dropout(p=0.5),
            Linear(128, 10)
        )

    def forward(self, x):
        # x shape: (batch, 28, 28, 1) from MNISTDataset in NHWC format
        # Need to convert to (batch, 1, 28, 28) for Conv (NCHW)

        # Transpose: (batch, 28, 28, 1) -> (batch, 1, 28, 28)
        x_data = x.data if isinstance(x, Tensor) else x
        x_reshaped = x_data.transpose(0, 3, 1, 2)  # NHWC -> NCHW
        x = Tensor(x_reshaped)

        return self.model(x)

if __name__ == "__main__":
    EPOCHS = 6
    LEARNING_RATE = 0.001
    BATCH_SIZE = 128

    print("Loading MNIST data...")
    train_dataset = MNISTDataset(
        "data/train-images-idx3-ubyte.gz",
        "data/train-labels-idx1-ubyte.gz"
    )

    test_dataset = MNISTDataset(
        "data/t10k-images-idx3-ubyte.gz",
        "data/t10k-labels-idx1-ubyte.gz"
    )

    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,
                    shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE,
                     shuffle=False)

    print("Data loaded.\n")

    np.random.seed(42)
    model = SimpleCNN()
    loss_fn = SoftmaxLoss()
    optimizer = Adam(params=model.parameters(), lr=LEARNING_RATE)

    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        loss_fn=loss_fn,
        train_loader=train_loader,
        val_loader=test_loader
    )

    print("Starting training...\n")

    trainer.fit(epochs=EPOCHS)

    print("\n" + "="*40)
    final_acc = trainer.evaluate(loader=test_loader)
    print(f"Final Test Accuracy: {final_acc*100:.2f}%")
    print("="*40)

    print("\nSaving model...")
    model.save("simple_cnn_mnist.pt")
    print("Done!")  
End of Epoch 1 - Avg Loss: 0.1653 | Val Acc: 97.96%
--- Epoch 2/6 ---
  Batch   0: Loss = 0.0402 | Acc = 98.44%
  Batch  50: Loss = 0.0371 | Acc = 99.22%
  Batch 100: Loss = 0.0640 | Acc = 97.66%
  Batch 150: Loss = 0.0437 | Acc = 99.22%
  Batch 200: Loss = 0.0293 | Acc = 99.22%
  Batch 250: Loss = 0.0153 | Acc = 100.00%
  Batch 300: Loss = 0.0319 | Acc = 99.22%
  Batch 350: Loss = 0.0444 | Acc = 99.22%
  Batch 400: Loss = 0.0359 | Acc = 99.22%
  Batch 450: Loss = 0.0999 | Acc = 97.66%
End of Epoch 2 - Avg Loss: 0.0443 | Val Acc: 98.21%
--- Epoch 3/6 ---
  Batch   0: Loss = 0.0118 | Acc = 100.00%
  Batch  50: Loss = 0.0115 | Acc = 100.00%

12.5 Better CNN

We deepen the network by stacking two convolution blocks before the classifier head.

import numpy as np
from baby import Tensor
from baby.optim import Adam
from baby.nn import (Module, Conv, Linear, ReLU,
                     Sequential, Dropout, SoftmaxLoss, Flatten, BatchNorm1d)
from baby.data import MNISTDataset, DataLoader
from baby.trainer import Trainer

class HierarchicalCNN(Module):
    def __init__(self):
        super().__init__()

        # 32 channels * 28 * 28 = 25,088
        flattened_size = 25088
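        # Conv output size: out = (in + 2*padding - kernel) // stride + 1.
        # Keeping the spatial size at 28 with kernel_size=3 and stride=1
        # implies padding=1 ("same" padding), an assumption about Conv's
        # default that is consistent with the sizes used here.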

        self.model = Sequential(
            # Block 1
            Conv(in_channels=1, out_channels=16,
                 kernel_size=3, stride=1, bias=True),
            ReLU(),

            # Block 2
            Conv(in_channels=16, out_channels=32,
                 kernel_size=3, stride=1, bias=True),
            ReLU(),

            Flatten(),

            Linear(flattened_size, 128),
            BatchNorm1d(128),
            ReLU(),
            Dropout(p=0.5),
            Linear(128, 10)
        )

    def forward(self, x):
        # Transpose: NHWC -> NCHW
        x_data = x.data if isinstance(x, Tensor) else x
        x_reshaped = x_data.transpose(0, 3, 1, 2)
        x = Tensor(x_reshaped)

        return self.model(x)

if __name__ == "__main__":
    EPOCHS = 6
    LEARNING_RATE = 0.001
    BATCH_SIZE = 128

    print("Loading MNIST data...")
    train_dataset = MNISTDataset("data/train-images-idx3-ubyte.gz",
             "data/train-labels-idx1-ubyte.gz")
    test_dataset = MNISTDataset("data/t10k-images-idx3-ubyte.gz",
                     "data/t10k-labels-idx1-ubyte.gz")

    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,
                     shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE,
                 shuffle=False)

    np.random.seed(42)
    model = HierarchicalCNN()
    loss_fn = SoftmaxLoss()
    optimizer = Adam(params=model.parameters(), lr=LEARNING_RATE)

    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        loss_fn=loss_fn,
        train_loader=train_loader,
        val_loader=test_loader
    )

    print("Starting training ...")
    trainer.fit(epochs=EPOCHS)

    print("\n" + "="*40)
    final_acc = trainer.evaluate(loader=test_loader)
    print(f"Final Test Accuracy: {final_acc*100:.2f}%")
    print("="*40)

    model.save("improved_cnn_mnist.pt")

Sample output (truncated):

End of Epoch 1 - Avg Loss: 0.1832 | Val Acc: 98.38%
--- Epoch 2/6 ---
  Batch   0: Loss = 0.0766 | Acc = 98.44%
  Batch  50: Loss = 0.0655 | Acc = 99.22%
  Batch 100: Loss = 0.0280 | Acc = 98.44%
  Batch 150: Loss = 0.0521 | Acc = 98.44%

12.6 Simple CIFAR10

First we will write the CIFAR10Dataset and then use it to train a model.

The data/ folder contains:

  • cifar-10-batches-py

File : babygrad/data.py

import pickle
from typing import Optional, List
import numpy as np


def unpickle(file):
    # Each CIFAR-10 batch file is a pickled dict with byte-string keys:
    # b'data' is a (10000, 3072) uint8 array, b'labels' a list of ints.
    with open(file, 'rb') as fo:
        batch = pickle.load(fo, encoding='bytes')
    batch[b'filenames'] = [name.decode('utf-8') for name in batch[b'filenames']]
    return batch

class CIFAR10Dataset(Dataset):
    def __init__(self, base_folder: str, train: bool,
                 p: Optional[float] = 0.5,
                 transforms: Optional[List] = None):

        self.base_folder = base_folder
        self.transforms = transforms
        self.p = p
        self.train = train

        if train:
            data_list = []
            label_list = []
            for i in range(1, 6):
                batch = unpickle(f"{base_folder}/data_batch_{i}")
                data_list.append(batch[b'data'])
                label_list.extend(batch[b'labels'])
            self.images = np.vstack(data_list)
            self.labels = np.array(label_list)
        else:
            batch = unpickle(f"{base_folder}/test_batch")
            self.images = batch[b'data']
            self.labels = np.array(batch[b'labels'])


    def __getitem__(self, index):
        if isinstance(index, slice):
            images_flat = np.array(self.images[index]) / 255
            images_reshaped = images_flat.reshape(-1, 3, 32, 32)
            labels_batch = np.array(self.labels[index])
            return (images_reshaped, labels_batch)


        sample_image = self.images[index] / 255
        sample_label = self.labels[index]

        new_sample_image = sample_image.reshape(3, 32, 32)

        if self.transforms:
            for tform in self.transforms:
                new_sample_image = tform(new_sample_image)

        return (new_sample_image, sample_label)

    def __len__(self):
        return len(self.images)
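
A quick smoke test of the dataset (illustrative, assuming the data/ layout above):

ds = CIFAR10Dataset(base_folder="data/cifar-10-batches-py", train=False)
img, label = ds[0]
print(img.shape, label)   # (3, 32, 32) and an integer class id
print(len(ds))            # 10000 test images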

Now we use the dataset to train a model.

import numpy as np
from baby import Tensor
from baby.optim import Adam
from baby.nn import (Module, Conv, Linear, ReLU,
                     Sequential, Dropout, SoftmaxLoss, Flatten, BatchNorm1d)
from baby.data import DataLoader, CIFAR10Dataset
from baby.trainer import Trainer

class CIFAR10CNN(Module):
    """
    Conv(3->16) -> ReLU -> Conv(16->32) -> ReLU -> Flatten -> Linear -> BN
         -> ReLU -> Dropout -> Linear
    """
    def __init__(self):
        super().__init__()

        # 32 channels * 32 width * 32 height = 32,768
        flattened_size = 32768

        self.net = Sequential(
            Conv(in_channels=3, out_channels=16,
                     kernel_size=3, stride=1, bias=True),
            ReLU(),

            Conv(in_channels=16, out_channels=32,
                 kernel_size=3, stride=1, bias=True),
            ReLU(),
            Flatten(),

            Linear(flattened_size, 256),
            BatchNorm1d(256),
            ReLU(),
            Dropout(p=0.4),
            Linear(256, 10)
        )

    def forward(self, x):
        # CIFAR10Dataset already provides (N, C, H, W) arrays,
        # so no transpose is needed here
        if not isinstance(x, Tensor):
            x = Tensor(x)
        return self.net(x)

if __name__ == "__main__":
    EPOCHS = 10
    LEARNING_RATE = 0.001
    BATCH_SIZE = 64

    print("Loading CIFAR-10 data...")
    train_dataset = CIFAR10Dataset(base_folder="data/cifar-10-batches-py",
                     train=True)
    test_dataset = CIFAR10Dataset(base_folder="data/cifar-10-batches-py",
                     train=False)

    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)

    print(f"Data loaded. Training samples: {len(train_dataset)}")

    np.random.seed(42)

    model = CIFAR10CNN()
    loss_fn = SoftmaxLoss()
    optimizer = Adam(params=model.parameters(), lr=LEARNING_RATE)

    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        loss_fn=loss_fn,
        train_loader=train_loader,
        val_loader=test_loader
    )

    trainer.fit(epochs=EPOCHS)

    print("\n" + "="*40)
    final_acc = trainer.evaluate(loader=test_loader)
    print(f"Final CIFAR-10 Test Accuracy: {final_acc*100:.2f}%")
    print("="*40)

    model.save("cifar10_model.pt")
    print("Model saved as cifar10_model.pt")

Sample output (truncated):

  Batch 450: Loss = 0.9458 | Acc = 62.50%
  Batch 500: Loss = 0.6915 | Acc = 79.69%
  Batch 550: Loss = 0.9032 | Acc = 67.19%
  Batch 600: Loss = 0.9140 | Acc = 65.62%
  Batch 650: Loss = 0.8343 | Acc = 70.31%
  Batch 700: Loss = 0.7392 | Acc = 70.31%
  Batch 750: Loss = 0.7067 | Acc = 70.31%
End of Epoch 3 - Avg Loss: 0.7867 | Val Acc: 65.34%

Validation accuracy is not improving much.

12.7 Simple ResNet

We add residual (skip) connections: each block computes x + fn(x), which eases gradient flow through deeper stacks.

import numpy as np
from baby import Tensor
from baby.optim import Adam
from baby.nn import (Module, Conv, Linear, ReLU, Residual,
                     Sequential, Dropout, SoftmaxLoss, Flatten, BatchNorm1d)
from baby.data import DataLoader, CIFAR10Dataset
from baby.trainer import Trainer

class MiniResNet(Module):
    def __init__(self):
        super().__init__()

        self.stem = Sequential(
            Conv(3, 32, kernel_size=3, stride=1, bias=True),
            ReLU()
        )

        def res_block():
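            # Residual (imported from baby.nn) wraps a sub-network fn
            # and computes x + fn(x); here fn is a pair of 3x3 convs.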
            return Residual(
                Sequential(
                    Conv(32, 32, kernel_size=3, stride=1, bias=True),
                    ReLU(),
                    Conv(32, 32, kernel_size=3, stride=1, bias=True)
                )
            )

        self.layers = Sequential(
            res_block(),
            ReLU(),
            res_block(),
            ReLU()
        )

        # Input: 32 * 32 * 32 = 32,768
        self.head = Sequential(
            Flatten(),
            Linear(32768, 256),
            BatchNorm1d(256),
            ReLU(),
            Dropout(p=0.3),
            Linear(256, 10)
        )

    def forward(self, x):
        if not isinstance(x, Tensor):
            x = Tensor(x)

        x = self.stem(x)
        x = self.layers(x)
        return self.head(x)

if __name__ == "__main__":
    EPOCHS = 10
    LEARNING_RATE = 0.0005
    BATCH_SIZE = 64

    print("Loading CIFAR-10...")
    train_dataset = CIFAR10Dataset(base_folder="data/cifar-10-batches-py",
                     train=True)
    test_dataset = CIFAR10Dataset(base_folder="data/cifar-10-batches-py",
                     train=False)

    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,
                     shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE,
                     shuffle=False)

    np.random.seed(42)
    model = MiniResNet()
    loss_fn = SoftmaxLoss()
    optimizer = Adam(params=model.parameters(), lr=LEARNING_RATE)

    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        loss_fn=loss_fn,
        train_loader=train_loader,
        val_loader=test_loader
    )

    trainer.fit(epochs=EPOCHS)

Sample output (truncated):

  Batch 750: Loss = 0.2567 | Acc = 92.19%
End of Epoch 9 - Avg Loss: 0.1254 | Val Acc: 67.60%
--- Epoch 10/10 ---
  Batch   0: Loss = 0.0681 | Acc = 96.88%
  Batch  50: Loss = 0.0727 | Acc = 98.44%
  Batch 100: Loss = 0.0660 | Acc = 98.44%
  Batch 150: Loss = 0.0567 | Acc = 98.44%
  Batch 200: Loss = 0.0977 | Acc = 98.44%
  Batch 250: Loss = 0.1006 | Acc = 95.31%
  Batch 300: Loss = 0.0590 | Acc = 96.88%
  Batch 350: Loss = 0.1152 | Acc = 98.44%
  Batch 400: Loss = 0.1864 | Acc = 93.75%
  Batch 450: Loss = 0.1204 | Acc = 95.31%
  Batch 500: Loss = 0.1351 | Acc = 95.31%
  Batch 550: Loss = 0.2492 | Acc = 92.19%
  Batch 600: Loss = 0.2578 | Acc = 89.06%
  Batch 650: Loss = 0.0796 | Acc = 98.44%
  Batch 700: Loss = 0.1613 | Acc = 92.19%
  Batch 750: Loss = 0.1807 | Acc = 93.75%
End of Epoch 10 - Avg Loss: 0.1158 | Val Acc: 68.11%

Even here the result is not that impressive: by epoch 10, training batch accuracy sits in the mid-90s while validation accuracy stalls near 68%, a classic sign of overfitting. By adding just two transformation classes (data augmentation), with no changes to the architecture, we can push validation accuracy past 70%.

Using Transformations

class RandomFlipHorizontal:
    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, img):
        # img shape: (3, 32, 32) -> (C, H, W)
        if np.random.rand() < self.p:
            # Flip along the last axis (Width)
            return img[:, :, ::-1]
        return img

class RandomCrop:
    def __init__(self, padding=4):
        self.padding = padding

    def __call__(self, img):
        # img shape: (3, 32, 32)
        c, h, w = img.shape

        # Pad only the H and W dimensions with zeros
        padded = np.pad(img, ((0, 0), (self.padding, self.padding),
                 (self.padding, self.padding)), mode='constant')

        # Pick a random top-left corner
        top = np.random.randint(0, 2 * self.padding + 1)
        left = np.random.randint(0, 2 * self.padding + 1)

        # Crop back to original size
        return padded[:, top:top+h, left:left+w]
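
A quick check that the transforms preserve the (3, 32, 32) shape (illustrative):

img = np.random.rand(3, 32, 32).astype(np.float32)
for t in [RandomFlipHorizontal(p=1.0), RandomCrop(padding=4)]:
    img = t(img)
print(img.shape)   # still (3, 32, 32)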


# Same architecture
train_transforms = [RandomFlipHorizontal(), RandomCrop(padding=4)]
train_dataset = CIFAR10Dataset(base_folder="data/cifar-10-batches-py",
                 train=True, transforms=train_transforms)

With these two transformations, validation accuracy gets past 70%:

  Batch 750: Loss = 0.8504 | Acc = 71.88%
End of Epoch 5 - Avg Loss: 0.9396 | Val Acc: 70.43%
--- Epoch 6/10 ---
  Batch   0: Loss = 1.0325 | Acc = 62.50%
  Batch  50: Loss = 0.7775 | Acc = 70.31%
  Batch 100: Loss = 0.7756 | Acc = 73.44%
  Batch 150: Loss = 0.9060 | Acc = 68.75%
  Batch 200: Loss = 0.8084 | Acc = 73.44%
  Batch 250: Loss = 0.8299 | Acc = 65.62%
  Batch 300: Loss = 0.7393 | Acc = 75.00%
  Batch 350: Loss = 0.7488 | Acc = 70.31%
  Batch 400: Loss = 0.6893 | Acc = 79.69%
  Batch 450: Loss = 0.6177 | Acc = 79.69%
  Batch 500: Loss = 0.8677 | Acc = 71.88%
  Batch 550: Loss = 0.8649 | Acc = 67.19%
  Batch 600: Loss = 0.7627 | Acc = 68.75%
  Batch 650: Loss = 0.8702 | Acc = 68.75%
  Batch 700: Loss = 0.6581 | Acc = 76.56%
  Batch 750: Loss = 0.9947 | Acc = 65.62%
End of Epoch 6 - Avg Loss: 0.8865 | Val Acc: 71.91%

Original: zekcrates/examples