# -*- coding: utf-8 -*-
"""mnist_torch_dec25.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1IxH-hIFwyEUPOjFB6MltbKAZzPHUwoxk

# CNN for MNIST classification (PyTorch)
(c) G. Turinici 2025

# NN definition: class etc.
"""

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from tqdm import tqdm

# --- Data ---
# Convert images to [0, 1] tensors of shape (1, 28, 28); download MNIST locally.
transform = transforms.ToTensor()
train = datasets.MNIST(root='.', train=True, download=True, transform=transform)
test = datasets.MNIST(root='.', train=False, download=True, transform=transform)
train_loader = DataLoader(train, batch_size=64, shuffle=True)
test_loader = DataLoader(test, batch_size=1000)

# --- Model ---
class CNN(nn.Module):
    """Two convolutional layers followed by a single linear classifier."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(1, 32, 3, padding=1), nn.ReLU(),   # 1x28x28 -> 32x28x28
            nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(),  # 32x28x28 -> 64x28x28
            nn.MaxPool2d(2)                              # 64x28x28 -> 64x14x14
        )
        self.fc = nn.Linear(64 * 14 * 14, 10)            # 10 class logits

    def forward(self, x):
        x = self.conv(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 64*14*14)
        return self.fc(x)

"""#Instantiation of the model"""

model = CNN()
opt = optim.Adam(model.parameters())
loss_fn = nn.CrossEntropyLoss()  # expects raw logits and integer class labels

"""#Train"""

# --- Train ---
model.train()
for epoch in range(1):
    for x, y in tqdm(train_loader):
        opt.zero_grad()
        loss = loss_fn(model(x), y)
        loss.backward()
        opt.step()

"""#Test"""

# --- Test ---
model.eval()  # switch to inference mode (no dropout/batchnorm here, but good practice)
correct = 0
with torch.no_grad():
    for x, y in tqdm(test_loader):
        pred = model(x).argmax(1)           # index of the largest logit = predicted class
        correct += (pred == y).sum().item()

print("Accuracy:", correct / len(test))
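
"""#Save and reuse the trained model

A minimal optional sketch (not part of the original notebook) showing how the
trained weights could be serialized and reloaded for single-image inference.
The file name `mnist_cnn.pt` is an illustrative choice; `model`, `CNN`, and
`test` come from the cells above.
"""

# --- Save / reload (optional sketch) ---
torch.save(model.state_dict(), "mnist_cnn.pt")        # serialize learned weights

restored = CNN()                                      # fresh, untrained instance
restored.load_state_dict(torch.load("mnist_cnn.pt"))  # load the saved weights
restored.eval()                                       # inference mode

# Classify one test image: test[i] yields a (1x28x28 tensor, integer label) pair.
img, label = test[0]
with torch.no_grad():
    pred = restored(img.unsqueeze(0)).argmax(1).item()  # unsqueeze adds the batch dimension
print("Predicted:", pred, "| true label:", label)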