Question: Hello, below is my code for a transfer learning assignment. Could someone look over it and check whether it is optimal or could be improved? Thank you.
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import numpy as np
from sklearn.metrics import f1_score
# Define transforms for the training and test sets
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
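# Suggestion (not part of the original code): the pretrained DenseNet161 weights
# were trained on 224x224 ImageNet images normalized with ImageNet statistics,
# so a preprocessing pipeline along these lines may transfer better than the
# (0.5, 0.5, 0.5) normalization above:
# transform_train = transforms.Compose([
#     transforms.RandomCrop(32, padding=4),
#     transforms.RandomHorizontalFlip(),
#     transforms.Resize(224),
#     transforms.ToTensor(),
#     transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
# ])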
# Load CIFAR100 dataset
train_set = datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
test_set = datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)
# Set batch size and number of workers
batch_size = 64
num_workers = 2
# Create dataloaders for training and test sets
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=num_workers)
# Define device to train on
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load pretrained DenseNet161 and replace its classifier head for the 100 CIFAR100 classes
model = torch.hub.load('pytorch/vision:v0.9.0', 'densenet161', pretrained=True)
model.classifier = nn.Linear(2208, 100)
model = model.to(device)  # the model must live on the same device as the data
# Define loss function and optimizers
criterion = nn.CrossEntropyLoss()
# optimizer_scratch updates every parameter (full fine-tuning);
# optimizer_transfer updates only the new classifier head
optimizer_scratch = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
optimizer_transfer = optim.SGD(model.classifier.parameters(), lr=0.01, momentum=0.9)
# "Train from scratch": in fact this fine-tunes every parameter of the pretrained model
def train_scratch(model, train_loader, criterion, optimizer):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
# Train model with transfer learning: paired with optimizer_transfer, only the
# classifier head is updated
def train_transfer(model, train_loader, criterion, optimizer):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        # DenseNet applies ReLU, global average pooling and flattening between
        # features and classifier, so use the model's own forward pass;
        # model.classifier(model.features(data)) would fail on the 4-D feature map
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
# Evaluate the model on the test set, returning accuracy and macro F1
def test(model, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    targets = []
    predictions = []
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += criterion(output, target).item()
            pred = output.argmax(dim=1)
            correct += pred.eq(target).sum().item()
            targets += target.cpu().numpy().tolist()
            predictions += pred.cpu().numpy().tolist()
    # criterion returns a per-batch mean, so average over the number of batches
    test_loss /= len(test_loader)
    accuracy = correct / len(test_loader.dataset)
    f1 = f1_score(targets, predictions, average='macro')
    return accuracy, f1
# Training loop: full fine-tuning with train_scratch/optimizer_scratch
for epoch in range(20):
    train_scratch(model, train_loader, criterion, optimizer_scratch)
    test_accuracy, test_f1 = test(model, test_loader)
    print(f"Epoch {epoch+1}, Test accuracy: {test_accuracy:.4f}, F1 score: {test_f1:.4f}")