import math

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from PIL import Image
from torch.hub import load_state_dict_from_url
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets
from torchvision.datasets import ImageFolder
# Training loop for `cnn` (defined elsewhere in the file).
# NOTE(review): by design — per the original comment — each epoch trains on
# only the FIRST minibatch drawn from train_loader, so the reported loss and
# accuracy describe that single batch (useful as a quick overfitting sanity
# check, not a full training run).
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(cnn.parameters(), lr=0.001, momentum=0.9)

for epoch in range(nepochs):  # loop over the dataset multiple times
    correct = 0          # number of examples predicted correctly (for accuracy)
    total = 0            # number of examples seen
    running_loss = 0.0   # accumulated loss (for mean loss)
    n = 0                # number of minibatches

    # Each epoch, take the first batch from the loader for training.
    dataiter = iter(train_loader)
    # FIX: iterator `.next()` method does not exist in Python 3 (and was
    # removed from torch DataLoader iterators); use the builtin next().
    inputs, labels = next(dataiter)

    # ================== main body =========================
    # Clear accumulated gradients before the backward pass.
    optimizer.zero_grad()
    # Forward, backward, and update parameters.
    outputs = cnn(inputs)
    loss = loss_fn(outputs, labels)
    loss.backward()
    optimizer.step()
    # ======================================================

    # accumulate loss
    running_loss += loss.item()
    n += 1

    # accumulate data for accuracy
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)  # add in the number of labels in this minibatch
    correct += (predicted == labels).sum().item()  # add in the number of correct labels

    # collect together statistics for this epoch
    ltrn = running_loss / n
    atrn = correct / total
    print(f"epoch: {epoch} training loss: {ltrn: .3f} training accuracy: {atrn: .1%} ")