import os

import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# Train on GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# CIFAR10: 50k train / 10k test RGB images of shape 3x32x32 over 10 classes.
# download=True fetches the archive on first run and is a no-op afterwards.
train_data = torchvision.datasets.CIFAR10(
    root="./A01_PytorchLearning/database/CIFAR10",
    train=True,
    transform=torchvision.transforms.ToTensor(),
    download=True,
)
test_data = torchvision.datasets.CIFAR10(
    root="./A01_PytorchLearning/database/CIFAR10",
    train=False,
    transform=torchvision.transforms.ToTensor(),
    download=True,
)

train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度为:{}".format(train_data_size))
print("测试数据集的长度为:{}".format(test_data_size))

# FIX: shuffle the training set each epoch. Without shuffle=True, SGD sees
# the samples in the same stored order every epoch, which degrades
# convergence. The test loader stays unshuffled (order is irrelevant for
# evaluation and a fixed order keeps logs reproducible).
train_dataloader = DataLoader(train_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64)
class Module(nn.Module):
    """Small CNN for CIFAR10 classification.

    Three conv+maxpool stages shrink the 32x32 input to 4x4 while growing
    the channel count to 64; a flatten and two linear layers then produce
    the 10 class scores.
    """

    def __init__(self):
        super(Module, self).__init__()
        # Each 5x5 conv uses stride 1 / padding 2, so only the pools change
        # the spatial size: 32 -> 16 -> 8 -> 4.
        layers = [
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Map a (N, 3, 32, 32) batch to (N, 10) class scores."""
        return self.model(x)
# Model and loss function, both moved to the selected device.
module = Module().to(device)
loss_fn = nn.CrossEntropyLoss().to(device)

# Plain SGD with a fixed step size.
learning_rate = 1e-2
optimizer = torch.optim.SGD(module.parameters(), lr=learning_rate)

# Global step counters used as TensorBoard x-axes.
total_train_step = 0
total_test_step = 0
# Number of passes over the training set.
epoch = 10

# TensorBoard event files go under ../logs_train.
writer = SummaryWriter("../logs_train")
# Main loop: each epoch runs one training pass, one full test-set
# evaluation, logs both to TensorBoard, and checkpoints the model.
for i in range(epoch):
    print("-------第 {} 轮训练开始-------".format(i + 1))

    # ---- training phase ----
    module.train()  # train-mode behavior for dropout/batchnorm layers, if any
    for data in train_dataloader:
        imgs, targets = data
        imgs = imgs.to(device)
        targets = targets.to(device)
        outputs = module(imgs)
        loss = loss_fn(outputs, targets)

        # Standard backprop step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("训练次数:{}, Loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # ---- evaluation phase ----
    module.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for data in test_dataloader:
            imgs, targets = data
            imgs = imgs.to(device)
            targets = targets.to(device)
            outputs = module(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            # FIX: .item() turns the 0-dim tensor from .sum() into a plain
            # Python int, so total_accuracy accumulates on the CPU instead
            # of building up a device tensor and prints/logs as a number.
            accuracy = (outputs.argmax(1) == targets).sum().item()
            total_accuracy = total_accuracy + accuracy

    print("整体测试集上的Loss: {}".format(total_test_loss))
    print("整体测试集上的正确率: {}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy / test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    # FIX: create the checkpoint directory up front — torch.save raises
    # FileNotFoundError when the target directory does not exist.
    save_path = "A01_PytorchLearning/B03_Complete_Module/module/module{}.pth".format(i)
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    # NOTE(review): this pickles the whole module object; saving
    # module.state_dict() is the more portable convention, but callers may
    # load these files as full models, so the format is kept.
    torch.save(module, save_path)
    print("模型已保存")

writer.close()