首页 > 分享 > 残差网络Resnet50:花卉识别

残差网络Resnet50:花卉识别

残差网络Resnet50

网络结构

由于卷积层的堆叠,前面的信息可能在之后被丢失,造成精度下降。为了防止这种情况,将前面的层和后面的层进行叠加,防止信息丢失
在这里插入图片描述

Resnet网络结构:
在这里插入图片描述

Resnet50网络结构:

| 层级 | 输入 | 卷积核/操作 | 步长 | 输出 |
|---|---|---|---|---|
| 1 | 224×224×3 | 7×7×64 | 2 | 112×112×64 |
| MaxPool | 112×112×64 | MaxPool | 2 | 56×56×64 |
| 2 | 56×56×64 | 1×1×64 | 1 | 56×56×64 |
| 3 | 56×56×64 | 3×3×64 | 1 | 56×56×64 |
| 4 | 56×56×64 | 1×1×256 | 1 | 56×56×256 |
| 5 | 56×56×256 | 1×1×64 | 1 | 56×56×64 |
| 6 | 56×56×64 | 3×3×64 | 1 | 56×56×64 |
| 7 | 56×56×64 | 1×1×256 | 1 | 56×56×256 |
| 8 | 56×56×256 | 1×1×64 | 1 | 56×56×64 |
| 9 | 56×56×64 | 3×3×64 | 1 | 56×56×64 |
| 10 | 56×56×64 | 1×1×256 | 1 | 56×56×256 |
| 11 | 56×56×256 | 1×1×128 | 1 | 56×56×128 |
| 12 | 56×56×128 | 3×3×128 | 2 | 28×28×128 |
| 13 | 28×28×128 | 1×1×512 | 1 | 28×28×512 |
| 14 | 28×28×512 | 1×1×128 | 1 | 28×28×128 |
| 15 | 28×28×128 | 3×3×128 | 1 | 28×28×128 |
| 16 | 28×28×128 | 1×1×512 | 1 | 28×28×512 |
| 17 | 28×28×512 | 1×1×128 | 1 | 28×28×128 |
| 18 | 28×28×128 | 3×3×128 | 1 | 28×28×128 |
| 19 | 28×28×128 | 1×1×512 | 1 | 28×28×512 |
| 20 | 28×28×512 | 1×1×128 | 1 | 28×28×128 |
| 21 | 28×28×128 | 3×3×128 | 1 | 28×28×128 |
| 22 | 28×28×128 | 1×1×512 | 1 | 28×28×512 |
| 23 | 28×28×512 | 1×1×256 | 1 | 28×28×256 |
| 24 | 28×28×256 | 3×3×256 | 2 | 14×14×256 |
| 25 | 14×14×256 | 1×1×1024 | 1 | 14×14×1024 |
| 26 | 14×14×1024 | 1×1×256 | 1 | 14×14×256 |
| 27 | 14×14×256 | 3×3×256 | 1 | 14×14×256 |
| 28 | 14×14×256 | 1×1×1024 | 1 | 14×14×1024 |
| 29 | 14×14×1024 | 1×1×256 | 1 | 14×14×256 |
| 30 | 14×14×256 | 3×3×256 | 1 | 14×14×256 |
| 31 | 14×14×256 | 1×1×1024 | 1 | 14×14×1024 |
| 32 | 14×14×1024 | 1×1×256 | 1 | 14×14×256 |
| 33 | 14×14×256 | 3×3×256 | 1 | 14×14×256 |
| 34 | 14×14×256 | 1×1×1024 | 1 | 14×14×1024 |
| 35 | 14×14×1024 | 1×1×256 | 1 | 14×14×256 |
| 36 | 14×14×256 | 3×3×256 | 1 | 14×14×256 |
| 37 | 14×14×256 | 1×1×1024 | 1 | 14×14×1024 |
| 38 | 14×14×1024 | 1×1×256 | 1 | 14×14×256 |
| 39 | 14×14×256 | 3×3×256 | 1 | 14×14×256 |
| 40 | 14×14×256 | 1×1×1024 | 1 | 14×14×1024 |
| 41 | 14×14×1024 | 1×1×512 | 1 | 14×14×512 |
| 42 | 14×14×512 | 3×3×512 | 2 | 7×7×512 |
| 43 | 7×7×512 | 1×1×2048 | 1 | 7×7×2048 |
| 44 | 7×7×2048 | 1×1×512 | 1 | 7×7×512 |
| 45 | 7×7×512 | 3×3×512 | 1 | 7×7×512 |
| 46 | 7×7×512 | 1×1×2048 | 1 | 7×7×2048 |
| 47 | 7×7×2048 | 1×1×512 | 1 | 7×7×512 |
| 48 | 7×7×512 | 3×3×512 | 1 | 7×7×512 |
| 49 | 7×7×512 | 1×1×2048 | 1 | 7×7×2048 |
| AvgPool | 7×7×2048 | AvgPool | – | 1×1×2048 |
| 50 | 1×1×2048 | FC | – | 2048 |

以Resnet50为backbone、一个全连接层作为head,形成一个花卉识别的cnn网络结构

加载数据集并划分训练集

"""Load the flower image dataset and split it into train/test DataLoaders."""
import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import numpy as np
from sklearn.model_selection import train_test_split
from torch.utils.data import Subset

# Preprocessing: resize to the 224x224 input expected by the network,
# convert to a tensor, then normalize each channel to roughly [-1, 1].
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])

# Images live under flower/<class_name>/<image>; folder names become the labels.
dataset = datasets.ImageFolder('flower', transform=transform)

# Random 70 / 30 split into training and test subsets.
train_size = int(0.7 * len(dataset))
test_size = len(dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])

# Batched loaders; only the training set is shuffled each epoch.
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False)

# Report what was loaded.
print('训练集样本数量:', len(train_loader.dataset))
print('测试集样本数量:', len(test_loader.dataset))
class_labels = dataset.classes
print('花卉类别:', dataset.classes)

1234567891011121314151617181920212223242526272829303132333435363738

训练集样本数量: 3021 测试集样本数量: 1296 花卉类别: ['daisy', 'dandelion', 'rose', 'sunflower', 'tulip'] 123

进行数据集的可视化

import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler

# Scaler mapping values to [0, 255] (declared in the original notebook; not used below).
transfer = MinMaxScaler(feature_range=(0, 255))


def visualize_loader(batch, predicted=''):
    """Display the first 8 images of a batch in a 2x4 grid.

    batch     -- (images, labels) pair as yielded by a DataLoader.
    predicted -- optional predicted class indices; when given, a wrong
                 prediction is titled in red with the true label appended.
    """
    images = batch[0]
    labels = batch[1].numpy()
    # Without predictions, title each image with its ground-truth class.
    if str(predicted) == '':
        predicted = labels
    fig, axes = plt.subplots(2, 4, figsize=(12, 6))
    for idx, ax in enumerate(axes.flat):
        # CHW tensor -> HWC array for imshow; clamp into the displayable range.
        picture = images[idx].permute(1, 2, 0).numpy()
        ax.imshow(np.clip(picture, 0, 1))
        caption = class_labels[predicted[idx]]
        text_color = 'black'
        if predicted[idx] != labels[idx]:
            caption += '(' + str(class_labels[labels[idx]]) + ')'
            text_color = 'red'
        ax.set_title(caption, color=text_color)
        ax.axis('off')
    plt.tight_layout()
    plt.show()


# Preview a single training batch with its ground-truth labels.
for batch in train_loader:
    break
visualize_loader(batch)

123456789101112131415161718192021222324252627282930313233

在这里插入图片描述

残差块ResidualBlock和网络ResNet50

# Residual bottleneck block and the ResNet-50 backbone.
import torch
import torch.nn as nn


class ResidualBlock(nn.Module):
    """ResNet-50 bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand.

    The bottleneck width is out_channels // 4, matching the architecture
    table (e.g. 64 -> 64 -> 64 -> 256 for the first stage).  The 3x3 conv
    carries the stride so the first block of a stage can downsample.
    (Original code used 3x3 kernels for conv1/conv3 and a width of
    in_channels // stride, which does not match the ResNet-50 design.)
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(ResidualBlock, self).__init__()
        mid_channels = out_channels // 4
        # 1x1 conv: reduce channel count to the bottleneck width.
        self.conv1 = nn.Conv2d(in_channels, mid_channels, kernel_size=1, stride=1, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_channels)
        self.relu = nn.ReLU(inplace=True)
        # 3x3 conv: spatial processing; stride > 1 halves the resolution.
        self.conv2 = nn.Conv2d(mid_channels, mid_channels, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_channels)
        # 1x1 conv: expand back to the block's output channel count.
        self.conv3 = nn.Conv2d(mid_channels, out_channels, kernel_size=1, stride=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)
        # Identity shortcut by default; a 1x1 projection matches channels
        # and resolution when they differ, so the skip can be added element-wise.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        # ReLU after bn2 restored (it was missing in the original code).
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = self.relu(out)
        return out


class ResNet50(nn.Module):
    """ResNet-50 feature extractor (backbone).

    Returns a flattened 2048-d feature vector per image; classification is
    delegated to an external head, so num_classes is accepted only for API
    compatibility and is unused here.
    """

    def __init__(self, num_classes=1000):
        super(ResNet50, self).__init__()
        # Stem: 7x7/2 conv + 3x3/2 max-pool -> 56x56x64 for a 224x224 input.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four stages with 3/4/6/3 bottleneck blocks (the "50" layers).
        self.layer1 = self._make_layer(64, 256, blocks=3, stride=1)
        self.layer2 = self._make_layer(256, 512, blocks=4, stride=2)
        self.layer3 = self._make_layer(512, 1024, blocks=6, stride=2)
        self.layer4 = self._make_layer(1024, 2048, blocks=3, stride=2)
        # Global average pooling collapses any spatial size to 1x1.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.flatten = nn.Flatten()

    def _make_layer(self, in_channels, out_channels, blocks, stride):
        # First block performs the channel change / downsampling; the
        # remaining blocks keep the shape unchanged.
        layers = [ResidualBlock(in_channels, out_channels, stride)]
        for _ in range(1, blocks):
            layers.append(ResidualBlock(out_channels, out_channels))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = self.flatten(x)
        return x

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485

CNN(Resnet50:backbone,Linear:head)

import torch
import torch.nn as nn
import torchvision.models as models


class CNN(nn.Module):
    """Flower classifier: ResNet-50 backbone plus a small fully-connected head."""

    def __init__(self, num_classes=5):
        super(CNN, self).__init__()
        # Backbone (ResNet-50) yields a 2048-d feature vector per image.
        self.backbone = ResNet50()
        # Head maps backbone features to per-class scores (logits).
        self.head = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Linear(2048, num_classes),
        )

    def forward(self, x):
        features = self.backbone(x)
        return self.head(features)


model = CNN()
print(model)

1234567891011121314151617181920212223

CNN( (backbone): ResNet50( (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) (layer1): Sequential( (0): ResidualBlock( (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(64, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential( (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): ResidualBlock( (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential() ) (2): ResidualBlock( (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), 
bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential() ) ) (layer2): Sequential( (0): ResidualBlock( (conv1): Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(128, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential( (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): ResidualBlock( (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential() ) (2): ResidualBlock( (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), 
bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential() ) (3): ResidualBlock( (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential() ) ) (layer3): Sequential( (0): ResidualBlock( (conv1): Conv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential( (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): ResidualBlock( (conv1): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 
1), bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential() ) (2): ResidualBlock( (conv1): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential() ) (3): ResidualBlock( (conv1): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential() ) (4): ResidualBlock( (conv1): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(3, 3), 
stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential() ) (5): ResidualBlock( (conv1): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential() ) ) (layer4): Sequential( (0): ResidualBlock( (conv1): Conv2d(1024, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(512, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential( (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): ResidualBlock( (conv1): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(2048, 2048, 
kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential() ) (2): ResidualBlock( (conv1): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace=True) (conv2): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (shortcut): Sequential() ) ) (avgpool): AdaptiveAvgPool2d(output_size=(1, 1)) (flatten): Flatten(start_dim=1, end_dim=-1) ) (head): Sequential( (0): ReLU(inplace=True) (1): Linear(in_features=2048, out_features=5, bias=True) ) )

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194

训练模型

"""Train the CNN on the flower dataset and evaluate it after each epoch."""
import torch.optim as optim
import time

# Training hyper-parameters.
num_epochs = 7
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)

for epoch in range(num_epochs):
    # ---- training pass ----
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0
    start_time = time.time()
    for images, labels in train_loader:
        images = images.to(device)
        labels = labels.to(device)
        # Forward pass.
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Accuracy bookkeeping; .detach() replaces the deprecated
        # .data attribute (which silently bypasses autograd checks).
        _, predicted = torch.max(outputs.detach(), 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
        running_loss += loss.item()
    train_loss = running_loss / len(train_loader)
    train_accuracy = correct / total

    # ---- evaluation pass on the held-out test set ----
    model.eval()
    test_loss = 0.0
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            test_loss += loss.item()
    end_time = time.time()
    duration = int(end_time - start_time)
    m, s = duration // 60, duration % 60
    test_loss = test_loss / len(test_loader)
    test_accuracy = correct / total

    # Checkpoint after every epoch so progress survives an interruption.
    torch.save(model.state_dict(), 'FlowerRecognitionModel.pth')

    # Report this epoch's timing, loss and accuracy.
    print(f"Epoch [{epoch+1}/{num_epochs}] :{m}minutes{s}seconds")
    print(f" Train Loss: {train_loss:.4f}, Train Accuracy: {train_accuracy:.4f}")
    print(f" Test Loss: {test_loss:.4f}, Test Accuracy: {test_accuracy:.4f}")

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566

Epoch [1/7] :3minutes5seconds Train Loss: 1.1862, Train Accuracy: 0.4826 Test Loss: 1.1131, Test Accuracy: 0.5154 Epoch [2/7] :3minutes11seconds Train Loss: 1.1405, Train Accuracy: 0.5008 Test Loss: 1.0442, Test Accuracy: 0.5409 Epoch [3/7] :3minutes11seconds Train Loss: 1.1196, Train Accuracy: 0.5167 Test Loss: 1.1199, Test Accuracy: 0.5386 Epoch [4/7] :3minutes11seconds Train Loss: 1.0836, Train Accuracy: 0.5468 Test Loss: 0.9931, Test Accuracy: 0.6119 Epoch [5/7] :3minutes10seconds Train Loss: 1.0372, Train Accuracy: 0.5922 Test Loss: 0.9380, Test Accuracy: 0.6312 Epoch [6/7] :3minutes9seconds Train Loss: 0.9958, Train Accuracy: 0.6087 Test Loss: 0.9079, Test Accuracy: 0.6535 Epoch [7/7] :3minutes9seconds Train Loss: 0.9515, Train Accuracy: 0.6316 Test Loss: 0.8918, Test Accuracy: 0.6543

123456789101112131415161718192021

保存和加载模型

# Saving is done during training via:
# torch.save(model.state_dict(), 'FlowerRecognitionModel.pth')

# Create a fresh model instance and restore the trained weights.
model = CNN()
# map_location='cpu' lets a checkpoint saved on a GPU machine load on a
# CPU-only machine; move the model back to a device afterwards if needed.
model.load_state_dict(torch.load('FlowerRecognitionModel.pth', map_location='cpu'))

<All keys matched successfully> 1

测试模型

# Grab a single batch from the test loader for a qualitative check.
for batch in test_loader:
    break
imgs = batch[0]

# eval() fixes BatchNorm to its running statistics and no_grad() skips
# gradient tracking — the original ran inference in training mode.
model.eval()
with torch.no_grad():
    outputs = model(imgs)
_, predicted = torch.max(outputs, 1)
predicted = predicted.numpy()
visualize_loader(batch, predicted)

在这里插入图片描述

有一定的准确度了,模型能差不多识别就好,因为资源问题,就没有一直练下去了。

相关知识

基于ResNet50的植物病害识别研究与系统应用实现
基于残差网络迁移学习的花卉识别系统
【花卉识别系统】Python+卷积神经网络算法+人工智能+深度学习+图像识别+算法模型
花朵识别系统Python+卷积神经网络算法+人工智能+深度学习+计算机课设项目+TensorFlow+模型训练
探索Python中的花朵识别算法:从恶之花到智能分类
深度学习下的小样本玉米叶片病害识别研究
基于残差网络的花卉及其病害识别的研究与应用
7 Resnet深度残差网络实现102种花卉分类
人工智能毕业设计基于python的花朵识别系统
深度学习基于python+TensorFlow+Django的花朵识别系统

网址: 残差网络Resnet50:花卉识别 https://m.huajiangbk.com/newsview1193242.html

所属分类:花卉
上一篇: 醇基燃料
下一篇: 算法毕设分享 深度学习花卉识别