
Build a Flower Recognition System (It's Really Simple)

Table of Contents

I. An open-source neural network (AlexNet)
  1. Getting the dataset
  2. The network model
  3. Training the network
  4. Running predictions with the model
II. Building the flower recognition system (Flask)
  1. Building the page
  2. Calling the neural network model
  3. The recognition result page
  4. Starting the system
III. Summary

Why build a flower recognition system?

This semester I took a machine vision elective, and the course project was to build a flower-recognition neural network. I found open-source code online, modified it, and eventually got it running, but all I had to show for it was a single accuracy number (94%).

Since the network was already running, I figured I might as well put it to real use, so I kept writing code and wrapped it in a visual interface: the flower recognition system.

I. An open-source neural network (AlexNet)

1. Getting the dataset

The steps are as follows (a small download-and-extract sketch for steps (2) and (3) follows this list):
* (1) Create a new folder "flower_data" under the data_set folder
* (2) Download the flower classification dataset from http://download.tensorflow.org/example_images/flower_photos.tgz
* (3) Extract the dataset into the flower_data folder
* (4) Run the "split_data.py" script to automatically split the dataset into a training set (train) and a validation set (val)
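Steps (2) and (3) can also be done from Python. The sketch below is my own addition (not part of the original course code) and assumes the data_set/flower_data layout from step (1):

# Hedged sketch: download and unpack the dataset into data_set/flower_data/.
# Adjust the paths to your own layout if they differ.
import os
import tarfile
import urllib.request

url = "http://download.tensorflow.org/example_images/flower_photos.tgz"
dest_dir = os.path.join("data_set", "flower_data")
os.makedirs(dest_dir, exist_ok=True)

archive_path = os.path.join(dest_dir, "flower_photos.tgz")
if not os.path.exists(archive_path):
    urllib.request.urlretrieve(url, archive_path)   # download the archive

with tarfile.open(archive_path, "r:gz") as tar:
    tar.extractall(dest_dir)                        # creates flower_photos/ inside flower_data/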

split_data.py

import os
import random
from shutil import copy, rmtree


def mk_file(file_path: str):
    # recreate the folder from scratch
    if os.path.exists(file_path):
        rmtree(file_path)
    os.makedirs(file_path)


def main():
    random.seed(0)
    split_rate = 0.1  # 10% of each class goes to the validation set

    cwd = os.getcwd()
    data_root = os.path.join(cwd, "flower_data")
    origin_flower_path = os.path.join(data_root, "flower_photos")
    assert os.path.exists(origin_flower_path)

    flower_class = [cla for cla in os.listdir(origin_flower_path)
                    if os.path.isdir(os.path.join(origin_flower_path, cla))]

    # create train/<class> and val/<class> folders
    train_root = os.path.join(data_root, "train")
    mk_file(train_root)
    for cla in flower_class:
        mk_file(os.path.join(train_root, cla))

    val_root = os.path.join(data_root, "val")
    mk_file(val_root)
    for cla in flower_class:
        mk_file(os.path.join(val_root, cla))

    # copy each image into either val/ or train/
    for cla in flower_class:
        cla_path = os.path.join(origin_flower_path, cla)
        images = os.listdir(cla_path)
        num = len(images)
        eval_index = random.sample(images, k=int(num * split_rate))
        for index, image in enumerate(images):
            if image in eval_index:
                image_path = os.path.join(cla_path, image)
                new_path = os.path.join(val_root, cla)
                copy(image_path, new_path)
            else:
                image_path = os.path.join(cla_path, image)
                new_path = os.path.join(train_root, cla)
                copy(image_path, new_path)
            print("\r[{}] processing [{}/{}]".format(cla, index + 1, num), end="")
        print()

    print("processing done!")


if __name__ == '__main__':
    main()
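As a quick sanity check (my own addition, not part of the original script), the snippet below counts how many images ended up in each class folder after the split; it assumes you run it from the same directory as split_data.py:

# Count images per class in train/ and val/ after running split_data.py.
import os

data_root = os.path.join(os.getcwd(), "flower_data")
for split in ("train", "val"):
    split_dir = os.path.join(data_root, split)
    for cla in sorted(os.listdir(split_dir)):
        n = len(os.listdir(os.path.join(split_dir, cla)))
        print("{:5s} {:12s} {:4d} images".format(split, cla, n))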

2. The network model

model.py

import torch.nn as nn
import torch


class AlexNet(nn.Module):
    def __init__(self, num_classes=1000, init_weights=False):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 48, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(48, 128, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(128, 192, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(128 * 6 * 6, 2048),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(2048, 2048),
            nn.ReLU(inplace=True),
            nn.Linear(2048, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, start_dim=1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
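Before training, it can be worth confirming the tensor shapes the classifier expects. This quick check is my own addition; the 128 * 6 * 6 figure comes from applying the feature extractor above to a 224x224 input:

# Feed a dummy 224x224 batch through AlexNet and confirm the feature map size.
import torch
from model import AlexNet

net = AlexNet(num_classes=5, init_weights=True)
x = torch.randn(1, 3, 224, 224)   # N x C x H x W
feats = net.features(x)
print(feats.shape)                # expected: torch.Size([1, 128, 6, 6])
print(net(x).shape)               # expected: torch.Size([1, 5])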

3. Training the network

train.py

import torch
import torch.nn as nn
from torchvision import transforms, datasets, utils
import matplotlib.pyplot as plt
import numpy as np
import torch.optim as optim
from model import AlexNet
import os
import json
import time

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
with open(os.path.join("train.log"), "a") as log:
    log.write(str(device) + "\n")

data_transform = {
    "train": transforms.Compose([transforms.RandomResizedCrop(224),
                                 transforms.RandomHorizontalFlip(p=0.5),
                                 transforms.ToTensor(),
                                 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
    "val": transforms.Compose([transforms.Resize((224, 224)),
                               transforms.ToTensor(),
                               transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])}

data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))
image_path = data_root + "/jqsj/data_set/flower_data/"

train_dataset = datasets.ImageFolder(root=image_path + "/train",
                                     transform=data_transform["train"])
train_num = len(train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=32,
                                           shuffle=True,
                                           num_workers=0)

validate_dataset = datasets.ImageFolder(root=image_path + "/val",
                                        transform=data_transform["val"])
val_num = len(validate_dataset)
validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                              batch_size=32,
                                              shuffle=True,
                                              num_workers=0)

# save the index-to-class mapping so predict.py can look up class names later
flower_list = train_dataset.class_to_idx
cla_dict = dict((val, key) for key, val in flower_list.items())
json_str = json.dumps(cla_dict, indent=4)
with open('class_indices.json', 'w') as json_file:
    json_file.write(json_str)

net = AlexNet(num_classes=5, init_weights=True)
net.to(device)
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0002)

save_path = './AlexNet.pth'
best_acc = 0.0
for epoch in range(150):
    # ---------- training ----------
    net.train()
    running_loss = 0.0
    time_start = time.perf_counter()
    for step, data in enumerate(train_loader, start=0):
        images, labels = data
        optimizer.zero_grad()
        outputs = net(images.to(device))
        loss = loss_function(outputs, labels.to(device))
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

        # simple text progress bar
        rate = (step + 1) / len(train_loader)
        a = "*" * int(rate * 50)
        b = "." * int((1 - rate) * 50)
        with open(os.path.join("train.log"), "a") as log:
            log.write(str("\rtrain loss: {:^3.0f}%[{}->{}]{:.3f}".format(int(rate * 100), a, b, loss)) + "\n")
        print("\rtrain loss: {:^3.0f}%[{}->{}]{:.3f}".format(int(rate * 100), a, b, loss), end="")
    print()
    with open(os.path.join("train.log"), "a") as log:
        log.write(str('%f s' % (time.perf_counter() - time_start)) + "\n")
    print('%f s' % (time.perf_counter() - time_start))

    # ---------- validation ----------
    net.eval()
    acc = 0.0
    with torch.no_grad():
        for val_data in validate_loader:
            val_images, val_labels = val_data
            outputs = net(val_images.to(device))
            predict_y = torch.max(outputs, dim=1)[1]
            acc += (predict_y == val_labels.to(device)).sum().item()
        val_accurate = acc / val_num
        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)   # keep only the best checkpoint
        with open(os.path.join("train.log"), "a") as log:
            log.write(str('[epoch %d] train_loss: %.3f test_accuracy: %.3f \n' %
                          (epoch + 1, running_loss / step, val_accurate)) + "\n")
        print('[epoch %d] train_loss: %.3f test_accuracy: %.3f \n' %
              (epoch + 1, running_loss / step, val_accurate))

with open(os.path.join("train.log"), "a") as log:
    log.write(str('Finished Training') + "\n")
print('Finished Training')

After training, the model reaches about 94% accuracy on the validation set; each epoch's progress and accuracy are also appended to train.log.
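If you want to pull those numbers back out of train.log, here is a small parser sketch (my own addition, assuming the log line format written by train.py above):

# Find the best validation accuracy recorded in train.log.
import re

pattern = re.compile(r"\[epoch (\d+)\] train_loss: ([\d.]+) +test_accuracy: ([\d.]+)")
best = 0.0
with open("train.log") as f:
    for line in f:
        m = pattern.search(line)
        if m:
            epoch, loss, acc = int(m.group(1)), float(m.group(2)), float(m.group(3))
            best = max(best, acc)
print("best validation accuracy: {:.1%}".format(best))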

4. Running predictions with the model

predict.py

import torch
from model import AlexNet
from PIL import Image
from torchvision import transforms
import matplotlib.pyplot as plt
import json

data_transform = transforms.Compose(
    [transforms.Resize((224, 224)),
     transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# load the test image and add a batch dimension
img = Image.open("pgy2.jpg")
img = data_transform(img)
img = torch.unsqueeze(img, dim=0)

# read the class mapping written by train.py
try:
    json_file = open('./class_indices.json', 'r')
    class_indict = json.load(json_file)
except Exception as e:
    print(e)
    exit(-1)

# rebuild the network and load the trained weights
model = AlexNet(num_classes=5)
model_weight_path = "./AlexNet.pth"
model.load_state_dict(torch.load(model_weight_path, map_location='cpu'))
model.eval()

with torch.no_grad():
    output = torch.squeeze(model(img))
    predict = torch.softmax(output, dim=0)
    predict_cla = torch.argmax(predict).numpy()
print(class_indict[str(predict_cla)], predict[predict_cla].item())
plt.show()

Next, run predict.py on one of the flower images. The output is a single prediction: the class name (daisy) and its softmax score of 1.0, i.e. 100% (the score ranges from 0 to 1, so 1.0 corresponds to 100%).
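If you would rather see the score of every class instead of only the arg-max, the lines below can be appended after the torch.no_grad() block in predict.py (my own tweak, reusing the predict and class_indict variables defined there):

# Print the softmax score of all five classes, not just the top prediction.
for idx, prob in enumerate(predict):
    print("{:12s} {:.3f}".format(class_indict[str(idx)], prob.item()))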

To make the network easier to use, the next step is to wrap it in a visual web interface.

II. Building the flower recognition system (Flask)

1. Building the page

upload.html

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>李运辰-花卉识别系统v1.0</title>
    <link rel="stylesheet" type="text/css" href="../static/css/bootstrap.min.css">
    <link rel="stylesheet" type="text/css" href="../static/css/fileinput.css">
    <script src="../static/js/jquery-2.1.4.min.js"></script>
    <script src="../static/js/bootstrap.min.js"></script>
    <script src="../static/js/fileinput.js"></script>
    <script src="../static/js/locales/zh.js"></script>
</head>
<body>
    <h1 align="center">李运辰-花卉识别系统v1.0</h1>
    <div align="center">
        <form action="" enctype='multipart/form-data' method='POST'>
            <input type="file" name="file" class="file" data-show-preview="false" style="margin-top:20px;"/>
            <br>
            <input type="submit" value="上传" class="btn btn-primary button-new" style="margin-top:15px;"/>
        </form>
    </div>
</body>
</html>

2. Calling the neural network model

main.py

from flask import Flask, render_template, request, redirect, url_for, make_response, jsonify
from werkzeug.utils import secure_filename
import os
import time
from datetime import timedelta

import torch
from model import AlexNet
from PIL import Image
from torchvision import transforms
import matplotlib.pyplot as plt
import json

# load the class mapping and the trained model once, at startup
try:
    json_file = open('./class_indices.json', 'r')
    class_indict = json.load(json_file)
except Exception as e:
    print(e)
    exit(-1)

model = AlexNet(num_classes=5)
model_weight_path = "./AlexNet.pth"
model.load_state_dict(torch.load(model_weight_path, map_location='cpu'))
model.eval()

ALLOWED_EXTENSIONS = set(['png', 'jpg', 'JPG', 'PNG', 'bmp'])


def allowed_file(filename):
    return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS


app = Flask(__name__)
app.send_file_max_age_default = timedelta(seconds=1)


def tran(img_path):
    # same preprocessing as predict.py: resize, to tensor, normalize, add batch dim
    data_transform = transforms.Compose(
        [transforms.Resize((224, 224)),
         transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    img = Image.open(img_path)
    img = data_transform(img)
    img = torch.unsqueeze(img, dim=0)
    return img


@app.route('/upload', methods=['POST', 'GET'])
def upload():
    path = ""
    if request.method == 'POST':
        f = request.files['file']
        if not (f and allowed_file(f.filename)):
            return jsonify({"error": 1001, "msg": "请检查上传的图片类型,仅限于png、PNG、jpg、JPG、bmp"})

        basepath = os.path.dirname(__file__)
        path = secure_filename(f.filename)
        upload_path = os.path.join(basepath, 'static/images', secure_filename(f.filename))
        print(path)

        # save the upload first, then run it through the network
        f.save(upload_path)
        img = tran(os.path.join('static/images', path))
        with torch.no_grad():
            output = torch.squeeze(model(img))
            predict = torch.softmax(output, dim=0)
            predict_cla = torch.argmax(predict).numpy()
        res = class_indict[str(predict_cla)]
        pred = predict[predict_cla].item()

        # map the English class name to its Chinese label for display
        res_chinese = ""
        if res == "daisy":
            res_chinese = "雏菊"
        if res == "dandelion":
            res_chinese = "蒲公英"
        if res == "roses":
            res_chinese = "玫瑰"
        if res == "sunflower":
            res_chinese = "向日葵"
        if res == "tulips":
            res_chinese = "郁金香"

        pred = pred * 100
        return render_template('upload_ok.html', path=path, res_chinese=res_chinese, pred=pred, val1=time.time())

    return render_template('upload.html')


if __name__ == '__main__':
    app.run(host='127.0.0.1', port=80, debug=True)

3. The recognition result page (upload_ok.html)

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>李运辰-花卉识别系统v1.0</title>
    <link rel="stylesheet" type="text/css" href="../static/css/bootstrap.min.css">
    <link rel="stylesheet" type="text/css" href="../static/css/fileinput.css">
    <script src="../static/js/jquery-2.1.4.min.js"></script>
    <script src="../static/js/bootstrap.min.js"></script>
    <script src="../static/js/fileinput.js"></script>
    <script src="../static/js/locales/zh.js"></script>
</head>
<body>
    <h1 align="center">李运辰-花卉识别系统v1.0</h1>
    <div align="center">
        <form action="" enctype='multipart/form-data' method='POST'>
            <input type="file" name="file" class="file" data-show-preview="false" style="margin-top:20px;"/>
            <br>
            <input type="submit" value="上传" class="button-new btn btn-primary" style="margin-top:15px;"/>
        </form>
        <p style="size:15px;color:blue;">识别结果:{{res_chinese}}</p>
        <br>
        <p style="size:15px;color:red;">准确率:{{pred}}%</p>
        <img src="{{ './static/images/'+path }}" width="400" height="400" alt=""/>
    </div>
</body>
</html>

4. Starting the system

python main.py

Then open the following URL in a browser:

http://127.0.0.1/upload

The upload page appears. Choose an image, click the 上传 (upload) button, and the result page displays the recognized flower and its confidence.
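For a quick smoke test without a browser, you can also POST an image to the endpoint with the requests library. This is my own sketch, and test.jpg is a hypothetical file name:

# POST a local image to the running Flask app and print the response.
import requests

url = "http://127.0.0.1/upload"
with open("test.jpg", "rb") as f:
    resp = requests.post(url, files={"file": ("test.jpg", f, "image/jpeg")})
print(resp.status_code)
print(resp.text[:500])   # the rendered upload_ok.html, or the JSON error from main.py

A successful request returns the rendered upload_ok.html; an unsupported file type returns the JSON error defined in main.py.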

III. Summary

OK, the flower recognition system is now complete. Pretty simple, right? I only built it because I was taking this machine vision elective, and it was a nice chance to review what I had learned earlier.

If you have any questions, feel free to leave a comment below.

Finally, the complete source code for the system can be obtained from the author's WeChat public account (Python爬虫数据分析挖掘) by replying "花识别系统".

