Smile Recognition with a Convolutional Neural Network in Python (Jupyter Notebook)
2022/2/2 14:14:20
This article walks through building a convolutional neural network for smile recognition in Python using a Jupyter notebook. It should be a useful reference for anyone tackling a similar problem; follow along if you are interested.
1. Place the downloaded dataset on drive D
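The exact layout is not spelled out here, but the paths used throughout the code below assume the following structure (the folder names 0 and 1 are the dataset's own labels):

D:\smile
└── datasets
    └── train_folder
        ├── 0   (non-smile images)
        └── 1   (smile images)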
2. Train on the smile dataset following the cats-and-dogs training recipe
(1) First, rename the images inside the two folders under train_folder
#coding=gbk
import os

def rename():
    path = input("Enter the folder path (e.g. D:\\picture): ")
    name = input("Enter the filename prefix: ")
    startNumber = input("Enter the starting number: ")
    fileType = input("Enter the extension (e.g. .jpg, .txt): ")
    print("Generating filenames of the form " + name + startNumber + fileType)
    count = 0
    filelist = os.listdir(path)
    for files in filelist:
        Olddir = os.path.join(path, files)
        if os.path.isdir(Olddir):   # skip subdirectories
            continue
        Newdir = os.path.join(path, name + str(count + int(startNumber)) + fileType)
        os.rename(Olddir, Newdir)
        count += 1
    print("Renamed " + str(count) + " files")

rename()
Run the script once for each of the two folders (0 and 1); an example session is shown below.
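For the smile folder (1), for instance, the inputs would look like this; the prefix smile and start number 0 produce the smile0.jpg, smile1.jpg, … names that the copy script below expects (folder 0 is renamed the same way with the unsmile prefix):

Enter the folder path (e.g. D:\picture): D:\smile\datasets\train_folder\1
Enter the filename prefix: smile
Enter the starting number: 0
Enter the extension (e.g. .jpg, .txt): .jpg
Generating filenames of the form smile0.jpg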
(2) Sort the images into training, validation, and test sets
import os, shutil

# Paths of the original (uncompressed) dataset
original_dataset_dir1 = 'D:\\smile\\datasets\\train_folder\\1'   # smile faces
original_dataset_dir0 = 'D:\\smile\\datasets\\train_folder\\0'   # non-smile faces

# Directory where we will store our smaller dataset
# (os.mkdir raises an error if the directory already exists,
# so delete D:\smile1 before re-running this cell)
base_dir = 'D:\\smile1'
os.mkdir(base_dir)

# Directories for the training, validation, and test splits
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)

# The variable names below are kept from the cats-and-dogs example this
# recipe is adapted from: "cats" holds the smile images, "dogs" the non-smile ones.

# Directory for smile training images
train_cats_dir = os.path.join(train_dir, 'smile')
os.mkdir(train_cats_dir)
# Directory for non-smile training images
train_dogs_dir = os.path.join(train_dir, 'unsmile')
os.mkdir(train_dogs_dir)
# Directory for smile validation images
validation_cats_dir = os.path.join(validation_dir, 'smile')
os.mkdir(validation_cats_dir)
# Directory for non-smile validation images
validation_dogs_dir = os.path.join(validation_dir, 'unsmile')
os.mkdir(validation_dogs_dir)
# Directory for smile test images
test_cats_dir = os.path.join(test_dir, 'smile')
os.mkdir(test_cats_dir)
# Directory for non-smile test images
test_dogs_dir = os.path.join(test_dir, 'unsmile')
os.mkdir(test_dogs_dir)

# Copy the first 1000 smile images to train_cats_dir
fnames = ['smile{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir1, fname)
    dst = os.path.join(train_cats_dir, fname)
    shutil.copyfile(src, dst)

# Copy the next 500 smile images to validation_cats_dir
fnames = ['smile{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
    src = os.path.join(original_dataset_dir1, fname)
    dst = os.path.join(validation_cats_dir, fname)
    shutil.copyfile(src, dst)

# Copy the next 500 smile images to test_cats_dir
fnames = ['smile{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir1, fname)
    dst = os.path.join(test_cats_dir, fname)
    shutil.copyfile(src, dst)

# Copy the first 1000 non-smile images to train_dogs_dir
fnames = ['unsmile{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir0, fname)
    dst = os.path.join(train_dogs_dir, fname)
    shutil.copyfile(src, dst)

# Copy the next 500 non-smile images to validation_dogs_dir
fnames = ['unsmile{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
    src = os.path.join(original_dataset_dir0, fname)
    dst = os.path.join(validation_dogs_dir, fname)
    shutil.copyfile(src, dst)

# Copy the next 500 non-smile images to test_dogs_dir
fnames = ['unsmile{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir0, fname)
    dst = os.path.join(test_dogs_dir, fname)
    shutil.copyfile(src, dst)
(3) Count how many images we have in each split
print('total training smile images:', len(os.listdir(train_cats_dir)))
print('total training unsmile images:', len(os.listdir(train_dogs_dir)))
print('total validation smile images:', len(os.listdir(validation_cats_dir)))
print('total validation unsmile images:', len(os.listdir(validation_dogs_dir)))
print('total test smile images:', len(os.listdir(test_cats_dir)))
print('total test unsmile images:', len(os.listdir(test_dogs_dir)))
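If the copies above succeeded, this should print:

total training smile images: 1000
total training unsmile images: 1000
total validation smile images: 500
total validation unsmile images: 500
total test smile images: 500
total test unsmile images: 500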
(4) Build the convolutional network
from keras import layers
from keras import models

model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
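As a sanity check on the summary: each unpadded 3x3 convolution trims 2 pixels per side and each max-pooling halves the feature map, so the shapes work out to

150x150x3 -> Conv32: 148x148 -> pool: 74x74
          -> Conv64: 72x72   -> pool: 36x36
          -> Conv128: 34x34  -> pool: 17x17
          -> Conv128: 15x15  -> pool: 7x7
Flatten: 7*7*128 = 6272 -> Dense(512) -> Dense(1)

and model.summary() should report about 3.45 million trainable parameters, most of them in the 6272-to-512 dense layer.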
(5) Use image generators to read the data from disk and preprocess it
from keras import optimizers

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])

from keras.preprocessing.image import ImageDataGenerator

# All images will be rescaled by 1/255
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
        # This is the target directory
        train_dir,
        # All images will be resized to 150x150
        target_size=(150, 150),
        batch_size=20,
        # Since we use binary cross-entropy loss, we need binary labels
        class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=20,
        class_mode='binary')
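With the splits created above, the two calls to flow_from_directory should report:

Found 2000 images belonging to 2 classes.
Found 1000 images belonging to 2 classes.

Note that flow_from_directory assigns class indices alphabetically, so smile = 0 and unsmile = 1; this matters when interpreting the sigmoid output in section 3 below.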
(6) Train the model
# 100 steps x batch size 20 = 2000 images, i.e. one full pass
# over the training set per epoch
history = model.fit_generator(
      train_generator,
      steps_per_epoch=100,
      epochs=30,
      validation_data=validation_generator,
      validation_steps=50)
(7) Save the trained model
model.save('D:\\smile1\\smiles_and_unsmiles_small_1.h5')
(8) Plot the model's loss and accuracy on the training and validation data
import matplotlib.pyplot as plt

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()
(9) Use data augmentation
With only 2000 training images, the plots above typically show overfitting (training accuracy keeps climbing while validation accuracy stalls), so we generate extra training samples with random transformations:
datagen = ImageDataGenerator(
      rotation_range=40,
      width_shift_range=0.2,
      height_shift_range=0.2,
      shear_range=0.2,
      zoom_range=0.2,
      horizontal_flip=True,
      fill_mode='nearest')

# This is the module with image preprocessing utilities
from keras.preprocessing import image

fnames = [os.path.join(train_cats_dir, fname) for fname in os.listdir(train_cats_dir)]

# We pick one image to "augment"
img_path = fnames[3]

# Read the image and resize it
img = image.load_img(img_path, target_size=(150, 150))

# Convert it to a Numpy array with shape (150, 150, 3)
x = image.img_to_array(img)

# Reshape it to (1, 150, 150, 3)
x = x.reshape((1,) + x.shape)

# The .flow() command below generates batches of randomly transformed images.
# It loops indefinitely, so we need to "break" out of the loop at some point!
i = 0
for batch in datagen.flow(x, batch_size=1):
    plt.figure(i)
    imgplot = plt.imshow(image.array_to_img(batch[0]))
    i += 1
    if i % 4 == 0:
        break

plt.show()

# Rebuild the model, this time with a Dropout layer before the classifier
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])
(10) Train the network with data augmentation and dropout
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

# Note that the validation data should not be augmented!
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
        # This is the target directory
        train_dir,
        # All images will be resized to 150x150
        target_size=(150, 150),
        batch_size=32,
        # Since we use binary cross-entropy loss, we need binary labels
        class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=32,
        class_mode='binary')

history = model.fit_generator(
      train_generator,
      steps_per_epoch=100,
      epochs=100,
      validation_data=validation_generator,
      validation_steps=50)
(11) Save the model
model.save('D:\\smile1\\smiles_and_unsmiles_small_2.h5')
(12) Plot the model's loss and accuracy on the training and validation data
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()
3. Smile recognition with the convolutional network trained above
# Detect faces in a video stream or webcam and classify each as smile/unsmile
import cv2
import numpy as np
import dlib
from keras.models import load_model

model = load_model('D:\\smile1\\smiles_and_unsmiles_small_2.h5')
detector = dlib.get_frontal_face_detector()
video = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX

def rec(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    dets = detector(gray, 1)   # returns an (possibly empty) list of face rectangles
    for face in dets:
        left = face.left()
        top = face.top()
        right = face.right()
        bottom = face.bottom()
        cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
        # Crop the face, resize to the network's input size, and rescale
        img1 = cv2.resize(img[top:bottom, left:right], dsize=(150, 150))
        img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
        img1 = np.array(img1) / 255.
        img_tensor = img1.reshape(-1, 150, 150, 3)
        prediction = model.predict(img_tensor)
        # smile = 0, unsmile = 1 (alphabetical class indices)
        if prediction[0][0] > 0.5:
            result = 'unsmile'
        else:
            result = 'smile'
        cv2.putText(img, result, (left, top), font, 2, (0, 255, 0), 2, cv2.LINE_AA)
    cv2.imshow('Video', img)

while video.isOpened():
    res, img_rd = video.read()
    if not res:
        break
    rec(img_rd)
    if cv2.waitKey(5) & 0xFF == ord('q'):
        break

video.release()
cv2.destroyAllWindows()
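Without a webcam, here is a minimal sketch for testing the saved model on a single image file; the file name test.jpg is a placeholder, and the preprocessing mirrors the training pipeline above:

import cv2
import numpy as np
from keras.models import load_model

model = load_model('D:\\smile1\\smiles_and_unsmiles_small_2.h5')

img = cv2.imread('test.jpg')                 # placeholder path to any face image
img = cv2.resize(img, (150, 150))            # the network expects 150x150 input
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)   # OpenCV loads BGR; training used RGB
x = img.reshape(-1, 150, 150, 3) / 255.      # rescale exactly as during training

# smile = 0, unsmile = 1 (alphabetical class indices from flow_from_directory)
print('unsmile' if model.predict(x)[0][0] > 0.5 else 'smile')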
Reference:
https://blog.csdn.net/qq_42585108/article/details/107107313
That concludes this walkthrough of smile recognition with a convolutional neural network in Python and Jupyter notebook. I hope it helps, and thanks for supporting 为之网!