搜索
您的当前位置:首页正文

黑马深度学习和CV入门4——图像分类

来源:好走旅游网

图像分类简介

AlexNet

import tensorflow as tf
from tensorflow.keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt

# Load MNIST: images come back as (N, 28, 28) uint8 arrays, labels as (N,) digits 0-9.
(train_images,train_label),(test_images,test_label)=mnist.load_data()
# Dimension adjustment: append a trailing channel axis -> (N, 28, 28, 1)
# so the arrays match Conv2D's expected NHWC layout.
train_images=np.reshape(train_images,(train_images.shape[0],train_images.shape[1],train_images.shape[2],1))
test_images = np.reshape(test_images,(test_images.shape[0],test_images.shape[1],test_images.shape[2],1))
# Sample a subset of the training data (training AlexNet on all of MNIST is slow on CPU).
def get_train(size):
    """Randomly sample `size` training images, padded/resized to 227x227 for AlexNet.

    Returns a tuple (images, labels) where images is a float ndarray of
    shape (size, 227, 227, 1) and labels the matching label slice.
    """
    # Random indices into the training set (sampling with replacement).
    chosen = np.random.randint(0, train_images.shape[0], size)
    # Pad-and-resize the selected images up to AlexNet's 227x227 input size.
    batch = tf.image.resize_with_pad(train_images[chosen], 227, 227)
    return batch.numpy(), train_label[chosen]
# Draw a random batch from the test split.
def get_test(size):
    """Randomly sample `size` test images, padded/resized to 227x227 for AlexNet.

    Returns a tuple (images, labels) mirroring get_train.
    """
    # Random indices into the test set (sampling with replacement).
    picks = np.random.randint(0, test_images.shape[0], size)
    # Pad-and-resize the selected images up to AlexNet's 227x227 input size.
    sampled = tf.image.resize_with_pad(test_images[picks], 227, 227)
    return sampled.numpy(), test_label[picks]
# Sampling results: small demo-sized subsets so the example trains quickly.
train_images,train_label=get_train(256)
test_images,test_label = get_test(128)
# BUG FIX: the original used astype(np.int8), which overflows for pixel values
# above 127 (MNIST pixels span 0-255 after resize), wrapping them to negative
# numbers and corrupting the preview image. uint8 covers the full 0-255 range.
plt.imshow(train_images[4].astype(np.uint8).squeeze(),cmap='gray')
# Build the AlexNet model as a plain Keras Sequential stack.
# Input shape is inferred on the first call (227x227x1 after get_train's resize).
net = tf.keras.models.Sequential([
    # Conv layer: 96 kernels of 11x11, stride 4, ReLU activation.
    tf.keras.layers.Conv2D(filters=96,kernel_size=11,strides=4,activation="relu"),
    # Max pooling: 3x3 window, stride 2 (overlapping pooling as in AlexNet).
    tf.keras.layers.MaxPool2D(pool_size=3,strides=2),
    # Conv layer: 256 kernels of 5x5, stride 1, 'same' padding, ReLU activation.
    tf.keras.layers.Conv2D(filters=256,kernel_size=5,padding="same",activation="relu"),
    # Max pooling: 3x3 window, stride 2.
    tf.keras.layers.MaxPool2D(pool_size=3,strides=2),
    # Conv layer: 384 kernels of 3x3, stride 1, 'same' padding, ReLU activation.
    tf.keras.layers.Conv2D(filters=384,kernel_size=3,padding="same",activation="relu"),
    # Conv layer: 384 kernels of 3x3, stride 1, 'same' padding, ReLU activation.
    tf.keras.layers.Conv2D(filters=384,kernel_size=3,padding="same",activation="relu"),
    # Conv layer: 256 kernels of 3x3, stride 1, 'same' padding, ReLU activation.
    tf.keras.layers.Conv2D(filters=256,kernel_size=3,padding="same",activation="relu"),
    # Max pooling: 3x3 window, stride 2.
    tf.keras.layers.MaxPool2D(pool_size=3,strides=2),
    # Flatten the feature maps into a 1-D vector for the dense head.
    tf.keras.layers.Flatten(),
    # Fully-connected layer: 4096 units, ReLU activation.
    tf.keras.layers.Dense(4096,activation="relu"),
    # Dropout for regularization (rate 0.5, as in the original AlexNet).
    tf.keras.layers.Dropout(0.5),
    # Fully-connected layer: 4096 units, ReLU activation.
    tf.keras.layers.Dense(4096,activation="relu"),
    # Dropout for regularization.
    tf.keras.layers.Dropout(0.5),
    # Output layer: 10 units (one per digit class), softmax activation.
    tf.keras.layers.Dense(10,activation="softmax")
])
# Optimizer, loss, and metric: plain SGD; sparse_categorical_crossentropy matches
# the integer (non-one-hot) labels; the softmax output means from_logits stays False.
net.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),loss=tf.keras.losses.sparse_categorical_crossentropy,
            metrics=['accuracy'])
# Train on the 256-image sample; 10% held out for validation each epoch.
net.fit(train_images,train_label,batch_size=128,epochs=3,validation_split=0.1,verbose=1)






VGG网络








VGG手写数字识别




import tensorflow as tf
from tensorflow.keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt

# Load MNIST: images as (N, 28, 28) uint8 arrays, labels as (N,) digits 0-9.
(train_images,train_labels),(test_images,test_labels)=mnist.load_data()
# Dimension adjustment: append a trailing channel axis -> (N, 28, 28, 1)
# so the arrays match Conv2D's expected NHWC layout.
train_images=np.reshape(train_images,(train_images.shape[0],train_images.shape[1],train_images.shape[2],1))
test_images = np.reshape(test_images,(test_images.shape[0],test_images.shape[1],test_images.shape[2],1))
# Two helpers that draw random subsets of the data for a quick demo run.
# Fetch a random training batch.
def get_train(size):
    """Sample `size` random training images, padded/resized to 224x224 (VGG input size).

    Returns (images, labels): a float ndarray of shape (size, 224, 224, 1)
    and the matching label slice.
    """
    # Random indices into the training set (sampling with replacement).
    idx = np.random.randint(0, np.shape(train_images)[0], size)
    # Pad-and-resize the chosen images to VGG's 224x224 input resolution.
    imgs = tf.image.resize_with_pad(train_images[idx], 224, 224)
    return imgs.numpy(), train_labels[idx]
# Fetch a random test batch.
def get_test(size):
    """Sample `size` random test images, padded/resized to 224x224 (VGG input size).

    Returns (images, labels) mirroring get_train.
    """
    # Random indices into the test set (sampling with replacement).
    idx = np.random.randint(0, np.shape(test_images)[0], size)
    # Pad-and-resize the chosen images to VGG's 224x224 input resolution.
    imgs = tf.image.resize_with_pad(test_images[idx], 224, 224)
    return imgs.numpy(), test_labels[idx]
# Draw the training and test subsets (small so the demo trains quickly on CPU).
train_images,train_labels=get_train(256)
test_images,test_labels = get_test(128)


# Build one VGG conv block: given the number of conv layers and filters per layer.
def vgg_block(num_convs,num_filters):
    """Return a Sequential block of `num_convs` 3x3 same-padded ReLU convs
    followed by ONE 2x2/stride-2 max-pool.

    BUG FIX: the original had both the MaxPool2D add and `return blk` indented
    inside the for loop, so the block always contained exactly one conv and one
    pool no matter what `num_convs` was. Pooling and the return now follow the
    loop, matching the VGG design (spatial size is halved once per block).
    """
    blk = tf.keras.models.Sequential()
    # Stack the requested number of identical conv layers.
    for _ in range(num_convs):
        blk.add(tf.keras.layers.Conv2D(num_filters,kernel_size=3,
                                       padding='same',activation='relu'))
    # Single down-sampling pool at the end of the block.
    blk.add(tf.keras.layers.MaxPool2D(pool_size=2,strides=2))
    return blk
# Five conv blocks as (num conv layers, num filters) pairs — the VGG-16
# convolutional configuration (2+2+3+3+3 = 13 conv layers).
conv_arch = ((2,64),(2,128),(3,256),(3,512),(3,512))
# Assemble the full VGG network: conv blocks followed by the dense classifier.
def vgg(conv_arch):
    """Return a VGG model built from `conv_arch`, a sequence of
    (num_convs, num_filters) pairs, topped with the standard
    4096-4096-10 dense classifier head.
    """
    model = tf.keras.models.Sequential()
    # Convolutional feature extractor: one vgg_block per config entry.
    for (n_convs, n_filters) in conv_arch:
        model.add(vgg_block(n_convs, n_filters))
    # Classifier head appended as a nested Sequential, mirroring the conv part.
    model.add(tf.keras.models.Sequential([
        # Collapse the feature maps into a single 1-D vector.
        tf.keras.layers.Flatten(),
        # Fully-connected layer: 4096 units, ReLU.
        tf.keras.layers.Dense(4096, activation='relu'),
        # Dropout regularization (rate 0.5).
        tf.keras.layers.Dropout(0.5),
        # Fully-connected layer: 4096 units, ReLU.
        tf.keras.layers.Dense(4096, activation='relu'),
        # Dropout regularization (rate 0.5).
        tf.keras.layers.Dropout(0.5),
        # Output layer: 10 classes with softmax.
        tf.keras.layers.Dense(10, activation='softmax'),
    ]))
    return model
# Instantiate the network from the block configuration.
net = vgg(conv_arch)
# Optimizer, loss, and metric: plain SGD (momentum disabled);
# sparse_categorical_crossentropy matches the integer (non-one-hot) labels.
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01,momentum=0.0)
net.compile(optimizer=optimizer,
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy'])
# Train on the 256-image sample; 10% held out for validation each epoch.
net.fit(train_images,train_labels,batch_size=128,epochs=3,verbose=1,validation_split=0.1)



GoogLeNet







GoogLeNet的构建













因篇幅问题不能全部显示,请点此查看更多更全内容

Top