In the previous article, we introduced how to train and optimize a neural network with gradient descent. Now we will use a real dataset, FashionMNIST, to implement the full workflow of building, training, and optimizing a neural network. Along the way, we will get a first look at how neural networks are used to model and solve a practical problem.
The FashionMNIST Dataset
FashionMNIST is a dataset of clothing images, with a training set of 60,000 samples and a test set of 10,000 samples. The samples fall into 10 label classes: T-Shirt/Top, Trouser, Pullover, Dress, Coat, Sandals, Shirt, Sneaker, Bag, and Ankle boots. Each sample is a 28×28 grayscale image.
In PyTorch, we can use torchvision.datasets to load the FashionMNIST dataset.
import torchvision
import torchvision.transforms as transforms
mnist = torchvision.datasets.FashionMNIST(
    root="./data"
    , train=True                        # use the training split
    , download=False                    # set download=True on the first run to fetch the data
    , transform=transforms.ToTensor()   # convert the images to tensors
)
mnist
# the output is as follows:
> Dataset FashionMNIST
      Number of datapoints: 60000
      Root location: ./data
      Split: Train
      StandardTransform
  Transform: ToTensor()
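As a quick sanity check, we can also query the dataset object directly (a small sketch using the standard torchvision Dataset API):
len(mnist)       # 60000, one entry per training image
mnist.classes    # the list of the ten text label names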
Let's look at the first sample in mnist:
mnist[0]
# the output is as follows:
> (tensor([[[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0000, 0.0000, 0.0000],
            [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0000, 0.0000, 0.0000],
            [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0000, 0.0000, 0.0000],
            [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0000, 0.0000, 0.0000, 0.0039, 0.0000, 0.0000, 0.0510,
             0.2863, 0.0000, 0.0000, 0.0039, 0.0157, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0039, 0.0039, 0.0000],
            [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0000, 0.0000, 0.0000, 0.0118, 0.0000, 0.1412, 0.5333,
             0.4980, 0.2431, 0.2118, 0.0000, 0.0000, 0.0000, 0.0039, 0.0118,
             0.0157, 0.0000, 0.0000, 0.0118],
            [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0000, 0.0000, 0.0000, 0.0235, 0.0000, 0.4000, 0.8000,
             0.6902, 0.5255, 0.5647, 0.4824, 0.0902, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0471, 0.0392, 0.0000],
            [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
            ...
            [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000, 0.0000, 0.0000, 0.0000]]]),
   9)
We can use .data to view a sample's raw feature tensor:
mnist.data[0]
# the output is as follows:
> tensor([[  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
             0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0],
          [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
             0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0],
          [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
             0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0],
          [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   1,   0,
             0,  13,  73,   0,   0,   1,   4,   0,   0,   0,   0,   1,   1,   0],
          [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   3,   0,
            36, 136, 127,  62,  54,   0,   0,   0,   1,   3,   4,   0,   0,   3],
          [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   6,   0,
           102, 204, 176, 134, 144, 123,  23,   0,   0,   0,   0,  12,  10,   0],
          [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           155, 236, 207, 178, 107, 156, 161, 109,  64,  23,  77, 130,  72,  15],
          [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   1,   0,  69,
           207, 223, 218, 216, 216, 163, 127, 121, 122, 146, 141,  88, 172,  66],
          [  0,   0,   0,   0,   0,   0,   0,   0,   0,   1,   1,   1,   0, 200,
           232, 232, 233, 229, 223, 223, 215, 213, 164, 127, 123, 196, 229,   0],
          [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0, 183,
           225, 216, 223, 228, 235, 227, 224, 222, 224, 221, 223, 245, 173,   0],
          [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0, 193,
           228, 218, 213, 198, 180, 212, 210, 211, 213, 223, 220, 243, 202,   0],
          [  0,   0,   0,   0,   0,   0,   0,   0,   0,   1,   3,   0,  12, 219,
           220, 212, 218, 192, 169, 227, 208, 218, 224, 212, 226, 197, 209,  52],
          [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   6,   0,  99, 244,
          ...
          [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
             0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0],
          [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
             0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0]],
         dtype=torch.uint8)
Note: although both mnist[0][0] and mnist.data[0] return the feature tensor of the first sample, their shapes (and, as seen above, their dtypes) differ:
mnist[0][0].shape
torch.Size([1, 28, 28])
mnist.data[0].shape
torch.Size([28, 28])
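This difference comes from the ToTensor() transform: mnist[0][0] is the transformed sample, a float tensor scaled to [0, 1] with an extra channel dimension, while mnist.data[0] is the raw 28×28 uint8 image. A quick sketch to verify the relationship (it should print True):
import torch
torch.equal(mnist[0][0].squeeze(0), mnist.data[0].float() / 255)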
We can use .targets to view the sample labels:
mnist.targets[0:5]
tensor([9, 0, 0, 3, 0])
We can also visualize images from the dataset:
import matplotlib.pyplot as plt
from IPython import display
import numpy as np
def get_text_label(labels):
    text_label = ["T-Shirt/Top"
                  , "Trouser"
                  , "Pullover"
                  , "Dress"
                  , "Coat"
                  , "Sandals"
                  , "Shirt"
                  , "Sneaker"
                  , "Bag"
                  , "Ankle boots"
                  ]
    out = [text_label[int(i)] for i in labels]
    return out
def show_fashion_mnist(images, labels):
    display.set_matplotlib_formats("svg")
    _, figs = plt.subplots(1, len(images), figsize=(12, 12))
    # _ is the figure object, which we do not use here
    for f, img, lbl in zip(figs, images, labels):
        f.imshow(img.view((28, 28)).numpy())
        f.set_title(lbl)
        f.axes.get_xaxis().set_visible(False)
        f.axes.get_yaxis().set_visible(False)
    plt.show()
images, labels = [], []
for i in range(5):
    images.append(mnist[i][0])
    labels.append(mnist[i][1])
show_fashion_mnist(images, get_text_label(labels))
Building the Neural Network and Training It with Gradient Descent
1) Import the required libraries
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
import torch.optim as optim
from torch.utils.data import DataLoader
2) Build the neural network
Here we build a simple fully connected network: the first (hidden) layer has 128 neurons with a ReLU activation, and the second layer is the output layer.
class Model(nn.Module):
    def __init__(self, in_features=3, out_features=10):
        super(Model, self).__init__()
        self.linear1 = nn.Linear(in_features, 128, bias=False)
        self.output = nn.Linear(128, out_features, bias=False)

    def forward(self, x):
        x = x.view(-1, 28*28)                   # flatten each image into a 784-dim vector
        sigma1 = torch.relu(self.linear1(x))    # hidden layer + ReLU
        z_hat = self.output(sigma1)             # raw class scores (logits)
        # log_sigma2 = torch.log_softmax(z_hat, dim=1)
        return z_hat
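Before wiring up the training loop, we can sanity-check the shapes with a dummy batch (a minimal sketch; the batch size of 4 is arbitrary):
dummy = torch.rand(4, 1, 28, 28)                   # four fake grayscale images
net_check = Model(in_features=28*28, out_features=10)
net_check(dummy).shape                             # torch.Size([4, 10]): ten class scores per image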
3) Define the training function
def fit(net, batchdata, lr=0.01, gamma=0.9, epochs=5):
    criterion = CrossEntropyLoss()
    opt = optim.SGD(net.parameters(), lr=lr, momentum=gamma)
    correct = 0
    samples = 0
    for i_epoch in range(epochs):
        for i_batch, (xb, yb) in enumerate(batchdata):
            yb = yb.view(xb.shape[0])
            opt.zero_grad()                     # clear the accumulated gradients
            z_hat = net(xb)
            loss = criterion(z_hat, yb)
            loss.backward()
            opt.step()
            # track the running accuracy
            y_hat = torch.max(z_hat, dim=1)[1]
            # softmax/log_softmax is monotonically increasing in z_hat,
            # so comparing the raw z_hat values also yields y_hat
            correct += torch.sum(y_hat == yb)
            samples += xb.shape[0]
            # print progress and accuracy
            if (i_batch + 1) % 125 == 0 or i_batch == len(batchdata) - 1:
                print("Epoch{}: [{}/{}({:.0f}%)] \t Loss: {:.6f} \t Accuracy:{:.3f}".format(
                    i_epoch + 1
                    , samples
                    , len(batchdata.dataset) * epochs
                    , 100 * samples / (len(batchdata.dataset) * epochs)
                    , loss.data.item()
                    , float(100 * correct / samples)
                ))
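Note that CrossEntropyLoss expects the raw scores z_hat rather than probabilities: internally it applies log_softmax followed by the negative log-likelihood loss, which is why forward returns z_hat directly and the log_softmax line is commented out. A small sketch illustrating the equivalence (random values, purely for demonstration):
import torch.nn.functional as F
z = torch.randn(3, 10)                  # scores for 3 samples, 10 classes
y = torch.tensor([1, 4, 9])             # their labels
torch.allclose(F.cross_entropy(z, y),
               F.nll_loss(F.log_softmax(z, dim=1), y))   # True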
4) Train the model
lr = 0.15
gamma = 0
epochs = 10
bs = 128
batchdata = DataLoader(mnist, batch_size=bs, shuffle=True)
input_ = mnist.data[0].numel()            # number of features per sample (28*28 = 784)
output_ = len(mnist.targets.unique())     # number of classes (10)
torch.manual_seed(531)
net = Model(input_, output_)
fit(net, batchdata, lr=lr, gamma=gamma, epochs=epochs)
The training log prints as follows:
Epoch1: [16000/600000(3%)] Loss: 0.610511 Accuracy:65.381
Epoch1: [32000/600000(5%)] Loss: 0.608885 Accuracy:71.034
Epoch1: [48000/600000(8%)] Loss: 0.472458 Accuracy:73.779
Epoch1: [60000/600000(10%)] Loss: 0.441551 Accuracy:75.440
Epoch2: [76000/600000(13%)] Loss: 0.437011 Accuracy:76.824
Epoch2: [92000/600000(15%)] Loss: 0.399695 Accuracy:77.800
Epoch2: [108000/600000(18%)] Loss: 0.472766 Accuracy:78.640
Epoch2: [120000/600000(20%)] Loss: 0.320962 Accuracy:79.250
Epoch3: [136000/600000(23%)] Loss: 0.388086 Accuracy:79.923
Epoch3: [152000/600000(25%)] Loss: 0.462663 Accuracy:80.452
Epoch3: [168000/600000(28%)] Loss: 0.465469 Accuracy:80.903
Epoch3: [180000/600000(30%)] Loss: 0.380975 Accuracy:81.149
Epoch4: [196000/600000(33%)] Loss: 0.271844 Accuracy:81.539
Epoch4: [212000/600000(35%)] Loss: 0.367284 Accuracy:81.840
Epoch4: [228000/600000(38%)] Loss: 0.505810 Accuracy:82.173
Epoch4: [240000/600000(40%)] Loss: 0.463632 Accuracy:82.372
Epoch5: [256000/600000(43%)] Loss: 0.344181 Accuracy:82.652
Epoch5: [272000/600000(45%)] Loss: 0.363755 Accuracy:82.883
Epoch5: [288000/600000(48%)] Loss: 0.437372 Accuracy:83.074
Epoch5: [300000/600000(50%)] Loss: 0.415565 Accuracy:83.233
Epoch6: [316000/600000(53%)] Loss: 0.411210 Accuracy:83.421
Epoch6: [332000/600000(55%)] Loss: 0.282166 Accuracy:83.599
Epoch6: [348000/600000(58%)] Loss: 0.199532 Accuracy:83.764
Epoch6: [360000/600000(60%)] Loss: 0.287607 Accuracy:83.876
Epoch7: [376000/600000(63%)] Loss: 0.237670 Accuracy:84.042
...
Epoch10: [556000/600000(93%)] Loss: 0.357069 Accuracy:85.374
Epoch10: [572000/600000(95%)] Loss: 0.379821 Accuracy:85.481
Epoch10: [588000/600000(98%)] Loss: 0.388706 Accuracy:85.568
Epoch10: [600000/600000(100%)] Loss: 0.206537 Accuracy:85.629
As we can see, the loss fluctuates over the course of training, but the accuracy rises steadily: by the end of training, it has improved from an initial 65% to about 85%. Mature architectures can push accuracy on FashionMNIST well above 90%. Our goal here, however, is a first end-to-end look at building and optimizing a neural network, so we will not pursue further accuracy gains; neural network tuning involves many other aspects, which we will cover one by one in later articles.
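Note that the accuracy above is measured on the training data itself. To gauge how well the network generalizes, we could evaluate it on the held-out test split; a minimal sketch (not part of the original training code, and assuming the data lives under the same ./data root):
test_mnist = torchvision.datasets.FashionMNIST(
    root="./data"
    , train=False                        # use the test split
    , transform=transforms.ToTensor()
)
test_loader = DataLoader(test_mnist, batch_size=128, shuffle=False)
correct = 0
with torch.no_grad():                    # no gradients needed for evaluation
    for xb, yb in test_loader:
        correct += (net(xb).argmax(dim=1) == yb).sum().item()
print(correct / len(test_mnist))         # fraction of test images classified correctly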
Complete Code
The complete code for the whole process is as follows:
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
import torch.optim as optim
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from IPython import display
import numpy as np
lr = 0.15
gamma = 0
epochs = 10
bs = 128
mnist = torchvision.datasets.FashionMNIST(
    root="./data"
    , train=True                        # use the training split
    , download=False                    # set download=True on the first run to fetch the data
    , transform=transforms.ToTensor()   # convert the images to tensors
)
batchdata = DataLoader(mnist, batch_size=bs, shuffle=True)
input_ = mnist.data[0].numel()            # number of features per sample (28*28 = 784)
output_ = len(mnist.targets.unique())     # number of classes (10)
class Model(nn.Module):
    def __init__(self, in_features=3, out_features=10):
        super(Model, self).__init__()
        self.linear1 = nn.Linear(in_features, 128, bias=False)
        self.output = nn.Linear(128, out_features, bias=False)

    def forward(self, x):
        x = x.view(-1, 28*28)                   # flatten each image into a 784-dim vector
        sigma1 = torch.relu(self.linear1(x))    # hidden layer + ReLU
        z_hat = self.output(sigma1)             # raw class scores (logits)
        # log_sigma2 = torch.log_softmax(z_hat, dim=1)
        return z_hat
def fit(net, batchdata, lr=0.01, gamma=0.9, epochs=5):
    criterion = CrossEntropyLoss()
    opt = optim.SGD(net.parameters(), lr=lr, momentum=gamma)
    correct = 0
    samples = 0
    for i_epoch in range(epochs):
        for i_batch, (xb, yb) in enumerate(batchdata):
            yb = yb.view(xb.shape[0])
            opt.zero_grad()                     # clear the accumulated gradients
            z_hat = net(xb)
            loss = criterion(z_hat, yb)
            loss.backward()
            opt.step()
            # track the running accuracy
            y_hat = torch.max(z_hat, dim=1)[1]
            # softmax/log_softmax is monotonically increasing in z_hat,
            # so comparing the raw z_hat values also yields y_hat
            correct += torch.sum(y_hat == yb)
            samples += xb.shape[0]
            # print progress and accuracy
            if (i_batch + 1) % 125 == 0 or i_batch == len(batchdata) - 1:
                print("Epoch{}: [{}/{}({:.0f}%)] \t Loss: {:.6f} \t Accuracy:{:.3f}".format(
                    i_epoch + 1
                    , samples
                    , len(batchdata.dataset) * epochs
                    , 100 * samples / (len(batchdata.dataset) * epochs)
                    , loss.data.item()
                    , float(100 * correct / samples)
                ))
torch.manual_seed(531)
net = Model(input_, output_)
fit(net, batchdata, lr=lr, gamma=gamma, epochs=epochs)