import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
# Hyper-parameters
EPOCH = 1               # train over the training data n times; 1 epoch to save time
BATCH_SIZE = 64
TIME_STEP = 28          # rnn time step / image height
INPUT_SIZE = 28         # rnn input size / image width
LR = 0.01               # learning rate
DOWNLOAD_MNIST = False  # set to True if the data has not been downloaded yet

# MNIST training set; ToTensor() yields float images scaled to [0, 1].
train_data = dsets.MNIST(
    root='./mnist/',
    train=True,
    transform=transforms.ToTensor(),
    download=DOWNLOAD_MNIST,
)
train_loader = torch.utils.data.DataLoader(
    dataset=train_data,
    batch_size=BATCH_SIZE,
    shuffle=True,
)
test_data = dsets.MNIST(
    root='./mnist/',
    train=False,
    transform=transforms.ToTensor(),
)
# First 2000 test samples. `.data` holds raw uint8 pixels in [0, 255], so
# divide by 255 to match ToTensor()'s [0, 1] scaling of the training set.
# (`.data`/`.targets` replace the deprecated `.test_data`/`.test_labels`
# attributes; `Variable(..., volatile=True)` was removed in modern PyTorch —
# use `torch.no_grad()` at evaluation time instead.)
test_x = test_data.data.type(torch.FloatTensor)[:2000] / 255.
test_y = test_data.targets.numpy()[:2000]
class RNN(nn.Module):
    """LSTM classifier that reads an image row-by-row as a sequence.

    Each 28x28 MNIST image is treated as 28 time steps (rows) of 28
    features (the pixels in that row); the last step's hidden output is
    mapped to class logits.

    Args:
        input_size: features per time step (image width). Default 28.
        hidden_size: LSTM hidden units. Default 64.
        num_classes: output logits. Default 10.
    """
    def __init__(self, input_size=28, hidden_size=64, num_classes=10):
        super().__init__()
        self.rnn = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=1,
            batch_first=True,   # tensors are (batch, time_step, input_size)
        )
        self.out = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Map x of shape (batch, time_step, input_size) to (batch, num_classes) logits."""
        # None -> hidden and cell states start at zero.
        r_out, (h_n, h_c) = self.rnn(x, None)
        # Classify from the output of the last time step only.
        return self.out(r_out[:, -1, :])
rnn = RNN()
print(rnn)

# Training
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()  # expects raw logits and integer class labels

for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):  # gives batch data
        # Reshape x to (batch, time_step, input_size); the obsolete
        # Variable wrappers are gone — tensors track gradients directly.
        b_x = x.view(-1, 28, 28)
        b_y = y

        output = rnn(b_x)
        loss = loss_func(output, b_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % 50 == 0:
            # Evaluate on the held-out slice without tracking gradients.
            with torch.no_grad():
                test_output = rnn(test_x)
            pred_y = torch.max(test_output, 1)[1].numpy()
            accuracy = float((pred_y == test_y).sum()) / float(test_y.size)
            # loss.item() replaces loss.data[0], which raises on PyTorch >= 0.5.
            print('Epoch: ', epoch,
                  '| train loss: %.4f' % loss.item(),
                  '| test accuracy: %.2f' % accuracy)

# print 10 predictions from test data
with torch.no_grad():
    test_output = rnn(test_x[:10].view(-1, 28, 28))
pred_y = torch.max(test_output, 1)[1].numpy().squeeze()
print(pred_y, 'prediction number')
print(test_y[:10], 'real number')
# Source article: "pytorch RNN循环神经网络" (PyTorch RNN tutorial).
# NOTE(review): the trailing scraped web-page residue (copyright notice,
# unrelated story excerpts, "recommended reading" links) was not Python
# and made this file unparseable; it carried no information about the code
# and has been reduced to this provenance comment.