
How large is the MNIST dataset? (Classifying MNIST with a CNN, a BPNN, and an LSTM)

MNIST contains 60,000 training images and 10,000 test images of handwritten digits, each a 28x28 grayscale image. Below, the dataset is classified with three PyTorch models: a convolutional neural network (CNN), a back-propagation neural network (BPNN), and an LSTM.

1. CNN

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms

# Fix the random seed for reproducibility
torch.manual_seed(0)

# Hyperparameters
EPOCH = 1               # number of full passes over the training data
BATCH_SIZE = 100        # samples per batch
DOWNLOAD_MNIST = False  # set to True if the dataset has not been downloaded yet

# Load the MNIST dataset
train_dataset = datasets.MNIST(
    root="./mnist",
    train=True,                       # True selects the training split
    transform=transforms.ToTensor(),
    download=DOWNLOAD_MNIST)
test_dataset = datasets.MNIST(
    root="./mnist",
    train=False,                      # False selects the test split
    transform=transforms.ToTensor(),
    download=DOWNLOAD_MNIST)

# Wrap the datasets in DataLoaders
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True)   # shuffle so each epoch sees the samples in a random order
test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset,
    batch_size=BATCH_SIZE,
    shuffle=False)

# To save time, evaluate on the first 2000 test samples only
test_x = torch.unsqueeze(test_dataset.data, dim=1).type(torch.FloatTensor)[:2000] / 255.
# shape from (2000, 28, 28) to (2000, 1, 28, 28), values scaled to [0, 1]
test_y = test_dataset.targets[:2000]

# Define the convolutional neural network
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(  # input feature map size: (1, 28, 28)
            in_channels=1,       # number of input feature maps
            out_channels=32,     # number of output feature maps
            kernel_size=3,       # 3x3 window for feature extraction
            stride=1,            # the window slides one pixel at a time
            padding=1)           # padding=(kernel_size-1)/2 keeps the spatial size unchanged
        # After conv1 the feature map is (32, 28, 28); the spatial size follows
        # (h - k + 2p) / s + 1 = (28 - 3 + 2*1) / 1 + 1 = 28
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)  # halves the spatial size and helps prevent overfitting
        # After pooling the feature map is (32, 14, 14)
        self.conv2 = nn.Conv2d(  # input feature map size: (32, 14, 14)
            in_channels=32,      # conv1's output channels become conv2's input channels
            out_channels=64,
            kernel_size=3,
            stride=1,
            padding=1)           # after conv2 the feature map is (64, 14, 14)
        self.fc = nn.Linear(64 * 7 * 7, 10)  # 10 output classes (digits 0-9)

    # Define how x flows through the network
    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))  # conv1 -> ReLU -> pool: (32, 14, 14)
        x = self.pool(F.relu(self.conv2(x)))  # conv2 -> ReLU -> pool: (64, 7, 7)
        x = x.view(-1, 64 * 7 * 7)            # flatten for the fully connected layer
        x = self.fc(x)
        return x

# Instantiate the model
model = CNN()

# Loss function and optimizer; lr (learning rate) controls the size of each parameter update
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Training loop
for epoch in range(EPOCH):
    for i, (images, labels) in enumerate(train_loader):
        outputs = model(images)            # forward pass through the CNN
        loss = criterion(outputs, labels)  # loss between outputs and true labels (the order matters)
        optimizer.zero_grad()              # clear the gradients from the previous step
        loss.backward()                    # backpropagation: compute gradients
        optimizer.step()                   # apply gradients
        if i % 50 == 0:
            test_output = model(test_x)
            pred_y = torch.max(test_output, 1)[1].data.numpy()
            accuracy = float((pred_y == test_y.data.numpy()).astype(int).sum()) / float(test_y.size(0))
            print('Epoch:', epoch, '| train loss: %.4f' % loss.data.numpy(),
                  '| test accuracy: %.4f' % accuracy)

# Print 10 predictions from the test data
test_output = model(test_x[:10])
pred_y = torch.max(test_output, 1)[1].data.numpy()
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')
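A quick way to confirm the feature-map sizes noted in the comments above is to push a dummy tensor through the layers by hand. This is a minimal sketch for illustration only; the zero tensor and the layer-by-layer calls are not part of the training script:

import torch
import torch.nn.functional as F

m = CNN()                          # the model class defined above
x = torch.zeros(1, 1, 28, 28)      # one dummy grayscale image
x = m.pool(F.relu(m.conv1(x)))
print(x.shape)                     # torch.Size([1, 32, 14, 14])
x = m.pool(F.relu(m.conv2(x)))
print(x.shape)                     # torch.Size([1, 64, 7, 7]) -- matches fc's 64 * 7 * 7 input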

2. BPNN

import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision

# Hyperparameters
EPOCH = 2               # number of training epochs
BATCH_SIZE = 50         # samples per batch
LR = 0.001              # learning rate
DOWNLOAD_MNIST = False  # set to True if the dataset has not been downloaded yet

# Load the training data, converting images to tensor format
train_data = torchvision.datasets.MNIST(
    root='./mnist',
    train=True,
    transform=torchvision.transforms.ToTensor(),
    download=DOWNLOAD_MNIST,
)
# Read the images in batches
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

# Load the test data
test_data = torchvision.datasets.MNIST(
    root='./mnist',
    train=False,
    transform=torchvision.transforms.ToTensor(),
)
test_loader = Data.DataLoader(dataset=test_data, batch_size=BATCH_SIZE)

class BPNN(nn.Module):
    def __init__(self):
        super(BPNN, self).__init__()
        # nn.Sequential runs its modules in order
        self.fc1 = nn.Sequential(
            nn.Linear(28, 64),  # fully connected; applied to each 28-pixel image row
            nn.ReLU(),
        )
        self.fc2 = nn.Sequential(
            nn.Linear(64, 128),
            nn.ReLU(),
        )
        self.fc3 = nn.Sequential(
            nn.Linear(128, 32),
            nn.ReLU(),
        )
        # Output layer: 28 rows x 32 features per row -> 10 classes
        self.out = nn.Linear(32 * 28, 10)

    def forward(self, x):
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        x = x.view(x.size(0), -1)  # keep dim 0 (batch_size), flatten the remaining dimensions
        output = self.out(x)
        return output

bpnn = BPNN()
optimizer = torch.optim.Adam(bpnn.parameters(), lr=LR)  # Adam optimizer
loss_func = nn.CrossEntropyLoss()                       # loss function

# Training loop
for epoch in range(EPOCH):
    for step, (batch_x, batch_y) in enumerate(train_loader):
        out = bpnn(batch_x)
        loss = loss_func(out, batch_y)
        optimizer.zero_grad()  # clear the accumulated gradients
        loss.backward()        # backpropagate the error
        optimizer.step()       # update the weights using the gradients

    # Evaluate on every 10th test batch and average the accuracy
    equal = 0
    i = 0
    for step, (test_x, test_y) in enumerate(test_loader):
        if step % 10 == 0:
            i += 1
            test_output = bpnn(test_x)
            pred_y = torch.max(test_output, 1)[1].data.squeeze()
            acc = (pred_y == test_y).sum().float() / test_y.size(0)
            print('Epoch:', epoch, '| train loss: %.4f' % loss.data.float(),
                  '| test acc:', acc.numpy())
            equal += acc.numpy()
    print(equal / i)

# Print 10 predictions from the last test batch
test_output = bpnn(test_x[:10])
pred_y = torch.max(test_output, 1)[1].data.squeeze()
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')
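Note that this network is fed the raw (batch, 1, 28, 28) images without flattening first: nn.Linear operates on the last dimension, so each of the 28 image rows passes through the stack independently before the final flatten. A small shape trace, for illustration only:

import torch

net = BPNN()                    # the model class defined above
x = torch.zeros(50, 1, 28, 28)  # one dummy batch of images
h = net.fc3(net.fc2(net.fc1(x)))
print(h.shape)                  # torch.Size([50, 1, 28, 32]): 32 features per row
print(net(x).shape)             # torch.Size([50, 10]) after flattening to 28 * 32 = 896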

3. LSTM

import torch
from torch import nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms

torch.manual_seed(1)  # reproducible

# Hyperparameters
EPOCH = 1               # train the full dataset once; kept small to save time
BATCH_SIZE = 64
TIME_STEP = 28          # rnn time steps / image height
INPUT_SIZE = 28         # rnn input size per step / pixels per image row
LR = 0.01               # learning rate
DOWNLOAD_MNIST = False  # set to True if the dataset has not been downloaded yet

# MNIST handwritten digits
train_data = dsets.MNIST(
    root='./mnist/',                  # where to save / load the data
    train=True,                       # this is the training split
    transform=transforms.ToTensor(),  # converts PIL.Image or numpy.ndarray to
                                      # torch.FloatTensor (C x H x W), normalized to [0.0, 1.0]
    download=DOWNLOAD_MNIST,          # download only if not already present
)
test_data = dsets.MNIST(root='./mnist/', train=False)

# Batch training: BATCH_SIZE samples, 1 channel, 28x28 -> (BATCH_SIZE, 1, 28, 28)
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

# To save time, evaluate on the first 2000 test samples only
test_x = torch.unsqueeze(test_data.data, dim=1).type(torch.FloatTensor)[:2000] / 255.
# shape from (2000, 28, 28) to (2000, 1, 28, 28), values scaled to [0, 1]
test_y = test_data.targets[:2000]

class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = nn.LSTM(    # an LSTM performs much better here than a plain nn.RNN()
            input_size=28,     # pixels per image row
            hidden_size=64,    # rnn hidden units
            num_layers=1,      # number of stacked RNN layers
            batch_first=True,  # input & output put batch size first, e.g. (batch, time_step, input_size)
        )
        self.out = nn.Linear(64, 10)  # output layer: hidden state -> 10 classes

    def forward(self, x):
        # x shape:     (batch, time_step, input_size)
        # r_out shape: (batch, time_step, hidden_size)
        # h_n shape:   (n_layers, batch, hidden_size)  final hidden state
        # h_c shape:   (n_layers, batch, hidden_size)  final cell state
        r_out, (h_n, h_c) = self.rnn(x, None)  # None initializes the hidden state to all zeros
        # (h_n, h_c) are the hidden states at the last time step; they are not needed here.
        # Take r_out at the last time step; for this single-layer LSTM,
        # r_out[:, -1, :] equals h_n
        out = self.out(r_out[:, -1, :])
        return out

rnn = RNN()
print(rnn)

optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)  # optimize all parameters
loss_func = nn.CrossEntropyLoss()                      # targets are class indices, not one-hot

# Training and testing
for epoch in range(EPOCH):
    for step, (x, b_y) in enumerate(train_loader):  # gives batch data
        b_x = x.view(-1, 28, 28)       # reshape x to (batch, time_step, input_size)
        output = rnn(b_x)              # rnn output
        loss = loss_func(output, b_y)  # cross-entropy loss
        optimizer.zero_grad()          # clear gradients for this training step
        loss.backward()                # backpropagation: compute gradients
        optimizer.step()               # apply gradients
        if step % 50 == 0:
            test_output = rnn(test_x.view(-1, 28, 28))
            pred_y = torch.max(test_output, 1)[1].data.squeeze()
            acc = (pred_y == test_y).sum().float() / test_y.size(0)
            print('Epoch:', epoch, '| train loss: %.4f' % loss.data.float(),
                  '| test acc:', acc.numpy())

# Print 10 predictions from the test data
test_output = rnn(test_x[:10].view(-1, 28, 28))
pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
print(pred_y, 'prediction number')
print(test_y[:10], 'real number')
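The key idea in this model is that each 28x28 image is read as a sequence of TIME_STEP = 28 rows, each row being one INPUT_SIZE = 28 input vector, and only the last time step's output is classified. A minimal sketch of that reshape, for illustration only:

import torch

net = RNN()                        # the model class defined above
imgs = torch.zeros(64, 1, 28, 28)  # one dummy batch as produced by the DataLoader
seq = imgs.view(-1, 28, 28)        # (batch, time_step, input_size): each image row is one time step
print(seq.shape)                   # torch.Size([64, 28, 28])
print(net(seq).shape)              # torch.Size([64, 10]): one logit vector per image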

