Training a LeNet-style CNN on FashionMNIST with PyTorch: data loading and preprocessing, model definition, and the training loop.

# Imports
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, random_split

# Data preprocessing: FashionMNIST images are already 28x28 grayscale,
# so converting to tensors is all that is needed here
transform = transforms.ToTensor()

# Download and load the datasets, applying the transform once at load time
train_data = datasets.FashionMNIST(root='./data', train=True, download=True, transform=transform)
test_data = datasets.FashionMNIST(root='./data', train=False, download=True, transform=transform)

# Split off a validation set (10% of the training data)
validation_size = int(0.1 * len(train_data))
train_data, validation_data = random_split(
    train_data, [len(train_data) - validation_size, validation_size])

# DataLoaders (batch_size is assumed; it was not specified in the original)
batch_size = 64
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
validation_loader = DataLoader(validation_data, batch_size=batch_size, shuffle=False)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)


class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        # First convolutional layer: 1 input channel, 6 output channels, 5x5 kernel;
        # padding=2 keeps the 28x28 input at 28x28
        self.conv1 = nn.Conv2d(1, 6, 5, padding=2)
        # First pooling layer: 28x28 -> 14x14
        self.pool1 = nn.MaxPool2d(2, 2)
        # Second convolutional layer: 14x14 -> 10x10
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Second pooling layer: 10x10 -> 5x5
        self.pool2 = nn.MaxPool2d(2, 2)
        # First fully connected layer
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        # Second fully connected layer
        self.fc2 = nn.Linear(120, 84)
        # Output layer: 10 classes
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)  # flatten the feature maps
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


# Instantiate the model
model = LeNet()

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Train the model (num_epochs was undefined in the original; 10 is an assumed default)
num_epochs = 10
for epoch in range(num_epochs):
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
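The script above creates a validation split and a test_loader but never uses them. Below is a minimal evaluation sketch, not part of the original code: it assumes the model, criterion, validation_loader, and test_loader defined above, and reports average loss and accuracy over a given loader.

# Evaluation sketch (assumes model, criterion, validation_loader, test_loader from above)
def evaluate(model, loader, criterion):
    model.eval()  # switch to evaluation mode
    total_loss, correct, total = 0.0, 0, 0
    with torch.no_grad():  # no gradients needed during evaluation
        for inputs, labels in loader:
            outputs = model(inputs)
            # criterion averages over the batch, so weight by batch size to get a sum
            total_loss += criterion(outputs, labels).item() * labels.size(0)
            correct += (outputs.argmax(dim=1) == labels).sum().item()
            total += labels.size(0)
    model.train()  # restore training mode
    return total_loss / total, correct / total

val_loss, val_acc = evaluate(model, validation_loader, criterion)
test_loss, test_acc = evaluate(model, test_loader, criterion)
print(f'validation: loss={val_loss:.4f}, acc={val_acc:.4f}')
print(f'test:       loss={test_loss:.4f}, acc={test_acc:.4f}')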

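matplotlib is imported at the top but never used. A small visualization sketch follows; it assumes the trained model and test_data from above, and the class_names list is an assumption based on the standard FashionMNIST label order.

# Visualization sketch: show a few test images with predicted and true labels
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

model.eval()
fig, axes = plt.subplots(1, 6, figsize=(12, 2))
with torch.no_grad():
    for ax, idx in zip(axes, range(6)):
        image, label = test_data[idx]                       # image: 1x28x28 tensor
        pred = model(image.unsqueeze(0)).argmax(dim=1).item()
        ax.imshow(image.squeeze().numpy(), cmap='gray')
        ax.set_title(f'{class_names[pred]}\n(true: {class_names[label]})', fontsize=8)
        ax.axis('off')
plt.tight_layout()
plt.show()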

