PyTorch Programming on Debian
1. Environment Preparation and Installation
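On a stock Debian system, one straightforward route is pip inside a virtual environment. The commands below are a minimal sketch assuming a recent Debian release and the default wheel from PyPI; pick a CPU-only or CUDA-specific build from pytorch.org if your machine needs it.

sudo apt update
sudo apt install -y python3 python3-venv python3-pip
python3 -m venv ~/pytorch-env
source ~/pytorch-env/bin/activate
pip install --upgrade pip
pip install torch torchvision   # default wheel; see pytorch.org for CPU-only or CUDA-specific builds

A quick import confirms the install and reports whether a GPU is visible:

python - <<'PY'
import torch
print(torch.__version__, torch.cuda.is_available())
PY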
2. Quick Start: Training an MNIST Classifier
python - <<‘PY’ import torch, torch.nn as nn, torch.nn.functional as F from torch.utils.data import DataLoader from torchvision import datasets, transforms
transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) # MNIST 均值与标准差 ]) train_ds = datasets.MNIST(root=‘./data’, train=True, download=True, transform=transform) test_ds = datasets.MNIST(root=‘./data’, train=False, download=True, transform=transform) train_loader = DataLoader(train_ds, batch_size=64, shuffle=True) test_loader = DataLoader(test_ds, batch_size=64, shuffle=False)
class Net(nn.Module): def init(self): super().init() self.fc1 = nn.Linear(2828, 128) self.fc2 = nn.Linear(128, 10) def forward(self, x): x = x.view(-1, 2828) x = F.relu(self.fc1(x)) return F.log_softmax(self.fc2(x), dim=1)
device = torch.device(“cuda” if torch.cuda.is_available() else “cpu”) model = Net().to(device) criterion = nn.NLLLoss() optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for epoch in range(3): model.train() for data, target in train_loader: data, target = data.to(device), target.to(device) optimizer.zero_grad() loss = criterion(model(data), target) loss.backward() optimizer.step() model.eval() correct = total = 0 with torch.no_grad(): for data, target in test_loader: data, target = data.to(device), target.to(device) pred = model(data).argmax(dim=1) total += target.size(0) correct += (pred == target).sum().item() print(f"Epoch {epoch+1}: test acc = {100*correct/total:.2f}%") PY
3. Image Processing and Common torchvision Operations
python - <<‘PY’ from PIL import Image from torchvision import transforms
transform = transforms.Compose([ transforms.Resize((256, 256)), transforms.ToTensor(), # 转为 [0,1] 的 FloatTensor,形状 (C,H,W) ]) img = Image.open(‘path_to_image.jpg’).convert(‘RGB’) tensor = transform(img) # 张量化,可直接作为模型输入 print(tensor.shape, tensor.dtype) PY
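The same Compose pipeline is also where training-time data augmentation usually goes. The transforms below are a sketch of common choices; the specific operations and parameters are illustrative rather than tuned for any particular dataset.

python - <<'PY'
from torchvision import transforms

# Illustrative training-time pipeline: augmentations run on the PIL image,
# then ToTensor converts it to a (C, H, W) FloatTensor in [0, 1].
train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(brightness=0.2, contrast=0.2),
    transforms.ToTensor(),
])
print(train_transform)
PY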
4. Saving and Loading Models, and Simple Deployment as a Service
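A minimal sketch of the usual state_dict round trip, reusing the Net architecture from section 2; the file name mnist_net.pt is just an example.

python - <<'PY'
import torch
import torch.nn as nn
import torch.nn.functional as F

# Same architecture as the MNIST example above.
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(28 * 28, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = x.view(-1, 28 * 28)
        x = F.relu(self.fc1(x))
        return F.log_softmax(self.fc2(x), dim=1)

model = Net()

# Save only the parameters (recommended), not the pickled module object.
torch.save(model.state_dict(), "mnist_net.pt")

# Load: rebuild the architecture, then restore the parameters.
restored = Net()
restored.load_state_dict(torch.load("mnist_net.pt", map_location="cpu"))
restored.eval()
print("parameters restored")
PY

For simple serving, one common pattern is to put the restored model behind a small HTTP endpoint. The snippet below is a hypothetical Flask app (pip install flask), using a stand-in model so it runs on its own; in practice you would restore your trained weights with load_state_dict as shown above, and FastAPI or TorchServe are equally valid choices.

python - <<'PY'
import torch
from flask import Flask, request, jsonify

# Stand-in for a real model; replace with your restored, eval-mode network.
restored = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(28 * 28, 10))
restored.eval()

app = Flask(__name__)

@app.route("/predict", methods=["POST"])
def predict():
    # Expect a JSON body like {"pixels": [ ...784 floats... ]}
    pixels = request.get_json()["pixels"]
    x = torch.tensor(pixels, dtype=torch.float32).view(1, 1, 28, 28)
    with torch.no_grad():
        pred = restored(x).argmax(dim=1).item()
    return jsonify({"prediction": pred})

if __name__ == "__main__":
    app.run(host="127.0.0.1", port=5000)
PY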
5. Common Problems and Troubleshooting Tips
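When something goes wrong, the first step is usually to confirm which build of PyTorch is installed and whether it can actually see the GPU. A small diagnostic script along these lines narrows most problems down quickly:

python - <<'PY'
import torch

print("torch version:   ", torch.__version__)
print("built with CUDA: ", torch.version.cuda)      # None on CPU-only builds
print("CUDA available:  ", torch.cuda.is_available())
if torch.cuda.is_available():
    print("device count:    ", torch.cuda.device_count())
    print("device 0:        ", torch.cuda.get_device_name(0))
PY

Typical culprits on Debian include a CPU-only wheel installed where a CUDA build was expected, or an NVIDIA driver older than the CUDA version the wheel was built against.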