{"id":15,"date":"2026-01-26T02:48:47","date_gmt":"2026-01-26T02:48:47","guid":{"rendered":"https:\/\/www.yangjifang.cn\/?p=15"},"modified":"2026-01-26T02:48:47","modified_gmt":"2026-01-26T02:48:47","slug":"pytorch%e6%9c%80%e5%9f%ba%e7%a1%80%e7%9a%84%e7%bd%91%e7%bb%9c%e8%ae%ad%e7%bb%83","status":"publish","type":"post","link":"https:\/\/www.yangjifang.cn\/index.php\/2026\/01\/26\/pytorch%e6%9c%80%e5%9f%ba%e7%a1%80%e7%9a%84%e7%bd%91%e7%bb%9c%e8%ae%ad%e7%bb%83\/","title":{"rendered":"pytorch\u6700\u57fa\u7840\u7684\u7f51\u7edc\u8bad\u7ec3"},"content":{"rendered":"\n<p><strong>\u6570\u636e\u52a0\u8f7d<\/strong><\/p>\n\n\n\n<pre class=\"wp-block-code\"><code># =============================================================================\n# MNIST\u624b\u5199\u6570\u5b57\u8bc6\u522b\u795e\u7ecf\u7f51\u7edc\u9879\u76ee\n# \u8fd9\u662f\u4e00\u4e2a\u4f7f\u7528PyTorch\u6784\u5efa\u7684\u7b80\u5355\u795e\u7ecf\u7f51\u7edc\uff0c\u7528\u4e8e\u8bc6\u522b\u624b\u5199\u6570\u5b570-9\n# =============================================================================\n\n# \u5bfc\u5165\u5fc5\u8981\u7684\u5e93\nimport torch  # PyTorch\u6df1\u5ea6\u5b66\u4e60\u6846\u67b6\nfrom pathlib import Path  # \u7528\u4e8e\u5904\u7406\u6587\u4ef6\u8def\u5f84\nimport requests  # \u7528\u4e8e\u4e0b\u8f7d\u6570\u636e\uff08\u867d\u7136\u8fd9\u91cc\u88ab\u6ce8\u91ca\u4e86\uff09\nimport numpy as np  # \u6570\u503c\u8ba1\u7b97\u5e93\nfrom matplotlib import pyplot  # \u7ed8\u56fe\u5e93\uff0c\u7528\u4e8e\u663e\u793a\u56fe\u7247\nimport pickle  # \u7528\u4e8e\u5e8f\u5217\u5316\u548c\u53cd\u5e8f\u5217\u5316Python\u5bf9\u8c61\nimport gzip  # \u7528\u4e8e\u5904\u7406\u538b\u7f29\u6587\u4ef6\nimport pylab  # matplotlib\u7684\u7b80\u5316\u63a5\u53e3\nimport torch.nn.functional as F  # PyTorch\u7684\u51fd\u6570\u5f0f\u63a5\u53e3\nfrom torch import nn  # PyTorch\u7684\u795e\u7ecf\u7f51\u7edc\u6a21\u5757\nfrom torch.utils.data import TensorDataset  # \u7528\u4e8e\u521b\u5efa\u6570\u636e\u96c6\nfrom torch.utils.data 
import DataLoader  # \u7528\u4e8e\u6279\u91cf\u52a0\u8f7d\u6570\u636e\nfrom torch import optim  # PyTorch\u7684\u4f18\u5316\u5668\n\n# \u5b9a\u4e49\u635f\u5931\u51fd\u6570\uff1a\u4ea4\u53c9\u71b5\u635f\u5931\uff0c\u5e38\u7528\u4e8e\u591a\u5206\u7c7b\u95ee\u9898\nloss_fun = F.cross_entropy\n\n# =============================================================================\n# \u6570\u636e\u51c6\u5907\u90e8\u5206\n# =============================================================================\n\n# \u8bbe\u7f6e\u6570\u636e\u5b58\u50a8\u8def\u5f84\nDATA_PATH = Path(\"data\")  # \u6570\u636e\u6587\u4ef6\u5939\nPATH = DATA_PATH \/ \"mnist\"  # MNIST\u6570\u636e\u7684\u5177\u4f53\u8def\u5f84\n\n# \u521b\u5efa\u6570\u636e\u76ee\u5f55\uff08\u5982\u679c\u4e0d\u5b58\u5728\u7684\u8bdd\uff09\nPATH.mkdir(parents=True, exist_ok=True)\n\n# \u539f\u672c\u7528\u4e8e\u4e0b\u8f7d\u6570\u636e\u7684URL\uff08\u5df2\u88ab\u6ce8\u91ca\uff09\n# URL = \"http:\/\/deeplearning.net\/data\/mnist\/\"\n\n# MNIST\u6570\u636e\u6587\u4ef6\u540d\nFILENAME = \"mnist.pkl.gz\"\n\n# \u539f\u672c\u7528\u4e8e\u4e0b\u8f7d\u6570\u636e\u7684\u4ee3\u7801\uff08\u5df2\u88ab\u6ce8\u91ca\uff09\n# if not(PATH\/FILENAME).exists():\n#     content = requests.get(URL + FILENAME).content\n#     (PATH \/FILENAME).open(\"wb\").write(content)\n\n# \u4ece\u538b\u7f29\u6587\u4ef6\u4e2d\u52a0\u8f7dMNIST\u6570\u636e\u96c6\n# MNIST\u6570\u636e\u96c6\u5305\u542b\u8bad\u7ec3\u96c6\u3001\u9a8c\u8bc1\u96c6\u548c\u6d4b\u8bd5\u96c6\nwith gzip.open((PATH\/FILENAME).as_posix(), \"rb\") as f:\n    ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding=\"latin-1\")\n# print(x_train.shape)  # \u6253\u5370\u8bad\u7ec3\u6570\u636e\u7684\u5f62\u72b6\n\n# \u5c06numpy\u6570\u7ec4\u8f6c\u6362\u4e3aPyTorch\u5f20\u91cf\uff08tensor\uff09\n# \u8fd9\u662fPyTorch\u5904\u7406\u6570\u636e\u7684\u57fa\u672c\u683c\u5f0f\nx_train, y_train, x_valid, y_valid = map(torch.tensor, (x_train, y_train, x_valid, y_valid))\n\n# print(x_train.shape, 
y_train.shape, x_valid.shape, y_valid.shape)  # \u6253\u5370\u6240\u6709\u6570\u636e\u7684\u5f62\u72b6\n\n# \u4ee5\u4e0b\u662f\u4e00\u4e9b\u6d4b\u8bd5\u4ee3\u7801\uff08\u5df2\u88ab\u6ce8\u91ca\uff09\n# a = torch.randn(&#91;2,2])  # \u521b\u5efa\u968f\u673a2x2\u5f20\u91cf\n# b = torch.randn(&#91;2,2])  # \u521b\u5efa\u968f\u673a2x2\u5f20\u91cf\n# print(loss_fun(a,b))  # \u6d4b\u8bd5\u635f\u5931\u51fd\u6570\n\n# \u6d4b\u8bd5\u56fe\u7247\u663e\u793a\u7684\u4ee3\u7801\uff08\u5df2\u88ab\u6ce8\u91ca\uff09\n# img = torch.tensor(&#91;0,0,0,1,0,0,0,0,0])  # \u521b\u5efa\u7b80\u5355\u76843x3\u56fe\u7247\n# pyplot.imshow(img.reshape(3,3), cmap=\"gray\")  # \u663e\u793a\u56fe\u7247\n# pyplot.show()\n\n# \u663e\u793aMNIST\u8bad\u7ec3\u56fe\u7247\u7684\u4ee3\u7801\uff08\u5df2\u88ab\u6ce8\u91ca\uff09\n# pyplot.imshow(x_train&#91;2].reshape(28,28), cmap=\"gray\")  # \u663e\u793a\u7b2c3\u5f20\u8bad\u7ec3\u56fe\u7247\n# pyplot.show()\n\n# \u4ee5\u4e0b\u662f\u4e00\u4e9b\u624b\u52a8\u5b9e\u73b0\u795e\u7ecf\u7f51\u7edc\u7684\u4ee3\u7801\uff08\u5df2\u88ab\u6ce8\u91ca\uff09\n# bs = 64  # \u6279\u6b21\u5927\u5c0f\n# xb = x_train&#91;0:bs]  # \u53d6\u524d64\u4e2a\u8bad\u7ec3\u6837\u672c\n# yb = y_train&#91;0:bs]  # \u5bf9\u5e94\u7684\u6807\u7b7e\n# weights = torch.randn(&#91;784,10], dtype=torch.float, requires_grad=True)  # \u6743\u91cd\u77e9\u9635\n# bias = torch.zeros(10, requires_grad=True)  # \u504f\u7f6e\u5411\u91cf\n# loss_func = F.cross_entropy  # \u635f\u5931\u51fd\u6570\n# =============================================================================\n# \u795e\u7ecf\u7f51\u7edc\u6a21\u578b\u5b9a\u4e49\n# =============================================================================\n\nclass Minst_NN(nn.Module):\n    \"\"\"\n    MNIST\u624b\u5199\u6570\u5b57\u8bc6\u522b\u7684\u795e\u7ecf\u7f51\u7edc\u6a21\u578b\n    \u7f51\u7edc\u7ed3\u6784\uff1a\u8f93\u5165\u5c42(784) -&gt; \u9690\u85cf\u5c421(128) -&gt; \u9690\u85cf\u5c422(256) -&gt; \u8f93\u51fa\u5c42(10)\n    784 = 
28*28\uff0c\u56e0\u4e3aMNIST\u56fe\u7247\u662f28x28\u50cf\u7d20\n    10 = \u6570\u5b570-9\u768410\u4e2a\u7c7b\u522b\n    \"\"\"\n\n    def __init__(self):\n        super().__init__()  # \u8c03\u7528\u7236\u7c7b\u6784\u9020\u51fd\u6570\uff0c\u8fd9\u662f\u5fc5\u987b\u7684\n        # \u7b2c\u4e00\u4e2a\u5168\u8fde\u63a5\u5c42\uff1a784\u4e2a\u8f93\u5165\u7279\u5f81 -&gt; 128\u4e2a\u9690\u85cf\u5355\u5143\n        self.hidden1 = nn.Linear(784, 128)\n        # \u7b2c\u4e8c\u4e2a\u5168\u8fde\u63a5\u5c42\uff1a128\u4e2a\u8f93\u5165\u7279\u5f81 -&gt; 256\u4e2a\u9690\u85cf\u5355\u5143\n        self.hidden2 = nn.Linear(128, 256)\n        # \u8f93\u51fa\u5c42\uff1a256\u4e2a\u8f93\u5165\u7279\u5f81 -&gt; 10\u4e2a\u8f93\u51fa\uff08\u5bf9\u5e940-9\u6570\u5b57\uff09\n        self.out = nn.Linear(256, 10)\n        # Dropout\u5c42\uff1a\u968f\u673a\u4e22\u5f0350%\u7684\u795e\u7ecf\u5143\uff0c\u9632\u6b62\u8fc7\u62df\u5408\n        self.dropout = nn.Dropout(0.5)\n\n    def forward(self, x):\n        \"\"\"\n        \u524d\u5411\u4f20\u64ad\u51fd\u6570\n        \u5b9a\u4e49\u6570\u636e\u5982\u4f55\u901a\u8fc7\u7f51\u7edc\u5c42\n        \"\"\"\n        # \u7b2c\u4e00\u5c42\uff1a\u7ebf\u6027\u53d8\u6362 + ReLU\u6fc0\u6d3b\u51fd\u6570\n        x = F.relu(self.hidden1(x))\n        # \u5e94\u7528Dropout\u9632\u6b62\u8fc7\u62df\u5408\n        x = self.dropout(x)\n        # \u7b2c\u4e8c\u5c42\uff1a\u7ebf\u6027\u53d8\u6362 + ReLU\u6fc0\u6d3b\u51fd\u6570\n        x = F.relu(self.hidden2(x))\n        # \u518d\u6b21\u5e94\u7528Dropout\n        x = self.dropout(x)\n        # \u8f93\u51fa\u5c42\uff1a\u7ebf\u6027\u53d8\u6362\uff08\u4e0d\u9700\u8981\u6fc0\u6d3b\u51fd\u6570\uff0c\u56e0\u4e3a\u4f7f\u7528\u4ea4\u53c9\u71b5\u635f\u5931\uff09\n        x = self.out(x)\n        return x\n\n# =============================================================================\n# \u6570\u636e\u51c6\u5907\u548c\u8bad\u7ec3\u8bbe\u7f6e\n# 
=============================================================================\n\n# \u8bbe\u7f6e\u6279\u6b21\u5927\u5c0f\uff08\u6bcf\u6b21\u8bad\u7ec3\u4f7f\u7528\u7684\u6837\u672c\u6570\u91cf\uff09\nbs = 64\n\n# \u521b\u5efa\u8bad\u7ec3\u548c\u9a8c\u8bc1\u6570\u636e\u96c6\n# TensorDataset\u5c06\u8f93\u5165\u6570\u636e\u548c\u6807\u7b7e\u6253\u5305\u6210\u6570\u636e\u96c6\ntrain_ds = TensorDataset(x_train, y_train)  # \u8bad\u7ec3\u6570\u636e\u96c6\nvalid_ds = TensorDataset(x_valid, y_valid)  # \u9a8c\u8bc1\u6570\u636e\u96c6\n\ndef get_data(train_ds, valid_ds, bs):\n    \"\"\"\n    \u521b\u5efa\u6570\u636e\u52a0\u8f7d\u5668\n    \u6570\u636e\u52a0\u8f7d\u5668\u7528\u4e8e\u6279\u91cf\u52a0\u8f7d\u6570\u636e\uff0c\u63d0\u9ad8\u8bad\u7ec3\u6548\u7387\n    \"\"\"\n    return (\n        DataLoader(train_ds, batch_size=bs, shuffle=True),  # \u8bad\u7ec3\u6570\u636e\u52a0\u8f7d\u5668\uff0c\u6253\u4e71\u6570\u636e\n        DataLoader(valid_ds, batch_size=bs*2),  # \u9a8c\u8bc1\u6570\u636e\u52a0\u8f7d\u5668\uff0c\u6279\u6b21\u5927\u5c0f\u662f\u8bad\u7ec3\u65f6\u76842\u500d\n    )\n\ndef loss_batch(model, loss_fun, xb, yb, opt=None):\n    \"\"\"\n    \u8ba1\u7b97\u4e00\u4e2a\u6279\u6b21\u7684\u635f\u5931\u5e76\u66f4\u65b0\u6a21\u578b\u53c2\u6570\n\n    \u53c2\u6570:\n    - model: \u795e\u7ecf\u7f51\u7edc\u6a21\u578b\n    - loss_fun: \u635f\u5931\u51fd\u6570\n    - xb: \u8f93\u5165\u6570\u636e\u6279\u6b21\n    - yb: \u6807\u7b7e\u6279\u6b21\n    - opt: \u4f18\u5316\u5668\uff08\u53ef\u9009\uff0c\u5982\u679c\u63d0\u4f9b\u5219\u66f4\u65b0\u53c2\u6570\uff09\n\n    \u8fd4\u56de:\n    - loss.item(): \u635f\u5931\u503c\n    - len(xb): \u6279\u6b21\u5927\u5c0f\n    \"\"\"\n    # \u8ba1\u7b97\u635f\u5931\uff1a\u6a21\u578b\u9884\u6d4b\u7ed3\u679c\u4e0e\u771f\u5b9e\u6807\u7b7e\u7684\u5dee\u5f02\n    loss = loss_fun(model(xb), yb)\n\n    # \u5982\u679c\u63d0\u4f9b\u4e86\u4f18\u5316\u5668\uff0c\u5219\u8fdb\u884c\u53cd\u5411\u4f20\u64ad\u548c\u53c2\u6570\u66f4\u65b0\n    if opt is not 
None:\n        loss.backward()  # \u53cd\u5411\u4f20\u64ad\uff0c\u8ba1\u7b97\u68af\u5ea6\n        opt.step()      # \u66f4\u65b0\u6a21\u578b\u53c2\u6570\n        opt.zero_grad() # \u6e05\u96f6\u68af\u5ea6\uff0c\u4e3a\u4e0b\u6b21\u8ba1\u7b97\u505a\u51c6\u5907\n\n    return loss.item(), len(xb)  # \u8fd4\u56de\u635f\u5931\u503c\u548c\u6279\u6b21\u5927\u5c0f\n\ndef fit(steps, model, loss_func, opt, train_dl, valid_dl):\n    \"\"\"\n    \u8bad\u7ec3\u6a21\u578b\u7684\u4e3b\u51fd\u6570\n\n    \u53c2\u6570:\n    - steps: \u8bad\u7ec3\u8f6e\u6570\n    - model: \u795e\u7ecf\u7f51\u7edc\u6a21\u578b\n    - loss_func: \u635f\u5931\u51fd\u6570\n    - opt: \u4f18\u5316\u5668\n    - train_dl: \u8bad\u7ec3\u6570\u636e\u52a0\u8f7d\u5668\n    - valid_dl: \u9a8c\u8bc1\u6570\u636e\u52a0\u8f7d\u5668\n    \"\"\"\n    for step in range(steps):\n        # \u8bbe\u7f6e\u6a21\u578b\u4e3a\u8bad\u7ec3\u6a21\u5f0f\uff08\u542f\u7528Dropout\u7b49\uff09\n        model.train()\n        # \u904d\u5386\u8bad\u7ec3\u6570\u636e\u7684\u6240\u6709\u6279\u6b21\n        for xb, yb in train_dl:\n            loss_batch(model, loss_func, xb, yb, opt)\n\n        # \u8bbe\u7f6e\u6a21\u578b\u4e3a\u8bc4\u4f30\u6a21\u5f0f\uff08\u7981\u7528Dropout\u7b49\uff09\n        model.eval()\n        # \u5728\u9a8c\u8bc1\u96c6\u4e0a\u8bc4\u4f30\u6a21\u578b\uff08\u4e0d\u8ba1\u7b97\u68af\u5ea6\uff09\n        with torch.no_grad():\n            losses, nums = zip(\n                *&#91;loss_batch(model, loss_func, xb, yb) for xb, yb in valid_dl]\n            )\n        # \u8ba1\u7b97\u9a8c\u8bc1\u96c6\u4e0a\u7684\u5e73\u5747\u635f\u5931\n        val_loss = np.sum(np.multiply(losses, nums)) \/ np.sum(nums)\n        print(\"\u5f53\u524dstep:\" + str(step), '\u9a8c\u8bc1\u96c6\u635f\u5931:' + str(val_loss))\n\ndef get_model():\n    \"\"\"\n    \u521b\u5efa\u6a21\u578b\u548c\u4f18\u5316\u5668\n\n    \u8fd4\u56de:\n    - model: \u795e\u7ecf\u7f51\u7edc\u6a21\u578b\n    - opt: Adam\u4f18\u5316\u5668\n    \"\"\"\n    model = 
Minst_NN()  # \u521b\u5efa\u6a21\u578b\u5b9e\u4f8b\n    return model, optim.Adam(model.parameters(), lr=0.001)  # \u8fd4\u56de\u6a21\u578b\u548cAdam\u4f18\u5316\u5668\n\n\n# =============================================================================\n# \u6a21\u578b\u8bad\u7ec3\u548c\u8bc4\u4f30\n# =============================================================================\n\n# \u521b\u5efa\u6570\u636e\u52a0\u8f7d\u5668\ntrain_dl, valid_dl = get_data(train_ds, valid_ds, bs)\n\n# \u521b\u5efa\u6a21\u578b\u548c\u4f18\u5316\u5668\nmodel, opt = get_model()\n\n# \u5f00\u59cb\u8bad\u7ec3\u6a21\u578b\uff08\u8bad\u7ec310\u4e2aepoch\uff09\nprint(\"\u5f00\u59cb\u8bad\u7ec3\u6a21\u578b...\")\nfit(10, model, loss_fun, opt, train_dl, valid_dl)\n\n# =============================================================================\n# \u6a21\u578b\u9a8c\u8bc1\u548c\u51c6\u786e\u7387\u8ba1\u7b97\n# =============================================================================\n\nprint(\"\\n\u5f00\u59cb\u9a8c\u8bc1\u6a21\u578b\u51c6\u786e\u7387...\")\n\n# \u521d\u59cb\u5316\u51c6\u786e\u7387\u8ba1\u7b97\u53d8\u91cf\ncorrect = 0  # \u9884\u6d4b\u6b63\u786e\u7684\u6837\u672c\u6570\ntotal = 0    # \u603b\u6837\u672c\u6570\n\n# \u8bbe\u7f6e\u6a21\u578b\u4e3a\u8bc4\u4f30\u6a21\u5f0f\nmodel.eval()\n\n# \u5728\u9a8c\u8bc1\u96c6\u4e0a\u6d4b\u8bd5\u6a21\u578b\nwith torch.no_grad():  # \u4e0d\u8ba1\u7b97\u68af\u5ea6\uff0c\u8282\u7701\u5185\u5b58\u548c\u8ba1\u7b97\u65f6\u95f4\n    for xb, yb in valid_dl:\n        # \u83b7\u53d6\u6a21\u578b\u9884\u6d4b\u7ed3\u679c\n        outputs = model(xb)\n        # \u627e\u5230\u9884\u6d4b\u6982\u7387\u6700\u9ad8\u7684\u7c7b\u522b\uff08torch.max\u8fd4\u56de\u503c\u548c\u7d22\u5f15\uff0c\u6211\u4eec\u53ea\u8981\u7d22\u5f15\uff09\n        _, predicted = torch.max(outputs.data, 1)\n\n        # \u7edf\u8ba1\u603b\u6837\u672c\u6570\n        total += yb.size(0)\n        # \u7edf\u8ba1\u9884\u6d4b\u6b63\u786e\u7684\u6837\u672c\u6570\n        correct += (predicted 
== yb).sum().item()\n\n# \u8ba1\u7b97\u5e76\u6253\u5370\u51c6\u786e\u7387\naccuracy = 100 * correct \/ total\nprint(\"\u6a21\u578b\u5728\u9a8c\u8bc1\u96c6\u4e0a\u7684\u51c6\u786e\u7387: {:.2f}%\".format(accuracy))\nprint(\"\u6b63\u786e\u9884\u6d4b: {}\/{}\".format(correct, total))<\/code><\/pre>\n","protected":false},"excerpt":{"rendered":"<p>\u6570\u636e\u52a0\u8f7d<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[1],"tags":[],"class_list":["post-15","post","type-post","status-publish","format-standard","hentry","category-uncategorized"],"_links":{"self":[{"href":"https:\/\/www.yangjifang.cn\/index.php\/wp-json\/wp\/v2\/posts\/15","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/www.yangjifang.cn\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/www.yangjifang.cn\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/www.yangjifang.cn\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/www.yangjifang.cn\/index.php\/wp-json\/wp\/v2\/comments?post=15"}],"version-history":[{"count":1,"href":"https:\/\/www.yangjifang.cn\/index.php\/wp-json\/wp\/v2\/posts\/15\/revisions"}],"predecessor-version":[{"id":16,"href":"https:\/\/www.yangjifang.cn\/index.php\/wp-json\/wp\/v2\/posts\/15\/revisions\/16"}],"wp:attachment":[{"href":"https:\/\/www.yangjifang.cn\/index.php\/wp-json\/wp\/v2\/media?parent=15"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/www.yangjifang.cn\/index.php\/wp-json\/wp\/v2\/categories?post=15"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/www.yangjifang.cn\/index.php\/wp-json\/wp\/v2\/tags?post=15"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}