
D2L 5.1 Layer & Block

By Jingnan Huang · December 21, 2024 · 861 Words

Last Edit: 12/21/24

Layer

Block


MLP

import torch
from torch import nn
from torch.nn import functional as F

net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))

X = torch.rand(2, 20)
net(X)
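
nn.Sequential chains the modules in the given order, so net(X) pushes the (2, 20) input through the hidden layer, the ReLU, and the output layer, producing an output of shape (2, 10).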

5.1.1 Custom Block

class MLP(nn.Module):
    # Declare the layers with model parameters; here, two fully connected layers
    def __init__(self):
        # Call the nn.Module constructor so we don't have to re-implement its setup
        super().__init__()
        self.hidden = nn.Linear(20, 256)  # hidden layer
        self.out = nn.Linear(256, 10)  # output layer

    # Define the forward propagation
    def forward(self, X):
        # hidden -> relu -> out
        return self.out(F.relu(self.hidden(X)))
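
A custom block is used just like the built-in version; a minimal check, reusing the X defined above:

net = MLP()
net(X)  # output shape: torch.Size([2, 10])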

5.1.2 Sequential Block

class MySequential(nn.Module):
    def __init__(self, *args):
        super().__init__()
        for idx, module in enumerate(args):
            # _modules is an OrderedDict maintained by nn.Module; storing each
            # submodule there registers it (and its parameters) with the block
            self._modules[str(idx)] = module

    def forward(self, X):
        # Traverse the submodules in the order they were added
        for block in self._modules.values():
            X = block(X)
        return X

net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
net(X)
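
Because every submodule is stored in self._modules, nn.Module registers it (and its parameters) automatically. A quick check on the net built above:

for name, param in net.named_parameters():
    print(name, param.shape)  # e.g. 0.weight has shape torch.Size([256, 20])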

5.1.3 Control Flow in Forward Propagation

class FixedHiddenMLP(nn.Module):
    def __init__(self):
        super().__init__()
        # Fixed random weights: requires_grad=False, so they are never updated during training
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        self.linear = nn.Linear(20, 20)

    def forward(self, X):
        X = self.linear(X)
        # Use the constant weights together with relu and mm
        X = F.relu(torch.mm(X, self.rand_weight) + 1)
        # Reuse the same fully connected layer: both calls share parameters
        X = self.linear(X)
        # Control flow: keep halving X until its absolute sum is at most 1
        while X.abs().sum() > 1:
            X /= 2
        return X.sum()
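
As with the other blocks, we construct FixedHiddenMLP and call it on the input (reusing X from above); forward returns X.sum(), so the result is a scalar tensor whose exact value depends on the fixed random weights:

net = FixedHiddenMLP()
net(X)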