
[pytorch] model.children(), model.modules() and parameters(): differences and usage

  • children
  • modules
  • parameters

children

children() only retrieves the shallowest level of the network structure, i.e. the model's direct submodules. The corresponding named_children() returns tuples instead, where tuple[0] is the name of the child and tuple[1] is the corresponding structure:

from collections import OrderedDict
import torch

class Net4(torch.nn.Module):
    def __init__(self):
        super(Net4, self).__init__()
        self.conv = torch.nn.Sequential(
            OrderedDict(
                [
                    ("conv1", torch.nn.Conv2d(3, 32, 3, 1, 1)),
                    ("relu1", torch.nn.ReLU()),
                    ("pool1", torch.nn.MaxPool2d(2))
                ]
            ))

        self.dense = torch.nn.Sequential(
            OrderedDict([
                ("dense1", torch.nn.Linear(32 * 3 * 3, 128)),
                ("relu2", torch.nn.ReLU()),
                ("dense2", torch.nn.Linear(128, 10))
            ])
        )

    def forward(self, x):
        conv_out = self.conv(x)
        res = conv_out.view(conv_out.size(0), -1)
        out = self.dense(res)
        return out


print("Method 4:")
model4 = Net4()
# children() essentially yields the model's module attributes; the constructor above defines two of them
for i in model4.children():
    print(i)
    print(type(i))
print('==============================')
# named_children() also yields the module attributes, together with their names
for i in model4.named_children():
    print(i)
    print(type(i))


The output is:

'''
Sequential(
  (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (relu1): ReLU()
  (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
<class 'torch.nn.modules.container.Sequential'>
Sequential(
  (dense1): Linear(in_features=288, out_features=128, bias=True)
  (relu2): ReLU()
  (dense2): Linear(in_features=128, out_features=10, bias=True)
)
<class 'torch.nn.modules.container.Sequential'>

==============================
('conv', Sequential(
  (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (relu1): ReLU()
  (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
))
<class 'tuple'>
('dense', Sequential(
  (dense1): Linear(in_features=288, out_features=128, bias=True)
  (relu2): ReLU()
  (dense2): Linear(in_features=128, out_features=10, bias=True)
))
<class 'tuple'>
'''
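In practice, children() is handy for surgery on pretrained networks: because it yields the top-level blocks in definition order, slicing the resulting list lets you drop the classification head and keep the rest as a feature extractor. A minimal sketch, assuming torchvision is installed (resnet18 is just an illustrative choice):

import torch
import torchvision

resnet = torchvision.models.resnet18()  # weights omitted; only the structure matters here
# children() yields the top-level blocks in definition order,
# so slicing off the last element removes the final fully connected layer
feature_extractor = torch.nn.Sequential(*list(resnet.children())[:-1])

x = torch.randn(1, 3, 224, 224)
print(feature_extractor(x).shape)  # torch.Size([1, 512, 1, 1])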

modules

The model's modules() and named_modules() methods both traverse every component of the model (wrapper containers such as Sequential, individual layers, custom layers, etc.) from shallow to deep, down to the innermost single layers. The difference is that modules() yields each layer object directly, whereas named_modules() yields tuples whose first element is the (dot-separated) name and whose second element is the layer object itself.
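
A quick way to see the difference in traversal depth, reusing the Net4 class defined above (a minimal sketch):

model4 = Net4()
# children() stops at the two top-level Sequential containers
print(len(list(model4.children())))  # 2
# modules() also yields Net4 itself plus every nested layer:
# Net4 + 2 Sequential + 6 inner layers = 9
print(len(list(model4.modules())))   # 9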

The full traversal of Net4 looks like this:


print("Method 4:")
model4 = Net4()
# modules() traverses every component of the model (wrapper Sequential containers, individual layers, custom layers, etc.) from shallow to deep, down to the innermost single layers
for i in model4.modules():
    print(i)
    print('==============================')
print('============= divider =================')
# named_modules() does the same, but each yielded element is a tuple: the first element is the name, the second is the layer object itself
for i in model4.named_modules():
    print(i)
    print('==============================')


The output is:

'''
The shallowest level, closest to the root, is the model itself
Net4(
  (conv): Sequential(
    (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (relu1): ReLU()
    (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (dense): Sequential(
    (dense1): Linear(in_features=288, out_features=128, bias=True)
    (relu2): ReLU()
    (dense2): Linear(in_features=128, out_features=10, bias=True)
  )
)
==============================
Going one level deeper: the model's top-level attributes
Sequential(
  (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (relu1): ReLU()
  (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
==============================
Deeper still: inside each attribute
Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
==============================
The contents of this attribute are traversed one by one until exhausted
ReLU()
==============================
MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
Once this attribute is fully traversed, the other attribute is visited
==============================
Sequential(
  (dense1): Linear(in_features=288, out_features=128, bias=True)
  (relu2): ReLU()
  (dense2): Linear(in_features=128, out_features=10, bias=True)
)
==============================
Linear(in_features=288, out_features=128, bias=True)
==============================
ReLU()
==============================
Linear(in_features=128, out_features=10, bias=True)
==============================
============= divider =================
('', Net4(
  (conv): Sequential(
    (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (relu1): ReLU()
    (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (dense): Sequential(
    (dense1): Linear(in_features=288, out_features=128, bias=True)
    (relu2): ReLU()
    (dense2): Linear(in_features=128, out_features=10, bias=True)
  )
))
==============================
('conv', Sequential(
  (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (relu1): ReLU()
  (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
))
==============================
('conv.conv1', Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)))
==============================
('conv.relu1', ReLU())
==============================
('conv.pool1', MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False))
==============================
('dense', Sequential(
  (dense1): Linear(in_features=288, out_features=128, bias=True)
  (relu2): ReLU()
  (dense2): Linear(in_features=128, out_features=10, bias=True)
))
==============================
('dense.dense1', Linear(in_features=288, out_features=128, bias=True))
==============================
('dense.relu2', ReLU())
==============================
('dense.dense2', Linear(in_features=128, out_features=10, bias=True))
==============================
'''
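
Because modules() reaches every individual layer regardless of nesting, it is a natural hook for layer-type-based weight initialization. A minimal sketch against the Net4 model above:

def init_weights(model):
    # one loop covers the layers inside both Sequential containers
    for m in model.modules():
        if isinstance(m, torch.nn.Conv2d):
            torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
            if m.bias is not None:
                torch.nn.init.zeros_(m.bias)
        elif isinstance(m, torch.nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight)
            torch.nn.init.zeros_(m.bias)

init_weights(Net4())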

parameters

The two methods above are mostly used for inspecting or visualizing the network structure. parameters() and named_parameters(), by contrast, are used when you need the actual parameter tensors of each layer, for example during weight initialization, when freezing layers, or when loading saved parameters.

# resnet_backbone is assumed to be an existing ResNet-style model
# freeze every BatchNorm parameter whose name contains 'bn'
for k, v in resnet_backbone.named_parameters():
    if 'bn' in k:
        v.requires_grad = False

# verify which parameters are still trainable
# (note: the BN layers inside 'downsample' blocks are not caught by the
# name test above, which is why they stay trainable in the output below)
for k, v in resnet_backbone.named_parameters():
    print('{}: {}'.format(k, v.requires_grad))

The output is:

convnet_1.0.weight: True
convnet_1.1.weight: True
convnet_1.1.bias: True
convnet_2.0.0.conv1.weight: True
convnet_2.0.0.bn1.weight: False
convnet_2.0.0.bn1.bias: False
convnet_2.0.0.conv2.weight: True
convnet_2.0.0.bn2.weight: False
convnet_2.0.0.bn2.bias: False
convnet_2.0.0.conv3.weight: True
convnet_2.0.0.bn3.weight: False
convnet_2.0.0.bn3.bias: False
convnet_2.0.0.downsample.0.weight: True
convnet_2.0.0.downsample.1.weight: True
convnet_2.0.0.downsample.1.bias: True
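
After freezing parameters this way, parameters() pairs naturally with the optimizer: passing only the tensors that still require gradients keeps the frozen BatchNorm weights out of the update step. A minimal sketch (the learning rate is an arbitrary placeholder):

trainable = [p for p in resnet_backbone.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(trainable, lr=0.01, momentum=0.9)

# sanity check: count frozen vs. trainable tensors
n_frozen = sum(1 for p in resnet_backbone.parameters() if not p.requires_grad)
print('{} trainable tensors, {} frozen'.format(len(trainable), n_frozen))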
