
1. Using apply()

An example with three pieces:

  • Encoder: the encoder model being designed
  • weights_init(): the function that initializes the model's parameters
  • model.apply(): applies the initialization to every module (see the minimal sketch right after this list; the full code follows it)
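Before the full example, here is a minimal sketch of the apply() mechanism. The toy network and the init_linear helper are hypothetical and only illustrate the traversal: module.apply(fn) visits every submodule recursively and then the module itself, calling fn on each one.

import torch.nn as nn

def init_linear(m):
    # hypothetical helper: only touch Linear layers
    if isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, 0.0, 0.02)
        nn.init.zeros_(m.bias)

toy = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 1))
toy.apply(init_linear)  # init_linear is called on each Linear, the ReLU, and the Sequential itself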
# coding: utf-8
from torch import nn


def weights_init(mod):
    """Custom initialization function."""
    classname = mod.__class__.__name__
    # print the class name of the module that was passed in
    print(classname)
    if classname.find('Conv') != -1:        # 'Conv' and 'BatchNorm' here match the torch.nn class names
        mod.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        mod.weight.data.normal_(1.0, 0.02)  # initialize the BN layer's gamma from a N(1, 0.02) normal distribution
        mod.bias.data.fill_(0)              # initialize the BN layer's beta to 0


class Encoder(nn.Module):
    def __init__(self, input_size, input_channels, base_channnes, z_channels):
        super(Encoder, self).__init__()
        # input_size must be a multiple of 16
        assert input_size % 16 == 0, "input_size has to be a multiple of 16"
        models = nn.Sequential()
        # kernel 4, stride 2, padding 1 halves the spatial size
        models.add_module('Conv2_{0}_{1}'.format(input_channels, base_channnes),
                          nn.Conv2d(input_channels, base_channnes, 4, 2, 1, bias=False))
        models.add_module('LeakyReLU_{0}'.format(base_channnes), nn.LeakyReLU(0.2, inplace=True))
        # the feature map size has been halved at this point
        temp_size = input_size / 2
        # keep downsampling so that, whatever the input size, the feature map ends up 4*4
        while temp_size > 4:
            models.add_module('Conv2_{0}_{1}'.format(base_channnes, base_channnes * 2),
                              nn.Conv2d(base_channnes, base_channnes * 2, 4, 2, 1, bias=False))
            models.add_module('BatchNorm2d_{0}'.format(base_channnes * 2), nn.BatchNorm2d(base_channnes * 2))
            models.add_module('LeakyReLU_{0}'.format(base_channnes * 2), nn.LeakyReLU(0.2, inplace=True))
            base_channnes *= 2
            temp_size /= 2
        # once the feature map is 4*4, add the final layer so the output is 1*1
        models.add_module('Conv2_{0}_{1}'.format(base_channnes, z_channels),
                          nn.Conv2d(base_channnes, z_channels, 4, 1, 0, bias=False))
        self.models = models

    def forward(self, x):
        x = self.models(x)
        return x


if __name__ == '__main__':
    e = Encoder(256, 3, 64, 100)
    # weights_init is called once for every module inside e and for e itself;
    # the mod argument takes each of these modules in turn
    e.apply(weights_init)
    # inspect the parameters by name
    for name, param in e.named_parameters():
        print(name)
        # as an example, check whether the initialization matches the design:
        # the BatchNorm2d weight should be normally distributed and the bias should be all zeros
        if name == 'models.BatchNorm2d_128.weight' or name == 'models.BatchNorm2d_128.bias':
            print(param)

Output:

# These are the classes of the modules passed to the initialization function, in order
Conv2d
LeakyReLU
Conv2d
BatchNorm2d
LeakyReLU
Conv2d
BatchNorm2d
LeakyReLU
Conv2d
BatchNorm2d
LeakyReLU
Conv2d
BatchNorm2d
LeakyReLU
Conv2d
BatchNorm2d
LeakyReLU
Conv2d
Sequential
Encoder
# The format of the printed parameter names; the two parameters of BatchNorm2d_128 are printed because they match the condition
models.Conv2_3_64.weight
models.Conv2_64_128.weight
models.BatchNorm2d_128.weight
Parameter containing:
tensor([1.0074, 0.9865, 1.0188, 1.0015, 0.9757, 1.0393, 0.9813, 1.0135, 1.0227,
0.9903, 1.0490, 1.0102, 0.9920, 0.9878, 1.0060, 0.9944, 0.9993, 1.0139,
0.9987, 0.9888, 0.9816, 0.9951, 1.0017, 0.9818, 0.9922, 0.9627, 0.9883,
0.9985, 0.9759, 0.9962, 1.0183, 1.0199, 1.0033, 1.0475, 0.9586, 0.9916,
1.0354, 0.9956, 0.9998, 1.0022, 1.0307, 1.0141, 1.0062, 1.0082, 1.0111,
0.9683, 1.0372, 0.9967, 1.0157, 1.0299, 1.0352, 0.9961, 0.9901, 1.0274,
0.9727, 1.0042, 1.0278, 1.0134, 0.9648, 0.9887, 1.0225, 1.0175, 1.0002,
0.9988, 0.9839, 1.0023, 0.9913, 0.9657, 1.0404, 1.0197, 1.0221, 0.9925,
0.9962, 0.9910, 0.9865, 1.0342, 1.0156, 0.9688, 1.0015, 1.0055, 0.9751,
1.0304, 1.0132, 0.9778, 0.9900, 1.0092, 0.9745, 1.0067, 1.0077, 1.0057,
1.0117, 0.9850, 1.0309, 0.9918, 0.9945, 0.9935, 0.9746, 1.0366, 0.9913,
0.9564, 1.0071, 1.0370, 0.9774, 1.0126, 1.0040, 0.9946, 1.0080, 1.0126,
0.9761, 0.9811, 0.9974, 0.9992, 1.0338, 1.0104, 0.9931, 1.0204, 1.0230,
1.0255, 0.9969, 1.0079, 1.0127, 0.9816, 1.0132, 0.9884, 0.9691, 0.9922,
1.0166, 0.9980], requires_grad=True)
models.BatchNorm2d_128.bias
Parameter containing:
tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 0.], requires_grad=True)
models.Conv2_128_256.weight
models.BatchNorm2d_256.weight
models.BatchNorm2d_256.bias
models.Conv2_256_512.weight
models.BatchNorm2d_512.weight
models.BatchNorm2d_512.bias
models.Conv2_512_1024.weight
models.BatchNorm2d_1024.weight
models.BatchNorm2d_1024.bias
models.Conv2_1024_2048.weight
models.BatchNorm2d_2048.weight
models.BatchNorm2d_2048.bias
models.Conv2_2048_100.weight
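As a further sanity check (a sketch reusing e and the parameter names printed above, not from the original post), one can also look at the statistics of the initialized tensors: the Conv weights should have mean close to 0 and std close to 0.02, and the BatchNorm weights mean close to 1 and std close to 0.02.

params = dict(e.named_parameters())
w_conv = params['models.Conv2_3_64.weight']
w_bn = params['models.BatchNorm2d_128.weight']
print(w_conv.mean().item(), w_conv.std().item())  # expected to be close to 0.0 and 0.02
print(w_bn.mean().item(), w_bn.std().item())      # expected to be close to 1.0 and 0.02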

2. Initialize directly when defining the network

import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F


class Discriminator(nn.Module):
    """
    6 fully connected layers
    """
    def __init__(self, z_dim):
        super(Discriminator, self).__init__()
        self.z_dim = z_dim
        # NOTE: the layer widths are placeholders (the exact sizes were lost in the original listing)
        self.net = nn.Sequential(
            nn.Linear(z_dim, 1000),
            nn.LeakyReLU(0.2, True),
            nn.Linear(1000, 1000),
            nn.LeakyReLU(0.2, True),
            nn.Linear(1000, 1000),
            nn.LeakyReLU(0.2, True),
            nn.Linear(1000, 1000),
            nn.LeakyReLU(0.2, True),
            nn.Linear(1000, 1000),
            nn.LeakyReLU(0.2, True),
            nn.Linear(1000, 1),
        )
        self.weight_init()  # initialize the parameters

    def weight_init(self, mode='normal'):
        if mode == 'kaiming':
            initializer = kaiming_init
        elif mode == 'normal':
            initializer = normal_init
        for block in self._modules:
            for m in self._modules[block]:
                initializer(m)

    def forward(self, z):
        return self.net(z).squeeze()


def kaiming_init(m):
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        init.kaiming_normal_(m.weight)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
        m.weight.data.fill_(1)
        if m.bias is not None:
            m.bias.data.fill_(0)


def normal_init(m):
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        init.normal_(m.weight, 0, 0.02)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
        m.weight.data.fill_(1)
        if m.bias is not None:
            m.bias.data.fill_(0)

Then the model only needs to be instantiated: weight_init() is called inside __init__, so the parameters are initialized automatically.
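A minimal usage sketch (the z_dim value and the random input are hypothetical): constructing the Discriminator applies normal_init to every layer; calling weight_init(mode='kaiming') afterwards would re-initialize the parameters with the Kaiming scheme instead.

import torch

d = Discriminator(z_dim=10)      # normal_init is applied inside __init__
out = d(torch.randn(4, 10))      # forward pass on a random batch of 4 latent vectors
print(out.shape)

d.weight_init(mode='kaiming')    # optionally switch to the Kaiming initializer afterwards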
