发布于2021-10-05 21:24 阅读(331) 评论(0) 点赞(29) 收藏(0)
代码实现:
# Plain (vanilla) convolutional RNN cell.
class ConvRNN(nn.Module):
    """Convolutional vanilla-RNN cell.

    The input-to-hidden convolution is dilated; the hidden-to-hidden
    convolution is not. Padding is chosen so that spatial size is
    preserved (assumes an odd ``kernel`` — TODO confirm with callers).

    Args:
        inp_dim: channels of the input tensor ``x``.
        oup_dim: channels of the hidden state / output.
        kernel: convolution kernel size.
        dilation: dilation of the input convolution only.
    """

    def __init__(self, inp_dim, oup_dim, kernel, dilation):
        super().__init__()
        # "same" padding for the dilated input conv (odd kernel).
        pad_x = int(dilation * (kernel - 1) / 2)
        self.conv_x = nn.Conv2d(inp_dim, oup_dim, kernel, padding=pad_x, dilation=dilation)

        # "same" padding for the undilated hidden-state conv.
        pad_h = int((kernel - 1) / 2)
        self.conv_h = nn.Conv2d(oup_dim, oup_dim, kernel, padding=pad_h)
        self.relu = nn.LeakyReLU(0.2)

    def forward(self, x, h=None):
        """One recurrent step.

        Returns ``(output, new_state)``; both are the same tensor, so the
        second element is what callers feed back as ``h`` next step.
        """
        # Tensor.tanh() replaces the deprecated torch.nn.functional.tanh.
        if h is None:
            # First time step: no previous hidden state available.
            h = self.conv_x(x).tanh()
        else:
            h = (self.conv_x(x) + self.conv_h(h)).tanh()

        h = self.relu(h)
        return h, h
参考:图解LSTM 结构逻辑_BruceJust的博客-CSDN博客_lstm结构图
前向传播公式:
代码实现:
# Convolutional LSTM cell (structure follows the standard LSTM diagram).
# Reference: https://blog.csdn.net/weixin_42175217/article/details/106183682
class ConvLSTM(nn.Module):
    """Convolutional LSTM cell with dilated input convolutions.

    Gates: f (forget), i (input), o (output), j (candidate cell value).
    Input-to-gate convs are dilated; hidden-to-gate convs are not.
    Padding preserves spatial size (assumes an odd ``kernel``).

    Args:
        inp_dim: channels of the input tensor ``x``.
        oup_dim: channels of the hidden/cell state.
        kernel: convolution kernel size.
        dilation: dilation of the input convolutions only.
    """

    def __init__(self, inp_dim, oup_dim, kernel, dilation):
        super().__init__()
        # "same" padding for the dilated input convs (odd kernel).
        pad_x = int(dilation * (kernel - 1) / 2)
        self.conv_xf = nn.Conv2d(inp_dim, oup_dim, kernel, padding=pad_x, dilation=dilation)
        self.conv_xi = nn.Conv2d(inp_dim, oup_dim, kernel, padding=pad_x, dilation=dilation)
        self.conv_xo = nn.Conv2d(inp_dim, oup_dim, kernel, padding=pad_x, dilation=dilation)
        self.conv_xj = nn.Conv2d(inp_dim, oup_dim, kernel, padding=pad_x, dilation=dilation)

        # "same" padding for the undilated hidden-state convs.
        pad_h = int((kernel - 1) / 2)
        self.conv_hf = nn.Conv2d(oup_dim, oup_dim, kernel, padding=pad_h)
        self.conv_hi = nn.Conv2d(oup_dim, oup_dim, kernel, padding=pad_h)
        self.conv_ho = nn.Conv2d(oup_dim, oup_dim, kernel, padding=pad_h)
        self.conv_hj = nn.Conv2d(oup_dim, oup_dim, kernel, padding=pad_h)

        self.relu = nn.LeakyReLU(0.2)

    def forward(self, x, pair=None):
        """One recurrent step.

        Args:
            x: input feature map.
            pair: previous ``[h, c]`` state, or None on the first step.

        Returns:
            ``(output, [h, c])`` where output is ``h`` after LeakyReLU.
        """
        # Tensor.sigmoid()/tanh() replace the deprecated F.sigmoid/F.tanh.
        if pair is None:
            # First step: previous h/c are absent, so the forget gate and
            # all conv_h* terms drop out.
            i = self.conv_xi(x).sigmoid()
            o = self.conv_xo(x).sigmoid()
            j = self.conv_xj(x).tanh()
            c = i * j
            # Bug fix: the original wrote `h = o * c` here, inconsistent
            # with the recurrent branch's standard `h = o * tanh(c)`.
            h = o * c.tanh()
        else:
            h, c = pair
            f = (self.conv_xf(x) + self.conv_hf(h)).sigmoid()
            i = (self.conv_xi(x) + self.conv_hi(h)).sigmoid()
            o = (self.conv_xo(x) + self.conv_ho(h)).sigmoid()
            j = (self.conv_xj(x) + self.conv_hj(h)).tanh()
            c = f * c + i * j
            h = o * c.tanh()

        h = self.relu(h)
        return h, [h, c]
前向传播公式:
代码实现:
class ConvGRU(nn.Module):
    """Convolutional GRU cell with dilated input convolutions.

    Gates: z (update), r (reset), n (candidate state). Input-to-gate
    convs are dilated; hidden-to-gate convs are not. Padding preserves
    spatial size (assumes an odd ``kernel``).

    Args:
        inp_dim: channels of the input tensor ``x``.
        oup_dim: channels of the hidden state.
        kernel: convolution kernel size.
        dilation: dilation of the input convolutions only.
    """

    def __init__(self, inp_dim, oup_dim, kernel, dilation):
        super().__init__()
        # "same" padding for the dilated input convs (odd kernel).
        pad_x = int(dilation * (kernel - 1) / 2)
        self.conv_xz = nn.Conv2d(inp_dim, oup_dim, kernel, padding=pad_x, dilation=dilation)
        self.conv_xr = nn.Conv2d(inp_dim, oup_dim, kernel, padding=pad_x, dilation=dilation)
        self.conv_xn = nn.Conv2d(inp_dim, oup_dim, kernel, padding=pad_x, dilation=dilation)

        # "same" padding for the undilated hidden-state convs.
        pad_h = int((kernel - 1) / 2)
        self.conv_hz = nn.Conv2d(oup_dim, oup_dim, kernel, padding=pad_h)
        self.conv_hr = nn.Conv2d(oup_dim, oup_dim, kernel, padding=pad_h)
        self.conv_hn = nn.Conv2d(oup_dim, oup_dim, kernel, padding=pad_h)

        self.relu = nn.LeakyReLU(0.2)

    def forward(self, x, h=None):
        """One recurrent step.

        Returns ``(output, new_state)``; both are the same tensor, so the
        second element is what callers feed back as ``h`` next step.
        """
        # Tensor.sigmoid()/tanh() replace the deprecated F.sigmoid/F.tanh.
        if h is None:
            # First step: with h_prev treated as 0, the update equation
            # (1-z)*h_prev + z*n reduces to z*n (ignoring conv_hn/conv_hr
            # terms, which the original also dropped here).
            z = self.conv_xz(x).sigmoid()
            # Renamed from `f` to `n` to match the recurrent branch.
            n = self.conv_xn(x).tanh()
            h = z * n
        else:
            z = (self.conv_xz(x) + self.conv_hz(h)).sigmoid()
            r = (self.conv_xr(x) + self.conv_hr(h)).sigmoid()
            n = (self.conv_xn(x) + self.conv_hn(r * h)).tanh()
            h = (1 - z) * h + z * n

        h = self.relu(h)
        return h, h
代码参考:DCSFN/model.py at master · Ohraincu/DCSFN · GitHub
作者:你是好人
链接:http://www.phpheidong.com/blog/article/167761/17f0e71446b473a51b71/
来源:php黑洞网
任何形式的转载都请注明出处,如有侵权 一经发现 必将追究其法律责任
昵称:
评论内容:(最多支持255个字符)
---无人问津也好,技不如人也罢,你都要试着安静下来,去做自己该做的事,而不是让内心的烦躁、焦虑,坏掉你本来就不多的热情和定力
Copyright © 2018-2021 php黑洞网 All Rights Reserved 版权所有,并保留所有权利。 京ICP备18063182号-4
投诉与举报,广告合作请联系vgs_info@163.com或QQ3083709327
免责声明:网站文章均由用户上传,仅供读者学习交流使用,禁止用做商业用途。若文章涉及色情,反动,侵权等违法信息,请向我们举报,一经核实我们会立即删除!