PyTorch Tutorial: Implementing a Recurrent Neural Network from Scratch


We are now ready to implement an RNN from scratch. In particular, we will train this RNN as a character-level language model (see Section 9.4) on a corpus consisting of the entire text of H. G. Wells' The Time Machine, following the data processing steps outlined in Section 9.2. We start by loading the dataset.

PyTorch:

%matplotlib inline
import math
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l

MXNet:

%matplotlib inline
import math
from mxnet import autograd, gluon, np, npx
from d2l import mxnet as d2l

npx.set_np()

JAX:

%matplotlib inline
import math
import jax
from flax import linen as nn
from jax import numpy as jnp
from d2l import jax as d2l

TensorFlow:

%matplotlib inline
import math
import tensorflow as tf
from d2l import tensorflow as d2l
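Following Section 9.2, the corpus is wrapped in the book's TimeMachine data class. A minimal sketch for the PyTorch variant, assuming the d2l 1.0 API (constructor arguments may differ across d2l versions):

# Character-level Time Machine corpus; yields (input, target) index pairs.
data = d2l.TimeMachine(batch_size=1024, num_steps=32)
# Each minibatch is a pair of index tensors of shape (batch_size, num_steps).
X, Y = next(iter(data.train_dataloader()))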

9.5.1. RNN Model

We begin by defining a class to implement the RNN model (Section 9.4.2). Note that the number of hidden units num_hiddens is a tunable hyperparameter.

PyTorch:

class RNNScratch(d2l.Module):  #@save
  """The RNN model implemented from scratch."""
  def __init__(self, num_inputs, num_hiddens, sigma=0.01):
    super().__init__()
    self.save_hyperparameters()
    self.W_xh = nn.Parameter(
      torch.randn(num_inputs, num_hiddens) * sigma)
    self.W_hh = nn.Parameter(
      torch.randn(num_hiddens, num_hiddens) * sigma)
    self.b_h = nn.Parameter(torch.zeros(num_hiddens))

MXNet:

class RNNScratch(d2l.Module):  #@save
  """The RNN model implemented from scratch."""
  def __init__(self, num_inputs, num_hiddens, sigma=0.01):
    super().__init__()
    self.save_hyperparameters()
    self.W_xh = np.random.randn(num_inputs, num_hiddens) * sigma
    self.W_hh = np.random.randn(num_hiddens, num_hiddens) * sigma
    self.b_h = np.zeros(num_hiddens)

JAX:

class RNNScratch(nn.Module):  #@save
  """The RNN model implemented from scratch."""
  num_inputs: int
  num_hiddens: int
  sigma: float = 0.01

  def setup(self):
    self.W_xh = self.param('W_xh', nn.initializers.normal(self.sigma),
                           (self.num_inputs, self.num_hiddens))
    self.W_hh = self.param('W_hh', nn.initializers.normal(self.sigma),
                           (self.num_hiddens, self.num_hiddens))
    self.b_h = self.param('b_h', nn.initializers.zeros, (self.num_hiddens,))

TensorFlow:

class RNNScratch(d2l.Module):  #@save
  """The RNN model implemented from scratch."""
  def __init__(self, num_inputs, num_hiddens, sigma=0.01):
    super().__init__()
    self.save_hyperparameters()
    self.W_xh = tf.Variable(tf.random.normal(
      (num_inputs, num_hiddens)) * sigma)
    self.W_hh = tf.Variable(tf.random.normal(
      (num_hiddens, num_hiddens)) * sigma)
    self.b_h = tf.Variable(tf.zeros(num_hiddens))
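All four implementations hold the same three parameter tensors (W_xh, W_hh, b_h), so the parameter count is num_inputs * num_hiddens + num_hiddens**2 + num_hiddens. As a quick illustration with the sizes used later in this section:

# W_xh: 16*32 = 512, W_hh: 32*32 = 1024, b_h: 32
num_inputs, num_hiddens = 16, 32
print(num_inputs * num_hiddens + num_hiddens**2 + num_hiddens)  # 1568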

The forward method below defines how to compute the output and hidden state at any time step, given the current input and the model's state at the previous time step. Note that the RNN model loops over the outermost dimension of inputs, updating the hidden state one time step at a time. The model here uses a tanh activation function (Section 5.1.2.3).
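Concretely, at time step $t$ the hidden state is updated from the input $\mathbf{X}_t$ and the previous state $\mathbf{H}_{t-1}$ as

$$\mathbf{H}_t = \tanh(\mathbf{X}_t \mathbf{W}_{xh} + \mathbf{H}_{t-1} \mathbf{W}_{hh} + \mathbf{b}_h),$$

which is exactly the line computed inside the loop below.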

PyTorch:

@d2l.add_to_class(RNNScratch)  #@save
def forward(self, inputs, state=None):
  if state is None:
    # Initial state with shape: (batch_size, num_hiddens)
    state = torch.zeros((inputs.shape[1], self.num_hiddens),
                        device=inputs.device)
  else:
    state, = state
  outputs = []
  for X in inputs:  # Shape of inputs: (num_steps, batch_size, num_inputs)
    state = torch.tanh(torch.matmul(X, self.W_xh) +
                       torch.matmul(state, self.W_hh) + self.b_h)
    outputs.append(state)
  return outputs, state

MXNet:

@d2l.add_to_class(RNNScratch)  #@save
def forward(self, inputs, state=None):
  if state is None:
    # Initial state with shape: (batch_size, num_hiddens)
    state = np.zeros((inputs.shape[1], self.num_hiddens),
                     ctx=inputs.ctx)
  else:
    state, = state
  outputs = []
  for X in inputs:  # Shape of inputs: (num_steps, batch_size, num_inputs)
    state = np.tanh(np.dot(X, self.W_xh) +
                    np.dot(state, self.W_hh) + self.b_h)
    outputs.append(state)
  return outputs, state

JAX:

@d2l.add_to_class(RNNScratch)  #@save
def __call__(self, inputs, state=None):
  if state is not None:
    state, = state
  outputs = []
  for X in inputs:  # Shape of inputs: (num_steps, batch_size, num_inputs)
    state = jnp.tanh(jnp.matmul(X, self.W_xh) + (
      jnp.matmul(state, self.W_hh) if state is not None else 0)
                     + self.b_h)
    outputs.append(state)
  return outputs, state

TensorFlow:

@d2l.add_to_class(RNNScratch)  #@save
def forward(self, inputs, state=None):
  if state is None:
    # Initial state with shape: (batch_size, num_hiddens)
    state = tf.zeros((inputs.shape[1], self.num_hiddens))
  else:
    state, = state
    state = tf.reshape(state, (-1, self.num_hiddens))
  outputs = []
  for X in inputs:  # Shape of inputs: (num_steps, batch_size, num_inputs)
    state = tf.tanh(tf.matmul(X, self.W_xh) +
                    tf.matmul(state, self.W_hh) + self.b_h)
    outputs.append(state)
  return outputs, state

We can feed a minibatch of input sequences into the RNN model as follows.

PyTorch:

batch_size, num_inputs, num_hiddens, num_steps = 2, 16, 32, 100
rnn = RNNScratch(num_inputs, num_hiddens)
X = torch.ones((num_steps, batch_size, num_inputs))
outputs, state = rnn(X)

MXNet:

batch_size, num_inputs, num_hiddens, num_steps = 2, 16, 32, 100
rnn = RNNScratch(num_inputs, num_hiddens)
X = np.ones((num_steps, batch_size, num_inputs))
outputs, state = rnn(X)

JAX:

batch_size, num_inputs, num_hiddens, num_steps = 2, 16, 32, 100
rnn = RNNScratch(num_inputs, num_hiddens)
X = jnp.ones((num_steps, batch_size, num_inputs))
# Flax initializes parameters lazily; init_with_output returns the forward
# outputs together with the freshly initialized parameters.
(outputs, state), _ = rnn.init_with_output(d2l.get_key(), X)

TensorFlow:

batch_size, num_inputs, num_hiddens, num_steps = 2, 16, 32, 100
rnn = RNNScratch(num_inputs, num_hiddens)
X = tf.ones((num_steps, batch_size, num_inputs))
outputs, state = rnn(X)
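It is worth verifying that the shapes come out as intended: outputs should be a list of num_steps tensors, and each output, like the final state, should have shape (batch_size, num_hiddens). A minimal sanity check, assuming the PyTorch variant above:

# outputs is a Python list with one (batch_size, num_hiddens) tensor per step.
assert len(outputs) == num_steps
assert outputs[0].shape == (batch_size, num_hiddens)
assert state.shape == (batch_size, num_hiddens)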
