PyTorch Tutorial 15.7: Word Similarity and Analogy


In Section 15.4, we trained a word2vec model on a small dataset and applied it to find semantically similar words for an input word. In practice, word vectors pretrained on large corpora can be applied to downstream natural language processing tasks, which will be covered later in Section 16. To demonstrate the semantics of pretrained word vectors from large corpora in a straightforward way, let us apply them to the word similarity and analogy tasks.

import os
import torch
from torch import nn
from d2l import torch as d2l

15.7.1. Loading Pretrained Word Vectors

Below we list pretrained GloVe embeddings of dimension 50, 100, and 300, which can be downloaded from the GloVe website. The pretrained fastText embeddings are available in multiple languages. Here we consider one English version (300-dimensional "wiki.en") that can be downloaded from the fastText website.

#@save
d2l.DATA_HUB['glove.6b.50d'] = (d2l.DATA_URL + 'glove.6B.50d.zip',
                                '0b8703943ccdb6eb788e6f091b8946e82231bc4d')

#@save
d2l.DATA_HUB['glove.6b.100d'] = (d2l.DATA_URL + 'glove.6B.100d.zip',
                                 'cd43bfb07e44e6f27cbcc7bc9ae3d80284fdaf5a')

#@save
d2l.DATA_HUB['glove.42b.300d'] = (d2l.DATA_URL + 'glove.42B.300d.zip',
                                  'b5116e234e9eb9076672cfeabf5469f3eec904fa')

#@save
d2l.DATA_HUB['wiki.en'] = (d2l.DATA_URL + 'wiki.en.zip',
                           'c1816da3821ae9f43899be655002f6c723e91b88')

To load these pretrained GloVe and fastText embeddings, we define the following TokenEmbedding class.

#@save
class TokenEmbedding:
    """Token embedding loaded from a pretrained GloVe or fastText file."""
    def __init__(self, embedding_name):
        self.idx_to_token, self.idx_to_vec = self._load_embedding(
            embedding_name)
        # Index 0 is reserved for the unknown token
        self.unknown_idx = 0
        self.token_to_idx = {token: idx for idx, token in
                             enumerate(self.idx_to_token)}

    def _load_embedding(self, embedding_name):
        idx_to_token, idx_to_vec = [''], []
        data_dir = d2l.download_extract(embedding_name)
        # GloVe website: https://nlp.stanford.edu/projects/glove/
        # fastText website: https://fasttext.cc/
        with open(os.path.join(data_dir, 'vec.txt'), 'r') as f:
            for line in f:
                elems = line.rstrip().split(' ')
                token, elems = elems[0], [float(elem) for elem in elems[1:]]
                # Skip header information, such as the top row in fastText
                if len(elems) > 1:
                    idx_to_token.append(token)
                    idx_to_vec.append(elems)
        # The unknown token gets the all-zero vector
        idx_to_vec = [[0] * len(idx_to_vec[0])] + idx_to_vec
        return idx_to_token, torch.tensor(idx_to_vec)

    def __getitem__(self, tokens):
        # Unknown tokens fall back to index 0
        indices = [self.token_to_idx.get(token, self.unknown_idx)
                   for token in tokens]
        vecs = self.idx_to_vec[torch.tensor(indices)]
        return vecs

    def __len__(self):
        return len(self.idx_to_token)

Below we load the 50-dimensional GloVe embeddings (pretrained on a Wikipedia subset). When creating a TokenEmbedding instance, the specified embedding file has to be downloaded if it has not been already.

glove_6b50d = TokenEmbedding('glove.6b.50d')
Downloading ../data/glove.6B.50d.zip from http://d2l-data.s3-accelerate.amazonaws.com/glove.6B.50d.zip...

Output the vocabulary size. The vocabulary contains 400000 words (tokens) and a special unknown token.

len(glove_6b50d)
400001

We can get the index of a word in the vocabulary, and vice versa.

glove_6b50d.token_to_idx['beautiful'], glove_6b50d.idx_to_token[3367]
(3367, 'beautiful')
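TokenEmbedding also supports looking up vectors directly via __getitem__. A quick sketch using the glove_6b50d instance loaded above; the chosen query words are our own, and the shape comment assumes the 50-dimensional file:

glove_6b50d[['beautiful', 'ugly']].shape  # torch.Size([2, 50])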

15.7.2. Applying Pretrained Word Vectors

Using the loaded GloVe vectors, we will demonstrate their semantics by applying them to the following word similarity and analogy tasks.

15.7.2.1. Word Similarity

Similar to Section 15.4.3, in order to find semantically similar words for an input word based on cosine similarities between word vectors (for vectors x and y, cos(x, y) = x·y / (‖x‖ ‖y‖)), we implement the following knn (k-nearest neighbors) function.

def knn(W, x, k):
    # Cosine similarity between x and every row of W.
    # Add 1e-9 for numerical stability
    cos = torch.mv(W, x.reshape(-1,)) / (
        torch.sqrt(torch.sum(W * W, axis=1) + 1e-9) *
        torch.sqrt((x * x).sum()))
    _, topk = torch.topk(cos, k=k)
    return topk, [cos[int(i)] for i in topk]
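With knn in place, we can search the embedding matrix for the nearest neighbors of a query word and print their cosine similarities. Below is a minimal sketch of such a lookup using the glove_6b50d instance loaded above; the helper name get_similar_tokens and the query word are our own choices:

def get_similar_tokens(query_token, k, embed):
    # Search k + 1 neighbors because the query word itself is
    # its own nearest neighbor and is skipped below
    topk, cos = knn(embed.idx_to_vec, embed[[query_token]], k + 1)
    for i, c in zip(topk[1:], cos[1:]):  # Exclude the input word
        print(f'cosine sim={float(c):.3f}: {embed.idx_to_token[int(i)]}')

get_similar_tokens('chip', 3, glove_6b50d)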
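The same machinery extends to the word analogy task: an analogy a : b :: c : ? can be completed by finding the word whose vector is closest to vec(c) + vec(b) - vec(a). A minimal sketch under the same assumptions (the helper name get_analogy and the example words are ours):

def get_analogy(token_a, token_b, token_c, embed):
    # Solve a : b :: c : ? via the nearest neighbor of
    # vec(c) + vec(b) - vec(a)
    vecs = embed[[token_a, token_b, token_c]]
    x = vecs[1] - vecs[0] + vecs[2]
    topk, cos = knn(embed.idx_to_vec, x, 1)
    return embed.idx_to_token[int(topk[0])]

get_analogy('man', 'woman', 'son', glove_6b50d)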
