
65 Attention Scoring Functions

import math
import torch
from torch import nn
from d2l import torch as d2l

# Masked softmax operation
#@save
def masked_softmax(X, valid_lens):
    """Perform a softmax operation by masking elements on the last axis"""
    # X: 3D tensor, valid_lens: 1D or 2D tensor
    if valid_lens is None:
        # dim=-1: softmax along the last axis
        return nn.functional.softmax(X, dim=-1)
    else:
        shape = X.shape
        if valid_lens.dim() == 1:
            # torch.repeat_interleave(input, repeats, dim=None) repeats each
            # element of input `repeats` times; with dim=None the input is
            # flattened first. Here each per-example length is repeated once
            # per row of X, e.g. tensor([2, 3]) -> tensor([2, 2, 3, 3])
            valid_lens = torch.repeat_interleave(valid_lens, shape[1])
        else:
            # Flatten a (batch_size, num_rows) matrix of lengths into a vector
            valid_lens = valid_lens.reshape(-1)
        # Masked elements on the last axis are replaced with a very large
        # negative value, so that their softmax output is 0
        X = d2l.sequence_mask(X.reshape(-1, shape[-1]), valid_lens, value=-1e6)
        # e.g. d2l.sequence_mask(torch.tensor([[1, 2, 3], [4, 5, 6]]),
        #                        torch.tensor([1, 2]))
        # -> tensor([[1, 0, 0],
        #            [4, 5, 0]])
        return nn.functional.softmax(X.reshape(shape), dim=-1)
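
# For reference, a minimal sketch of how d2l.sequence_mask behaves (the real
# helper is defined in an earlier chapter of the d2l book; this sketch assumes
# X is 2D and valid_len is 1D, which is how masked_softmax calls it, and
# _sequence_mask_sketch is a hypothetical name, not part of d2l):
def _sequence_mask_sketch(X, valid_len, value=0):
    # Position j in row i is kept if j < valid_len[i], else overwritten
    maxlen = X.size(1)
    mask = torch.arange(maxlen, device=X.device)[None, :] < valid_len[:, None]
    X[~mask] = value
    return X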

# How the function works
print(masked_softmax(torch.rand(2, 2, 4), torch.tensor([2, 3])))
print(masked_softmax(torch.rand(2, 2, 4), torch.tensor([[1, 3], [2, 4]])))
"""
With 1D valid_lens ([2, 3] is expanded to [2, 2, 3, 3], one length per row):
tensor([[[0.3190, 0.6810, 0.0000, 0.0000],
         [0.4617, 0.5383, 0.0000, 0.0000]],

        [[0.2563, 0.3015, 0.4423, 0.0000],
         [0.3518, 0.3026, 0.3456, 0.0000]]])
With 2D valid_lens ([[1, 3], [2, 4]] is flattened to [1, 3, 2, 4]):
tensor([[[1.0000, 0.0000, 0.0000, 0.0000],
         [0.2313, 0.3962, 0.3725, 0.0000]],

        [[0.3576, 0.6424, 0.0000, 0.0000],
         [0.2277, 0.2360, 0.3160, 0.2203]]])
"""
# Additive attention
#@save
class AdditiveAttention(nn.Module):
    """Additive attention"""
    def __init__(self, key_size, query_size, num_hiddens, dropout, **kwargs):
        super(AdditiveAttention, self).__init__(**kwargs)
        self.W_k = nn.Linear(key_size, num_hiddens, bias=False)
        self.W_q = nn.Linear(query_size, num_hiddens, bias=False)
        self.w_v = nn.Linear(num_hiddens, 1, bias=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, queries, keys, values, valid_lens):
        # valid_lens: how many key-value pairs are valid for each example
        queries, keys = self.W_q(queries), self.W_k(keys)
        # queries: (batch_size, num_queries,  num_hiddens), e.g. torch.Size([2, 1, 8])
        # keys:    (batch_size, num_kv_pairs, num_hiddens), e.g. torch.Size([2, 10, 8])
        # After dimension expansion:
        # queries: (batch_size, num_queries, 1,            num_hiddens)
        # keys:    (batch_size, 1,           num_kv_pairs, num_hiddens)
        # Summing with broadcasting gives
        # features: (batch_size, num_queries, num_kv_pairs, num_hiddens)
        features = queries.unsqueeze(2) + keys.unsqueeze(1)
        features = torch.tanh(features)
        # self.w_v has a single output, so remove that last dimension.
        # scores: (batch_size, num_queries, num_kv_pairs)
        scores = self.w_v(features).squeeze(-1)
        # Filter out the padded key-value pairs
        # attention_weights: (batch_size, num_queries, num_kv_pairs)
        self.attention_weights = masked_softmax(scores, valid_lens)
        # values: (batch_size, num_kv_pairs, value_size)
        # output: (batch_size, num_queries, value_size)
        return torch.bmm(self.dropout(self.attention_weights), values)

# Demonstrate the AdditiveAttention class above
# (2, 1, 20) -> (batch_size, num_queries, query_size)
# (2, 10, 2) -> (batch_size, num_kv_pairs, key_size)
queries, keys = torch.normal(0, 1, (2, 1, 20)), torch.ones((2, 10, 2))
# A mini-batch of values; the two value matrices are identical
# (2, 10, 4) -> (batch_size, num_kv_pairs, value_size)
values = torch.arange(40, dtype=torch.float32).reshape(1, 10, 4).repeat(
    2, 1, 1)
valid_lens = torch.tensor([2, 6])

attention = AdditiveAttention(key_size=2, query_size=20, num_hiddens=8,
                              dropout=0.1)
attention.eval()
print(attention(queries, keys, values, valid_lens))
print('attention(queries, keys, values, valid_lens).shape',
      attention(queries, keys, values, valid_lens).shape)
"""
tensor([[[ 2.0000,  3.0000,  4.0000,  5.0000]],

        [[10.0000, 11.0000, 12.0000, 13.0000]]], grad_fn=<BmmBackward0>)
                                  shape: (batch_size, num_queries, value_size)
attention(queries, keys, values, valid_lens).shape torch.Size([2, 1, 4])
"""
# Attention weights
d2l.show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)),
                  xlabel='Keys', ylabel='Queries')
# d2l.plt.show()
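
# For reference, the scoring function the next class implements: when queries
# and keys share the same length d, scaled dot-product attention computes
#     a(q, k) = q^T k / sqrt(d)
# or in matrix form, for n queries Q (n x d), m keys K (m x d) and m values
# V (m x v):
#     softmax(Q K^T / sqrt(d)) V,  with shape (n x v)
# Dividing by sqrt(d) keeps the variance of the scores near 1 when the entries
# of q and k are independent with zero mean and unit variance.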

# Scaled dot-product attention
#@save
class DotProductAttention(nn.Module):
    """Scaled dot-product attention"""
    def __init__(self, dropout, **kwargs):
        super(DotProductAttention, self).__init__(**kwargs)
        self.dropout = nn.Dropout(dropout)

    # Queries and keys share the same length d (query_size = key_size = d)
    # queries:    (batch_size, num_queries, d)
    # keys:       (batch_size, num_kv_pairs, d)
    # values:     (batch_size, num_kv_pairs, value_size)
    # valid_lens: (batch_size,) or (batch_size, num_queries)
    def forward(self, queries, keys, values, valid_lens=None):
        d = queries.shape[-1]
        # transpose(1, 2) swaps the last two dimensions of keys, so bmm
        # computes the query-key dot products
        scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d)
        self.attention_weights = masked_softmax(scores, valid_lens)
        # output: (batch_size, num_queries, value_size)
        return torch.bmm(self.dropout(self.attention_weights), values)

# Demonstrate the DotProductAttention class above, reusing keys, values and
# valid_lens from the additive-attention demo
# (batch_size, num_queries, query_size)
queries = torch.normal(0, 1, (2, 1, 2))
attention = DotProductAttention(dropout=0.5)
attention.eval()
print(attention(queries, keys, values, valid_lens))
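# Because the keys are again all identical, the weights over the valid
# positions are uniform regardless of the queries, so the output matches the
# additive-attention demo: [[2, 3, 4, 5]] and [[10, 11, 12, 13]]
# (dropout is inactive under eval()).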

# Uniform attention weights
d2l.show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)),
                  xlabel='Keys', ylabel='Queries')
# d2l.plt.show()
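
# A minimal extra check (an assumption for illustration, not in the original
# post): passing valid_lens=None skips masking, so softmax runs over all 10
# keys and each position gets weight 1/10. The output is then the mean of all
# 10 value rows:
print(attention(queries, keys, values, None))
# -> tensor([[[18., 19., 20., 21.]],
#            [[18., 19., 20., 21.]]])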


Source: https://www.cnblogs.com/g932150283/p/16597076.html
