Segment Anything Model Code Walkthrough (5): Transformer
transformer.py implements a two-way (bidirectional) Transformer whose main job is to let the prompt (point) embeddings and the image embeddings attend to each other. The parts are explained below:
- TwoWayTransformer class: the main body of the two-way Transformer. It stacks `depth` Transformer blocks, each combining self-attention, cross-attention in both directions, and an MLP layer, and finishes with a final token-to-image attention plus a LayerNorm. The inputs are the image embedding, the image positional encoding, and the point embedding; the outputs are the processed point embedding and the processed image embedding. A usage sketch with dummy tensors follows the code listing below.
- TwoWayAttentionBlock class: the basic building block of the Transformer. It contains four layers: self-attention on the point (token) embeddings, token-to-image cross-attention, an MLP on the tokens, and image-to-token cross-attention. The two cross-attention layers differ only in which side supplies the queries and which supplies the keys. Inputs are the point embeddings and the image embeddings; outputs are the processed point embeddings and the processed image embeddings.
- Attention class: the attention layer. It implements scaled dot-product attention and provides a downsample_rate parameter that shrinks the internal embedding size used by the query/key/value projections to save compute; a small dimension sketch follows the code listing below.
- Other functions and classes: MLPBlock, nn.LayerNorm, and so on. MLPBlock is imported from .common; a minimal sketch of it is shown right after this list.
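MLPBlock itself lives in .common rather than in transformer.py. As a reference while reading the code below, here is a minimal sketch of such a block, assuming the usual Linear → activation → Linear design implied by the call MLPBlock(embedding_dim, mlp_dim, activation); check segment_anything/modeling/common.py for the actual implementation.

```python
import torch
from torch import nn
from typing import Type


class MLPBlock(nn.Module):
    """Minimal sketch of the MLP block used inside each TwoWayAttentionBlock:
    expand to mlp_dim, apply the activation, project back to embedding_dim."""

    def __init__(
        self,
        embedding_dim: int,
        mlp_dim: int,
        act: Type[nn.Module] = nn.GELU,  # assumed default; TwoWayAttentionBlock passes an explicit activation
    ) -> None:
        super().__init__()
        self.lin1 = nn.Linear(embedding_dim, mlp_dim)  # C -> mlp_dim
        self.lin2 = nn.Linear(mlp_dim, embedding_dim)  # mlp_dim -> C
        self.act = act()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.lin2(self.act(self.lin1(x)))
```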
Within SAM, this two-way Transformer is the core of the mask decoder: the sparse prompt (point) tokens and the dense image features attend to each other in both directions, so the predicted masks are conditioned on the prompts. The same two-way attention pattern is also common in vision-language models for tasks such as visual question answering or image captioning, but here both streams are visual.
```python
import torch
from torch import Tensor, nn

import math
from typing import Tuple, Type

from .common import MLPBlock


class TwoWayTransformer(nn.Module):
    def __init__(
        self,
        depth: int,
        embedding_dim: int,
        num_heads: int,
        mlp_dim: int,
        activation: Type[nn.Module] = nn.ReLU,
        attention_downsample_rate: int = 2,
    ) -> None:
        """
        A transformer decoder that attends to an input image using
        queries whose positional embedding is supplied.

        Args:
          depth (int): number of layers in the transformer
          embedding_dim (int): the channel dimension for the input embeddings
          num_heads (int): the number of heads for multihead attention. Must
            divide embedding_dim
          mlp_dim (int): the channel dimension internal to the MLP block
          activation (nn.Module): the activation to use in the MLP block
        """
        super().__init__()
        self.depth = depth
        self.embedding_dim = embedding_dim
        self.num_heads = num_heads
        self.mlp_dim = mlp_dim
        self.layers = nn.ModuleList()

        for i in range(depth):
            self.layers.append(
                TwoWayAttentionBlock(
                    embedding_dim=embedding_dim,
                    num_heads=num_heads,
                    mlp_dim=mlp_dim,
                    activation=activation,
                    attention_downsample_rate=attention_downsample_rate,
                    skip_first_layer_pe=(i == 0),
                )
            )

        self.final_attn_token_to_image = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )
        self.norm_final_attn = nn.LayerNorm(embedding_dim)

    def forward(
        self,
        image_embedding: Tensor,
        image_pe: Tensor,
        point_embedding: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Args:
          image_embedding (torch.Tensor): image to attend to. Should be shape
            B x embedding_dim x h x w for any h and w.
          image_pe (torch.Tensor): the positional encoding to add to the image. Must
            have the same shape as image_embedding.
          point_embedding (torch.Tensor): the embedding to add to the query points.
            Must have shape B x N_points x embedding_dim for any N_points.

        Returns:
          torch.Tensor: the processed point_embedding
          torch.Tensor: the processed image_embedding
        """
        # BxCxHxW -> BxHWxC == B x N_image_tokens x C
        bs, c, h, w = image_embedding.shape
        image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
        image_pe = image_pe.flatten(2).permute(0, 2, 1)

        # Prepare queries
        queries = point_embedding
        keys = image_embedding

        # Apply transformer blocks and final layernorm
        for layer in self.layers:
            queries, keys = layer(
                queries=queries,
                keys=keys,
                query_pe=point_embedding,
                key_pe=image_pe,
            )

        # Apply the final attention layer from the points to the image
        q = queries + point_embedding
        k = keys + image_pe
        attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
        queries = queries + attn_out
        queries = self.norm_final_attn(queries)

        return queries, keys


class TwoWayAttentionBlock(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        num_heads: int,
        mlp_dim: int = 2048,
        activation: Type[nn.Module] = nn.ReLU,
        attention_downsample_rate: int = 2,
        skip_first_layer_pe: bool = False,
    ) -> None:
        """
        A transformer block with four layers: (1) self-attention of sparse
        inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
        block on sparse inputs, and (4) cross attention of dense inputs to sparse
        inputs.

        Arguments:
          embedding_dim (int): the channel dimension of the embeddings
          num_heads (int): the number of heads in the attention layers
          mlp_dim (int): the hidden dimension of the mlp block
          activation (nn.Module): the activation of the mlp block
          skip_first_layer_pe (bool): skip the PE on the first layer
        """
        super().__init__()
        self.self_attn = Attention(embedding_dim, num_heads)
        self.norm1 = nn.LayerNorm(embedding_dim)

        self.cross_attn_token_to_image = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )
        self.norm2 = nn.LayerNorm(embedding_dim)

        self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
        self.norm3 = nn.LayerNorm(embedding_dim)

        self.norm4 = nn.LayerNorm(embedding_dim)
        self.cross_attn_image_to_token = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )

        self.skip_first_layer_pe = skip_first_layer_pe

    def forward(
        self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
    ) -> Tuple[Tensor, Tensor]:
        # Self attention block
        if self.skip_first_layer_pe:
            queries = self.self_attn(q=queries, k=queries, v=queries)
        else:
            q = queries + query_pe
            attn_out = self.self_attn(q=q, k=q, v=queries)
            queries = queries + attn_out
        queries = self.norm1(queries)

        # Cross attention block, tokens attending to image embedding
        q = queries + query_pe
        k = keys + key_pe
        attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
        queries = queries + attn_out
        queries = self.norm2(queries)

        # MLP block
        mlp_out = self.mlp(queries)
        queries = queries + mlp_out
        queries = self.norm3(queries)

        # Cross attention block, image embedding attending to tokens
        q = queries + query_pe
        k = keys + key_pe
        attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
        keys = keys + attn_out
        keys = self.norm4(keys)

        return queries, keys


class Attention(nn.Module):
    """
    An attention layer that allows for downscaling the size of the embedding
    after projection to queries, keys, and values.
    """

    def __init__(
        self,
        embedding_dim: int,
        num_heads: int,
        downsample_rate: int = 1,
    ) -> None:
        super().__init__()
        self.embedding_dim = embedding_dim
        self.internal_dim = embedding_dim // downsample_rate
        self.num_heads = num_heads
        assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim."

        self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.k_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.v_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.out_proj = nn.Linear(self.internal_dim, embedding_dim)

    def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
        b, n, c = x.shape
        x = x.reshape(b, n, num_heads, c // num_heads)
        return x.transpose(1, 2)  # B x N_heads x N_tokens x C_per_head

    def _recombine_heads(self, x: Tensor) -> Tensor:
        b, n_heads, n_tokens, c_per_head = x.shape
        x = x.transpose(1, 2)
        return x.reshape(b, n_tokens, n_heads * c_per_head)  # B x N_tokens x C

    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
        # Input projections
        q = self.q_proj(q)
        k = self.k_proj(k)
        v = self.v_proj(v)

        # Separate into heads
        q = self._separate_heads(q, self.num_heads)
        k = self._separate_heads(k, self.num_heads)
        v = self._separate_heads(v, self.num_heads)

        # Attention
        _, _, _, c_per_head = q.shape
        attn = q @ k.permute(0, 1, 3, 2)  # B x N_heads x N_tokens x N_tokens
        attn = attn / math.sqrt(c_per_head)
        attn = torch.softmax(attn, dim=-1)

        # Get output
        out = attn @ v
        out = self._recombine_heads(out)
        out = self.out_proj(out)

        return out
```
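To make the downsample_rate parameter concrete, the sketch below instantiates the Attention class defined above with illustrative numbers chosen to mirror SAM's mask decoder (embedding_dim=256, num_heads=8, downsample_rate=2; treat these values as assumptions). The q/k/v projections map 256-dimensional inputs into a 128-dimensional internal space (16 channels per head), and out_proj maps the result back to 256.

```python
import torch

# Illustrative hyperparameters (assumed; SAM's mask decoder uses similar values)
attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
print(attn.internal_dim)       # 256 // 2 = 128: q/k/v are projected into this smaller space
print(attn.internal_dim // 8)  # 16 channels per attention head

q = torch.randn(1, 5, 256)            # e.g. 5 prompt tokens
k = v = torch.randn(1, 64 * 64, 256)  # e.g. a 64x64 grid of image tokens
out = attn(q=q, k=k, v=v)
print(out.shape)  # torch.Size([1, 5, 256]): out_proj restores embedding_dim
```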
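Finally, a usage sketch for the full TwoWayTransformer, with dummy tensors whose shapes follow the forward() docstring. The depth, embedding_dim, num_heads, and mlp_dim values are assumptions chosen to mirror SAM's mask decoder, not something defined in this file.

```python
import torch

# Assumed hyperparameters, similar to those used by SAM's mask decoder
transformer = TwoWayTransformer(
    depth=2,
    embedding_dim=256,
    num_heads=8,
    mlp_dim=2048,
)

B, C, H, W = 1, 256, 64, 64
image_embedding = torch.randn(B, C, H, W)  # B x C x H x W image features
image_pe = torch.randn(B, C, H, W)         # positional encoding, same shape as the image features
point_embedding = torch.randn(B, 7, C)     # B x N_points x C prompt (point) tokens

queries, keys = transformer(image_embedding, image_pe, point_embedding)
print(queries.shape)  # torch.Size([1, 7, 256]): processed point embeddings
print(keys.shape)     # torch.Size([1, 4096, 256]): processed image embeddings (H*W tokens)
```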