dot_product_matrix_attention
allennlp.modules.matrix_attention.dot_product_matrix_attention
DotProductMatrixAttention
@MatrixAttention.register("dot_product")
class DotProductMatrixAttention(MatrixAttention):
Computes attention between every entry in matrix_1 with every entry in matrix_2 using a dot product.
Registered as a MatrixAttention with name "dot_product".
forward
class DotProductMatrixAttention(MatrixAttention):
    ...
    @overrides
    def forward(
        self,
        matrix_1: torch.Tensor,
        matrix_2: torch.Tensor,
    ) -> torch.Tensor