visual_entailment

allennlp_models.vision.models.visual_entailment

VisualEntailmentModel#

@Model.register("ve_vilbert")
@Model.register("ve_vilbert_from_huggingface", constructor="from_huggingface_model_name")
class VisualEntailmentModel(VisionTextModel):
 | def __init__(
 |     self,
 |     vocab: Vocabulary,
 |     text_embeddings: TransformerEmbeddings,
 |     image_embeddings: ImageFeatureEmbeddings,
 |     encoder: BiModalEncoder,
 |     pooled_output_dim: int,
 |     fusion_method: str = "sum",
 |     dropout: float = 0.1,
 |     label_namespace: str = "labels",
 |     *,
 |     ignore_text: bool = False,
 |     ignore_image: bool = False
 | ) -> None

Model for the visual entailment task, based on the paper Visual Entailment: A Novel Task for Fine-Grained Image Understanding. Given an image and a textual hypothesis, the model classifies their relationship as entailment, neutral, or contradiction.

Parameters

  • vocab : Vocabulary
  • text_embeddings : TransformerEmbeddings
  • image_embeddings : ImageFeatureEmbeddings
  • encoder : BiModalEncoder
  • pooled_output_dim : int
  • fusion_method : str, optional (default = "sum")
  • dropout : float, optional (default = 0.1)
  • label_namespace : str, optional (default = "labels")
  • ignore_text : bool, optional (default = False)
  • ignore_image : bool, optional (default = False)

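The two Model.register decorators make the class available under the names ve_vilbert and ve_vilbert_from_huggingface, so a training configuration selects it by name rather than importing it directly. A minimal registry-lookup sketch (construction of the embeddings and encoder themselves is omitted):

    from allennlp.models import Model
    import allennlp_models.vision  # importing the package runs the register decorators

    # "ve_vilbert" resolves to VisualEntailmentModel itself; the second name is
    # bound to the from_huggingface_model_name constructor instead of __init__.
    model_cls = Model.by_name("ve_vilbert")
    print(model_cls.__name__)  # VisualEntailmentModel
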
forward#

class VisualEntailmentModel(VisionTextModel):
 | ...
 | def forward(
 |     self,
 |     box_features: torch.Tensor,
 |     box_coordinates: torch.Tensor,
 |     box_mask: torch.Tensor,
 |     hypothesis: TextFieldTensors,
 |     labels: Optional[torch.Tensor] = None
 | ) -> Dict[str, torch.Tensor]

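forward consumes pre-extracted region features for the image together with the tokenized hypothesis. A hedged sketch of the expected shapes using dummy tensors; the box count and feature dimension below are placeholders (in practice they come from the region detector and the configured ImageFeatureEmbeddings), and hypothesis must be the TextFieldTensors structure produced by the dataset reader's token indexers:

    import torch

    batch_size, num_boxes, feature_dim = 2, 36, 1024  # placeholder sizes

    box_features = torch.randn(batch_size, num_boxes, feature_dim)  # one feature vector per box
    box_coordinates = torch.randn(batch_size, num_boxes, 4)         # box geometry per region
    box_mask = torch.ones(batch_size, num_boxes, dtype=torch.bool)  # True for real (non-padded) boxes

    # hypothesis: TextFieldTensors, e.g. produced by a pretrained-transformer indexer.
    # outputs = model(box_features, box_coordinates, box_mask, hypothesis)
    # outputs is a dict of tensors; passing labels as well typically adds a "loss" entry.
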
get_metrics#

class VisualEntailmentModel(VisionTextModel):
 | ...
 | def get_metrics(self, reset: bool = False) -> Dict[str, float]

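Metrics accumulate across calls to forward; passing reset=True returns the accumulated values and clears them, which is what the trainer does at the end of an epoch. A short sketch (the exact metric names depend on the metrics the model tracks and are not listed here):

    epoch_metrics = model.get_metrics(reset=True)  # e.g. accuracy and F1-style scores
    for name, value in epoch_metrics.items():
        print(f"{name}: {value:.4f}")
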
make_output_human_readable#

class VisualEntailmentModel(VisionTextModel):
 | ...
 | def make_output_human_readable(
 |     self,
 |     output_dict: Dict[str, torch.Tensor]
 | ) -> Dict[str, torch.Tensor]

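This hook turns the raw tensors returned by forward into something readable, using the label_namespace vocabulary to map class probabilities back to label strings (for SNLI-VE the label set is entailment, neutral, and contradiction). A sketch that reuses the dummy inputs from the forward example above; the exact keys added to the dictionary should be checked against the source:

    import torch

    with torch.no_grad():
        outputs = model(box_features, box_coordinates, box_mask, hypothesis)
    readable = model.make_output_human_readable(outputs)
    # readable keeps the original tensors and adds human-readable label
    # information derived from the predicted class probabilities.
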
default_predictor#

class VisualEntailmentModel(VisionTextModel):
 | ...
 | default_predictor = "vilbert_ve"
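
Because default_predictor is set, Predictor.from_path on a trained archive picks up the "vilbert_ve" predictor without naming it explicitly. A hedged sketch; the archive path is a placeholder and the predict keyword names ("image", "hypothesis") are assumed to match the visual entailment predictor's JSON fields:

    from allennlp.predictors import Predictor
    import allennlp_models.vision  # registers the model and the "vilbert_ve" predictor

    predictor = Predictor.from_path("/path/to/ve-vilbert-model.tar.gz")  # placeholder path
    result = predictor.predict(
        image="/path/to/photo.jpg",                 # placeholder image path
        hypothesis="Two dogs play in the snow.",
    )
    # result contains the model outputs as JSON-serializable values.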