gqa

allennlp_models.vision.dataset_readers.gqa



GQAReader#

@DatasetReader.register("gqa")
class GQAReader(VisionReader):
 | def __init__(
 |     self,
 |     image_dir: Union[str, PathLike],
 |     *,
 |     image_loader: Optional[ImageLoader] = None,
 |     image_featurizer: Optional[Lazy[GridEmbedder]] = None,
 |     region_detector: Optional[Lazy[RegionDetector]] = None,
 |     answer_vocab: Optional[Union[str, Vocabulary]] = None,
 |     feature_cache_dir: Optional[Union[str, PathLike]] = None,
 |     data_dir: Optional[Union[str, PathLike]] = None,
 |     tokenizer: Tokenizer = None,
 |     token_indexers: Dict[str, TokenIndexer] = None,
 |     cuda_device: Optional[Union[int, torch.device]] = None,
 |     max_instances: Optional[int] = None,
 |     image_processing_batch_size: int = 8,
 |     write_to_cache: bool = True
 | ) -> None

Parameters

image_dir : `str`
    Path to directory containing `png` image files.
image_loader : `ImageLoader`
image_featurizer : `Lazy[GridEmbedder]`
    The backbone image processor (like a ResNet), whose output will be passed to the region detector for finding object boxes in the image.
region_detector : `Lazy[RegionDetector]`
    For pulling out regions of the image (both coordinates and features) that will be used by downstream models.
data_dir : `str`
    Path to directory containing text files for each dataset split. These files contain the sentences and metadata for each task instance.
tokenizer : `Tokenizer`, optional
token_indexers : `Dict[str, TokenIndexer]`
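
As a rough usage sketch (the directory paths and split name below are hypothetical, and the optional image components are left unset, so no image features are produced), the reader can be constructed directly in Python:

from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import WhitespaceTokenizer
from allennlp_models.vision.dataset_readers.gqa import GQAReader

# image_loader, image_featurizer, and region_detector are left as None here;
# supply them (or point feature_cache_dir at precomputed features) when the
# downstream model needs visual features.
reader = GQAReader(
    image_dir="/data/gqa/images",       # hypothetical path to the png image files
    data_dir="/data/gqa/questions/",    # hypothetical path to the question files
    tokenizer=WhitespaceTokenizer(),
    token_indexers={"tokens": SingleIdTokenIndexer()},
    image_processing_batch_size=8,
    write_to_cache=True,
)

# "train_balanced" is an assumed split name; pass whichever split or file your setup uses.
for instance in reader.read("train_balanced"):
    ...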

text_to_instance#

class GQAReader(VisionReader):
 | ...
 | def text_to_instance(
 |     self,
 |     question: str,
 |     image: Optional[Union[str, Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]],
 |     answer: Optional[Dict[str, str]] = None,
 |     *, use_cache: bool = True
 | ) -> Optional[Instance]
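
A minimal sketch of building a single `Instance` by hand, assuming `reader` was constructed as in the sketch above; the image filename and the answer dict are hypothetical:

instance = reader.text_to_instance(
    question="What color is the car on the left?",
    image="n100552.jpg",          # hypothetical filename, resolved by the reader's image handling
    answer={"answer": "red"},     # assumed key, following the GQA question format
    use_cache=True,
)

# The return type is Optional[Instance], so guard against None before using it.
if instance is not None:
    print(instance.fields.keys())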

apply_token_indexers#

class GQAReader(VisionReader):
 | ...
 | def apply_token_indexers(self, instance: Instance) -> None
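
The data loader normally calls this to attach the reader's `token_indexers` to the question `TextField` before indexing; a minimal sketch of doing it by hand, again assuming `reader` from the sketch above:

instance = reader.text_to_instance(
    question="Is there a dog in the picture?",
    image=None,
)

# Attaches the reader's token_indexers to the instance's text field(s);
# only needed when instances are built and indexed manually.
if instance is not None:
    reader.apply_token_indexers(instance)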