cached_transformers

allennlp.common.cached_transformers

TransformerSpec

class TransformerSpec(NamedTuple)

model_name

class TransformerSpec(NamedTuple):
 | ...
 | model_name: str

override_weights_file

class TransformerSpec(NamedTuple):
 | ...
 | override_weights_file: Optional[str] = None

override_weights_strip_prefix

class TransformerSpec(NamedTuple):
 | ...
 | override_weights_strip_prefix: Optional[str] = None
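TransformerSpec serves as the key under which loaded models are cached, so two get() calls that resolve to the same spec share one underlying model. A minimal sketch of constructing one directly (ordinarily get() builds it for you; the model name is just an example):

from allennlp.common.cached_transformers import TransformerSpec

# The two optional fields default to None, so specs built with and without
# them compare equal and would map to the same cache entry.
spec = TransformerSpec(model_name="bert-base-cased")
assert spec == TransformerSpec("bert-base-cased", None, None)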

get

def get(
    model_name: str,
    make_copy: bool,
    override_weights_file: Optional[str] = None,
    override_weights_strip_prefix: Optional[str] = None,
    **kwargs
) -> transformers.PreTrainedModel

Returns a transformer model from the cache.

Parameters

  • model_name : str
    The name of the transformer, for example "bert-base-cased".
  • make_copy : bool
    If True, return a copy of the model instead of the cached instance itself. Set this to True if you want to modify the model's parameters. If you only need part of the model, you can set this to False, but make sure to copy.deepcopy() the bits you keep.
  • override_weights_file : str, optional
    If set, this specifies a file from which to load alternate weights that override the weights from huggingface. The file is expected to contain a PyTorch state_dict, created with torch.save().
  • override_weights_strip_prefix : str, optional
    If set, strip the given prefix from the state dict when loading it.
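A short usage sketch of get() (the model name is illustrative; the first call needs the model available locally or downloadable from huggingface):

from allennlp.common import cached_transformers

# First call loads the model and caches it; make_copy=True hands back a
# deep copy, so fine-tuning its parameters won't disturb the cached model.
model = cached_transformers.get("bert-base-cased", make_copy=True)

# Same spec, so this is served from the cache. With make_copy=False it is
# the shared cached instance: copy.deepcopy() any parts you keep and modify.
shared = cached_transformers.get("bert-base-cased", make_copy=False)

# Overriding weights (hypothetical path; the file must be a state_dict
# created with torch.save()):
# model = cached_transformers.get(
#     "bert-base-cased",
#     make_copy=True,
#     override_weights_file="/path/to/weights.th",
# )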

get_tokenizer

def get_tokenizer(
    model_name: str,
    **kwargs
) -> transformers.PreTrainedTokenizer
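get_tokenizer mirrors get() but for tokenizers: results are cached by model name plus any keyword arguments, with the tokenizer itself created via transformers.AutoTokenizer. A minimal sketch (model name illustrative):

from allennlp.common import cached_transformers

tokenizer = cached_transformers.get_tokenizer("bert-base-cased")
ids = tokenizer("AllenNLP caches tokenizers too.")["input_ids"]

# Calls with identical arguments reuse the cached instance.
assert cached_transformers.get_tokenizer("bert-base-cased") is tokenizer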