
allennlp.fairness.bias_mitigator_wrappers

BiasMitigatorWrapper

class BiasMitigatorWrapper(Registrable)

Parent class for bias mitigator wrappers. Concrete subclasses are registered as forward hooks on a model's embedding layer and return debiased embeddings on every forward pass.

train

class BiasMitigatorWrapper(Registrable):
 | ...
 | def train(self, mode: bool = True)

Parameters

  • mode : bool, optional (default = True)
    Sets requires_grad to the value of mode for the bias mitigator and its associated bias direction.
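
Every concrete wrapper below follows the same pattern: the wrapper object is registered as a forward hook on the model's embedding layer, so its __call__(module, module_in, module_out) runs on each forward pass and returns debiased embeddings, while train lets a trainer freeze or unfreeze it. A minimal sketch of just that hook mechanism, using an identity stand-in rather than one of the real mitigator classes:

import torch

class IdentityMitigatorHook:
    """Toy stand-in mimicking the wrapper interface: a callable forward hook
    plus a train() switch. A real BiasMitigatorWrapper returns a debiased
    version of module_out instead of passing it through."""

    def __init__(self):
        self.requires_grad = True

    def __call__(self, module, module_in, module_out):
        return module_out  # a real wrapper debiases the embedding output here

    def train(self, mode: bool = True):
        self.requires_grad = mode  # mirrors the documented train() behavior

embedding = torch.nn.Embedding(num_embeddings=100, embedding_dim=16)
hook = IdentityMitigatorHook()
handle = embedding.register_forward_hook(hook)
out = embedding(torch.tensor([1, 2, 3]))  # hook fires on this forward pass
hook.train(mode=False)                    # freeze, e.g. during evaluation
handle.remove()                           # detach when no longer needed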

HardBiasMitigatorWrapper

@BiasMitigatorWrapper.register("hard")
class HardBiasMitigatorWrapper(BiasMitigatorWrapper):
 | def __init__(
 |     self,
 |     bias_direction: BiasDirectionWrapper,
 |     embedding_layer: torch.nn.Embedding,
 |     equalize_word_pairs_file: Union[PathLike, str],
 |     tokenizer: Tokenizer,
 |     mitigator_vocab: Optional[Vocabulary] = None,
 |     namespace: str = "tokens",
 |     requires_grad: bool = True
 | )

Parameters

  • bias_direction : BiasDirectionWrapper
    Bias direction used by the mitigator.
  • embedding_layer : torch.nn.Embedding
    Embedding layer of the base model.
  • equalize_word_pairs_file : Union[PathLike, str]
    Path to the file containing equalize word pairs.
  • tokenizer : Tokenizer
    Tokenizer used to tokenize the equalize words.
  • mitigator_vocab : Vocabulary, optional (default = None)
    Vocabulary of the tokenizer. If None, the tokenizer is assumed to be a Hugging Face PreTrainedTokenizer and its vocab attribute is used.
  • namespace : str, optional (default = "tokens")
    Namespace of mitigator_vocab to use when tokenizing. Disregarded when mitigator_vocab is None.
  • requires_grad : bool, optional (default = True)
    Option to enable gradient calculation for the bias mitigator.
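
A minimal construction sketch, assuming a pretrained-transformer tokenizer, a two-means direction from allennlp.fairness.bias_direction_wrappers, and illustrative JSON file paths; none of these specifics are required by the API, so treat this as a pattern rather than a verified recipe:

import torch
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.fairness.bias_direction_wrappers import TwoMeansBiasDirectionWrapper
from allennlp.fairness.bias_mitigator_wrappers import HardBiasMitigatorWrapper

tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
# Stand-in for the base model's embedding layer.
embedding_layer = torch.nn.Embedding(num_embeddings=30522, embedding_dim=768)

bias_direction = TwoMeansBiasDirectionWrapper(
    seed_word_pairs_file="definitional_pairs.json",  # hypothetical path
    tokenizer=tokenizer,
)
mitigator = HardBiasMitigatorWrapper(
    bias_direction=bias_direction,
    embedding_layer=embedding_layer,
    equalize_word_pairs_file="equalize_pairs.json",  # hypothetical path
    tokenizer=tokenizer,
)
# Register the wrapper as a forward hook so embeddings are debiased in-flight.
handle = embedding_layer.register_forward_hook(mitigator)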

__call__

class HardBiasMitigatorWrapper(BiasMitigatorWrapper):
 | ...
 | def __call__(self, module, module_in, module_out)

Called as a forward hook on the embedding layer.

train

class HardBiasMitigatorWrapper(BiasMitigatorWrapper):
 | ...
 | def train(self, mode: bool = True)

LinearBiasMitigatorWrapper

@BiasMitigatorWrapper.register("linear")
class LinearBiasMitigatorWrapper(BiasMitigatorWrapper):
 | def __init__(
 |     self,
 |     bias_direction: BiasDirectionWrapper,
 |     embedding_layer: torch.nn.Embedding,
 |     requires_grad: bool = True
 | )

Parameters

  • bias_direction : BiasDirectionWrapper
    Bias direction used by the mitigator.
  • embedding_layer : torch.nn.Embedding
    Embedding layer of the base model.
  • requires_grad : bool, optional (default = True)
    Option to enable gradient calculation for the bias mitigator.
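
LinearBiasMitigatorWrapper needs only a bias direction and the embedding layer. A hedged sketch reusing the same assumed two-means direction, hypothetical file path, and stand-in embedding as above:

import torch
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.fairness.bias_direction_wrappers import TwoMeansBiasDirectionWrapper
from allennlp.fairness.bias_mitigator_wrappers import LinearBiasMitigatorWrapper

tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
embedding_layer = torch.nn.Embedding(num_embeddings=30522, embedding_dim=768)

bias_direction = TwoMeansBiasDirectionWrapper(
    seed_word_pairs_file="definitional_pairs.json",  # hypothetical path
    tokenizer=tokenizer,
)
mitigator = LinearBiasMitigatorWrapper(
    bias_direction=bias_direction,
    embedding_layer=embedding_layer,
    requires_grad=False,  # disable gradient flow through the mitigator
)
embedding_layer.register_forward_hook(mitigator)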

__call__

class LinearBiasMitigatorWrapper(BiasMitigatorWrapper):
 | ...
 | def __call__(self, module, module_in, module_out)

Called as a forward hook on the embedding layer.

train

class LinearBiasMitigatorWrapper(BiasMitigatorWrapper):
 | ...
 | def train(self, mode: bool = True)

INLPBiasMitigatorWrapper

@BiasMitigatorWrapper.register("inlp")
class INLPBiasMitigatorWrapper(BiasMitigatorWrapper):
 | def __init__(
 |     self,
 |     embedding_layer: torch.nn.Embedding,
 |     seed_word_pairs_file: Union[PathLike, str],
 |     tokenizer: Tokenizer,
 |     mitigator_vocab: Optional[Vocabulary] = None,
 |     namespace: str = "tokens"
 | )

Parameters

  • embedding_layer : torch.nn.Embedding
    Embedding layer of the base model.
  • seed_word_pairs_file : Union[PathLike, str]
    Path to the file containing seed word pairs.
  • tokenizer : Tokenizer
    Tokenizer used to tokenize the seed words.
  • mitigator_vocab : Vocabulary, optional (default = None)
    Vocabulary of the tokenizer. If None, the tokenizer is assumed to be a Hugging Face PreTrainedTokenizer and its vocab attribute is used.
  • namespace : str, optional (default = "tokens")
    Namespace of mitigator_vocab to use when tokenizing. Disregarded when mitigator_vocab is None.
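
INLPBiasMitigatorWrapper derives its projection from the seed word pairs themselves, so no BiasDirectionWrapper is passed. A hedged sketch, again with an illustrative file path and stand-in embedding:

import torch
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.fairness.bias_mitigator_wrappers import INLPBiasMitigatorWrapper

tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
embedding_layer = torch.nn.Embedding(num_embeddings=30522, embedding_dim=768)

mitigator = INLPBiasMitigatorWrapper(
    embedding_layer=embedding_layer,
    seed_word_pairs_file="seed_pairs.json",  # hypothetical path
    tokenizer=tokenizer,
)
embedding_layer.register_forward_hook(mitigator)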

__call__

class INLPBiasMitigatorWrapper(BiasMitigatorWrapper):
 | ...
 | def __call__(self, module, module_in, module_out)

Called as a forward hook on the embedding layer.

train

class INLPBiasMitigatorWrapper(BiasMitigatorWrapper):
 | ...
 | def train(self, mode: bool = True)

OSCaRBiasMitigatorWrapper

@BiasMitigatorWrapper.register("oscar")
class OSCaRBiasMitigatorWrapper(BiasMitigatorWrapper):
 | def __init__(
 |     self,
 |     bias_direction1: BiasDirectionWrapper,
 |     bias_direction2: BiasDirectionWrapper,
 |     embedding_layer: torch.nn.Embedding,
 |     requires_grad: bool = True
 | )

Parameters

  • bias_direction1 : BiasDirectionWrapper
    Bias direction of the first concept subspace used by the mitigator.
  • bias_direction2 : BiasDirectionWrapper
    Bias direction of the second concept subspace used by the mitigator.
  • embedding_layer : torch.nn.Embedding
    Embedding layer of the base model.
  • requires_grad : bool, optional (default = True)
    Option to enable gradient calculation for the bias mitigator.
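
OSCaR operates on two concept subspaces (for example, gender and occupation), hence the two direction arguments. A hedged sketch with two assumed two-means directions; the class name, file paths, and concept choices are illustrative assumptions:

import torch
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.fairness.bias_direction_wrappers import TwoMeansBiasDirectionWrapper
from allennlp.fairness.bias_mitigator_wrappers import OSCaRBiasMitigatorWrapper

tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
embedding_layer = torch.nn.Embedding(num_embeddings=30522, embedding_dim=768)

gender_direction = TwoMeansBiasDirectionWrapper(
    seed_word_pairs_file="gender_pairs.json",  # hypothetical path
    tokenizer=tokenizer,
)
occupation_direction = TwoMeansBiasDirectionWrapper(
    seed_word_pairs_file="occupation_pairs.json",  # hypothetical path
    tokenizer=tokenizer,
)
mitigator = OSCaRBiasMitigatorWrapper(
    bias_direction1=gender_direction,
    bias_direction2=occupation_direction,
    embedding_layer=embedding_layer,
)
embedding_layer.register_forward_hook(mitigator)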

__call__

class OSCaRBiasMitigatorWrapper(BiasMitigatorWrapper):
 | ...
 | def __call__(self, module, module_in, module_out)

Called as a forward hook on the embedding layer.

train

class OSCaRBiasMitigatorWrapper(BiasMitigatorWrapper):
 | ...
 | def train(self, mode: bool = True)