bert

mindnlp.transformers.models.bert.configuration_bert.BertConfig

Bases: PretrainedConfig

Configuration for BERT-base

Source code in mindnlp/transformers/models/bert/configuration_bert.py (lines 36-105)
class BertConfig(PretrainedConfig):
    """
    Configuration for BERT-base
    """
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """
        Initialize a BertConfig object with the specified parameters.

        Args:
            self (object): The object instance.
            vocab_size (int): The size of the vocabulary. Defaults to 30522.
            hidden_size (int): The size of the hidden layers. Defaults to 768.
            num_hidden_layers (int): The number of hidden layers. Defaults to 12.
            num_attention_heads (int): The number of attention heads. Defaults to 12.
            intermediate_size (int): The size of the intermediate layer in the transformer encoder. Defaults to 3072.
            hidden_act (str): The activation function for the hidden layers. Defaults to 'gelu'.
            hidden_dropout_prob (float): The dropout probability for the hidden layers. Defaults to 0.1.
            attention_probs_dropout_prob (float): The dropout probability for the attention probabilities. Defaults to 0.1.
            max_position_embeddings (int): The maximum position index. Defaults to 512.
            type_vocab_size (int): The size of the type vocabulary. Defaults to 2.
            initializer_range (float): The range for weight initialization. Defaults to 0.02.
            layer_norm_eps (float): The epsilon value for layer normalization. Defaults to 1e-12.
            pad_token_id (int): The token ID for padding. Defaults to 0.
            position_embedding_type (str): The type of position embeddings. Defaults to 'absolute'.
            use_cache (bool): Whether to use cache during inference. Defaults to True.
            classifier_dropout (float): The dropout probability for the classifier layer. Defaults to None.

        Returns:
            None.

        Raises:
            ValueError: If any of the input parameters are invalid or out of range.
        """
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout

mindnlp.transformers.models.bert.configuration_bert.BertConfig.__init__(vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs)

Initialize a BertConfig object with the specified parameters.

PARAMETER DESCRIPTION
self

The object instance.

TYPE: object

vocab_size

The size of the vocabulary. Defaults to 30522.

TYPE: int DEFAULT: 30522

hidden_size

The size of the hidden layers. Defaults to 768.

TYPE: int DEFAULT: 768

num_hidden_layers

The number of hidden layers. Defaults to 12.

TYPE: int DEFAULT: 12

num_attention_heads

The number of attention heads. Defaults to 12.

TYPE: int DEFAULT: 12

intermediate_size

The size of the intermediate layer in the transformer encoder. Defaults to 3072.

TYPE: int DEFAULT: 3072

hidden_act

The activation function for the hidden layers. Defaults to 'gelu'.

TYPE: str DEFAULT: 'gelu'

hidden_dropout_prob

The dropout probability for the hidden layers. Defaults to 0.1.

TYPE: float DEFAULT: 0.1

attention_probs_dropout_prob

The dropout probability for the attention probabilities. Defaults to 0.1.

TYPE: float DEFAULT: 0.1

max_position_embeddings

The maximum position index. Defaults to 512.

TYPE: int DEFAULT: 512

type_vocab_size

The size of the type vocabulary. Defaults to 2.

TYPE: int DEFAULT: 2

initializer_range

The range for weight initialization. Defaults to 0.02.

TYPE: float DEFAULT: 0.02

layer_norm_eps

The epsilon value for layer normalization. Defaults to 1e-12.

TYPE: float DEFAULT: 1e-12

pad_token_id

The token ID for padding. Defaults to 0.

TYPE: int DEFAULT: 0

position_embedding_type

The type of position embeddings. Defaults to 'absolute'.

TYPE: str DEFAULT: 'absolute'

use_cache

Whether to use cache during inference. Defaults to True.

TYPE: bool DEFAULT: True

classifier_dropout

The dropout probability for the classifier layer. Defaults to None.

TYPE: float DEFAULT: None

RETURNS DESCRIPTION

None.

RAISES DESCRIPTION
ValueError

If any of the input parameters are invalid or out of range.

Source code in mindnlp/transformers/models/bert/configuration_bert.py (lines 42-105)
def __init__(
    self,
    vocab_size=30522,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    type_vocab_size=2,
    initializer_range=0.02,
    layer_norm_eps=1e-12,
    pad_token_id=0,
    position_embedding_type="absolute",
    use_cache=True,
    classifier_dropout=None,
    **kwargs,
):
    """
    Initialize a BertConfig object with the specified parameters.

    Args:
        self (object): The object instance.
        vocab_size (int): The size of the vocabulary. Defaults to 30522.
        hidden_size (int): The size of the hidden layers. Defaults to 768.
        num_hidden_layers (int): The number of hidden layers. Defaults to 12.
        num_attention_heads (int): The number of attention heads. Defaults to 12.
        intermediate_size (int): The size of the intermediate layer in the transformer encoder. Defaults to 3072.
        hidden_act (str): The activation function for the hidden layers. Defaults to 'gelu'.
        hidden_dropout_prob (float): The dropout probability for the hidden layers. Defaults to 0.1.
        attention_probs_dropout_prob (float): The dropout probability for the attention probabilities. Defaults to 0.1.
        max_position_embeddings (int): The maximum position index. Defaults to 512.
        type_vocab_size (int): The size of the type vocabulary. Defaults to 2.
        initializer_range (float): The range for weight initialization. Defaults to 0.02.
        layer_norm_eps (float): The epsilon value for layer normalization. Defaults to 1e-12.
        pad_token_id (int): The token ID for padding. Defaults to 0.
        position_embedding_type (str): The type of position embeddings. Defaults to 'absolute'.
        use_cache (bool): Whether to use cache during inference. Defaults to True.
        classifier_dropout (float): The dropout probability for the classifier layer. Defaults to None.

    Returns:
        None.

    Raises:
        ValueError: If any of the input parameters are invalid or out of range.
    """
    super().__init__(pad_token_id=pad_token_id, **kwargs)
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.hidden_act = hidden_act
    self.intermediate_size = intermediate_size
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.position_embedding_type = position_embedding_type
    self.use_cache = use_cache
    self.classifier_dropout = classifier_dropout
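
For reference, a minimal usage sketch of BertConfig (illustrative only, not part of the library source; the smaller hyperparameter values below are arbitrary examples):

from mindnlp.transformers.models.bert.configuration_bert import BertConfig

# Default BERT-base hyperparameters
config = BertConfig()
print(config.hidden_size, config.num_hidden_layers)  # 768 12

# A smaller custom configuration; extra keyword arguments are forwarded to PretrainedConfig
small_config = BertConfig(
    hidden_size=256,
    num_hidden_layers=4,
    num_attention_heads=4,
    intermediate_size=1024,
    classifier_dropout=0.2,
)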

mindnlp.transformers.models.bert.modeling_bert

MindNLP bert model

mindnlp.transformers.models.bert.modeling_bert.BertAttention

Bases: Module

Bert Attention

Source code in mindnlp/transformers/models/bert/modeling_bert.py (lines 446-545)
class BertAttention(nn.Module):
    r"""
    Bert Attention
    """
    def __init__(self, config, position_embedding_type=None):
        """
        Initializes a BertAttention object.

        Args:
            self: The instance of the class itself.
            config (object): The configuration object containing settings for the BertAttention.
            position_embedding_type (str, optional): The type of position embedding to be used. Defaults to None.

        Returns:
            None: This method initializes the BertAttention object and does not return any value.

        Raises:
            None
        """
        super().__init__()
        self.self = BertSelfAttention(config, position_embedding_type=position_embedding_type)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """prune heads"""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: mindspore.Tensor,
        attention_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        encoder_hidden_states: Optional[mindspore.Tensor] = None,
        encoder_attention_mask: Optional[mindspore.Tensor] = None,
        past_key_value: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
        output_attentions: Optional[bool] = False,
    ):
        """
        This method forwards the BertAttention layer.

        Args:
            self (BertAttention): The instance of the BertAttention class.
            hidden_states (mindspore.Tensor): The input tensor containing the hidden states of the model.
                The shape should be [batch_size, sequence_length, hidden_size].
            attention_mask (Optional[mindspore.Tensor]): An optional tensor containing the attention mask for the input.
                If provided, the shape should be [batch_size, 1, sequence_length, sequence_length] and
                the values should be 0 or 1. Default is None.
            head_mask (Optional[mindspore.Tensor]): An optional tensor containing the head mask for the input.
                If provided, the shape should be [num_heads] and the values should be 0 or 1. Default is None.
            encoder_hidden_states (Optional[mindspore.Tensor]):
                An optional tensor containing the hidden states of the encoder.
                If provided, the shape should be [batch_size, sequence_length, hidden_size].
                Default is None.
            encoder_attention_mask (Optional[mindspore.Tensor]):
                An optional tensor containing the attention mask for the encoder input.
                If provided, the shape should be [batch_size, 1, sequence_length,
                sequence_length] and the values should be 0 or 1. Default is None.
            past_key_value (Optional[Tuple[Tuple[mindspore.Tensor]]]):
                An optional tuple containing the past key and value tensors.
                If provided, the shape should be [(batch_size, num_heads, sequence_length,
                head_size), (batch_size, num_heads, sequence_length, head_size)]. Default is None.
            output_attentions (Optional[bool]): An optional boolean value indicating whether to output attentions.
                Default is False.

        Returns:
            outputs (Tuple[mindspore.Tensor]):
                A tuple of output tensors containing the attention_output and any additional outputs from the layer.

        Raises:
            ValueError: If the shapes or types of input tensors are invalid.
            RuntimeError: If there is a runtime error during the execution of the method.
        """
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs

mindnlp.transformers.models.bert.modeling_bert.BertAttention.__init__(config, position_embedding_type=None)

Initializes a BertAttention object.

PARAMETER DESCRIPTION
self

The instance of the class itself.

config

The configuration object containing settings for the BertAttention.

TYPE: object

position_embedding_type

The type of position embedding to be used. Defaults to None.

TYPE: str DEFAULT: None

RETURNS DESCRIPTION
None

This method initializes the BertAttention object and does not return any value.

Source code in mindnlp/transformers/models/bert/modeling_bert.py (lines 450-468)
def __init__(self, config, position_embedding_type=None):
    """
    Initializes a BertAttention object.

    Args:
        self: The instance of the class itself.
        config (object): The configuration object containing settings for the BertAttention.
        position_embedding_type (str, optional): The type of position embedding to be used. Defaults to None.

    Returns:
        None: This method initializes the BertAttention object and does not return any value.

    Raises:
        None
    """
    super().__init__()
    self.self = BertSelfAttention(config, position_embedding_type=position_embedding_type)
    self.output = BertSelfOutput(config)
    self.pruned_heads = set()

mindnlp.transformers.models.bert.modeling_bert.BertAttention.forward(hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False)

This method forwards the BertAttention layer.

PARAMETER DESCRIPTION
self

The instance of the BertAttention class.

TYPE: BertAttention

hidden_states

The input tensor containing the hidden states of the model. The shape should be [batch_size, sequence_length, hidden_size].

TYPE: Tensor

attention_mask

An optional tensor containing the attention mask for the input. If provided, the shape should be [batch_size, 1, sequence_length, sequence_length] and the values should be 0 or 1. Default is None.

TYPE: Optional[Tensor] DEFAULT: None

head_mask

An optional tensor containing the head mask for the input. If provided, the shape should be [num_heads] and the values should be 0 or 1. Default is None.

TYPE: Optional[Tensor] DEFAULT: None

encoder_hidden_states

An optional tensor containing the hidden states of the encoder. If provided, the shape should be [batch_size, sequence_length, hidden_size]. Default is None.

TYPE: Optional[Tensor] DEFAULT: None

encoder_attention_mask

An optional tensor containing the attention mask for the encoder input. If provided, the shape should be [batch_size, 1, sequence_length, sequence_length] and the values should be 0 or 1. Default is None.

TYPE: Optional[Tensor] DEFAULT: None

past_key_value

An optional tuple containing the past key and value tensors. If provided, the shape should be [(batch_size, num_heads, sequence_length, head_size), (batch_size, num_heads, sequence_length, head_size)]. Default is None.

TYPE: Optional[Tuple[Tuple[Tensor]]] DEFAULT: None

output_attentions

An optional boolean value indicating whether to output attentions. Default is False.

TYPE: Optional[bool] DEFAULT: False

RETURNS DESCRIPTION
outputs

A tuple of output tensors containing the attention_output and any additional outputs from the layer.

TYPE: Tuple[Tensor]

RAISES DESCRIPTION
ValueError

If the shapes or types of input tensors are invalid.

RuntimeError

If there is a runtime error during the execution of the method.

Source code in mindnlp/transformers/models/bert/modeling_bert.py (lines 489-545)
def forward(
    self,
    hidden_states: mindspore.Tensor,
    attention_mask: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    encoder_hidden_states: Optional[mindspore.Tensor] = None,
    encoder_attention_mask: Optional[mindspore.Tensor] = None,
    past_key_value: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
    output_attentions: Optional[bool] = False,
):
    """
    This method forwards the BertAttention layer.

    Args:
        self (BertAttention): The instance of the BertAttention class.
        hidden_states (mindspore.Tensor): The input tensor containing the hidden states of the model.
            The shape should be [batch_size, sequence_length, hidden_size].
        attention_mask (Optional[mindspore.Tensor]): An optional tensor containing the attention mask for the input.
            If provided, the shape should be [batch_size, 1, sequence_length, sequence_length] and
            the values should be 0 or 1. Default is None.
        head_mask (Optional[mindspore.Tensor]): An optional tensor containing the head mask for the input.
            If provided, the shape should be [num_heads] and the values should be 0 or 1. Default is None.
        encoder_hidden_states (Optional[mindspore.Tensor]):
            An optional tensor containing the hidden states of the encoder.
            If provided, the shape should be [batch_size, sequence_length, hidden_size].
            Default is None.
        encoder_attention_mask (Optional[mindspore.Tensor]):
            An optional tensor containing the attention mask for the encoder input.
            If provided, the shape should be [batch_size, 1, sequence_length,
            sequence_length] and the values should be 0 or 1. Default is None.
        past_key_value (Optional[Tuple[Tuple[mindspore.Tensor]]]):
            An optional tuple containing the past key and value tensors.
            If provided, the shape should be [(batch_size, num_heads, sequence_length,
            head_size), (batch_size, num_heads, sequence_length, head_size)]. Default is None.
        output_attentions (Optional[bool]): An optional boolean value indicating whether to output attentions.
            Default is False.

    Returns:
        outputs (Tuple[mindspore.Tensor]):
            A tuple of output tensors containing the attention_output and any additional outputs from the layer.

    Raises:
        ValueError: If the shapes or types of input tensors are invalid.
        RuntimeError: If there is a runtime error during the execution of the method.
    """
    self_outputs = self.self(
        hidden_states,
        attention_mask,
        head_mask,
        encoder_hidden_states,
        encoder_attention_mask,
        past_key_value,
        output_attentions,
    )
    attention_output = self.output(self_outputs[0], hidden_states)
    outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
    return outputs
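
A minimal sketch of calling the layer on dummy hidden states (illustrative only; it assumes a default BertConfig and randomly initialized weights, with shapes as described in the docstring above):

import mindspore
from mindspore import ops
from mindnlp.transformers.models.bert.configuration_bert import BertConfig
from mindnlp.transformers.models.bert.modeling_bert import BertAttention

config = BertConfig()
attention = BertAttention(config)

# (batch_size, sequence_length, hidden_size)
hidden_states = ops.ones((2, 8, config.hidden_size), mindspore.float32)
outputs = attention(hidden_states, output_attentions=True)
attention_output = outputs[0]  # (2, 8, 768)
attention_probs = outputs[1]   # attention weights, present because output_attentions=True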

mindnlp.transformers.models.bert.modeling_bert.BertAttention.prune_heads(heads)

prune heads

Source code in mindnlp/transformers/models/bert/modeling_bert.py (lines 470-487)
def prune_heads(self, heads):
    """prune heads"""
    if len(heads) == 0:
        return
    heads, index = find_pruneable_heads_and_indices(
        heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
    )

    # Prune linear layers
    self.self.query = prune_linear_layer(self.self.query, index)
    self.self.key = prune_linear_layer(self.self.key, index)
    self.self.value = prune_linear_layer(self.self.value, index)
    self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

    # Update hyper params and store pruned heads
    self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
    self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
    self.pruned_heads = self.pruned_heads.union(heads)
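
Continuing the BertAttention sketch above, pruning two arbitrarily chosen heads shrinks the query/key/value projections and updates the bookkeeping attributes:

# `attention` is the BertAttention instance from the previous sketch
attention.prune_heads({0, 2})
print(attention.self.num_attention_heads)  # 10, down from the 12 heads of the default config
print(attention.pruned_heads)              # {0, 2}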

mindnlp.transformers.models.bert.modeling_bert.BertDualAttention

Bases: Module

This class represents a BertDualAttention module that inherits from nn.Module. It contains methods for initializing the module, pruning attention heads, and forwarding the attention mechanism for BERT models.

ATTRIBUTE DESCRIPTION
config

Configuration for the BertDualAttention module.

position_embedding_type

Type of position embedding to be used (optional).

METHOD DESCRIPTION
__init__

Initializes the BertDualAttention module with the given configuration and position embedding type.

prune_heads

Prunes the specified attention heads from the self-attention mechanism.

forward

Constructs the attention mechanism for BERT models using the provided inputs and past key values.

RAISES DESCRIPTION
ValueError

If the number of heads to be pruned is invalid.

RETURNS DESCRIPTION
outputs

Tuple containing the attention output and optional additional outputs.

Source code in mindnlp/transformers/models/bert/modeling_bert.py (lines 2924-3041)
class BertDualAttention(nn.Module):

    """
    This class represents a BertDualAttention module that inherits from nn.Module.
    It contains methods for initializing the module, pruning attention heads,
    and forwarding the attention mechanism for BERT models.

    Attributes:
        config: Configuration for the BertDualAttention module.
        position_embedding_type: Type of position embedding to be used (optional).

    Methods:
        __init__(self, config, position_embedding_type=None):
            Initializes the BertDualAttention module with the given configuration and position embedding type.

        prune_heads(self, heads):
            Prunes the specified attention heads from the self-attention mechanism.

        forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None,
        encoder_attention_mask=None, past_key_value=None, output_attentions=False):
            Constructs the attention mechanism for BERT models using the provided inputs and past key values.

    Raises:
        ValueError: If the number of heads to be pruned is invalid.

    Returns:
        outputs: Tuple containing the attention output and optional additional outputs.
    """
    def __init__(self, config, position_embedding_type=None):
        """
        Initializes the BertDualAttention class with the provided configuration and position embedding type.

        Args:
            self (object): The instance of the class.
            config (object): The configuration object containing settings for the dual attention mechanism.
            position_embedding_type (str, optional): The type of position embedding to be used. Default is None.

        Returns:
            None: This method does not return any value.

        Raises:
            None
        """
        super().__init__()
        self.self = BertDualSelfAttention(config, position_embedding_type=position_embedding_type)
        self.output = BertDualSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """prune heads"""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: mindspore.Tensor,
        attention_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        encoder_hidden_states: Optional[mindspore.Tensor] = None,
        encoder_attention_mask: Optional[mindspore.Tensor] = None,
        past_key_value: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
        output_attentions: Optional[bool] = False,
    ):
        """
        Constructs the attention mechanism for the BertDualAttention class.

        Args:
            self (BertDualAttention): The instance of the BertDualAttention class.
            hidden_states (mindspore.Tensor):
                The input hidden states tensor of shape (batch_size, seq_length, hidden_size).
            attention_mask (Optional[mindspore.Tensor]):
                The attention mask tensor of shape (batch_size, seq_length) or (batch_size, seq_length, seq_length).
                Defaults to None.
            head_mask (Optional[mindspore.Tensor]):
                The head mask tensor of shape (num_heads, seq_length, seq_length). Defaults to None.
            encoder_hidden_states (Optional[mindspore.Tensor]):
                The encoder hidden states tensor of shape (batch_size, seq_length, hidden_size). Defaults to None.
            encoder_attention_mask (Optional[mindspore.Tensor]):
                The encoder attention mask tensor of shape (batch_size, seq_length) or (batch_size, seq_length, seq_length).
                Defaults to None.
            past_key_value (Optional[Tuple[Tuple[mindspore.Tensor]]]):
                The previous key-value pairs tensor. Defaults to None.
            output_attentions (Optional[bool]): Whether to output the attention weights. Defaults to False.

        Returns:
            outputs (tuple):
                A tuple containing the attention output tensor of shape (batch_size, seq_length, hidden_size)
                and any additional outputs.

        Raises:
            None
        """
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs

mindnlp.transformers.models.bert.modeling_bert.BertDualAttention.__init__(config, position_embedding_type=None)

Initializes the BertDualAttention class with the provided configuration and position embedding type.

PARAMETER DESCRIPTION
self

The instance of the class.

TYPE: object

config

The configuration object containing settings for the dual attention mechanism.

TYPE: object

position_embedding_type

The type of position embedding to be used. Default is None.

TYPE: str DEFAULT: None

RETURNS DESCRIPTION
None

This method does not return any value.

Source code in mindnlp/transformers/models/bert/modeling_bert.py (lines 2952-2970)
def __init__(self, config, position_embedding_type=None):
    """
    Initializes the BertDualAttention class with the provided configuration and position embedding type.

    Args:
        self (object): The instance of the class.
        config (object): The configuration object containing settings for the dual attention mechanism.
        position_embedding_type (str, optional): The type of position embedding to be used. Default is None.

    Returns:
        None: This method does not return any value.

    Raises:
        None
    """
    super().__init__()
    self.self = BertDualSelfAttention(config, position_embedding_type=position_embedding_type)
    self.output = BertDualSelfOutput(config)
    self.pruned_heads = set()

mindnlp.transformers.models.bert.modeling_bert.BertDualAttention.forward(hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False)

Constructs the attention mechanism for the BertDualAttention class.

PARAMETER DESCRIPTION
self

The instance of the BertDualAttention class.

TYPE: BertDualAttention

hidden_states

The input hidden states tensor of shape (batch_size, seq_length, hidden_size).

TYPE: Tensor

attention_mask

The attention mask tensor of shape (batch_size, seq_length) or (batch_size, seq_length, seq_length). Defaults to None.

TYPE: Optional[Tensor] DEFAULT: None

head_mask

The head mask tensor of shape (num_heads, seq_length, seq_length). Defaults to None.

TYPE: Optional[Tensor] DEFAULT: None

encoder_hidden_states

The encoder hidden states tensor of shape (batch_size, seq_length, hidden_size). Defaults to None.

TYPE: Optional[Tensor] DEFAULT: None

encoder_attention_mask

The encoder attention mask tensor of shape (batch_size, seq_length) or (batch_size, seq_length, seq_length). Defaults to None.

TYPE: Optional[Tensor] DEFAULT: None

past_key_value

The previous key-value pairs tensor. Defaults to None.

TYPE: Optional[Tuple[Tuple[Tensor]]] DEFAULT: None

output_attentions

Whether to output the attention weights. Defaults to False.

TYPE: Optional[bool] DEFAULT: False

RETURNS DESCRIPTION
outputs

A tuple containing the attention output tensor of shape (batch_size, seq_length, hidden_size) and any additional outputs.

TYPE: tuple

Source code in mindnlp/transformers/models/bert/modeling_bert.py (lines 2991-3041)
def forward(
    self,
    hidden_states: mindspore.Tensor,
    attention_mask: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    encoder_hidden_states: Optional[mindspore.Tensor] = None,
    encoder_attention_mask: Optional[mindspore.Tensor] = None,
    past_key_value: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
    output_attentions: Optional[bool] = False,
):
    """
    Constructs the attention mechanism for the BertDualAttention class.

    Args:
        self (BertDualAttention): The instance of the BertDualAttention class.
        hidden_states (mindspore.Tensor):
            The input hidden states tensor of shape (batch_size, seq_length, hidden_size).
        attention_mask (Optional[mindspore.Tensor]):
            The attention mask tensor of shape (batch_size, seq_length) or (batch_size, seq_length, seq_length).
            Defaults to None.
        head_mask (Optional[mindspore.Tensor]):
            The head mask tensor of shape (num_heads, seq_length, seq_length). Defaults to None.
        encoder_hidden_states (Optional[mindspore.Tensor]):
            The encoder hidden states tensor of shape (batch_size, seq_length, hidden_size). Defaults to None.
        encoder_attention_mask (Optional[mindspore.Tensor]):
            The encoder attention mask tensor of shape (batch_size, seq_length) or (batch_size, seq_length, seq_length).
            Defaults to None.
        past_key_value (Optional[Tuple[Tuple[mindspore.Tensor]]]):
            The previous key-value pairs tensor. Defaults to None.
        output_attentions (Optional[bool]): Whether to output the attention weights. Defaults to False.

    Returns:
        outputs (tuple):
            A tuple containing the attention output tensor of shape (batch_size, seq_length, hidden_size)
            and any additional outputs.

    Raises:
        None
    """
    self_outputs = self.self(
        hidden_states,
        attention_mask,
        head_mask,
        encoder_hidden_states,
        encoder_attention_mask,
        past_key_value,
        output_attentions,
    )
    attention_output = self.output(self_outputs[0], hidden_states)
    outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
    return outputs
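
BertDualAttention follows the same calling convention as BertAttention. A brief sketch (illustrative; it assumes the standard BertConfig supplies whatever attributes BertDualSelfAttention expects):

import mindspore
from mindspore import ops
from mindnlp.transformers.models.bert.configuration_bert import BertConfig
from mindnlp.transformers.models.bert.modeling_bert import BertDualAttention

config = BertConfig()
dual_attention = BertDualAttention(config)

hidden_states = ops.ones((2, 8, config.hidden_size), mindspore.float32)
outputs = dual_attention(hidden_states)
attention_output = outputs[0]  # (batch_size, seq_length, hidden_size)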

mindnlp.transformers.models.bert.modeling_bert.BertDualAttention.prune_heads(heads)

prune heads

Source code in mindnlp/transformers/models/bert/modeling_bert.py (lines 2972-2989)
def prune_heads(self, heads):
    """prune heads"""
    if len(heads) == 0:
        return
    heads, index = find_pruneable_heads_and_indices(
        heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
    )

    # Prune linear layers
    self.self.query = prune_linear_layer(self.self.query, index)
    self.self.key = prune_linear_layer(self.self.key, index)
    self.self.value = prune_linear_layer(self.self.value, index)
    self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

    # Update hyper params and store pruned heads
    self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
    self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
    self.pruned_heads = self.pruned_heads.union(heads)

mindnlp.transformers.models.bert.modeling_bert.BertDualEncoder

Bases: Module

The BertDualEncoder class represents a dual encoder model based on the BERT architecture. This class inherits from the nn.Module class in MindSpore.

ATTRIBUTE DESCRIPTION
config

The configuration parameters for the model.

layer

A list of BertDualLayer instances representing the stacked layers in the encoder.

gradient_checkpointing

A boolean indicating whether gradient checkpointing is enabled in the model.

METHOD DESCRIPTION
__init__

Initializes the BertDualEncoder instance with the provided configuration.

forward

Constructs the dual encoder model with the given input tensors and parameters. Returns the final hidden states, past key values, hidden states at all layers, self-attentions at all layers, and cross-attentions at all layers.

Source code in mindnlp/transformers/models/bert/modeling_bert.py (lines 3380-3507)
class BertDualEncoder(nn.Module):

    """
    The BertDualEncoder class represents a dual encoder model based on the BERT architecture.
    This class inherits from the nn.Module class in MindSpore.

    Attributes:
        config: The configuration parameters for the model.
        layer: A list of BertDualLayer instances representing the stacked layers in the encoder.
        gradient_checkpointing: A boolean indicating whether gradient checkpointing is enabled in the model.

    Methods:
        __init__(self, config):
            Initializes the BertDualEncoder instance with the provided configuration.

        forward(self, hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_values, use_cache, output_attentions, output_hidden_states, return_dict):
            Constructs the dual encoder model with the given input tensors and parameters.
            Returns the final hidden states, past key values, hidden states at all layers,
            self-attentions at all layers, and cross-attentions at all layers.
    """
    def __init__(self, config):
        """Initialize the BertDualEncoder class.

        Args:
            self: The instance of the BertDualEncoder class.
            config:
                A dictionary containing the configuration parameters for the BertDualEncoder.

                - Type: dict
                - Purpose: Specifies the configuration settings for the BertDualEncoder.
                - Restrictions: Must be a valid dictionary object.

        Returns:
            None.

        Raises:
            None.
        """
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BertDualLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: mindspore.Tensor,
        attention_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        encoder_hidden_states: Optional[mindspore.Tensor] = None,
        encoder_attention_mask: Optional[mindspore.Tensor] = None,
        past_key_values: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ):
        """
        This method forwards the BertDualEncoder model.

        Args:
            self: The object instance.
            hidden_states (mindspore.Tensor): The input hidden states tensor.
            attention_mask (Optional[mindspore.Tensor]): Mask indicating which elements in the input should be attended to.
            head_mask (Optional[mindspore.Tensor]): Mask for attention heads.
            encoder_hidden_states (Optional[mindspore.Tensor]): Hidden states from the encoder.
            encoder_attention_mask (Optional[mindspore.Tensor]): Mask for encoder attention.
            past_key_values (Optional[Tuple[Tuple[mindspore.Tensor]]]): Past key values for caching.
            use_cache (Optional[bool]): Flag indicating whether to use caching.
            output_attentions (Optional[bool]): Flag indicating whether to output attentions.
            output_hidden_states (Optional[bool]): Flag indicating whether to output hidden states.
            return_dict (Optional[bool]): Flag indicating whether to return a dictionary.

        Returns:
            None.

        Raises:
            None
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            layer_outputs = layer_module(
                hidden_states,
                attention_mask,
                layer_head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                past_key_value,
                output_attentions,
            )
            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )

mindnlp.transformers.models.bert.modeling_bert.BertDualEncoder.__init__(config)

Initialize the BertDualEncoder class.

PARAMETER DESCRIPTION
self

The instance of the BertDualEncoder class.

config

A dictionary containing the configuration parameters for the BertDualEncoder.

  • Type: dict
  • Purpose: Specifies the configuration settings for the BertDualEncoder.
  • Restrictions: Must be a valid dictionary object.

RETURNS DESCRIPTION

None.

Source code in mindnlp/transformers/models/bert/modeling_bert.py (lines 3400-3421)
def __init__(self, config):
    """Initialize the BertDualEncoder class.

    Args:
        self: The instance of the BertDualEncoder class.
        config:
            A dictionary containing the configuration parameters for the BertDualEncoder.

            - Type: dict
            - Purpose: Specifies the configuration settings for the BertDualEncoder.
            - Restrictions: Must be a valid dictionary object.

    Returns:
        None.

    Raises:
        None.
    """
    super().__init__()
    self.config = config
    self.layer = nn.ModuleList([BertDualLayer(config) for _ in range(config.num_hidden_layers)])
    self.gradient_checkpointing = False

mindnlp.transformers.models.bert.modeling_bert.BertDualEncoder.forward(hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True)

This method forwards the BertDualEncoder model.

PARAMETER DESCRIPTION
self

The object instance.

hidden_states

The input hidden states tensor.

TYPE: Tensor

attention_mask

Mask indicating which elements in the input should be attended to.

TYPE: Optional[Tensor] DEFAULT: None

head_mask

Mask for attention heads.

TYPE: Optional[Tensor] DEFAULT: None

encoder_hidden_states

Hidden states from the encoder.

TYPE: Optional[Tensor] DEFAULT: None

encoder_attention_mask

Mask for encoder attention.

TYPE: Optional[Tensor] DEFAULT: None

past_key_values

Past key values for caching.

TYPE: Optional[Tuple[Tuple[Tensor]]] DEFAULT: None

use_cache

Flag indicating whether to use caching.

TYPE: Optional[bool] DEFAULT: None

output_attentions

Flag indicating whether to output attentions.

TYPE: Optional[bool] DEFAULT: False

output_hidden_states

Flag indicating whether to output hidden states.

TYPE: Optional[bool] DEFAULT: False

return_dict

Flag indicating whether to return a dictionary.

TYPE: Optional[bool] DEFAULT: True

RETURNS DESCRIPTION

None.

Source code in mindnlp/transformers/models/bert/modeling_bert.py (lines 3423-3507)
def forward(
    self,
    hidden_states: mindspore.Tensor,
    attention_mask: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    encoder_hidden_states: Optional[mindspore.Tensor] = None,
    encoder_attention_mask: Optional[mindspore.Tensor] = None,
    past_key_values: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = False,
    output_hidden_states: Optional[bool] = False,
    return_dict: Optional[bool] = True,
):
    """
    This method forwards the BertDualEncoder model.

    Args:
        self: The object instance.
        hidden_states (mindspore.Tensor): The input hidden states tensor.
        attention_mask (Optional[mindspore.Tensor]): Mask indicating which elements in the input should be attended to.
        head_mask (Optional[mindspore.Tensor]): Mask for attention heads.
        encoder_hidden_states (Optional[mindspore.Tensor]): Hidden states from the encoder.
        encoder_attention_mask (Optional[mindspore.Tensor]): Mask for encoder attention.
        past_key_values (Optional[Tuple[Tuple[mindspore.Tensor]]]): Past key values for caching.
        use_cache (Optional[bool]): Flag indicating whether to use caching.
        output_attentions (Optional[bool]): Flag indicating whether to output attentions.
        output_hidden_states (Optional[bool]): Flag indicating whether to output hidden states.
        return_dict (Optional[bool]): Flag indicating whether to return a dictionary.

    Returns:
        None.

    Raises:
        None
    """
    all_hidden_states = () if output_hidden_states else None
    all_self_attentions = () if output_attentions else None
    all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

    next_decoder_cache = () if use_cache else None
    for i, layer_module in enumerate(self.layer):
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        layer_head_mask = head_mask[i] if head_mask is not None else None
        past_key_value = past_key_values[i] if past_key_values is not None else None
        layer_outputs = layer_module(
            hidden_states,
            attention_mask,
            layer_head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        hidden_states = layer_outputs[0]
        if use_cache:
            next_decoder_cache += (layer_outputs[-1],)
        if output_attentions:
            all_self_attentions = all_self_attentions + (layer_outputs[1],)
            if self.config.add_cross_attention:
                all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

    if output_hidden_states:
        all_hidden_states = all_hidden_states + (hidden_states,)

    if not return_dict:
        return tuple(
            v
            for v in [
                hidden_states,
                next_decoder_cache,
                all_hidden_states,
                all_self_attentions,
                all_cross_attentions,
            ]
            if v is not None
        )
    return BaseModelOutputWithPastAndCrossAttentions(
        last_hidden_state=hidden_states,
        past_key_values=next_decoder_cache,
        hidden_states=all_hidden_states,
        attentions=all_self_attentions,
        cross_attentions=all_cross_attentions,
    )
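
A hedged sketch of running the encoder stack directly on embedded inputs (illustrative; in practice the encoder is normally driven by BertDualModel rather than called standalone):

import mindspore
from mindspore import ops
from mindnlp.transformers.models.bert.configuration_bert import BertConfig
from mindnlp.transformers.models.bert.modeling_bert import BertDualEncoder

config = BertConfig()
encoder = BertDualEncoder(config)  # stacks config.num_hidden_layers BertDualLayer modules

# (batch_size, seq_length, hidden_size), e.g. the output of an embedding layer
hidden_states = ops.ones((2, 8, config.hidden_size), mindspore.float32)
encoder_outputs = encoder(hidden_states, output_hidden_states=True, return_dict=True)
last_hidden_state = encoder_outputs.last_hidden_state
all_hidden_states = encoder_outputs.hidden_states  # per-layer states plus the input embeddings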

mindnlp.transformers.models.bert.modeling_bert.BertDualForSequenceClassification

Bases: BertPreTrainedModel

The BertDualForSequenceClassification class represents a dual BERT model for sequence classification tasks. This class inherits from BertPreTrainedModel and provides methods for initializing the model and processing input data for sequence classification.

The __init__ method initializes the BertDualForSequenceClassification instance by setting the number of labels, BERT model configuration, dropout, and classifier layers.

The forward method processes input data for sequence classification using the BERT model. It accepts input tensors such as input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, labels, and additional parameters for controlling the output format. The method returns the classification logits and can also calculate the loss based on the problem type and labels provided.

Note

This docstring is based on the provided code and may need to be updated with additional information about the class attributes, methods, and usage.

Source code in mindnlp/transformers/models/bert/modeling_bert.py (lines 3731-3872)
class BertDualForSequenceClassification(BertPreTrainedModel):

    """
    The BertDualForSequenceClassification class represents a dual BERT model for sequence classification tasks.
    This class inherits from BertPreTrainedModel and provides methods for initializing the model and processing
    input data for sequence classification.

    The __init__ method initializes the BertDualForSequenceClassification instance by setting the number of labels,
    BERT model configuration, dropout, and classifier layers.

    The forward method processes input data for sequence classification using the BERT model.
    It accepts input tensors such as input_ids, attention_mask, token_type_ids, position_ids, head_mask,
    inputs_embeds, labels, and additional parameters for controlling the output format.
    The method returns the classification logits and can also calculate the loss based on the problem type
    and labels provided.

    Note:
        This docstring is based on the provided code and may need to be updated with additional information
        about the class attributes, methods, and usage.
    """
    def __init__(self, config):
        """
        Initializes a new instance of the BertDualForSequenceClassification class.

        Args:
            self: The instance of the class.
            config:
                An instance of the configuration class containing the model configuration parameters.

                - Type: config class
                - Purpose: Specifies the configuration parameters for the model.
                - Restrictions: Must be a valid instance of the configuration class.

        Returns:
            None.

        Raises:
            TypeError: If the config parameter is not of the expected type.
            ValueError: If the config.num_labels is not provided or is invalid.
            AttributeError: If the required attributes are not found in the config object.
            RuntimeError: If an error occurs during model initialization or post-initialization.
        """
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.bert = BertDualModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """
        This method forwards a dual BERT model for sequence classification.

        Args:
            self (BertDualForSequenceClassification): The instance of the BertDualForSequenceClassification class.
            input_ids (Optional[mindspore.Tensor]): The input token IDs representing the sequences. Default is None.
            attention_mask (Optional[mindspore.Tensor]): The attention mask to avoid attending to padding tokens. Default is None.
            token_type_ids (Optional[mindspore.Tensor]): The token type IDs to distinguish different sequences in the input. Default is None.
            position_ids (Optional[mindspore.Tensor]): The position IDs to specify the position of each token in the input. Default is None.
            head_mask (Optional[mindspore.Tensor]): The head mask to nullify selected heads of the self-attention mechanism. Default is None.
            inputs_embeds (Optional[mindspore.Tensor]): The embedded representation of the input sequences. Default is None.
            labels (Optional[mindspore.Tensor]): The labels for the input sequences. Default is None.
            output_attentions (Optional[bool]): Whether to return the attentions of all layers. Default is None.
            output_hidden_states (Optional[bool]): Whether to return the hidden states of all layers. Default is None.
            return_dict (Optional[bool]): Whether to return outputs as a dictionary. Default is None.

        Returns:
            `SequenceClassifierOutput` or `tuple`: the classification (or regression) logits, the loss when
            `labels` is provided, and optionally the hidden states and attentions of all layers.

        Raises:
            RuntimeError: If the dtype or shape of `labels` is incompatible with the inferred `problem_type`.

        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and labels.dtype in (mindspore.int32, mindspore.int64):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                if self.num_labels == 1:
                    loss = ops.mse_loss(logits.squeeze(), labels.squeeze())
                else:
                    loss = ops.mse_loss(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss = F.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss = ops.binary_cross_entropy_with_logits(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

mindnlp.transformers.models.bert.modeling_bert.BertDualForSequenceClassification.__init__(config)

Initializes a new instance of the BertDualForSequenceClassification class.

PARAMETER DESCRIPTION
self

The instance of the class.

config

An instance of the configuration class containing the model configuration parameters.

  • Type: BertConfig
  • Purpose: Specifies the configuration parameters for the model.
  • Restrictions: Must be a valid BertConfig (or compatible) instance.

RETURNS DESCRIPTION

None.

RAISES DESCRIPTION
TypeError

If the config parameter is not of the expected type.

ValueError

If the config.num_labels is not provided or is invalid.

AttributeError

If the required attributes are not found in the config object.

RuntimeError

If an error occurs during model initialization or post-initialization.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def __init__(self, config):
    """
    Initializes a new instance of the BertDualForSequenceClassification class.

    Args:
        self: The instance of the class.
        config:
            An instance of the configuration class containing the model configuration parameters.

            - Type: BertConfig
            - Purpose: Specifies the configuration parameters for the model.
            - Restrictions: Must be a valid BertConfig (or compatible) instance.

    Returns:
        None.

    Raises:
        TypeError: If the config parameter is not of the expected type.
        ValueError: If the config.num_labels is not provided or is invalid.
        AttributeError: If the required attributes are not found in the config object.
        RuntimeError: If an error occurs during model initialization or post-initialization.
    """
    super().__init__(config)
    self.num_labels = config.num_labels
    self.config = config

    self.bert = BertDualModel(config)
    classifier_dropout = (
        config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
    )
    self.dropout = nn.Dropout(classifier_dropout)
    self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    # Initialize weights and apply final processing
    self.post_init()
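
The classifier head's dropout rate falls back to `hidden_dropout_prob` whenever `classifier_dropout` is left at its default of `None`. A minimal sketch of the fallback, mirroring the expression above (values are illustrative):

```python
>>> from mindnlp.transformers.models.bert.configuration_bert import BertConfig
>>> # classifier_dropout unset: the head reuses hidden_dropout_prob
>>> cfg = BertConfig(hidden_dropout_prob=0.1, classifier_dropout=None)
>>> cfg.classifier_dropout if cfg.classifier_dropout is not None else cfg.hidden_dropout_prob
0.1
>>> # classifier_dropout set: it takes precedence for the classification head
>>> cfg = BertConfig(hidden_dropout_prob=0.1, classifier_dropout=0.3)
>>> cfg.classifier_dropout if cfg.classifier_dropout is not None else cfg.hidden_dropout_prob
0.3
```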

mindnlp.transformers.models.bert.modeling_bert.BertDualForSequenceClassification.forward(input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None)

This method runs the forward pass of the dual BERT model for sequence classification.

PARAMETER DESCRIPTION
self

The instance of the BertDualForSequenceClassification class.

TYPE: BertDualForSequenceClassification

input_ids

The input token IDs representing the sequences. Default is None.

TYPE: Optional[Tensor] DEFAULT: None

attention_mask

The attention mask to avoid attending to padding tokens. Default is None.

TYPE: Optional[Tensor] DEFAULT: None

token_type_ids

The token type IDs to distinguish different sequences in the input. Default is None.

TYPE: Optional[Tensor] DEFAULT: None

position_ids

The position IDs to specify the position of each token in the input. Default is None.

TYPE: Optional[Tensor] DEFAULT: None

head_mask

The head mask to nullify selected heads of the self-attention mechanism. Default is None.

TYPE: Optional[Tensor] DEFAULT: None

inputs_embeds

The embedded representation of the input sequences. Default is None.

TYPE: Optional[Tensor] DEFAULT: None

labels

The labels for the input sequences. Default is None.

TYPE: Optional[Tensor] DEFAULT: None

output_attentions

Whether to return the attentions of all layers. Default is None.

TYPE: Optional[bool] DEFAULT: None

output_hidden_states

Whether to return the hidden states of all layers. Default is None.

TYPE: Optional[bool] DEFAULT: None

return_dict

Whether to return outputs as a dictionary. Default is None.

TYPE: Optional[bool] DEFAULT: None

RETURNS DESCRIPTION

SequenceClassifierOutput or tuple: the classification (or regression) logits, the loss when labels is provided, and optionally the hidden states and attentions of all layers.

RAISES DESCRIPTION
RuntimeError

If the dtype or shape of labels is incompatible with the inferred problem_type.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    position_ids: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    labels: Optional[mindspore.Tensor] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
):
    """
    This method runs the forward pass of the dual BERT model for sequence classification.

    Args:
        self (BertDualForSequenceClassification): The instance of the BertDualForSequenceClassification class.
        input_ids (Optional[mindspore.Tensor]): The input token IDs representing the sequences. Default is None.
        attention_mask (Optional[mindspore.Tensor]): The attention mask to avoid attending to padding tokens. Default is None.
        token_type_ids (Optional[mindspore.Tensor]): The token type IDs to distinguish different sequences in the input. Default is None.
        position_ids (Optional[mindspore.Tensor]): The position IDs to specify the position of each token in the input. Default is None.
        head_mask (Optional[mindspore.Tensor]): The head mask to nullify selected heads of the self-attention mechanism. Default is None.
        inputs_embeds (Optional[mindspore.Tensor]): The embedded representation of the input sequences. Default is None.
        labels (Optional[mindspore.Tensor]): The labels for the input sequences. Default is None.
        output_attentions (Optional[bool]): Whether to return the attentions of all layers. Default is None.
        output_hidden_states (Optional[bool]): Whether to return the hidden states of all layers. Default is None.
        return_dict (Optional[bool]): Whether to return outputs as a dictionary. Default is None.

    Returns:
        `SequenceClassifierOutput` or `tuple`: the classification (or regression) logits, the loss when
        `labels` is provided, and optionally the hidden states and attentions of all layers.

    Raises:
        RuntimeError: If the dtype or shape of `labels` is incompatible with the inferred `problem_type`.

    """
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    outputs = self.bert(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    pooled_output = outputs[1]

    pooled_output = self.dropout(pooled_output)
    logits = self.classifier(pooled_output)

    loss = None
    if labels is not None:
        if self.config.problem_type is None:
            if self.num_labels == 1:
                self.config.problem_type = "regression"
            elif self.num_labels > 1 and labels.dtype in (mindspore.int32, mindspore.int64):
                self.config.problem_type = "single_label_classification"
            else:
                self.config.problem_type = "multi_label_classification"

        if self.config.problem_type == "regression":
            if self.num_labels == 1:
                loss = ops.mse_loss(logits.squeeze(), labels.squeeze())
            else:
                loss = ops.mse_loss(logits, labels)
        elif self.config.problem_type == "single_label_classification":
            loss = F.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1))
        elif self.config.problem_type == "multi_label_classification":
            loss = ops.binary_cross_entropy_with_logits(logits, labels)
    if not return_dict:
        output = (logits,) + outputs[2:]
        return ((loss,) + output) if loss is not None else output

    return SequenceClassifierOutput(
        loss=loss,
        logits=logits,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
    )
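
When `config.problem_type` is unset, the branch above infers it from `num_labels` and the dtype of `labels`: a single label selects regression (MSE), integer labels with several classes select single-label classification (cross-entropy), and anything else is treated as multi-label (BCE with logits). A minimal usage sketch with a tiny, randomly initialized model; the sizes and shapes here are illustrative only:

```python
>>> import numpy as np
>>> import mindspore
>>> from mindnlp.transformers.models.bert.configuration_bert import BertConfig
>>> from mindnlp.transformers.models.bert.modeling_bert import BertDualForSequenceClassification
>>> config = BertConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2,
...                     intermediate_size=128, num_labels=3)
>>> model = BertDualForSequenceClassification(config)
>>> input_ids = mindspore.Tensor(np.random.randint(0, config.vocab_size, (2, 8)), mindspore.int64)
>>> labels = mindspore.Tensor([0, 2], mindspore.int64)  # integer dtype -> single_label_classification
>>> outputs = model(input_ids, labels=labels)
>>> outputs.logits.shape  # (batch_size, num_labels)
(2, 3)
>>> model.config.problem_type
'single_label_classification'
```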

mindnlp.transformers.models.bert.modeling_bert.BertDualIntermediate

Bases: Module

This class represents a dual intermediate layer in a BERT model.

The BertDualIntermediate class is a subclass of nn.Module and is used to build the dual intermediate (feed-forward) layer in a BERT model. It takes in a configuration object as input, which specifies the hidden size and intermediate size. The class initializes the hidden size and intermediate size attributes based on the provided configuration.

ATTRIBUTE DESCRIPTION
hidden_size

The size of the hidden state in the dual intermediate layer.

TYPE: int

intermediate_size

The size of the intermediate state in the dual intermediate layer.

TYPE: int

dense

A dense layer that transforms the input hidden states.

TYPE: Dense

intermediate_act_fn

The activation function to be applied to the intermediate states.

TYPE: function

METHOD DESCRIPTION
forward

Constructs the dual intermediate layer using the given hidden states as input. The method first splits the input hidden states into two channels: hidden_states_r and hidden_states_d. Then, it combines the two channels into a single input using the to_2channel function. The combined input is passed through the dense layer. The resulting intermediate states are then split back into two channels: hidden_states_r and hidden_states_d. Finally, the two channels are concatenated and passed through the intermediate activation function. The method returns the resulting hidden states.

Note
  • This class assumes that the given configuration object contains the necessary parameters for initialization.
  • The intermediate activation function can be either a string representing a predefined activation function or a custom activation function.
Example
>>> # Create a configuration object (attribute access is required, so a plain dict will not work)
>>> config = BertConfig(hidden_size=768, intermediate_size=3072, hidden_act='gelu')
...
>>> # Create an instance of the BertDualIntermediate class
>>> dual_intermediate = BertDualIntermediate(config)
...
>>> # Run the dual intermediate layer on some hidden states
>>> hidden_states = ...  # input hidden states of shape (batch, seq_len, hidden_size)
>>> output = dual_intermediate(hidden_states)
Source code in mindnlp/transformers/models/bert/modeling_bert.py
class BertDualIntermediate(nn.Module):

    """
    This class represents a dual intermediate layer in a BERT model.

    The BertDualIntermediate class is a subclass of nn.Module and is used to build the dual intermediate (feed-forward) layer in a BERT model.
    It takes in a configuration object as input, which specifies the hidden size and intermediate size.
    The class initializes the hidden size and intermediate size attributes based on the provided configuration.

    Attributes:
        hidden_size (int): The size of the hidden state in the dual intermediate layer.
        intermediate_size (int): The size of the intermediate state in the dual intermediate layer.
        dense (Dense): A dense layer that transforms the input hidden states.
        intermediate_act_fn (function): The activation function to be applied to the intermediate states.

    Methods:
        forward(hidden_states):
            Constructs the dual intermediate layer using the given hidden states as input.
            The method first splits the input hidden states into two channels: hidden_states_r and hidden_states_d.
            Then, it combines the two channels into a single input using the to_2channel function.
            The combined input is passed through the dense layer.
            The resulting intermediate states are then split back into two channels: hidden_states_r and hidden_states_d.
            Finally, the two channels are concatenated and passed through the intermediate activation function.
            The method returns the resulting hidden states.

    Note:
        - This class assumes that the given configuration object contains the necessary parameters for initialization.
        - The intermediate activation function can be either a string representing a predefined activation function or a custom activation function.

    Example:
        ```python
        >>> # Create a configuration object (attribute access is required, so a plain dict will not work)
        >>> config = BertConfig(hidden_size=768, intermediate_size=3072, hidden_act='gelu')
        ...
        >>> # Create an instance of the BertDualIntermediate class
        >>> dual_intermediate = BertDualIntermediate(config)
        ...
        >>> # Run the dual intermediate layer on some hidden states
        >>> hidden_states = ...  # input hidden states of shape (batch, seq_len, hidden_size)
        >>> output = dual_intermediate(hidden_states)
        ```
    """
    def __init__(self, config):
        """
        Initializes an instance of the BertDualIntermediate class.

        Args:
            self: The current object instance.
            config:
                An object of the configuration class that holds the configuration settings.

            - Type: BertConfig
                - Purpose: To provide the necessary configuration for initializing the BertDualIntermediate instance.
                - Restrictions: None

        Returns:
            None.

        Raises:
            None.
        """
        super().__init__()
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.dense = Dense(config.hidden_size//2, config.intermediate_size//2)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        """
        The forward method in the BertDualIntermediate class processes the hidden_states tensor to produce an intermediate representation.

        Args:
            self (BertDualIntermediate): The instance of the BertDualIntermediate class.
            hidden_states (mindspore.Tensor): A tensor of shape (batch_size, sequence_length, hidden_size)
                representing the hidden states of the input sequence. The hidden_size is expected to be an even number.

        Returns:
            mindspore.Tensor: The transformed hidden states of shape (batch_size, sequence_length, intermediate_size).

        Raises:
            ValueError: If the hidden_states tensor does not have the expected shape or if the hidden_size is not an even number.
        """
        hidden_states_r = hidden_states[:,:,:self.hidden_size//2]
        hidden_states_d = hidden_states[:,:,self.hidden_size//2:]
        hidden_states = to_2channel(hidden_states_r, hidden_states_d)
        hidden_states = self.dense(hidden_states)
        hidden_states_r, hidden_states_d = get_x_and_y(hidden_states)
        hidden_states = ops.cat([hidden_states_r, hidden_states_d], -1)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

mindnlp.transformers.models.bert.modeling_bert.BertDualIntermediate.__init__(config)

Initializes an instance of the BertDualIntermediate class.

PARAMETER DESCRIPTION
self

The current object instance.

config

An object of the configuration class that holds the configuration settings.

  • Type: BertConfig
  • Purpose: To provide the necessary configuration for initializing the BertDualIntermediate instance.
  • Restrictions: None

RETURNS DESCRIPTION

None.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def __init__(self, config):
    """
    Initializes an instance of the BertDualIntermediate class.

    Args:
        self: The current object instance.
        config:
            An object of the configuration class that holds the configuration settings.

            - Type: BertConfig
            - Purpose: To provide the necessary configuration for initializing the BertDualIntermediate instance.
            - Restrictions: None

    Returns:
        None.

    Raises:
        None.
    """
    super().__init__()
    self.hidden_size = config.hidden_size
    self.intermediate_size = config.intermediate_size
    self.dense = Dense(config.hidden_size//2, config.intermediate_size//2)
    if isinstance(config.hidden_act, str):
        self.intermediate_act_fn = ACT2FN[config.hidden_act]
    else:
        self.intermediate_act_fn = config.hidden_act

mindnlp.transformers.models.bert.modeling_bert.BertDualIntermediate.forward(hidden_states)

The forward method in the BertDualIntermediate class processes the hidden_states tensor to produce an intermediate representation.

PARAMETER DESCRIPTION
self

The instance of the BertDualIntermediate class.

TYPE: BertDualIntermediate

hidden_states

A tensor of shape (batch_size, sequence_length, hidden_size) representing the hidden states of the input sequence. The hidden_size is expected to be an even number.

TYPE: Tensor

RETURNS DESCRIPTION
Tensor

The transformed hidden states of shape (batch_size, sequence_length, intermediate_size).

RAISES DESCRIPTION
ValueError

If the hidden_states tensor does not have the expected shape or if the hidden_size is not an even number.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def forward(self, hidden_states):
    """
    The forward method in the BertDualIntermediate class processes the hidden_states tensor to produce an intermediate representation.

    Args:
        self (BertDualIntermediate): The instance of the BertDualIntermediate class.
        hidden_states (mindspore.Tensor): A tensor of shape (batch_size, sequence_length, hidden_size)
            representing the hidden states of the input sequence. The hidden_size is expected to be an even number.

    Returns:
        mindspore.Tensor: The transformed hidden states of shape (batch_size, sequence_length, intermediate_size).

    Raises:
        ValueError: If the hidden_states tensor does not have the expected shape or if the hidden_size is not an even number.
    """
    hidden_states_r = hidden_states[:,:,:self.hidden_size//2]
    hidden_states_d = hidden_states[:,:,self.hidden_size//2:]
    hidden_states = to_2channel(hidden_states_r, hidden_states_d)
    hidden_states = self.dense(hidden_states)
    hidden_states_r, hidden_states_d = get_x_and_y(hidden_states)
    hidden_states = ops.cat([hidden_states_r, hidden_states_d], -1)
    hidden_states = self.intermediate_act_fn(hidden_states)
    return hidden_states
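
The split/merge sequence above keeps the two halves of the hidden dimension as separate channels: each half has width hidden_size//2, the shared `Dense` maps hidden_size//2 to intermediate_size//2 per channel, and the final concatenation restores a single tensor of width intermediate_size. A rough shape trace, assuming the BERT-base sizes hidden_size=768 and intermediate_size=3072 (`to_2channel` and `get_x_and_y` are the library's internal pack/unpack helpers):

```python
# hidden_states:            (batch, seq_len, 768)
# hidden_states_r / _d:     (batch, seq_len, 384) each   # the two halves
# to_2channel(r, d):        packs both halves into one two-channel input
# self.dense (384 -> 1536): applied once, shared across both channels
# get_x_and_y(...):         unpacks back into two (batch, seq_len, 1536) halves
# ops.cat([...], -1):       (batch, seq_len, 3072)
# intermediate_act_fn:      elementwise activation, shape unchanged
```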

mindnlp.transformers.models.bert.modeling_bert.BertDualLayer

Bases: Module

BertDualLayer

This class represents a layer in a dual-attention BERT model. It is a subclass of nn.Module and is responsible for performing attention and feed-forward operations.

ATTRIBUTE DESCRIPTION
chunk_size_feed_forward

The size of chunks for feed-forward operation.

TYPE: int

seq_len_dim

The dimension of the sequence length.

TYPE: int

attention

The attention module used for self-attention.

TYPE: BertDualAttention

is_decoder

Indicates whether the layer is used as a decoder model.

TYPE: bool

add_cross_attention

Indicates whether cross-attention is added.

TYPE: bool

crossattention

The attention module used for cross-attention (if add_cross_attention is True).

TYPE: BertAttention

intermediate

The intermediate module used in the feed-forward operation.

TYPE: BertDualIntermediate

output

The output module used in the feed-forward operation.

TYPE: BertDualOutput

METHOD DESCRIPTION
forward

Constructs the layer by performing attention and feed-forward operations.

feed_forward_chunk

Performs the feed-forward operation on a chunk of attention output.

Note

The class assumes that the imported modules (BertDualAttention, BertAttention, BertDualIntermediate, BertDualOutput) are available and properly implemented.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
class BertDualLayer(nn.Module):

    """
    BertDualLayer

    This class represents a layer in a dual-attention BERT model.
    It is a subclass of nn.Module and is responsible for performing attention and feed-forward operations.

    Attributes:
        chunk_size_feed_forward (int): The size of chunks for feed-forward operation.
        seq_len_dim (int): The dimension of the sequence length.
        attention (BertDualAttention): The attention module used for self-attention.
        is_decoder (bool): Indicates whether the layer is used as a decoder model.
        add_cross_attention (bool): Indicates whether cross-attention is added.
        crossattention (BertAttention): The attention module used for cross-attention (if add_cross_attention is True).
        intermediate (BertDualIntermediate): The intermediate module used in the feed-forward operation.
        output (BertDualOutput): The output module used in the feed-forward operation.

    Methods:
        forward(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions):
            Constructs the layer by performing attention and feed-forward operations.

        feed_forward_chunk(attention_output):
            Performs the feed-forward operation on a chunk of attention output.

    Note:
        The class assumes that the imported modules (BertDualAttention, BertAttention, BertDualIntermediate, BertDualOutput)
        are available and properly implemented.
    """
    def __init__(self, config):
        """
        Initializes a new instance of the BertDualLayer class.

        Args:
            self: The object instance.
            config (object):
                The configuration object that contains the settings for the BertDualLayer.

                - chunk_size_feed_forward (int): The chunk size used when chunking the feed-forward pass.
                - is_decoder (bool): Indicates whether the model is a decoder.
                - add_cross_attention (bool): Indicates whether cross attention is added.
                    Raises a ValueError if cross attention is added and the model is not a decoder.
                - position_embedding_type (str): The type of position embedding for cross attention.

        Returns:
            None

        Raises:
            ValueError: If cross attention is added and the model is not a decoder.
        """
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BertDualAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = BertAttention(config, position_embedding_type="absolute")
        self.intermediate = BertDualIntermediate(config)
        self.output = BertDualOutput(config)

    def forward(
        self,
        hidden_states: mindspore.Tensor,
        attention_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        encoder_hidden_states: Optional[mindspore.Tensor] = None,
        encoder_attention_mask: Optional[mindspore.Tensor] = None,
        past_key_value: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
        output_attentions: Optional[bool] = False,
    ):
        """
        This method runs the forward pass of a BertDualLayer, performing self-attention and, when configured, cross-attention, followed by the feed-forward block.

        Args:
            self: The instance of the BertDualLayer class.
            hidden_states (mindspore.Tensor): The input hidden states to be processed.
            attention_mask (Optional[mindspore.Tensor]): An optional tensor specifying which elements should be attended to.
            head_mask (Optional[mindspore.Tensor]): An optional tensor providing a mask for the attention heads.
            encoder_hidden_states (Optional[mindspore.Tensor]): Optional hidden states from an encoder layer for cross-attention.
            encoder_attention_mask (Optional[mindspore.Tensor]): Optional attention mask for the encoder hidden states.
            past_key_value (Optional[Tuple[Tuple[mindspore.Tensor]]]): Optional tuple containing the past key and value tensors.
            output_attentions (Optional[bool]): Flag indicating whether to output attention weights.

        Returns:
            tuple: The layer output tensor, followed by the attention weights when output_attentions is True and, for decoder layers, the present key/value cache.

        Raises:
            ValueError: Raised if `encoder_hidden_states` are provided but cross-attention layers are not instantiated in the BertDualLayer instance.
        """
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        # layer_output = apply_chunking_to_forward(
        #     self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        # )
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)

        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        """feed forward chunk"""
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output

mindnlp.transformers.models.bert.modeling_bert.BertDualLayer.__init__(config)

Initializes a new instance of the BertDualLayer class.

PARAMETER DESCRIPTION
self

The object instance.

config

The configuration object that contains the settings for the BertDualLayer.

  • chunk_size_feed_forward (int): The chunk size used when chunking the feed-forward pass.
  • is_decoder (bool): Indicates whether the model is a decoder.
  • add_cross_attention (bool): Indicates whether cross attention is added. Raises a ValueError if cross attention is added and the model is not a decoder.
  • position_embedding_type (str): The type of position embedding for cross attention.

TYPE: object

RETURNS DESCRIPTION

None

RAISES DESCRIPTION
ValueError

If cross attention is added and the model is not a decoder.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def __init__(self, config):
    """
    Initializes a new instance of the BertDualLayer class.

    Args:
        self: The object instance.
        config (object):
            The configuration object that contains the settings for the BertDualLayer.

            - chunk_size_feed_forward (int): The chunk size used when chunking the feed-forward pass.
            - is_decoder (bool): Indicates whether the model is a decoder.
            - add_cross_attention (bool): Indicates whether cross attention is added.
                Raises a ValueError if cross attention is added and the model is not a decoder.
            - position_embedding_type (str): The type of position embedding for cross attention.

    Returns:
        None

    Raises:
        ValueError: If cross attention is added and the model is not a decoder.
    """
    super().__init__()
    self.chunk_size_feed_forward = config.chunk_size_feed_forward
    self.seq_len_dim = 1
    self.attention = BertDualAttention(config)
    self.is_decoder = config.is_decoder
    self.add_cross_attention = config.add_cross_attention
    if self.add_cross_attention:
        if not self.is_decoder:
            raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
        self.crossattention = BertAttention(config, position_embedding_type="absolute")
    self.intermediate = BertDualIntermediate(config)
    self.output = BertDualOutput(config)
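
Because cross-attention is only meaningful in a decoder, the constructor rejects `add_cross_attention=True` on an encoder-only configuration. A minimal sketch of the guard (default BERT-base sizes; values are illustrative):

```python
>>> from mindnlp.transformers.models.bert.configuration_bert import BertConfig
>>> from mindnlp.transformers.models.bert.modeling_bert import BertDualLayer
>>> bad = BertConfig(is_decoder=False, add_cross_attention=True)
>>> try:
...     BertDualLayer(bad)
... except ValueError as err:
...     print("rejected:", "cross attention" in str(err))
rejected: True
>>> good = BertConfig(is_decoder=True, add_cross_attention=True)
>>> layer = BertDualLayer(good)
>>> hasattr(layer, "crossattention")  # the cross-attention module is only built for decoders
True
```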

mindnlp.transformers.models.bert.modeling_bert.BertDualLayer.feed_forward_chunk(attention_output)

feed forward chunk

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def feed_forward_chunk(self, attention_output):
    """feed forward chunk"""
    intermediate_output = self.intermediate(attention_output)
    layer_output = self.output(intermediate_output, attention_output)
    return layer_output

mindnlp.transformers.models.bert.modeling_bert.BertDualLayer.forward(hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False)

This method runs the forward pass of a BertDualLayer, performing self-attention and, when configured, cross-attention, followed by the feed-forward block.

PARAMETER DESCRIPTION
self

The instance of the BertDualLayer class.

hidden_states

The input hidden states to be processed.

TYPE: Tensor

attention_mask

An optional tensor specifying which elements should be attended to.

TYPE: Optional[Tensor] DEFAULT: None

head_mask

An optional tensor providing a mask for the attention heads.

TYPE: Optional[Tensor] DEFAULT: None

encoder_hidden_states

Optional hidden states from an encoder layer for cross-attention.

TYPE: Optional[Tensor] DEFAULT: None

encoder_attention_mask

Optional attention mask for the encoder hidden states.

TYPE: Optional[Tensor] DEFAULT: None

past_key_value

Optional tuple containing the past key and value tensors.

TYPE: Optional[Tuple[Tuple[Tensor]]] DEFAULT: None

output_attentions

Flag indicating whether to output attention weights.

TYPE: Optional[bool] DEFAULT: False

RETURNS DESCRIPTION

tuple: The layer output tensor, followed by the attention weights when output_attentions is True and, for decoder layers, the present key/value cache.

RAISES DESCRIPTION
ValueError

Raised if encoder_hidden_states are provided but cross-attention layers are not instantiated in the BertDualLayer instance.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def forward(
    self,
    hidden_states: mindspore.Tensor,
    attention_mask: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    encoder_hidden_states: Optional[mindspore.Tensor] = None,
    encoder_attention_mask: Optional[mindspore.Tensor] = None,
    past_key_value: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
    output_attentions: Optional[bool] = False,
):
    """
    This method runs the forward pass of a BertDualLayer, performing self-attention and, when configured, cross-attention, followed by the feed-forward block.

    Args:
        self: The instance of the BertDualLayer class.
        hidden_states (mindspore.Tensor): The input hidden states to be processed.
        attention_mask (Optional[mindspore.Tensor]): An optional tensor specifying which elements should be attended to.
        head_mask (Optional[mindspore.Tensor]): An optional tensor providing a mask for the attention heads.
        encoder_hidden_states (Optional[mindspore.Tensor]): Optional hidden states from an encoder layer for cross-attention.
        encoder_attention_mask (Optional[mindspore.Tensor]): Optional attention mask for the encoder hidden states.
        past_key_value (Optional[Tuple[Tuple[mindspore.Tensor]]]): Optional tuple containing the past key and value tensors.
        output_attentions (Optional[bool]): Flag indicating whether to output attention weights.

    Returns:
        tuple: The layer output tensor, followed by the attention weights when output_attentions is True and, for decoder layers, the present key/value cache.

    Raises:
        ValueError: Raised if `encoder_hidden_states` are provided but cross-attention layers are not instantiated in the BertDualLayer instance.
    """
    # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
    self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
    self_attention_outputs = self.attention(
        hidden_states,
        attention_mask,
        head_mask,
        output_attentions=output_attentions,
        past_key_value=self_attn_past_key_value,
    )
    attention_output = self_attention_outputs[0]

    # if decoder, the last output is tuple of self-attn cache
    if self.is_decoder:
        outputs = self_attention_outputs[1:-1]
        present_key_value = self_attention_outputs[-1]
    else:
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

    cross_attn_present_key_value = None
    if self.is_decoder and encoder_hidden_states is not None:
        if not hasattr(self, "crossattention"):
            raise ValueError(
                f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                " by setting `config.add_cross_attention=True`"
            )

        # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
        cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
        cross_attention_outputs = self.crossattention(
            attention_output,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            cross_attn_past_key_value,
            output_attentions,
        )
        attention_output = cross_attention_outputs[0]
        outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

        # add cross-attn cache to positions 3,4 of present_key_value tuple
        cross_attn_present_key_value = cross_attention_outputs[-1]
        present_key_value = present_key_value + cross_attn_present_key_value

    # layer_output = apply_chunking_to_forward(
    #     self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
    # )
    intermediate_output = self.intermediate(attention_output)
    layer_output = self.output(intermediate_output, attention_output)

    outputs = (layer_output,) + outputs

    # if decoder, return the attn key/values as the last output
    if self.is_decoder:
        outputs = outputs + (present_key_value,)

    return outputs
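
The cache handling above follows the usual BERT decoder convention: a per-layer `past_key_value` is a 4-tuple whose first two entries are the self-attention key/value states and whose last two are the cross-attention key/value states. A schematic of the slicing (shapes follow the standard layout; this is a sketch, not library API):

```python
# past_key_value = (self_k, self_v, cross_k, cross_v)
# self-attention cache:   past_key_value[:2]   -> (self_k, self_v)
# cross-attention cache:  past_key_value[-2:]  -> (cross_k, cross_v)
# each tensor:            (batch, num_heads, past_seq_len, head_dim)
# when is_decoder=True the layer appends the updated cache as its last output:
#   outputs = (layer_output, *attention_weights, present_key_value)
```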

mindnlp.transformers.models.bert.modeling_bert.BertDualModel

Bases: BertPreTrainedModel

The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in Attention is all you need by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

To behave as a decoder the model needs to be initialized with the is_decoder argument of the configuration set to True. To be used in a Seq2Seq model, the model needs to be initialized with both the is_decoder argument and add_cross_attention set to True; an encoder_hidden_states is then expected as an input to the forward pass.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
class BertDualModel(BertPreTrainedModel):
    """

    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
    """
    def __init__(self, config, add_pooling_layer=True):
        """
        Initializes an instance of the BertDualModel class.

        Args:
            self: The instance of the class.
            config (object): The configuration object that contains the settings for the model.
            add_pooling_layer (bool): A flag indicating whether to add a pooling layer. Defaults to True.

        Returns:
            None

        Raises:
            None
        """
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = BertDualEncoder(config)

        self.pooler = BertPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        """
        This method retrieves the input embeddings from the BertDualModel instance.

        Args:
            self: The instance of the BertDualModel class.

        Returns:
            The 'word_embeddings' embedding layer of the model.

        Raises:
            None.

        This method retrieves the input embeddings, represented by the 'word_embeddings' attribute of the BertDualModel instance.
        The embeddings are used to encode the input data into numerical representations suitable for processing by the model.

        Note that this method does not modify any attributes or perform any calculations. It simply returns the existing input embeddings.

        Example:
            ```python
            >>> model = BertDualModel(config)  # config: a BertConfig instance
            >>> embeddings = model.get_input_embeddings()
            ```
        """
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        """
        Sets the input embeddings for the BertDualModel.

        Args:
            self (BertDualModel): The instance of the BertDualModel class.
            value: The new input embeddings module to set for the model (typically an embedding layer such as nn.Embedding).

        Returns:
            None: This method updates the input embeddings for the BertDualModel in-place.

        Raises:
            None: This method performs a plain attribute assignment and does not validate the value.
        """
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        encoder_hidden_states: Optional[mindspore.Tensor] = None,
        encoder_attention_mask: Optional[mindspore.Tensor] = None,
        past_key_values: Optional[List[mindspore.Tensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        r"""
        Args:
            encoder_hidden_states  (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
                the model is configured as a decoder.
            encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on the padding token indices of the encoder input.
                This mask is used in the cross-attention if the model is configured as a decoder.
                Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
            past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with
                each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
                Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
                don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
                `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
                `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.shape[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = ops.ones(batch_size, seq_length + past_key_values_length)

        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.broadcast_to((batch_size, seq_length))
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = ops.zeros(*input_shape, dtype=mindspore.int64)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.shape
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = ops.ones(*encoder_hidden_shape)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
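
As the class docstring notes, the same module serves both roles and the switch is purely configuration-driven. A minimal sketch of the two instantiations (default BERT-base sizes):

```python
>>> from mindnlp.transformers.models.bert.configuration_bert import BertConfig
>>> from mindnlp.transformers.models.bert.modeling_bert import BertDualModel
>>> # Encoder: bidirectional self-attention only (the default)
>>> encoder = BertDualModel(BertConfig())
>>> # Decoder with cross-attention, for use inside a Seq2Seq stack;
>>> # encoder_hidden_states must then be passed to forward()
>>> decoder = BertDualModel(BertConfig(is_decoder=True, add_cross_attention=True))
```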

mindnlp.transformers.models.bert.modeling_bert.BertDualModel.__init__(config, add_pooling_layer=True)

Initializes an instance of the BertDualModel class.

PARAMETER DESCRIPTION
self

The instance of the class.

config

The configuration object that contains the settings for the model.

TYPE: object

add_pooling_layer

A flag indicating whether to add a pooling layer. Defaults to True.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION

None

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def __init__(self, config, add_pooling_layer=True):
    """
    Initializes an instance of the BertDualModel class.

    Args:
        self: The instance of the class.
        config (object): The configuration object that contains the settings for the model.
        add_pooling_layer (bool): A flag indicating whether to add a pooling layer. Defaults to True.

    Returns:
        None

    Raises:
        None
    """
    super().__init__(config)
    self.config = config

    self.embeddings = BertEmbeddings(config)
    self.encoder = BertDualEncoder(config)

    self.pooler = BertPooler(config) if add_pooling_layer else None

    # Initialize weights and apply final processing
    self.post_init()

mindnlp.transformers.models.bert.modeling_bert.BertDualModel.forward(input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None)

PARAMETER DESCRIPTION
encoder_hidden_states

Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.

TYPE: `torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional* DEFAULT: None

encoder_attention_mask

Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in [0, 1]:

  • 1 for tokens that are not masked,
  • 0 for tokens that are masked.

TYPE: `torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional* DEFAULT: None

past_key_values

Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If past_key_values are used, the user can optionally input only the last decoder_input_ids (those that don't have their past key value states given to this model) of shape (batch_size, 1) instead of all decoder_input_ids of shape (batch_size, sequence_length).

TYPE: `tuple(tuple(torch.FloatTensor))` of length `config.n_layers` DEFAULT: None

use_cache

If set to True, past_key_values key value states are returned and can be used to speed up decoding (see past_key_values).

TYPE: `bool`, *optional* DEFAULT: None

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    position_ids: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    encoder_hidden_states: Optional[mindspore.Tensor] = None,
    encoder_attention_mask: Optional[mindspore.Tensor] = None,
    past_key_values: Optional[List[mindspore.Tensor]] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
):
    r"""
    Args:
        encoder_hidden_states  (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input.
            This mask is used in the cross-attention if the model is configured as a decoder.
            Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with
            each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
    """
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    if self.config.is_decoder:
        use_cache = use_cache if use_cache is not None else self.config.use_cache
    else:
        use_cache = False

    if input_ids is not None and inputs_embeds is not None:
        raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
    if input_ids is not None:
        self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
        input_shape = input_ids.shape
    elif inputs_embeds is not None:
        input_shape = inputs_embeds.shape[:-1]
    else:
        raise ValueError("You have to specify either input_ids or inputs_embeds")

    batch_size, seq_length = input_shape

    # past_key_values_length
    past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

    if attention_mask is None:
        attention_mask = ops.ones(batch_size, seq_length + past_key_values_length)

    if token_type_ids is None:
        if hasattr(self.embeddings, "token_type_ids"):
            buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
            buffered_token_type_ids_expanded = buffered_token_type_ids.broadcast_to((batch_size, seq_length))
            token_type_ids = buffered_token_type_ids_expanded
        else:
            token_type_ids = ops.zeros(*input_shape, dtype=mindspore.int64)
    # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
    # ourselves in which case we just need to make it broadcastable to all heads.
    extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)

    # If a 2D or 3D attention mask is provided for the cross-attention
    # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
    if self.config.is_decoder and encoder_hidden_states is not None:
        encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.shape
        encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
        if encoder_attention_mask is None:
            encoder_attention_mask = ops.ones(*encoder_hidden_shape)
        encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
    else:
        encoder_extended_attention_mask = None

    # Prepare head mask if needed
    # 1.0 in head_mask indicate we keep the head
    # attention_probs has shape bsz x n_heads x N x N
    # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
    # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
    head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

    embedding_output = self.embeddings(
        input_ids=input_ids,
        position_ids=position_ids,
        token_type_ids=token_type_ids,
        inputs_embeds=inputs_embeds,
        past_key_values_length=past_key_values_length,
    )

    encoder_outputs = self.encoder(
        embedding_output,
        attention_mask=extended_attention_mask,
        head_mask=head_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_extended_attention_mask,
        past_key_values=past_key_values,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    sequence_output = encoder_outputs[0]
    pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

    if not return_dict:
        return (sequence_output, pooled_output) + encoder_outputs[1:]

    return BaseModelOutputWithPoolingAndCrossAttentions(
        last_hidden_state=sequence_output,
        pooler_output=pooled_output,
        past_key_values=encoder_outputs.past_key_values,
        hidden_states=encoder_outputs.hidden_states,
        attentions=encoder_outputs.attentions,
        cross_attentions=encoder_outputs.cross_attentions,
    )
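As a rough end-to-end sketch of calling forward (token ids and shapes are illustrative, not taken from the library):

```python
import mindspore
from mindnlp.transformers.models.bert.configuration_bert import BertConfig
from mindnlp.transformers.models.bert.modeling_bert import BertDualModel

config = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=4,
                    intermediate_size=256)
model = BertDualModel(config)

# Two sequences of five token ids; the last position of the first sequence is padding.
input_ids = mindspore.Tensor([[101, 7592, 2088, 102, 0],
                              [101, 2293, 2017, 999, 102]], mindspore.int64)
attention_mask = mindspore.Tensor([[1, 1, 1, 1, 0],
                                   [1, 1, 1, 1, 1]], mindspore.int64)

outputs = model(input_ids=input_ids, attention_mask=attention_mask, return_dict=True)
print(outputs.last_hidden_state.shape)  # expected: (2, 5, 128)
print(outputs.pooler_output.shape)      # expected: (2, 128)
```

With return_dict=False, the same call returns the tuple (sequence_output, pooled_output) plus any extra encoder outputs, as shown in the source above.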

mindnlp.transformers.models.bert.modeling_bert.BertDualModel.get_input_embeddings()

This method retrieves the input embeddings from the BertDualModel instance.

PARAMETER DESCRIPTION
self

The instance of the BertDualModel class.

RETURNS DESCRIPTION

nn.Embedding

This method retrieves the input embeddings, represented by the 'word_embeddings' attribute of the BertDualModel instance. The embeddings are used to encode the input data into numerical representations suitable for processing by the model.

Note that this method does not modify any attributes or perform any calculations. It simply returns the existing input embeddings.

Example
>>> model = BertDualModel(BertConfig())
>>> embeddings = model.get_input_embeddings()
Source code in mindnlp/transformers/models/bert/modeling_bert.py
def get_input_embeddings(self):
    """
    This method retrieves the input embeddings from the BertDualModel instance.

    Args:
        self: The instance of the BertDualModel class.

    Returns:
        nn.Embedding: The 'word_embeddings' module holding the model's input embeddings.

    Raises:
        None.

    This method retrieves the input embeddings, represented by the 'word_embeddings' attribute of the BertDualModel instance.
    The embeddings are used to encode the input data into numerical representations suitable for processing by the model.

    Note that this method does not modify any attributes or perform any calculations. It simply returns the existing input embeddings.

    Example:
        ```python
        >>> model = BertDualModel(BertConfig())
        >>> embeddings = model.get_input_embeddings()
        ```
    """
    return self.embeddings.word_embeddings

mindnlp.transformers.models.bert.modeling_bert.BertDualModel.set_input_embeddings(value)

Sets the input embeddings for the BertDualModel.

PARAMETER DESCRIPTION
self

The instance of the BertDualModel class.

TYPE: BertDualModel

value

The input embeddings to be set for the model. Should be of type WordEmbeddings.

RETURNS DESCRIPTION
None

This method updates the input embeddings for the BertDualModel in-place.

RAISES DESCRIPTION
TypeError

If the provided 'value' is not of type WordEmbeddings.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def set_input_embeddings(self, value):
    """
    Sets the input embeddings for the BertDualModel.

    Args:
        self (BertDualModel): The instance of the BertDualModel class.
        value: The input embeddings to be set for the model. Should be of type WordEmbeddings.

    Returns:
        None: This method updates the input embeddings for the BertDualModel in-place.

    Raises:
        TypeError: If the provided 'value' is not of type WordEmbeddings.
    """
    self.embeddings.word_embeddings = value
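For illustration only (a hypothetical round trip, not library code), the setter is the counterpart of get_input_embeddings; in practice the replacement should be an embedding module of the same kind with the desired vocabulary size:

```python
from mindnlp.transformers.models.bert.configuration_bert import BertConfig
from mindnlp.transformers.models.bert.modeling_bert import BertDualModel

config = BertConfig()
model = BertDualModel(config)

emb = model.get_input_embeddings()   # the current word-embedding module
model.set_input_embeddings(emb)      # no-op round trip; a resized embedding layer would go here
assert model.get_input_embeddings() is emb
```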

mindnlp.transformers.models.bert.modeling_bert.BertDualOutput

Bases: Module

The 'BertDualOutput' class represents a custom neural network layer for processing dual outputs in a BERT model. This class inherits functionality from nn.Module and implements methods for initialization and processing of hidden states.

ATTRIBUTE DESCRIPTION
intermediate_size

The size of the intermediate layer in the network.

TYPE: int

dense

A dense layer for processing the intermediate hidden states.

TYPE: Dense

LayerNorm

A layer normalization module for normalizing hidden states.

TYPE: LayerNorm

dropout

A dropout layer for regularization during training.

TYPE: Dropout

METHOD DESCRIPTION
__init__

Initializes the BertDualOutput instance with the provided configuration.

forward

Processes the hidden states and input tensor to produce the final output.

The '__init__' method initializes the instance by setting the intermediate_size, dense layer, LayerNorm module, and dropout layer based on the provided configuration. The 'forward' method processes the hidden states by splitting them, applying transformations, and combining the outputs to produce the final hidden states.

This class is designed to be used as a component in BERT models for handling dual outputs efficiently.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
class BertDualOutput(nn.Module):

    """
    The 'BertDualOutput' class represents a custom neural network layer for processing dual outputs in a BERT model.
    This class inherits functionality from nn.Module and implements methods for initialization and processing of hidden states.

    Attributes:
        intermediate_size (int): The size of the intermediate layer in the network.
        dense (Dense): A dense layer for processing the intermediate hidden states.
        LayerNorm (nn.LayerNorm): A layer normalization module for normalizing hidden states.
        dropout (nn.Dropout): A dropout layer for regularization during training.

    Methods:
        __init__(self, config): Initializes the BertDualOutput instance with the provided configuration.
        forward(self, hidden_states, input_tensor): Processes the hidden states and input tensor to produce the final output.

    The '__init__' method initializes the instance by setting the intermediate_size, dense layer,
    LayerNorm module, and dropout layer based on the provided configuration.
    The 'forward' method processes the hidden states by splitting them, applying transformations,
    and combining the outputs to produce the final hidden states.

    This class is designed to be used as a component in BERT models for handling dual outputs efficiently.
    """
    def __init__(self, config):
        """
        Initializes an instance of the BertDualOutput class.

        Args:
            self: The instance of the BertDualOutput class.
            config:
                A configuration object containing the following attributes:

                - intermediate_size (int): The size of the intermediate layer.
                - hidden_size (int): The size of the hidden layer.
                - layer_norm_eps (float): The epsilon value for layer normalization.
                - hidden_dropout_prob (float): The dropout probability for the hidden layer.

        Returns:
            None.

        Raises:
            TypeError: If the config parameter is not provided or is not of the expected type.
            ValueError: If the config parameter does not contain the required attributes
                or if their values are not within the expected range.
            AttributeError: If the config parameter does not have the necessary attributes.
        """
        super().__init__()
        self.intermediate_size = config.intermediate_size
        self.dense = Dense(config.intermediate_size//2, config.hidden_size//2)
        self.LayerNorm = nn.LayerNorm((config.hidden_size,), eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """
        This method 'forward' is a member of the class 'BertDualOutput' and is used to process hidden states
        and input tensors in a specific manner.

        Args:
            self: The instance of the class.
            hidden_states (tensor): The hidden states to be processed.
                It is expected to be a tensor with shape (batch_size, sequence_length, hidden_size).
            input_tensor (tensor): The input tensor to be added to the processed hidden states.
                It is expected to be a tensor with the same shape as hidden_states.

        Returns:
            mindspore.Tensor: The transformed hidden states after the dual dense projection, dropout,
                residual addition with input_tensor, and layer normalization.

        Raises:
            None: This method does not raise any exceptions explicitly.
        """
        hidden_states_r = hidden_states[:,:,:self.intermediate_size//2]
        hidden_states_d = hidden_states[:,:,self.intermediate_size//2:]
        hidden_states = to_2channel(hidden_states_r, hidden_states_d)
        hidden_states = self.dense(hidden_states)
        hidden_states_r, hidden_states_d = get_x_and_y(hidden_states)
        hidden_states = ops.cat([hidden_states_r, hidden_states_d], -1)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
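The following shape-only sketch mimics the split-and-recombine pattern used by forward above. It reproduces only the halving and concatenation with plain mindspore ops; to_2channel, Dense and get_x_and_y are project-internal helpers whose exact layout is not reproduced here, so the projected halves are stand-ins.

```python
import mindspore
from mindspore import ops

intermediate_size, hidden_size = 3072, 768  # BERT-base defaults, used here only for illustration
hidden_states = ops.ones((2, 4, intermediate_size), mindspore.float32)  # intermediate-layer output
input_tensor = ops.ones((2, 4, hidden_size), mindspore.float32)         # residual branch

# Split the last axis into the two halves, exactly as forward() does
half = intermediate_size // 2
hidden_states_r = hidden_states[:, :, :half]  # (2, 4, 1536)
hidden_states_d = hidden_states[:, :, half:]  # (2, 4, 1536)

# Stand-ins for the halves after the dual Dense projects each one to hidden_size // 2
projected_r = ops.ones((2, 4, hidden_size // 2), mindspore.float32)
projected_d = ops.ones((2, 4, hidden_size // 2), mindspore.float32)

combined = ops.cat([projected_r, projected_d], axis=-1)  # (2, 4, 768)
print(combined.shape == input_tensor.shape)              # True: ready for dropout, residual add, LayerNorm
```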

mindnlp.transformers.models.bert.modeling_bert.BertDualOutput.__init__(config)

Initializes an instance of the BertDualOutput class.

PARAMETER DESCRIPTION
self

The instance of the BertDualOutput class.

config

A configuration object containing the following attributes:

  • intermediate_size (int): The size of the intermediate layer.
  • hidden_size (int): The size of the hidden layer.
  • layer_norm_eps (float): The epsilon value for layer normalization.
  • hidden_dropout_prob (float): The dropout probability for the hidden layer.

RETURNS DESCRIPTION

None.

RAISES DESCRIPTION
TypeError

If the config parameter is not provided or is not of the expected type.

ValueError

If the config parameter does not contain the required attributes or if their values are not within the expected range.

AttributeError

If the config parameter does not have the necessary attributes.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def __init__(self, config):
    """
    Initializes an instance of the BertDualOutput class.

    Args:
        self: The instance of the BertDualOutput class.
        config:
            A configuration object containing the following attributes:

            - intermediate_size (int): The size of the intermediate layer.
            - hidden_size (int): The size of the hidden layer.
            - layer_norm_eps (float): The epsilon value for layer normalization.
            - hidden_dropout_prob (float): The dropout probability for the hidden layer.

    Returns:
        None.

    Raises:
        TypeError: If the config parameter is not provided or is not of the expected type.
        ValueError: If the config parameter does not contain the required attributes
            or if their values are not within the expected range.
        AttributeError: If the config parameter does not have the necessary attributes.
    """
    super().__init__()
    self.intermediate_size = config.intermediate_size
    self.dense = Dense(config.intermediate_size//2, config.hidden_size//2)
    self.LayerNorm = nn.LayerNorm((config.hidden_size,), eps=config.layer_norm_eps)
    self.dropout = nn.Dropout(config.hidden_dropout_prob)

mindnlp.transformers.models.bert.modeling_bert.BertDualOutput.forward(hidden_states, input_tensor)

This method 'forward' is a member of the class 'BertDualOutput' and is used to process hidden states and input tensors in a specific manner.

PARAMETER DESCRIPTION
self

The instance of the class.

hidden_states

The hidden states to be processed. It is expected to be a tensor with shape (batch_size, sequence_length, hidden_size).

TYPE: tensor

input_tensor

The input tensor to be added to the processed hidden states. It is expected to be a tensor with the same shape as hidden_states.

TYPE: tensor

RETURNS DESCRIPTION
mindspore.Tensor

The transformed hidden states after the dual dense projection, dropout, residual addition with input_tensor, and layer normalization.

RAISES DESCRIPTION
None

This method does not raise any exceptions explicitly.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def forward(self, hidden_states, input_tensor):
    """
    This method 'forward' is a member of the class 'BertDualOutput' and is used to process hidden states
    and input tensors in a specific manner.

    Args:
        self: The instance of the class.
        hidden_states (tensor): The hidden states to be processed.
            It is expected to be a tensor with shape (batch_size, sequence_length, hidden_size).
        input_tensor (tensor): The input tensor to be added to the processed hidden states.
            It is expected to be a tensor with the same shape as hidden_states.

    Returns:
        mindspore.Tensor: The transformed hidden states after the dual dense projection, dropout,
            residual addition with input_tensor, and layer normalization.

    Raises:
        None: This method does not raise any exceptions explicitly.
    """
    hidden_states_r = hidden_states[:,:,:self.intermediate_size//2]
    hidden_states_d = hidden_states[:,:,self.intermediate_size//2:]
    hidden_states = to_2channel(hidden_states_r, hidden_states_d)
    hidden_states = self.dense(hidden_states)
    hidden_states_r, hidden_states_d = get_x_and_y(hidden_states)
    hidden_states = ops.cat([hidden_states_r, hidden_states_d], -1)
    hidden_states = self.dropout(hidden_states)
    hidden_states = self.LayerNorm(hidden_states + input_tensor)
    return hidden_states

mindnlp.transformers.models.bert.modeling_bert.BertDualSelfAttention

Bases: Module

The BertDualSelfAttention class represents the dual self-attention mechanism used in the BERT model. This class implements the mechanism for both real and imaginary parts of the self-attention mechanism. It inherits from the nn.Module class and provides methods for attention score computation and context layer generation.

ATTRIBUTE DESCRIPTION
config

A configuration object containing the model's hyperparameters.

output_attentions

A boolean indicating whether to output attention scores.

num_attention_heads

An integer representing the number of attention heads.

attention_head_size

An integer representing the size of each attention head.

all_head_size

An integer representing the total size of all attention heads combined.

query

A Dense layer for computing queries for the attention mechanism.

key

A Dense layer for computing keys for the attention mechanism.

value

A Dense layer for computing values for the attention mechanism.

dropout

A dropout layer for performing dropout on the attention scores.

position_embedding_type

A string representing the type of position embedding used.

METHOD DESCRIPTION
transpose_for_scores

Transposes the input tensor for computing attention scores.

forward

Constructs the dual self-attention mechanism using the provided input tensors.

Note

The forward method raises a NotImplementedError for cross-attention and past_key_value arguments, as these functionalities are not implemented yet.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
class BertDualSelfAttention(nn.Module):

    """
    The BertDualSelfAttention class represents the dual self-attention mechanism used in the BERT model.
    This class implements the mechanism for both real and imaginary parts of the self-attention mechanism.
    It inherits from the nn.Module class and provides methods for attention score computation and context layer generation.

    Attributes:
        config: A configuration object containing the model's hyperparameters.
        output_attentions: A boolean indicating whether to output attention scores.
        num_attention_heads: An integer representing the number of attention heads.
        attention_head_size: An integer representing the size of each attention head.
        all_head_size: An integer representing the total size of all attention heads combined.
        query: A Dense layer for computing queries for the attention mechanism.
        key: A Dense layer for computing keys for the attention mechanism.
        value: A Dense layer for computing values for the attention mechanism.
        dropout: A dropout layer for performing dropout on the attention scores.
        position_embedding_type: A string representing the type of position embedding used.

    Methods:
        transpose_for_scores(input_x): Transposes the input tensor for computing attention scores.
        forward(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions):
            Constructs the dual self-attention mechanism using the provided input tensors.

    Note:
        The forward method raises a NotImplementedError for cross-attention and past_key_value arguments,
        as these functionalities are not implemented yet.
    """
    def __init__(self, config, position_embedding_type=None):
        """
        Initializes an instance of the BertDualSelfAttention class.

        Args:
            self: The instance of the class.
            config (object): An object of the configuration class containing the model's configuration parameters.
            position_embedding_type (str, optional): The type of position embedding. Defaults to None.

        Returns:
            None

        Raises:
            ValueError: If the hidden size is not a multiple of the number of attention heads.

        """
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.config = config
        self.output_attentions = config.output_attentions
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = Dense(config.hidden_size//2, self.all_head_size//2)
        self.key = Dense(config.hidden_size//2, self.all_head_size//2)
        self.value = Dense(config.hidden_size//2, self.all_head_size//2)

        self.dropout = nn.Dropout(p=config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )

    def transpose_for_scores(self, input_x):
        r"""
        transpose for scores
        """
        new_x_shape = input_x.shape[:-1] + (self.num_attention_heads, self.attention_head_size //2)
        input_x = input_x.view(*new_x_shape)
        return input_x.transpose(0, 1, 3, 2, 4)

    def forward(
        self,
        hidden_states: mindspore.Tensor,
        attention_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        encoder_hidden_states: Optional[mindspore.Tensor] = None,
        encoder_attention_mask: Optional[mindspore.Tensor] = None,
        past_key_value: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
        output_attentions: Optional[bool] = False,
    ):
        """
        This method 'forward' in the class 'BertDualSelfAttention' implements the dual self-attention mechanism
        for the BERT model.

        Args:
            self: The instance of the class.
            hidden_states (mindspore.Tensor):
                The input hidden states tensor with shape (batch_size, sequence_length, hidden_size).
            attention_mask (Optional[mindspore.Tensor]):
                An optional tensor with shape (batch_size, sequence_length) containing values of 0 or 1 to mask
                the attention scores for padded tokens.
            head_mask (Optional[mindspore.Tensor]): An optional tensor to mask the attention scores of specific heads.
            encoder_hidden_states (Optional[mindspore.Tensor]):
                An optional tensor containing the hidden states of the encoder if performing cross-attention.
            encoder_attention_mask (Optional[mindspore.Tensor]):
                An optional tensor to mask the attention scores for cross-attention.
            past_key_value (Optional[Tuple[Tuple[mindspore.Tensor]]]):
                An optional tuple containing the past key and value tensors for incremental decoding.
            output_attentions (Optional[bool]):
                An optional boolean flag indicating whether to output the attention scores.

        Returns:
            Tuple[mindspore.Tensor, Optional[mindspore.Tensor]]:
                A tuple containing the context layer tensor with shape (batch_size, sequence_length, hidden_size)
                and optionally the attention scores tensor with shape
                (batch_size, num_attention_heads, sequence_length, sequence_length).

        Raises:
            NotImplementedError: If the functionality for cross-attention or incremental decoding is not implemented.
        """
        hidden_states_r = hidden_states[:,:,:self.config.hidden_size//2]
        hidden_states_d = hidden_states[:,:,self.config.hidden_size//2:]

        new_hidden_states = to_2channel(hidden_states_r, hidden_states_d)
        mixed_query_layer = self.query(new_hidden_states)

        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention or past_key_value is not None:
            raise NotImplementedError("This functionality is not implemented.")

        mixed_key_layer = self.key(new_hidden_states)

        key_layer = self.transpose_for_scores(mixed_key_layer)

        mixed_value_layer = self.value(new_hidden_states)

        value_layer = self.transpose_for_scores(mixed_value_layer)

        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = matmul(query_layer, key_layer.swapaxes(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
            attention_scores = attention_scores + attention_mask

        attention_scores_r, attention_scores_i = get_x_and_y(attention_scores)
        attention_scores_r = ops.softmax(attention_scores_r, dim=-1)
        attention_scores_i = ops.softmax(attention_scores_i, dim=-1)

        p_attn = to_2channel(attention_scores_r, attention_scores_i)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        if self.dropout is not None:
            p_attn = self.dropout(p_attn)

        context_layer = matmul(p_attn, value_layer)

        context_layer_r, context_layer_i = get_x_and_y(context_layer)
        context_layer = ops.cat([context_layer_r, context_layer_i], -1)

        context_layer = context_layer.transpose(0, 2, 1, 3)
        new_context_layer_shape = context_layer.shape[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        outputs = (context_layer, p_attn) if output_attentions else (context_layer,)

        return outputs
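Per channel, the computation above follows standard scaled dot-product attention. The sketch below shows that core step with plain mindspore ops and illustrative shapes, independent of the dual-channel packing done by to_2channel/get_x_and_y:

```python
import math
import mindspore
from mindspore import ops

batch, heads, seq, head_dim = 2, 4, 5, 16  # illustrative sizes only
query = ops.ones((batch, heads, seq, head_dim), mindspore.float32)
key = ops.ones((batch, heads, seq, head_dim), mindspore.float32)
value = ops.ones((batch, heads, seq, head_dim), mindspore.float32)

scores = ops.matmul(query, key.swapaxes(-1, -2)) / math.sqrt(head_dim)  # (batch, heads, seq, seq)
probs = ops.softmax(scores, axis=-1)   # normalize over key positions (done per channel in the dual layer)
context = ops.matmul(probs, value)     # (batch, heads, seq, head_dim)
print(context.shape)
```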

mindnlp.transformers.models.bert.modeling_bert.BertDualSelfAttention.__init__(config, position_embedding_type=None)

Initializes an instance of the BertDualSelfAttention class.

PARAMETER DESCRIPTION
self

The instance of the class.

config

An object of the configuration class containing the model's configuration parameters.

TYPE: object

position_embedding_type

The type of position embedding. Defaults to None.

TYPE: str DEFAULT: None

RETURNS DESCRIPTION

None

RAISES DESCRIPTION
ValueError

If the hidden size is not a multiple of the number of attention heads.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def __init__(self, config, position_embedding_type=None):
    """
    Initializes an instance of the BertDualSelfAttention class.

    Args:
        self: The instance of the class.
        config (object): An object of the configuration class containing the model's configuration parameters.
        position_embedding_type (str, optional): The type of position embedding. Defaults to None.

    Returns:
        None

    Raises:
        ValueError: If the hidden size is not a multiple of the number of attention heads.

    """
    super().__init__()
    if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
        raise ValueError(
            f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
            f"heads ({config.num_attention_heads})"
        )
    self.config = config
    self.output_attentions = config.output_attentions
    self.num_attention_heads = config.num_attention_heads
    self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
    self.all_head_size = self.num_attention_heads * self.attention_head_size

    self.query = Dense(config.hidden_size//2, self.all_head_size//2)
    self.key = Dense(config.hidden_size//2, self.all_head_size//2)
    self.value = Dense(config.hidden_size//2, self.all_head_size//2)

    self.dropout = nn.Dropout(p=config.attention_probs_dropout_prob)
    self.position_embedding_type = position_embedding_type or getattr(
        config, "position_embedding_type", "absolute"
    )

mindnlp.transformers.models.bert.modeling_bert.BertDualSelfAttention.forward(hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False)

This method 'forward' in the class 'BertDualSelfAttention' implements the dual self-attention mechanism for the BERT model.

PARAMETER DESCRIPTION
self

The instance of the class.

hidden_states

The input hidden states tensor with shape (batch_size, sequence_length, hidden_size).

TYPE: Tensor

attention_mask

An optional tensor with shape (batch_size, sequence_length) containing values of 0 or 1 to mask the attention scores for padded tokens.

TYPE: Optional[Tensor] DEFAULT: None

head_mask

An optional tensor to mask the attention scores of specific heads.

TYPE: Optional[Tensor] DEFAULT: None

encoder_hidden_states

An optional tensor containing the hidden states of the encoder if performing cross-attention.

TYPE: Optional[Tensor] DEFAULT: None

encoder_attention_mask

An optional tensor to mask the attention scores for cross-attention.

TYPE: Optional[Tensor] DEFAULT: None

past_key_value

An optional tuple containing the past key and value tensors for incremental decoding.

TYPE: Optional[Tuple[Tuple[Tensor]]] DEFAULT: None

output_attentions

An optional boolean flag indicating whether to output the attention scores.

TYPE: Optional[bool] DEFAULT: False

RETURNS DESCRIPTION

Tuple[mindspore.Tensor, Optional[mindspore.Tensor]]: A tuple containing the context layer tensor with shape (batch_size, sequence_length, hidden_size) and optionally the attention scores tensor with shape (batch_size, num_attention_heads, sequence_length, sequence_length).

RAISES DESCRIPTION
NotImplementedError

If the functionality for cross-attention or incremental decoding is not implemented.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def forward(
    self,
    hidden_states: mindspore.Tensor,
    attention_mask: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    encoder_hidden_states: Optional[mindspore.Tensor] = None,
    encoder_attention_mask: Optional[mindspore.Tensor] = None,
    past_key_value: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
    output_attentions: Optional[bool] = False,
):
    """
    This method 'forward' in the class 'BertDualSelfAttention' implements the dual self-attention mechanism
    for the BERT model.

    Args:
        self: The instance of the class.
        hidden_states (mindspore.Tensor):
            The input hidden states tensor with shape (batch_size, sequence_length, hidden_size).
        attention_mask (Optional[mindspore.Tensor]):
            An optional tensor with shape (batch_size, sequence_length) containing values of 0 or 1 to mask
            the attention scores for padded tokens.
        head_mask (Optional[mindspore.Tensor]): An optional tensor to mask the attention scores of specific heads.
        encoder_hidden_states (Optional[mindspore.Tensor]):
            An optional tensor containing the hidden states of the encoder if performing cross-attention.
        encoder_attention_mask (Optional[mindspore.Tensor]):
            An optional tensor to mask the attention scores for cross-attention.
        past_key_value (Optional[Tuple[Tuple[mindspore.Tensor]]]):
            An optional tuple containing the past key and value tensors for incremental decoding.
        output_attentions (Optional[bool]):
            An optional boolean flag indicating whether to output the attention scores.

    Returns:
        Tuple[mindspore.Tensor, Optional[mindspore.Tensor]]:
            A tuple containing the context layer tensor with shape (batch_size, sequence_length, hidden_size)
            and optionally the attention scores tensor with shape
            (batch_size, num_attention_heads, sequence_length, sequence_length).

    Raises:
        NotImplementedError: If the functionality for cross-attention or incremental decoding is not implemented.
    """
    hidden_states_r = hidden_states[:,:,:self.config.hidden_size//2]
    hidden_states_d = hidden_states[:,:,self.config.hidden_size//2:]

    new_hidden_states = to_2channel(hidden_states_r, hidden_states_d)
    mixed_query_layer = self.query(new_hidden_states)

    is_cross_attention = encoder_hidden_states is not None

    if is_cross_attention or past_key_value is not None:
        raise NotImplementedError("This functionality is not implemented.")

    mixed_key_layer = self.key(new_hidden_states)

    key_layer = self.transpose_for_scores(mixed_key_layer)

    mixed_value_layer = self.value(new_hidden_states)

    value_layer = self.transpose_for_scores(mixed_value_layer)

    query_layer = self.transpose_for_scores(mixed_query_layer)

    # Take the dot product between "query" and "key" to get the raw attention scores.
    attention_scores = matmul(query_layer, key_layer.swapaxes(-1, -2))

    attention_scores = attention_scores / math.sqrt(self.attention_head_size)

    if attention_mask is not None:
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
        attention_scores = attention_scores + attention_mask

    attention_scores_r, attention_scores_i = get_x_and_y(attention_scores)
    attention_scores_r = ops.softmax(attention_scores_r, dim=-1)
    attention_scores_i = ops.softmax(attention_scores_i, dim=-1)

    p_attn = to_2channel(attention_scores_r, attention_scores_i)

    # This is actually dropping out entire tokens to attend to, which might
    # seem a bit unusual, but is taken from the original Transformer paper.
    if self.dropout is not None:
        p_attn = self.dropout(p_attn)

    context_layer = matmul(p_attn, value_layer)

    context_layer_r, context_layer_i = get_x_and_y(context_layer)
    context_layer = ops.cat([context_layer_r, context_layer_i], -1)

    context_layer = context_layer.transpose(0, 2, 1, 3)
    new_context_layer_shape = context_layer.shape[:-2] + (self.all_head_size,)
    context_layer = context_layer.view(new_context_layer_shape)
    outputs = (context_layer, p_attn) if output_attentions else (context_layer,)

    return outputs

mindnlp.transformers.models.bert.modeling_bert.BertDualSelfAttention.transpose_for_scores(input_x)

transpose for scores

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def transpose_for_scores(self, input_x):
    r"""
    transpose for scores
    """
    new_x_shape = input_x.shape[:-1] + (self.num_attention_heads, self.attention_head_size //2)
    input_x = input_x.view(*new_x_shape)
    return input_x.transpose(0, 1, 3, 2, 4)
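A shape walk-through of transpose_for_scores with hypothetical sizes; it assumes the 2-channel packing adds an axis of size 2 right after the batch axis, which is inferred from the transpose pattern rather than stated in the source:

```python
import mindspore
from mindspore import ops

batch, seq, heads, head_dim = 2, 5, 4, 32  # illustrative; head_dim // 2 == 16
x = ops.ones((batch, 2, seq, heads * head_dim // 2), mindspore.float32)  # assumed 2-channel layout

new_shape = x.shape[:-1] + (heads, head_dim // 2)  # (2, 2, 5, 4, 16)
x = x.view(*new_shape)
x = x.transpose(0, 1, 3, 2, 4)                     # (2, 2, 4, 5, 16): heads moved ahead of seq
print(x.shape)
```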

mindnlp.transformers.models.bert.modeling_bert.BertDualSelfOutput

Bases: Module

The 'BertDualSelfOutput' class represents a module that performs dual self-attention mechanism for BERT. It inherits from nn.Module and contains methods for initializing the module and forwarding the dual self-attention mechanism.

ATTRIBUTE DESCRIPTION
hidden_size

The size of the hidden states.

TYPE: int

dense

The dense layer for the dual self-attention mechanism.

TYPE: Dense

LayerNorm

The layer normalization for the dual self-attention mechanism.

TYPE: LayerNorm

dropout

The dropout layer for the dual self-attention mechanism.

TYPE: Dropout

METHOD DESCRIPTION
__init__

Initializes the 'BertDualSelfOutput' module with the provided configuration.

forward

Constructs the dual self-attention mechanism using the provided hidden states and input tensor.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
class BertDualSelfOutput(nn.Module):

    """
    The 'BertDualSelfOutput' class represents a module that performs dual self-attention mechanism for BERT.
    It inherits from nn.Module and contains methods for initializing the module and forwarding
    the dual self-attention mechanism.

    Attributes:
        hidden_size (int): The size of the hidden states.
        dense (Dense): The dense layer for the dual self-attention mechanism.
        LayerNorm (LayerNorm): The layer normalization for the dual self-attention mechanism.
        dropout (Dropout): The dropout layer for the dual self-attention mechanism.

    Methods:
        __init__(config): Initializes the 'BertDualSelfOutput' module with the provided configuration.
        forward(hidden_states, input_tensor):
            Constructs the dual self-attention mechanism using the provided hidden states and input tensor.
    """
    def __init__(self, config):
        """
        Initializes an instance of the BertDualSelfOutput class.

        Args:
            self (BertDualSelfOutput): An instance of the BertDualSelfOutput class.
            config: A configuration object containing the parameters for the model.

        Returns:
            None

        Raises:
            None
        """
        super().__init__()
        self.hidden_size = config.hidden_size
        self.dense = Dense(config.hidden_size//2, config.hidden_size//2)
        self.LayerNorm  = nn.LayerNorm((config.hidden_size,), eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """
        Method 'forward' in the class 'BertDualSelfOutput'.

        This method forwards the hidden states by processing the input hidden states and input tensor.

        Args:
            self: Instance of the class BertDualSelfOutput. It represents the current instance of the class.
            hidden_states: Tensor of shape (batch_size, sequence_length, hidden_size).
                The input hidden states to be processed.
            input_tensor: Tensor of shape (batch_size, sequence_length, hidden_size).
                The input tensor to be added to the processed hidden states.

        Returns:
            mindspore.Tensor: The hidden states after the dual dense projection, dropout,
                residual addition with input_tensor, and layer normalization.

        Raises:
            None.
        """
        hidden_states_r = hidden_states[:,:,:self.hidden_size//2]
        hidden_states_d = hidden_states[:,:,self.hidden_size//2:]

        hidden_states = to_2channel(hidden_states_r, hidden_states_d)
        hidden_states = self.dense(hidden_states)
        hidden_states_r, hidden_states_d = get_x_and_y(hidden_states)
        hidden_states = ops.cat([hidden_states_r, hidden_states_d], -1)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

mindnlp.transformers.models.bert.modeling_bert.BertDualSelfOutput.__init__(config)

Initializes an instance of the BertDualSelfOutput class.

PARAMETER DESCRIPTION
self

An instance of the BertDualSelfOutput class.

TYPE: BertDualSelfOutput

config

A configuration object containing the parameters for the model.

RETURNS DESCRIPTION

None

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def __init__(self, config):
    """
    Initializes an instance of the BertDualSelfOutput class.

    Args:
        self (BertDualSelfOutput): An instance of the BertDualSelfOutput class.
        config: A configuration object containing the parameters for the model.

    Returns:
        None

    Raises:
        None
    """
    super().__init__()
    self.hidden_size = config.hidden_size
    self.dense = Dense(config.hidden_size//2, config.hidden_size//2)
    self.LayerNorm  = nn.LayerNorm((config.hidden_size,), eps=config.layer_norm_eps)
    self.dropout = nn.Dropout(config.hidden_dropout_prob)

mindnlp.transformers.models.bert.modeling_bert.BertDualSelfOutput.forward(hidden_states, input_tensor)

Method 'forward' in the class 'BertDualSelfOutput'.

This method forwards the hidden states by processing the input hidden states and input tensor.

PARAMETER DESCRIPTION
self

Instance of the class BertDualSelfOutput. It represents the current instance of the class.

hidden_states

Tensor of shape (batch_size, sequence_length, hidden_size). The input hidden states to be processed.

input_tensor

Tensor of shape (batch_size, sequence_length, hidden_size). The input tensor to be added to the processed hidden states.

RETURNS DESCRIPTION
mindspore.Tensor

The hidden states after the dual dense projection, dropout, residual addition with input_tensor, and layer normalization.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def forward(self, hidden_states, input_tensor):
    """
    Method 'forward' in the class 'BertDualSelfOutput'.

    This method forwards the hidden states by processing the input hidden states and input tensor.

    Args:
        self: Instance of the class BertDualSelfOutput. It represents the current instance of the class.
        hidden_states: Tensor of shape (batch_size, sequence_length, hidden_size).
            The input hidden states to be processed.
        input_tensor: Tensor of shape (batch_size, sequence_length, hidden_size).
            The input tensor to be added to the processed hidden states.

    Returns:
        mindspore.Tensor: The hidden states after the dual dense projection, dropout,
            residual addition with input_tensor, and layer normalization.

    Raises:
        None.
    """
    hidden_states_r = hidden_states[:,:,:self.hidden_size//2]
    hidden_states_d = hidden_states[:,:,self.hidden_size//2:]

    hidden_states = to_2channel(hidden_states_r, hidden_states_d)
    hidden_states = self.dense(hidden_states)
    hidden_states_r, hidden_states_d = get_x_and_y(hidden_states)
    hidden_states = ops.cat([hidden_states_r, hidden_states_d], -1)
    hidden_states = self.dropout(hidden_states)
    hidden_states = self.LayerNorm(hidden_states + input_tensor)
    return hidden_states

mindnlp.transformers.models.bert.modeling_bert.BertEmbeddings

Bases: Module

Embeddings for BERT, include word, position and token_type

Source code in mindnlp/transformers/models/bert/modeling_bert.py
class BertEmbeddings(nn.Module):
    """
    Embeddings for BERT, include word, position and token_type
    """
    def __init__(self, config):
        """
        This method initializes an instance of the BertEmbeddings class.

        Args:
            self: The instance of the BertEmbeddings class.
            config: An object containing configuration parameters for the embeddings.
                It should have the following attributes:

                - vocab_size (int): The size of the vocabulary.
                - hidden_size (int): The size of the hidden layer.
                - pad_token_id (int): The index of the padding token.
                - max_position_embeddings (int): The maximum number of positional embeddings.
                - type_vocab_size (int): The size of the token type vocabulary.
                - layer_norm_eps (float): The epsilon value for layer normalization.
                - hidden_dropout_prob (float): The dropout probability for the hidden layer.
                - position_embedding_type (str, optional): The type of positional embedding, defaults to 'absolute'.

        Returns:
            None.

        Raises:
            AttributeError: If the config object does not have the required attributes.
            ValueError: If the config attributes have invalid values or types.
            TypeError: If the config parameters are of incorrect types.
            RuntimeError: If there is an error during the initialization process.
        """
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        self.LayerNorm = nn.LayerNorm((config.hidden_size,), eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.position_ids = ops.arange(config.max_position_embeddings).reshape((1, -1))
        self.token_type_ids = ops.zeros(*self.position_ids.shape, dtype=mindspore.int64)

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        past_key_values_length: int = 0,
    ):
        """
        This method computes the embeddings for the input tokens in the BERT model.

        Args:
            self (BertEmbeddings): The instance of the BertEmbeddings class.
            input_ids (Optional[mindspore.Tensor]): The input token IDs. Default is None.
            token_type_ids (Optional[mindspore.Tensor]): The token type IDs for the input tokens. Default is None.
            position_ids (Optional[mindspore.Tensor]): The position IDs for the input tokens. Default is None.
            inputs_embeds (Optional[mindspore.Tensor]): The pre-computed input embeddings. Default is None.
            past_key_values_length (int): The length of past key values. Default is 0.

        Returns:
            mindspore.Tensor: The combined word, position and token type embeddings after LayerNorm and dropout.

        Raises:
            TypeError: If the input_ids, token_type_ids, position_ids, or inputs_embeds are not of type mindspore.Tensor.
            ValueError: If the input_shape is not valid or if there is an issue with the dimensions of the input tensors.
            RuntimeError: If there is a runtime issue while building the embeddings.
        """
        if input_ids is not None:
            input_shape = input_ids.shape
        else:
            input_shape = inputs_embeds.shape[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        # Set token_type_ids to the registered buffer (all zeros) created in the constructor, which usually
        # happens when it is auto-generated. The registered buffer helps users trace the model without
        # passing token_type_ids, and solves issue #5664.
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = ops.zeros(*input_shape, dtype=mindspore.int64)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
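A small usage sketch (arbitrary token ids, assuming the import paths documented on this page):

```python
import mindspore
from mindnlp.transformers.models.bert.configuration_bert import BertConfig
from mindnlp.transformers.models.bert.modeling_bert import BertEmbeddings

config = BertConfig()               # BERT-base defaults: hidden_size=768, vocab_size=30522
embeddings = BertEmbeddings(config)

input_ids = mindspore.Tensor([[101, 7592, 2088, 102]], mindspore.int64)  # one sequence, four tokens
out = embeddings(input_ids=input_ids)   # word + token_type (+ absolute position), then LayerNorm and dropout
print(out.shape)                        # expected: (1, 4, 768)
```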

mindnlp.transformers.models.bert.modeling_bert.BertEmbeddings.__init__(config)

This method initializes an instance of the BertEmbeddings class.

PARAMETER DESCRIPTION
self

The instance of the BertEmbeddings class.

config

An object containing configuration parameters for the embeddings. It should have the following attributes:

  • vocab_size (int): The size of the vocabulary.
  • hidden_size (int): The size of the hidden layer.
  • pad_token_id (int): The index of the padding token.
  • max_position_embeddings (int): The maximum number of positional embeddings.
  • type_vocab_size (int): The size of the token type vocabulary.
  • layer_norm_eps (float): The epsilon value for layer normalization.
  • hidden_dropout_prob (float): The dropout probability for the hidden layer.
  • position_embedding_type (str, optional): The type of positional embedding, defaults to 'absolute'.

RETURNS DESCRIPTION

None.

RAISES DESCRIPTION
AttributeError

If the config object does not have the required attributes.

ValueError

If the config attributes have invalid values or types.

TypeError

If the config parameters are of incorrect types.

RuntimeError

If there is an error during the initialization process.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def __init__(self, config):
    """
    This method initializes an instance of the BertEmbeddings class.

    Args:
        self: The instance of the BertEmbeddings class.
        config: An object containing configuration parameters for the embeddings.
            It should have the following attributes:

            - vocab_size (int): The size of the vocabulary.
            - hidden_size (int): The size of the hidden layer.
            - pad_token_id (int): The index of the padding token.
            - max_position_embeddings (int): The maximum number of positional embeddings.
            - type_vocab_size (int): The size of the token type vocabulary.
            - layer_norm_eps (float): The epsilon value for layer normalization.
            - hidden_dropout_prob (float): The dropout probability for the hidden layer.
            - position_embedding_type (str, optional): The type of positional embedding, defaults to 'absolute'.

    Returns:
        None.

    Raises:
        AttributeError: If the config object does not have the required attributes.
        ValueError: If the config attributes have invalid values or types.
        TypeError: If the config parameters are of incorrect types.
        RuntimeError: If there is an error during the initialization process.
    """
    super().__init__()
    self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
    self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
    self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

    self.LayerNorm = nn.LayerNorm((config.hidden_size,), eps=config.layer_norm_eps)
    self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
    # position_ids (1, len position emb) is contiguous in memory and exported when serialized
    self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
    self.position_ids = ops.arange(config.max_position_embeddings).reshape((1, -1))
    self.token_type_ids = ops.zeros(*self.position_ids.shape, dtype=mindspore.int64)
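
A minimal usage sketch (not part of the generated docs): instantiating BertEmbeddings from a small, illustrative BertConfig using the import paths shown on this page. All hyperparameter values below are placeholders.

from mindnlp.transformers.models.bert.configuration_bert import BertConfig
from mindnlp.transformers.models.bert.modeling_bert import BertEmbeddings

# Only the config attributes listed above are consumed by BertEmbeddings.
config = BertConfig(vocab_size=100, hidden_size=32, max_position_embeddings=64)
emb = BertEmbeddings(config)
print(emb.position_ids.shape)  # (1, 64): the buffer registered in __init__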

mindnlp.transformers.models.bert.modeling_bert.BertEmbeddings.forward(input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0)

This method constructs the embeddings for the input tokens in the BERT model.

PARAMETER DESCRIPTION
self

The instance of the BertEmbeddings class.

TYPE: BertEmbeddings

input_ids

The input token IDs. Default is None.

TYPE: Optional[Tensor] DEFAULT: None

token_type_ids

The token type IDs for the input tokens. Default is None.

TYPE: Optional[Tensor] DEFAULT: None

position_ids

The position IDs for the input tokens. Default is None.

TYPE: Optional[Tensor] DEFAULT: None

inputs_embeds

The pre-computed input embeddings. Default is None.

TYPE: Optional[Tensor] DEFAULT: None

past_key_values_length

The length of past key values. Default is 0.

TYPE: int DEFAULT: 0

RETURNS DESCRIPTION

mindspore.Tensor: the combined word, position, and token-type embeddings after layer normalization and dropout, of shape (batch_size, seq_length, hidden_size).

RAISES DESCRIPTION
TypeError

If the input_ids, token_type_ids, position_ids, or inputs_embeds are not of type mindspore.Tensor.

ValueError

If the input_shape is not valid or if there is an issue with the dimensions of the input tensors.

RuntimeError

If there is a runtime issue during the construction of the embeddings.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    position_ids: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    past_key_values_length: int = 0,
):
    """
    This method constructs the embeddings for the input tokens in the BERT model.

    Args:
        self (BertEmbeddings): The instance of the BertEmbeddings class.
        input_ids (Optional[mindspore.Tensor]): The input token IDs. Default is None.
        token_type_ids (Optional[mindspore.Tensor]): The token type IDs for the input tokens. Default is None.
        position_ids (Optional[mindspore.Tensor]): The position IDs for the input tokens. Default is None.
        inputs_embeds (Optional[mindspore.Tensor]): The pre-computed input embeddings. Default is None.
        past_key_values_length (int): The length of past key values. Default is 0.

    Returns:
        mindspore.Tensor: the final embeddings after LayerNorm and dropout, of shape (batch_size, seq_length, hidden_size).

    Raises:
        TypeError: If the input_ids, token_type_ids, position_ids, or inputs_embeds are not of type mindspore.Tensor.
        ValueError: If the input_shape is not valid or if there is an issue with the dimensions of the input tensors.
        RuntimeError: If there is a runtime issue during the construction of the embeddings.
    """
    if input_ids is not None:
        input_shape = input_ids.shape
    else:
        input_shape = inputs_embeds.shape[:-1]

    seq_length = input_shape[1]

    if position_ids is None:
        position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

    # Setting the token_type_ids to the registered buffer from the constructor, where it is all zeros. This usually
    # occurs when token_type_ids are auto-generated; the registered buffer lets users trace the model without
    # passing token_type_ids. Solves issue #5664.
    if token_type_ids is None:
        if hasattr(self, "token_type_ids"):
            buffered_token_type_ids = self.token_type_ids[:, :seq_length]
            buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
            token_type_ids = buffered_token_type_ids_expanded
        else:
            token_type_ids = ops.zeros(*input_shape, dtype=mindspore.int64)
    if inputs_embeds is None:
        inputs_embeds = self.word_embeddings(input_ids)
    token_type_embeddings = self.token_type_embeddings(token_type_ids)

    embeddings = inputs_embeds + token_type_embeddings
    if self.position_embedding_type == "absolute":
        position_embeddings = self.position_embeddings(position_ids)
        embeddings += position_embeddings
    embeddings = self.LayerNorm(embeddings)
    embeddings = self.dropout(embeddings)
    return embeddings
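
A short end-to-end sketch under the same illustrative config: word, position, and token-type embeddings are summed, layer-normalized, and passed through dropout. The token IDs here are arbitrary placeholders.

import numpy as np
import mindspore
from mindnlp.transformers.models.bert.configuration_bert import BertConfig
from mindnlp.transformers.models.bert.modeling_bert import BertEmbeddings

config = BertConfig(vocab_size=100, hidden_size=32)
emb = BertEmbeddings(config)
input_ids = mindspore.Tensor(np.array([[2, 7, 9, 3]]), mindspore.int64)
out = emb(input_ids)  # position and token-type IDs fall back to the registered buffers
print(out.shape)      # (1, 4, 32): (batch_size, seq_length, hidden_size)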

mindnlp.transformers.models.bert.modeling_bert.BertEncoder

Bases: Module

Bert Encoder

Source code in mindnlp/transformers/models/bert/modeling_bert.py
class BertEncoder(nn.Module):
    r"""
    Bert Encoder
    """
    def __init__(self, config):
        """
        BertEncoder.__init__

        Initializes a new BertEncoder object.

        Args:
            self (object): The instance of the BertEncoder class.
            config (object): The configuration object containing settings for the BertEncoder.
                This parameter is required to initialize the BertEncoder object.

                - It should be an instance of the configuration class containing the necessary settings.

                    - Example: config = BertConfig(num_hidden_layers=12, ...)

        Returns:
            None.

        Raises:
            None.
        """
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(
        self,
        hidden_states: mindspore.Tensor,
        attention_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        encoder_hidden_states: Optional[mindspore.Tensor] = None,
        encoder_attention_mask: Optional[mindspore.Tensor] = None,
        past_key_values: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ):
        """
        This method 'forward' is a part of the class 'BertEncoder' and is responsible for processing
        hidden states through the encoder layers.

        Args:
            self: The instance of the class.
            hidden_states (mindspore.Tensor): The input hidden states to be processed through the encoder layers.
            attention_mask (Optional[mindspore.Tensor]): Mask to avoid attention on padding tokens, defaults to None.
            head_mask (Optional[mindspore.Tensor]): Mask for attention heads in the encoder layers, defaults to None.
            encoder_hidden_states (Optional[mindspore.Tensor]): Hidden states of the encoder, defaults to None.
            encoder_attention_mask (Optional[mindspore.Tensor]): Mask to avoid attention on padding tokens in the encoder, defaults to None.
            past_key_values (Optional[Tuple[Tuple[mindspore.Tensor]]]): Past key values for caching, defaults to None.
            use_cache (Optional[bool]): Indicates whether to use cache for the next decoder step, defaults to None.
            output_attentions (Optional[bool]): Flag to output attention weights, defaults to False.
            output_hidden_states (Optional[bool]): Flag to output hidden states, defaults to False.
            return_dict (Optional[bool]): Flag to return the output as a dictionary, defaults to True.

        Returns:
            BaseModelOutputWithPastAndCrossAttentions if `return_dict` is True; otherwise a tuple of the
                non-None values among (last hidden state, cache, hidden states, self-attentions, cross-attentions).

        Raises:
            None: This method does not raise any exceptions explicitly.
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            layer_outputs = layer_module(
                hidden_states,
                attention_mask,
                layer_head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                past_key_value,
                output_attentions,
            )
            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )

mindnlp.transformers.models.bert.modeling_bert.BertEncoder.__init__(config)

BertEncoder.__init__

Initializes a new BertEncoder object.

PARAMETER DESCRIPTION
self

The instance of the BertEncoder class.

TYPE: object

config

The configuration object containing settings for the BertEncoder; it is required to initialize the BertEncoder object and should be an instance of the configuration class with the necessary settings, e.g. config = BertConfig(num_hidden_layers=12, ...).

TYPE: object

RETURNS DESCRIPTION

None.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def __init__(self, config):
    """
    BertEncoder.__init__

    Initializes a new BertEncoder object.

    Args:
        self (object): The instance of the BertEncoder class.
        config (object): The configuration object containing settings for the BertEncoder.
            This parameter is required to initialize the BertEncoder object.

            - It should be an instance of the configuration class containing the necessary settings.

                - Example: config = BertConfig(num_hidden_layers=12, ...)

    Returns:
        None.

    Raises:
        None.
    """
    super().__init__()
    self.config = config
    self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])

mindnlp.transformers.models.bert.modeling_bert.BertEncoder.forward(hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True)

This method 'forward' is a part of the class 'BertEncoder' and is responsible for processing hidden states through the encoder layers.

PARAMETER DESCRIPTION
self

The instance of the class.

hidden_states

The input hidden states to be processed through the encoder layers.

TYPE: Tensor

attention_mask

Mask to avoid attention on padding tokens, defaults to None.

TYPE: Optional[Tensor] DEFAULT: None

head_mask

Mask for attention heads in the encoder layers, defaults to None.

TYPE: Optional[Tensor] DEFAULT: None

encoder_hidden_states

Hidden states of the encoder, defaults to None.

TYPE: Optional[Tensor] DEFAULT: None

encoder_attention_mask

Mask to avoid attention on padding tokens in the encoder, defaults to None.

TYPE: Optional[Tensor] DEFAULT: None

past_key_values

Past key values for caching, defaults to None.

TYPE: Optional[Tuple[Tuple[Tensor]]] DEFAULT: None

use_cache

Indicates whether to use cache for the next decoder step, defaults to None.

TYPE: Optional[bool] DEFAULT: None

output_attentions

Flag to output attention weights, defaults to False.

TYPE: Optional[bool] DEFAULT: False

output_hidden_states

Flag to output hidden states, defaults to False.

TYPE: Optional[bool] DEFAULT: False

return_dict

Flag to return the output as a dictionary, defaults to True.

TYPE: Optional[bool] DEFAULT: True

RETURNS DESCRIPTION
BaseModelOutputWithPastAndCrossAttentions

If return_dict is True, an output object holding the last hidden state, the decoder cache, all hidden states, self-attentions, and cross-attentions; otherwise a tuple of the non-None values among these.

RAISES DESCRIPTION
None

This method does not raise any exceptions explicitly.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def forward(
    self,
    hidden_states: mindspore.Tensor,
    attention_mask: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    encoder_hidden_states: Optional[mindspore.Tensor] = None,
    encoder_attention_mask: Optional[mindspore.Tensor] = None,
    past_key_values: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = False,
    output_hidden_states: Optional[bool] = False,
    return_dict: Optional[bool] = True,
):
    """
    This method 'forward' is a part of the class 'BertEncoder' and is responsible for processing
    hidden states through the encoder layers.

    Args:
        self: The instance of the class.
        hidden_states (mindspore.Tensor): The input hidden states to be processed through the encoder layers.
        attention_mask (Optional[mindspore.Tensor]): Mask to avoid attention on padding tokens, defaults to None.
        head_mask (Optional[mindspore.Tensor]): Mask for attention heads in the encoder layers, defaults to None.
        encoder_hidden_states (Optional[mindspore.Tensor]): Hidden states of the encoder, defaults to None.
        encoder_attention_mask (Optional[mindspore.Tensor]): Mask to avoid attention on padding tokens in the encoder, defaults to None.
        past_key_values (Optional[Tuple[Tuple[mindspore.Tensor]]]): Past key values for caching, defaults to None.
        use_cache (Optional[bool]): Indicates whether to use cache for the next decoder step, defaults to None.
        output_attentions (Optional[bool]): Flag to output attention weights, defaults to False.
        output_hidden_states (Optional[bool]): Flag to output hidden states, defaults to False.
        return_dict (Optional[bool]): Flag to return the output as a dictionary, defaults to True.

    Returns:
        BaseModelOutputWithPastAndCrossAttentions if `return_dict` is True; otherwise a tuple of the
            non-None values among (last hidden state, cache, hidden states, self-attentions, cross-attentions).

    Raises:
        None: This method does not raise any exceptions explicitly.
    """
    all_hidden_states = () if output_hidden_states else None
    all_self_attentions = () if output_attentions else None
    all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

    next_decoder_cache = () if use_cache else None
    for i, layer_module in enumerate(self.layer):
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        layer_head_mask = head_mask[i] if head_mask is not None else None
        past_key_value = past_key_values[i] if past_key_values is not None else None
        layer_outputs = layer_module(
            hidden_states,
            attention_mask,
            layer_head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        hidden_states = layer_outputs[0]
        if use_cache:
            next_decoder_cache += (layer_outputs[-1],)
        if output_attentions:
            all_self_attentions = all_self_attentions + (layer_outputs[1],)
            if self.config.add_cross_attention:
                all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

    if output_hidden_states:
        all_hidden_states = all_hidden_states + (hidden_states,)

    if not return_dict:
        return tuple(
            v
            for v in [
                hidden_states,
                next_decoder_cache,
                all_hidden_states,
                all_self_attentions,
                all_cross_attentions,
            ]
            if v is not None
        )
    return BaseModelOutputWithPastAndCrossAttentions(
        last_hidden_state=hidden_states,
        past_key_values=next_decoder_cache,
        hidden_states=all_hidden_states,
        attentions=all_self_attentions,
        cross_attentions=all_cross_attentions,
    )
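
A usage sketch for the encoder in isolation (all sizes are illustrative): a batch of hidden states is pushed through the stacked BertLayer modules, collecting the per-layer states along the way.

import numpy as np
import mindspore
from mindnlp.transformers.models.bert.configuration_bert import BertConfig
from mindnlp.transformers.models.bert.modeling_bert import BertEncoder

config = BertConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
                    intermediate_size=64)
encoder = BertEncoder(config)
hidden = mindspore.Tensor(np.random.randn(1, 4, 32).astype(np.float32))
out = encoder(hidden, output_hidden_states=True)  # return_dict defaults to True
print(out.last_hidden_state.shape)  # (1, 4, 32)
print(len(out.hidden_states))       # num_hidden_layers + 1 = 3 (input + each layer)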

mindnlp.transformers.models.bert.modeling_bert.BertForMaskedLM

Bases: BertPreTrainedModel

BertForMaskedLM

Source code in mindnlp/transformers/models/bert/modeling_bert.py
class BertForMaskedLM(BertPreTrainedModel):
    """BertForMaskedLM"""
    _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"]

    def __init__(self, config):
        """
        Initializes an instance of the BertForMaskedLM class.

        Args:
            self: The instance of the class.
            config (BertConfig): The configuration object for the BertForMaskedLM model.

        Returns:
            None

        Raises:
            None

        Description:
        This method is the constructor for the BertForMaskedLM class. It initializes the instance by setting up
        the model architecture and loading the configuration.

        The 'config' parameter is an instance of the BertConfig class, which contains various settings
        and hyperparameters for the model.
        It is used to configure the model architecture and behavior.

        Note that if the 'is_decoder' attribute of the 'config' parameter is set to True, a warning message is logged,
        reminding the user to set 'is_decoder' to False when using the 'BertForMaskedLM' model
        with bi-directional self-attention.

        The method initializes two attributes of the instance:

        - 'bert': An instance of the 'BertModel' class, which represents the BERT model without the MLM head.
        The 'config' parameter is passed to the 'BertModel' constructor to configure the model architecture.
        - 'cls': An instance of the 'BertOnlyMLMHead' class, which represents the MLM head of the BERT model.
        The 'config' parameter is passed to the 'BertOnlyMLMHead' constructor to configure the MLM head.

        After the initialization, the 'post_init' method is called to execute any additional setup steps specific to the BertForMaskedLM class.
        """
        super().__init__(config)

        if config.is_decoder:
            logger.warning(
                "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        """
        This method returns the output embeddings for the BertForMaskedLM model.

        Args:
            self (BertForMaskedLM): The instance of the BertForMaskedLM class.

        Returns:
            nn.Linear: the decoder layer of the MLM head, which holds the output embeddings.

        Raises:
            None
        """
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        """
        Set the output embeddings for the BertForMaskedLM model.

        Args:
            self (BertForMaskedLM): The instance of the BertForMaskedLM class.
            new_embeddings (Any): The new embeddings to set for the output layer.

        Returns:
            None.

        Raises:
            None
        """
        self.cls.predictions.decoder = new_embeddings

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        encoder_hidden_states: Optional[mindspore.Tensor] = None,
        encoder_attention_mask: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        r"""
        Args:
            labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
                config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            masked_lm_loss = F.cross_entropy(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None):
        """
        Method: prepare_inputs_for_generation

        Description:
            This method prepares inputs for generation by adding a dummy token at the end of the input_ids
            and updating the attention_mask accordingly.

        Args:
            self: The instance of the BertForMaskedLM class.
            input_ids (Tensor): The input token IDs for generation.
            attention_mask (Tensor, optional): The attention mask tensor. Defaults to None.

        Returns:
            dict: A dictionary containing the updated 'input_ids' and 'attention_mask'.

        Raises:
            ValueError: If the PAD token is not defined in the configuration.
        """
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]

        #  add a dummy token
        if self.config.pad_token_id is None:
            raise ValueError("The PAD token should be defined for generation")

        attention_mask = ops.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = ops.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=mindspore.int64)
        input_ids = ops.cat([input_ids, dummy_token], dim=1)

        return {"input_ids": input_ids, "attention_mask": attention_mask}

mindnlp.transformers.models.bert.modeling_bert.BertForMaskedLM.__init__(config)

Initializes an instance of the BertForMaskedLM class.

PARAMETER DESCRIPTION
self

The instance of the class.

config

The configuration object for the BertForMaskedLM model.

TYPE: BertConfig

RETURNS DESCRIPTION

None

This method is the constructor for the BertForMaskedLM class. It initializes the instance by setting up the model architecture and loading the configuration.

The 'config' parameter is an instance of the BertConfig class, which contains various settings and hyperparameters for the model. It is used to configure the model architecture and behavior.

Note that if the 'is_decoder' attribute of the 'config' parameter is set to True, a warning message is logged, reminding the user to set 'is_decoder' to False when using the 'BertForMaskedLM' model with bi-directional self-attention.

The method initializes two attributes of the instance:

  • 'bert': An instance of the 'BertModel' class, which represents the BERT model without the MLM head. The 'config' parameter is passed to the 'BertModel' constructor to configure the model architecture.
  • 'cls': An instance of the 'BertOnlyMLMHead' class, which represents the MLM head of the BERT model. The 'config' parameter is passed to the 'BertOnlyMLMHead' constructor to configure the MLM head.

After the initialization, the 'post_init' method is called to execute any additional setup steps specific to the BertForMaskedLM class.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def __init__(self, config):
    """
    Initializes an instance of the BertForMaskedLM class.

    Args:
        self: The instance of the class.
        config (BertConfig): The configuration object for the BertForMaskedLM model.

    Returns:
        None

    Raises:
        None

    Description:
    This method is the constructor for the BertForMaskedLM class. It initializes the instance by setting up
    the model architecture and loading the configuration.

    The 'config' parameter is an instance of the BertConfig class, which contains various settings
    and hyperparameters for the model.
    It is used to configure the model architecture and behavior.

    Note that if the 'is_decoder' attribute of the 'config' parameter is set to True, a warning message is logged,
    reminding the user to set 'is_decoder' to False when using the 'BertForMaskedLM' model
    with bi-directional self-attention.

    The method initializes two attributes of the instance:

    - 'bert': An instance of the 'BertModel' class, which represents the BERT model without the MLM head.
    The 'config' parameter is passed to the 'BertModel' constructor to configure the model architecture.
    - 'cls': An instance of the 'BertOnlyMLMHead' class, which represents the MLM head of the BERT model.
    The 'config' parameter is passed to the 'BertOnlyMLMHead' constructor to configure the MLM head.

    After the initialization, the 'post_init' method is called to execute any additional setup steps specific to the BertForMaskedLM class.
    """
    super().__init__(config)

    if config.is_decoder:
        logger.warning(
            "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
            "bi-directional self-attention."
        )

    self.bert = BertModel(config, add_pooling_layer=False)
    self.cls = BertOnlyMLMHead(config)

    # Initialize weights and apply final processing
    self.post_init()

mindnlp.transformers.models.bert.modeling_bert.BertForMaskedLM.forward(input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None)

PARAMETER DESCRIPTION
labels

Labels for computing the masked language modeling loss. Indices should be in [-100, 0, ..., config.vocab_size] (see the input_ids docstring). Tokens with indices set to -100 are ignored (masked); the loss is only computed for the tokens with labels in [0, ..., config.vocab_size].

TYPE: `mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional* DEFAULT: None

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    position_ids: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    encoder_hidden_states: Optional[mindspore.Tensor] = None,
    encoder_attention_mask: Optional[mindspore.Tensor] = None,
    labels: Optional[mindspore.Tensor] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
):
    r"""
    Args:
        labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
    """
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    outputs = self.bert(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    sequence_output = outputs[0]
    prediction_scores = self.cls(sequence_output)

    masked_lm_loss = None
    if labels is not None:
        masked_lm_loss = F.cross_entropy(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

    if not return_dict:
        output = (prediction_scores,) + outputs[2:]
        return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

    return MaskedLMOutput(
        loss=masked_lm_loss,
        logits=prediction_scores,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
    )
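
A small training-style sketch with untrained weights and placeholder IDs: per the docstring above, label positions set to -100 are ignored, so the loss below is computed only for the single "masked" slot.

import numpy as np
import mindspore
from mindnlp.transformers.models.bert.configuration_bert import BertConfig
from mindnlp.transformers.models.bert.modeling_bert import BertForMaskedLM

config = BertConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                    num_attention_heads=4, intermediate_size=64)
model = BertForMaskedLM(config)

input_ids = mindspore.Tensor(np.array([[2, 7, 50, 9, 3]]), mindspore.int64)
labels = mindspore.Tensor(np.array([[-100, -100, 42, -100, -100]]), mindspore.int64)
out = model(input_ids, labels=labels)
print(out.loss)          # scalar masked-LM loss over the one labeled position
print(out.logits.shape)  # (1, 5, 100): per-token scores over the vocabulary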

mindnlp.transformers.models.bert.modeling_bert.BertForMaskedLM.get_output_embeddings()

This method returns the output embeddings for the BertForMaskedLM model.

PARAMETER DESCRIPTION
self

The instance of the BertForMaskedLM class.

TYPE: BertForMaskedLM

RETURNS DESCRIPTION

nn.Linear: the decoder layer of the MLM head, which holds the output embeddings.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def get_output_embeddings(self):
    """
    This method returns the output embeddings for the BertForMaskedLM model.

    Args:
        self (BertForMaskedLM): The instance of the BertForMaskedLM class.

    Returns:
        nn.Linear: the decoder layer of the MLM head, which holds the output embeddings.

    Raises:
        None
    """
    return self.cls.predictions.decoder

mindnlp.transformers.models.bert.modeling_bert.BertForMaskedLM.prepare_inputs_for_generation(input_ids, attention_mask=None)

Description

This method prepares inputs for generation by adding a dummy token at the end of the input_ids and updating the attention_mask accordingly.

PARAMETER DESCRIPTION
self

The instance of the BertForMaskedLM class.

input_ids

The input token IDs for generation.

TYPE: Tensor

attention_mask

The attention mask tensor. Defaults to None.

TYPE: Tensor DEFAULT: None

RETURNS DESCRIPTION
dict

A dictionary containing the updated 'input_ids' and 'attention_mask'.

RAISES DESCRIPTION
ValueError

If the PAD token is not defined in the configuration.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def prepare_inputs_for_generation(self, input_ids, attention_mask=None):
    """
    Method: prepare_inputs_for_generation

    Description:
        This method prepares inputs for generation by adding a dummy token at the end of the input_ids
        and updating the attention_mask accordingly.

    Args:
        self: The instance of the BertForMaskedLM class.
        input_ids (Tensor): The input token IDs for generation.
        attention_mask (Tensor, optional): The attention mask tensor. Defaults to None.

    Returns:
        dict: A dictionary containing the updated 'input_ids' and 'attention_mask'.

    Raises:
        ValueError: If the PAD token is not defined in the configuration.
    """
    input_shape = input_ids.shape
    effective_batch_size = input_shape[0]

    #  add a dummy token
    if self.config.pad_token_id is None:
        raise ValueError("The PAD token should be defined for generation")

    attention_mask = ops.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
    dummy_token = ops.full(
        (effective_batch_size, 1), self.config.pad_token_id, dtype=mindspore.int64)
    input_ids = ops.cat([input_ids, dummy_token], dim=1)

    return {"input_ids": input_ids, "attention_mask": attention_mask}

mindnlp.transformers.models.bert.modeling_bert.BertForMaskedLM.set_output_embeddings(new_embeddings)

Set the output embeddings for the BertForMaskedLM model.

PARAMETER DESCRIPTION
self

The instance of the BertForMaskedLM class.

TYPE: BertForMaskedLM

new_embeddings

The new embeddings to set for the output layer.

TYPE: Any

RETURNS DESCRIPTION

None.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def set_output_embeddings(self, new_embeddings):
    """
    Set the output embeddings for the BertForMaskedLM model.

    Args:
        self (BertForMaskedLM): The instance of the BertForMaskedLM class.
        new_embeddings (Any): The new embeddings to set for the output layer.

    Returns:
        None.

    Raises:
        None
    """
    self.cls.predictions.decoder = new_embeddings

mindnlp.transformers.models.bert.modeling_bert.BertForMultipleChoice

Bases: BertPreTrainedModel

BertForMultipleChoice

Source code in mindnlp/transformers/models/bert/modeling_bert.py
class BertForMultipleChoice(BertPreTrainedModel):
    """BertForMultipleChoice"""
    def __init__(self, config):
        """
        Initializes a BertForMultipleChoice instance.

        Args:
            self (BertForMultipleChoice): The current instance of the BertForMultipleChoice class.
            config: An instance of the configuration class that holds various hyperparameters and settings for the model.

        Returns:
            None.

        Raises:
            TypeError: If the provided config is not of the expected type.
            ValueError: If the provided config does not contain necessary attributes.
            RuntimeError: If there are issues during the initialization process.
        """
        super().__init__(config)

        self.bert = BertModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(p=classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        r"""
        Args:
            labels (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
                Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
                num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
                `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        input_ids = input_ids.view(-1, input_ids.shape[-1]) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.shape[-1]) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.shape[-2], inputs_embeds.shape[-1])
            if inputs_embeds is not None
            else None
        )

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss = F.cross_entropy(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

mindnlp.transformers.models.bert.modeling_bert.BertForMultipleChoice.__init__(config)

Initializes a BertForMultipleChoice instance.

PARAMETER DESCRIPTION
self

The current instance of the BertForMultipleChoice class.

TYPE: BertForMultipleChoice

config

An instance of the configuration class that holds various hyperparameters and settings for the model.

RETURNS DESCRIPTION

None.

RAISES DESCRIPTION
TypeError

If the provided config is not of the expected type.

ValueError

If the provided config does not contain necessary attributes.

RuntimeError

If there are issues during the initialization process.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def __init__(self, config):
    """
    Initializes a BertForMultipleChoice instance.

    Args:
        self (BertForMultipleChoice): The current instance of the BertForMultipleChoice class.
        config: An instance of the configuration class that holds various hyperparameters and settings for the model.

    Returns:
        None.

    Raises:
        TypeError: If the provided config is not of the expected type.
        ValueError: If the provided config does not contain necessary attributes.
        RuntimeError: If there are issues during the initialization process.
    """
    super().__init__(config)

    self.bert = BertModel(config)
    classifier_dropout = (
        config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
    )
    self.dropout = nn.Dropout(p=classifier_dropout)
    self.classifier = nn.Linear(config.hidden_size, 1)

    # Initialize weights and apply final processing
    self.post_init()

mindnlp.transformers.models.bert.modeling_bert.BertForMultipleChoice.forward(input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None)

PARAMETER DESCRIPTION
labels

Labels for computing the multiple choice classification loss. Indices should be in [0, ..., num_choices-1] where num_choices is the size of the second dimension of the input tensors. (See input_ids above)

TYPE: `mindspore.Tensor` of shape `(batch_size,)`, *optional* DEFAULT: None

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    position_ids: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    labels: Optional[mindspore.Tensor] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
):
    r"""
    Args:
        labels (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
    """
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
    num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

    input_ids = input_ids.view(-1, input_ids.shape[-1]) if input_ids is not None else None
    attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) if attention_mask is not None else None
    token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
    position_ids = position_ids.view(-1, position_ids.shape[-1]) if position_ids is not None else None
    inputs_embeds = (
        inputs_embeds.view(-1, inputs_embeds.shape[-2], inputs_embeds.shape[-1])
        if inputs_embeds is not None
        else None
    )

    outputs = self.bert(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    pooled_output = outputs[1]

    pooled_output = self.dropout(pooled_output)
    logits = self.classifier(pooled_output)
    reshaped_logits = logits.view(-1, num_choices)

    loss = None
    if labels is not None:
        loss = F.cross_entropy(reshaped_logits, labels)

    if not return_dict:
        output = (reshaped_logits,) + outputs[2:]
        return ((loss,) + output) if loss is not None else output

    return MultipleChoiceModelOutput(
        loss=loss,
        logits=reshaped_logits,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
    )
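
A shape-focused sketch with random placeholder inputs: candidates ride along the second axis, are flattened to (batch * num_choices, seq_len) inside forward, and come back as one logit per choice.

import numpy as np
import mindspore
from mindnlp.transformers.models.bert.configuration_bert import BertConfig
from mindnlp.transformers.models.bert.modeling_bert import BertForMultipleChoice

config = BertConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                    num_attention_heads=4, intermediate_size=64)
model = BertForMultipleChoice(config)

# (batch=2, num_choices=3, seq_len=5): one row of token IDs per candidate.
input_ids = mindspore.Tensor(np.random.randint(1, 100, (2, 3, 5)), mindspore.int64)
labels = mindspore.Tensor(np.array([0, 2]), mindspore.int64)  # correct choice per example
out = model(input_ids, labels=labels)
print(out.logits.shape)  # (2, 3): one score per candidate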

mindnlp.transformers.models.bert.modeling_bert.BertForNextSentencePrediction

Bases: BertPreTrainedModel

BertForNextSentencePrediction

Source code in mindnlp/transformers/models/bert/modeling_bert.py
class BertForNextSentencePrediction(BertPreTrainedModel):
    """BertForNextSentencePrediction"""
    def __init__(self, config):
        """
        Initializes an instance of BertForNextSentencePrediction class.

        Args:
            self (BertForNextSentencePrediction): The instance of the BertForNextSentencePrediction class.
            config: The configuration object containing settings for the BERT model.

        Returns:
            None: This method initializes the BertForNextSentencePrediction instance with the specified config settings.

        Raises:
            None.
        """
        super().__init__(config)

        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Constructs the BertForNextSentencePrediction model.

        Args:
            self (BertForNextSentencePrediction): An instance of the BertForNextSentencePrediction class.
            input_ids (Optional[mindspore.Tensor]): The input tensor containing the indices of input sequence tokens.
            attention_mask (Optional[mindspore.Tensor]):
                The attention mask tensor indicating which tokens should be attended to (1) and which should not (0).
            token_type_ids (Optional[mindspore.Tensor]):
                The token type tensor indicating the type of each token in the input sequence.
            position_ids (Optional[mindspore.Tensor]): The tensor containing the position indices of each input token.
            head_mask (Optional[mindspore.Tensor]):
                The tensor indicating which heads should be masked in the attention layers.
            inputs_embeds (Optional[mindspore.Tensor]):
                The tensor containing the embedded representation of the input tokens.
            labels (Optional[mindspore.Tensor]): The tensor containing the labels for the next sentence prediction task.
            output_attentions (Optional[bool]): Whether to include the attention probabilities in the output.
            output_hidden_states (Optional[bool]): Whether to include the hidden states in the output.
            return_dict (Optional[bool]): Whether to return a dictionary instead of a tuple as the output.

        Returns:
            NextSentencePredictorOutput if `return_dict` is True; otherwise a tuple with the
                seq-relationship scores followed by the remaining encoder outputs.

        Raises:
            None.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        seq_relationship_scores = self.cls(pooled_output)

        next_sentence_loss = None
        if labels is not None:
            next_sentence_loss = F.cross_entropy(seq_relationship_scores.view(-1, 2), labels.view(-1))

        if not return_dict:
            output = (seq_relationship_scores,) + outputs[2:]
            return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output

        return NextSentencePredictorOutput(
            loss=next_sentence_loss,
            logits=seq_relationship_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
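
A usage sketch with untrained weights and placeholder IDs, following the usual BERT NSP convention (label 0: sentence B follows sentence A; label 1: sentence B is random):

import numpy as np
import mindspore
from mindnlp.transformers.models.bert.configuration_bert import BertConfig
from mindnlp.transformers.models.bert.modeling_bert import BertForNextSentencePrediction

config = BertConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                    num_attention_heads=4, intermediate_size=64)
model = BertForNextSentencePrediction(config)

input_ids = mindspore.Tensor(np.array([[2, 7, 9, 3, 11, 13, 3]]), mindspore.int64)
# token_type_ids mark sentence A (0) versus sentence B (1).
token_type_ids = mindspore.Tensor(np.array([[0, 0, 0, 0, 1, 1, 1]]), mindspore.int64)
labels = mindspore.Tensor(np.array([0]), mindspore.int64)
out = model(input_ids, token_type_ids=token_type_ids, labels=labels)
print(out.logits.shape)  # (1, 2): is-next vs. random scores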

mindnlp.transformers.models.bert.modeling_bert.BertForNextSentencePrediction.__init__(config)

Initializes an instance of BertForNextSentencePrediction class.

PARAMETER DESCRIPTION
self

The instance of the BertForNextSentencePrediction class.

TYPE: BertForNextSentencePrediction

config

The configuration object containing settings for the BERT model.

RETURNS DESCRIPTION
None

This method initializes the BertForNextSentencePrediction instance with the specified config settings.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def __init__(self, config):
    """
    Initializes an instance of BertForNextSentencePrediction class.

    Args:
        self (BertForNextSentencePrediction): The instance of the BertForNextSentencePrediction class.
        config: The configuration object containing settings for the BERT model.

    Returns:
        None: This method initializes the BertForNextSentencePrediction instance with the specified config settings.

    Raises:
        None.
    """
    super().__init__(config)

    self.bert = BertModel(config)
    self.cls = BertOnlyNSPHead(config)

    # Initialize weights and apply final processing
    self.post_init()

mindnlp.transformers.models.bert.modeling_bert.BertForNextSentencePrediction.forward(input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None)

Runs the forward pass of the BertForNextSentencePrediction model.

PARAMETER DESCRIPTION
self

An instance of the BertForNextSentencePrediction class.

TYPE: BertForNextSentencePrediction

input_ids

The input tensor containing the indices of input sequence tokens.

TYPE: Optional[Tensor] DEFAULT: None

attention_mask

The attention mask tensor indicating which tokens should be attended to (1) and which should not (0).

TYPE: Optional[Tensor] DEFAULT: None

token_type_ids

The token type tensor indicating the type of each token in the input sequence.

TYPE: Optional[Tensor] DEFAULT: None

position_ids

The tensor containing the position indices of each input token.

TYPE: Optional[Tensor] DEFAULT: None

head_mask

The tensor indicating which heads should be masked in the attention layers.

TYPE: Optional[Tensor] DEFAULT: None

inputs_embeds

The tensor containing the embedded representation of the input tokens.

TYPE: Optional[Tensor] DEFAULT: None

labels

The tensor containing the labels for the next sentence prediction task.

TYPE: Optional[Tensor] DEFAULT: None

output_attentions

Whether to include the attention probabilities in the output.

TYPE: Optional[bool] DEFAULT: None

output_hidden_states

Whether to include the hidden states in the output.

TYPE: Optional[bool] DEFAULT: None

return_dict

Whether to return a dictionary instead of a tuple as the output.

TYPE: Optional[bool] DEFAULT: None

RETURNS DESCRIPTION

NextSentencePredictorOutput if return_dict is True; otherwise a tuple with the seq-relationship scores followed by the remaining encoder outputs.

Source code in mindnlp/transformers/models/bert/modeling_bert.py
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    position_ids: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    labels: Optional[mindspore.Tensor] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
):
    """Constructs the BertForNextSentencePrediction model.

    Args:
        self (BertForNextSentencePrediction): An instance of the BertForNextSentencePrediction class.
        input_ids (Optional[mindspore.Tensor]): The input tensor containing the indices of input sequence tokens.
        attention_mask (Optional[mindspore.Tensor]):
            The attention mask tensor indicating which tokens should be attended to (1) and which should not (0).
        token_type_ids (Optional[mindspore.Tensor]):
            The token type tensor indicating the type of each token in the input sequence.
        position_ids (Optional[mindspore.Tensor]): The tensor containing the position indices of each input token.
        head_mask (Optional[mindspore.Tensor]):
            The tensor indicating which heads should be masked in the attention layers.
        inputs_embeds (Optional[mindspore.Tensor]):
            The tensor containing the embedded representation of the input tokens.
        labels (Optional[mindspore.Tensor]): The tensor containing the labels for the next sentence prediction task.
        output_attentions (Optional[bool]): Whether to include the attention probabilities in the output.
        output_hidden_states (Optional[bool]): Whether to include the hidden states in the output.
        return_dict (Optional[bool]): Whether to return a dictionary instead of a tuple as the output.

    Returns:
            NextSentencePredictorOutput if `return_dict` is True; otherwise a tuple with the
                seq-relationship scores followed by the remaining encoder outputs.

    Raises:
        None.
    """
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    outputs = self.bert(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )