Class: Transformers::Mpnet::MPNetLayer
- Inherits: Torch::NN::Module
  - Object
  - Torch::NN::Module
  - Transformers::Mpnet::MPNetLayer
- Defined in: lib/transformers/models/mpnet/modeling_mpnet.rb
Instance Method Summary
- #forward(hidden_states, attention_mask: nil, head_mask: nil, position_bias: nil, output_attentions: false, **kwargs) ⇒ Object
- #initialize(config) ⇒ MPNetLayer (constructor)
  A new instance of MPNetLayer.
Constructor Details
#initialize(config) ⇒ MPNetLayer
Returns a new instance of MPNetLayer.
# File 'lib/transformers/models/mpnet/modeling_mpnet.rb', line 254

def initialize(config)
  super()
  @attention = MPNetAttention.new(config)
  @intermediate = MPNetIntermediate.new(config)
  @output = MPNetOutput.new(config)
end
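The constructor simply wires the attention, intermediate, and output sub-modules together; all sizing comes from the config object. A minimal construction sketch, assuming the gem also exposes an MPNetConfig constant with the usual MPNet defaults (the exact constant name and its availability are assumptions here; any object exposing the same attributes would work):

  # Assumption: the transformers-rb gem has already been required and
  # Transformers::Mpnet::MPNetConfig exists with Python-style defaults
  # (hidden_size: 768, num_attention_heads: 12, intermediate_size: 3072, ...).
  config = Transformers::Mpnet::MPNetConfig.new
  layer = Transformers::Mpnet::MPNetLayer.new(config)
  # layer now holds an MPNetAttention, an MPNetIntermediate, and an MPNetOutput.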
Instance Method Details
#forward(hidden_states, attention_mask: nil, head_mask: nil, position_bias: nil, output_attentions: false, **kwargs) ⇒ Object
# File 'lib/transformers/models/mpnet/modeling_mpnet.rb', line 261

def forward(
  hidden_states,
  attention_mask: nil,
  head_mask: nil,
  position_bias: nil,
  output_attentions: false,
  **kwargs
)
  self_attention_outputs = @attention.(
    hidden_states,
    attention_mask: attention_mask,
    head_mask: head_mask,
    position_bias: position_bias,
    output_attentions: output_attentions
  )
  attention_output = self_attention_outputs[0]
  outputs = self_attention_outputs[1..]

  intermediate_output = @intermediate.(attention_output)
  layer_output = @output.(intermediate_output, attention_output)
  outputs = [layer_output] + outputs
  outputs
end
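A usage sketch for forward: the layer is callable like any Torch::NN::Module, taking hidden states of shape [batch, seq_len, hidden_size], an optional additive attention mask, and an optional relative position bias. The config and layer objects carry over from the constructor sketch above and are illustrative, not part of this class's documented API:

  batch, seq_len = 1, 8
  hidden_states = Torch.randn(batch, seq_len, config.hidden_size)

  # Additive mask in the extended [batch, 1, 1, seq_len] form: 0.0 keeps a
  # position, a large negative value masks it. Nothing is masked here.
  attention_mask = Torch.zeros(batch, 1, 1, seq_len)

  outputs = layer.(hidden_states, attention_mask: attention_mask, output_attentions: true)

  layer_output = outputs[0] # [batch, seq_len, hidden_size] transformed hidden states
  attentions = outputs[1]   # attention probabilities (present because output_attentions: true)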