Class: Transformers::Mpnet::MPNetEncoder
- Inherits: Torch::NN::Module
  - Object
  - Torch::NN::Module
  - Transformers::Mpnet::MPNetEncoder
- Defined in: lib/transformers/models/mpnet/modeling_mpnet.rb
Class Method Summary
- .relative_position_bucket(relative_position, num_buckets: 32, max_distance: 128) ⇒ Object
Instance Method Summary
- #compute_position_bias(x, position_ids: nil, num_buckets: 32) ⇒ Object
- #forward(hidden_states, attention_mask: nil, head_mask: nil, output_attentions: false, output_hidden_states: false, return_dict: false, **kwargs) ⇒ Object
- #initialize(config) ⇒ MPNetEncoder (constructor)
  A new instance of MPNetEncoder.
Constructor Details
#initialize(config) ⇒ MPNetEncoder
Returns a new instance of MPNetEncoder.
# File 'lib/transformers/models/mpnet/modeling_mpnet.rb', line 281

def initialize(config)
  super()
  @config = config
  @n_heads = config.num_attention_heads
  @layer = Torch::NN::ModuleList.new(config.num_hidden_layers.times.map { |_| MPNetLayer.new(config) })
  @relative_attention_bias = Torch::NN::Embedding.new(config.relative_attention_num_buckets, @n_heads)
end
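A minimal usage sketch (not part of the generated docs); it assumes Transformers::Mpnet::MPNetConfig exists in the gem and exposes the usual MPNet defaults (number of layers, attention heads, and relative-attention buckets):

# Hedged sketch: MPNetConfig and its defaults are assumptions, not taken from this page.
config = Transformers::Mpnet::MPNetConfig.new
encoder = Transformers::Mpnet::MPNetEncoder.new(config)
# @layer now holds config.num_hidden_layers MPNetLayer modules, and
# @relative_attention_bias is an Embedding with one row per relative-position
# bucket and one column per attention head.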
Class Method Details
.relative_position_bucket(relative_position, num_buckets: 32, max_distance: 128) ⇒ Object
# File 'lib/transformers/models/mpnet/modeling_mpnet.rb', line 345

def self.relative_position_bucket(relative_position, num_buckets: 32, max_distance: 128)
  ret = 0
  n = -relative_position

  num_buckets /= 2
  ret += n.lt(0).to(Torch.long) * num_buckets
  n = Torch.abs(n)

  max_exact = num_buckets / 2
  is_small = n.lt(max_exact)

  val_if_large = max_exact + (
    Torch.log(n.float / max_exact) / Math.log(max_distance / max_exact) * (num_buckets - max_exact)
  ).to(Torch.long)
  val_if_large = Torch.min(val_if_large, Torch.full_like(val_if_large, num_buckets - 1))
  ret += Torch.where(is_small, n, val_if_large)
  ret
end
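For intuition, here is a hedged example of calling the bucketing directly, mirroring the arange/broadcast pattern used by #compute_position_bias below; the sequence length 6 is arbitrary:

# Hedged example: bucket ids for a 6x6 grid of relative positions.
context = Torch.arange(6, dtype: Torch.long)[0.., nil]  # query positions, shape [6, 1]
memory  = Torch.arange(6, dtype: Torch.long)[nil, 0..]  # key positions,   shape [1, 6]
buckets = Transformers::Mpnet::MPNetEncoder.relative_position_bucket(memory - context)
# buckets has shape [6, 6]; half of num_buckets separates positive from
# negative offsets, small distances get exact buckets, and larger ones are
# spaced logarithmically out to max_distance.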
Instance Method Details
#compute_position_bias(x, position_ids: nil, num_buckets: 32) ⇒ Object
# File 'lib/transformers/models/mpnet/modeling_mpnet.rb', line 325

def compute_position_bias(x, position_ids: nil, num_buckets: 32)
  bsz, qlen, klen = [x.size(0), x.size(1), x.size(1)]
  if !position_ids.nil?
    context_position = position_ids[0.., 0.., nil]
    memory_position = position_ids[0.., nil, 0..]
  else
    context_position = Torch.arange(qlen, dtype: Torch.long)[0.., nil]
    memory_position = Torch.arange(klen, dtype: Torch.long)[nil, 0..]
  end

  relative_position = memory_position - context_position

  rp_bucket = self.class.relative_position_bucket(relative_position, num_buckets: num_buckets)
  rp_bucket = rp_bucket.to(x.device)
  values = @relative_attention_bias.(rp_bucket)
  values = values.permute([2, 0, 1]).unsqueeze(0)
  values = values.expand([bsz, -1, qlen, klen]).contiguous
  values
end
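A hedged shape sketch (variable names are illustrative; `encoder` and `config` are the objects from the constructor example above, and config.hidden_size is assumed to exist): given hidden states of shape [batch, seq_len, hidden], the returned bias broadcasts over the batch with one slice per attention head:

# Hedged sketch, not from the gem's docs.
x = Torch.randn(2, 8, config.hidden_size)   # stand-in for hidden states: [batch, seq, hidden]
bias = encoder.compute_position_bias(x)
# bias.shape => [2, config.num_attention_heads, 8, 8]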
#forward(hidden_states, attention_mask: nil, head_mask: nil, output_attentions: false, output_hidden_states: false, return_dict: false, **kwargs) ⇒ Object
# File 'lib/transformers/models/mpnet/modeling_mpnet.rb', line 289

def forward(
  hidden_states,
  attention_mask: nil,
  head_mask: nil,
  output_attentions: false,
  output_hidden_states: false,
  return_dict: false,
  **kwargs
)
  position_bias = compute_position_bias(hidden_states)
  all_hidden_states = output_hidden_states ? [] : nil
  all_attentions = output_attentions ? [] : nil

  @layer.each_with_index do |layer_module, i|
    if output_hidden_states
      all_hidden_states = all_hidden_states + [hidden_states]
    end

    layer_outputs = layer_module.(hidden_states, attention_mask: attention_mask, head_mask: head_mask[i], position_bias: position_bias, output_attentions: output_attentions, **kwargs)
    hidden_states = layer_outputs[0]

    if output_attentions
      all_attentions = all_attentions + [layer_outputs[1]]
    end
  end

  # Add last layer
  if output_hidden_states
    all_hidden_states = all_hidden_states + [hidden_states]
  end

  if !return_dict
    return Array([hidden_states, all_hidden_states, all_attentions].select { |v| !v.nil? })
  end

  BaseModelOutput.new(last_hidden_state: hidden_states, hidden_states: all_hidden_states, attentions: all_attentions)
end
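An end-to-end sketch (hedged: the zero additive mask and the per-layer nil head mask are assumptions about how the surrounding model normally prepares these inputs, not something stated on this page):

# Hedged sketch; forward indexes head_mask per layer, so pass an array of nils.
hidden = Torch.randn(1, 16, config.hidden_size)
mask   = Torch.zeros(1, 1, 1, 16)   # assumed additive mask: 0.0 = attend, large negative = ignore
output = encoder.forward(
  hidden,
  attention_mask: mask,
  head_mask: Array.new(config.num_hidden_layers, nil),
  return_dict: true
)
output.last_hidden_state.shape   # => [1, 16, config.hidden_size]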