Class: Transformers::DebertaV2::DebertaV2Encoder

Inherits:
Torch::NN::Module
  • Object
Defined in:
lib/transformers/models/deberta_v2/modeling_deberta_v2.rb

Instance Method Summary

  • #forward(hidden_states, attention_mask, output_hidden_states: true, output_attentions: false, query_states: nil, relative_pos: nil, return_dict: true) ⇒ Object
  • #get_attention_mask(attention_mask) ⇒ Object
  • #get_rel_embedding ⇒ Object
  • #get_rel_pos(hidden_states, query_states: nil, relative_pos: nil) ⇒ Object
  • #initialize(config) ⇒ DebertaV2Encoder constructor

Constructor Details

#initialize(config) ⇒ DebertaV2Encoder

Returns a new instance of DebertaV2Encoder.



# File 'lib/transformers/models/deberta_v2/modeling_deberta_v2.rb', line 297

def initialize(config)
  super()

  @layer = Torch::NN::ModuleList.new(config.num_hidden_layers.times.map { |_| DebertaV2Layer.new(config) })
  @relative_attention = config.getattr("relative_attention", false)

  if @relative_attention
    @max_relative_positions = config.getattr("max_relative_positions", -1)
    if @max_relative_positions < 1
      @max_relative_positions = config.max_position_embeddings
    end

    @position_buckets = config.getattr("position_buckets", -1)
    pos_ebd_size = @max_relative_positions * 2

    if @position_buckets > 0
      pos_ebd_size = @position_buckets * 2
    end

    @rel_embeddings = Torch::NN::Embedding.new(pos_ebd_size, config.hidden_size)
  end

  @norm_rel_ebd = config.getattr("norm_rel_ebd", "none").downcase.split("|").map { |x| x.strip }

  if @norm_rel_ebd.include?("layer_norm")
    @LayerNorm = Torch::NN::LayerNorm.new(config.hidden_size, eps: config.layer_norm_eps, elementwise_affine: true)
  end

  @conv = config.getattr("conv_kernel_size", 0) > 0 ? ConvLayer.new(config) : nil
  @gradient_checkpointing = false
end
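
The size of the relative-position embedding table depends on two optional config fields. A minimal sketch of that derivation, using hypothetical values rather than any particular checkpoint's config:

# Hypothetical values, mirroring the constructor logic above.
max_relative_positions = 512 # would fall back to config.max_position_embeddings if < 1
position_buckets = 256       # -1 would disable bucketing

pos_ebd_size = max_relative_positions * 2
pos_ebd_size = position_buckets * 2 if position_buckets > 0
pos_ebd_size # => 512 rows in the Torch::NN::Embedding table, each of size config.hidden_size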

Instance Method Details

#forward(hidden_states, attention_mask, output_hidden_states: true, output_attentions: false, query_states: nil, relative_pos: nil, return_dict: true) ⇒ Object



# File 'lib/transformers/models/deberta_v2/modeling_deberta_v2.rb', line 356

def forward(
  hidden_states,
  attention_mask,
  output_hidden_states: true,
  output_attentions: false,
  query_states: nil,
  relative_pos: nil,
  return_dict: true
)
  if attention_mask.dim <= 2
    input_mask = attention_mask
  else
    input_mask = attention_mask.sum(-2) > 0
  end
  attention_mask = get_attention_mask(attention_mask)
  relative_pos = get_rel_pos(hidden_states, query_states:, relative_pos:)

  all_hidden_states = output_hidden_states ? [] : nil
  all_attentions = output_attentions ? [] : nil

  if hidden_states.is_a?(Array)
    next_kv = hidden_states[0]
  else
    next_kv = hidden_states
  end
  rel_embeddings = get_rel_embedding
  output_states = next_kv
  @layer.each_with_index do |layer_module, i|
    if output_hidden_states
      all_hidden_states = all_hidden_states + [output_states]
    end

    if @gradient_checkpointing && @training
      output_states = _gradient_checkpointing_func(layer_module.__call__, next_kv, attention_mask, query_states, relative_pos, rel_embeddings, output_attentions)
    else
      output_states = layer_module.(next_kv, attention_mask, query_states: query_states, relative_pos: relative_pos, rel_embeddings: rel_embeddings, output_attentions: output_attentions)
    end

    if output_attentions
      output_states, att_m = output_states
    end

    if i == 0 && !@conv.nil?
      output_states = @conv.(hidden_states, output_states, input_mask)
    end

    if !query_states.nil?
      query_states = output_states
      if hidden_states.is_a?(Array)
        next_kv = i + 1 < @layer.length ? hidden_states[i + 1] : nil
      end
    else
      next_kv = output_states
    end

    if output_attentions
      all_attentions = all_attentions + [att_m]
    end
  end

  if output_hidden_states
    all_hidden_states = all_hidden_states + [output_states]
  end

  if !return_dict
    return Array([output_states, all_hidden_states, all_attentions].select { |v| !v.nil? })
  end
  BaseModelOutput.new(last_hidden_state: output_states, hidden_states: all_hidden_states, attentions: all_attentions)
end
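
A hedged usage sketch, assuming `encoder` is an already-constructed DebertaV2Encoder and the hidden size below matches its config; the variable names are illustrative, not part of the library:

batch, seq_len, hidden = 2, 8, 768                        # hypothetical shapes
hidden_states  = Torch.randn(batch, seq_len, hidden)      # e.g. the embedding layer's output
attention_mask = Torch.ones(batch, seq_len, dtype: :long) # 1 = real token, 0 = padding

out = encoder.(hidden_states, attention_mask)
out.last_hidden_state # [batch, seq_len, hidden] tensor from the final layer
out.hidden_states     # num_hidden_layers + 1 tensors: the input plus each layer's output
out.attentions        # nil here, since output_attentions defaults to false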

#get_attention_mask(attention_mask) ⇒ Object



# File 'lib/transformers/models/deberta_v2/modeling_deberta_v2.rb', line 337

def get_attention_mask(attention_mask)
  if attention_mask.dim <= 2
    extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
    attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
  elsif attention_mask.dim == 3
    attention_mask = attention_mask.unsqueeze(1)
  end

  attention_mask
end
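
A small worked example of the 2-D branch, with illustrative values: a [batch, seq_len] padding mask becomes a [batch, 1, seq_len, seq_len] pairwise mask that is nonzero only where both the query and the key positions are real tokens.

mask = Torch.tensor([[1, 1, 1, 0]])                      # batch = 1, seq_len = 4, last position is padding
extended = mask.unsqueeze(1).unsqueeze(2)                # shape [1, 1, 1, 4]
pairwise = extended * extended.squeeze(-2).unsqueeze(-1) # broadcasts to [1, 1, 4, 4]
# Row 3 and column 3 of each 4x4 block are zero, blocking attention to and from the pad token.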

#get_rel_embedding ⇒ Object



# File 'lib/transformers/models/deberta_v2/modeling_deberta_v2.rb', line 329

def get_rel_embedding
  rel_embeddings = @relative_attention ? @rel_embeddings.weight : nil
  if !rel_embeddings.nil? && @norm_rel_ebd.include?("layer_norm")
    rel_embeddings = @LayerNorm.(rel_embeddings)
  end
  rel_embeddings
end
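
A brief sketch of the return value, reusing the hypothetical encoder and config values from the examples above:

rel = encoder.get_rel_embedding
rel&.shape # => [512, 768] with those values; @LayerNorm is applied only when
           #    config.norm_rel_ebd includes "layer_norm", and nil is returned
           #    when relative attention is disabled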

#get_rel_pos(hidden_states, query_states: nil, relative_pos: nil) ⇒ Object



# File 'lib/transformers/models/deberta_v2/modeling_deberta_v2.rb', line 348

def get_rel_pos(hidden_states, query_states: nil, relative_pos: nil)
  if @relative_attention && relative_pos.nil?
    q = !query_states.nil? ? query_states.size(-2) : hidden_states.size(-2)
    relative_pos = DebertaV2.build_relative_position(q, hidden_states.size(-2), bucket_size: @position_buckets, max_position: @max_relative_positions, device: hidden_states.device)
  end
  relative_pos
end
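
A hedged sketch of the path where the matrix actually gets built: relative attention enabled, no precomputed relative_pos, and no separate query_states, so the query length falls back to the key length. The bucket and max-position values are hypothetical; Transformers::DebertaV2.build_relative_position is the helper the source above calls, written here with its full namespace.

hidden_states = Torch.randn(1, 6, 768) # hypothetical [batch, seq_len, hidden] input
q = hidden_states.size(-2)             # query length == key length without query_states
relative_pos = Transformers::DebertaV2.build_relative_position(
  q, hidden_states.size(-2),
  bucket_size: 256, max_position: 512,
  device: hidden_states.device
)
# => tensor of (bucketed) relative position ids for every query/key pair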