Class: Transformers::Vit::ViTConfig

Inherits:
PretrainedConfig
Defined in:
lib/transformers/models/vit/configuration_vit.rb

Instance Attribute Summary

Attributes inherited from PretrainedConfig

#_commit_hash, #add_cross_attention, #architectures, #chunk_size_feed_forward, #id2label, #is_decoder, #is_encoder_decoder, #output_attentions, #output_hidden_states, #pad_token_id, #problem_type, #pruned_heads, #tie_encoder_decoder, #tie_word_embeddings, #tokenizer_class

Instance Method Summary

Methods inherited from PretrainedConfig

#_attn_implementation, #_dict, from_dict, from_pretrained, get_config_dict, #getattr, #hasattr, #method_missing, #name_or_path, #name_or_path=, #num_labels, #num_labels=, #respond_to_missing?, #to_dict, #to_diff_dict, #to_json_string, #to_s, #use_return_dict

Methods included from ClassAttribute

#class_attribute

Constructor Details

#initialize(hidden_size: 768, num_hidden_layers: 12, num_attention_heads: 12, intermediate_size: 3072, hidden_act: "gelu", hidden_dropout_prob: 0.0, attention_probs_dropout_prob: 0.0, initializer_range: 0.02, layer_norm_eps: 1e-12, image_size: 224, patch_size: 16, num_channels: 3, qkv_bias: true, encoder_stride: 16, **kwargs) ⇒ ViTConfig

Returns a new instance of ViTConfig.

# File 'lib/transformers/models/vit/configuration_vit.rb', line 24

def initialize(
  hidden_size: 768,
  num_hidden_layers: 12,
  num_attention_heads: 12,
  intermediate_size: 3072,
  hidden_act: "gelu",
  hidden_dropout_prob: 0.0,
  attention_probs_dropout_prob: 0.0,
  initializer_range: 0.02,
  layer_norm_eps: 1e-12,
  image_size: 224,
  patch_size: 16,
  num_channels: 3,
  qkv_bias: true,
  encoder_stride: 16,
  **kwargs
)
  super(**kwargs)

  @hidden_size = hidden_size
  @num_hidden_layers = num_hidden_layers
  @num_attention_heads = num_attention_heads
  @intermediate_size = intermediate_size
  @hidden_act = hidden_act
  @hidden_dropout_prob = hidden_dropout_prob
  @attention_probs_dropout_prob = attention_probs_dropout_prob
  @initializer_range = initializer_range
  @layer_norm_eps = layer_norm_eps
  @image_size = image_size
  @patch_size = patch_size
  @num_channels = num_channels
  @qkv_bias = qkv_bias
  @encoder_stride = encoder_stride
end
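As a usage illustration (a minimal sketch assuming the transformers-rb gem is already loaded; the overridden values below are arbitrary examples, not recommended settings):

# Default ViT-Base style configuration
config = Transformers::Vit::ViTConfig.new
config.hidden_size # => 768
config.patch_size  # => 16

# Override selected defaults; extra keyword arguments are forwarded
# to PretrainedConfig via **kwargs
custom = Transformers::Vit::ViTConfig.new(
  image_size: 384,          # arbitrary example value
  patch_size: 32,           # arbitrary example value
  hidden_dropout_prob: 0.1
)
custom.image_size # => 384
custom.patch_size # => 32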

Dynamic Method Handling

This class handles dynamic methods through the method_missing method in the class Transformers::PretrainedConfig.
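The actual dispatch lives in PretrainedConfig; the sketch below is only a hypothetical illustration of the common Ruby pattern such a note describes (unknown getters resolved against instance variables), not the gem's real implementation. Consult the PretrainedConfig source for the exact behavior.

class ExampleConfig
  def initialize(**kwargs)
    kwargs.each { |k, v| instance_variable_set("@#{k}", v) }
  end

  # Resolve unknown getters from instance variables, so
  # config.some_value returns @some_value when it was set.
  def method_missing(name, *args)
    ivar = "@#{name}"
    if args.empty? && instance_variable_defined?(ivar)
      instance_variable_get(ivar)
    else
      super
    end
  end

  def respond_to_missing?(name, include_private = false)
    instance_variable_defined?("@#{name}") || super
  end
end

ExampleConfig.new(encoder_stride: 16).encoder_stride # => 16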

Instance Attribute Details

#attention_probs_dropout_prob ⇒ Object (readonly)

Returns the value of attribute attention_probs_dropout_prob.

# File 'lib/transformers/models/vit/configuration_vit.rb', line 20

def attention_probs_dropout_prob
  @attention_probs_dropout_prob
end

#encoder_stride ⇒ Object (readonly)

Returns the value of attribute encoder_stride.

# File 'lib/transformers/models/vit/configuration_vit.rb', line 20

def encoder_stride
  @encoder_stride
end

#hidden_act ⇒ Object (readonly)

Returns the value of attribute hidden_act.

# File 'lib/transformers/models/vit/configuration_vit.rb', line 20

def hidden_act
  @hidden_act
end

#hidden_dropout_prob ⇒ Object (readonly)

Returns the value of attribute hidden_dropout_prob.

# File 'lib/transformers/models/vit/configuration_vit.rb', line 20

def hidden_dropout_prob
  @hidden_dropout_prob
end

#hidden_size ⇒ Object (readonly)

Returns the value of attribute hidden_size.

# File 'lib/transformers/models/vit/configuration_vit.rb', line 20

def hidden_size
  @hidden_size
end

#image_size ⇒ Object (readonly)

Returns the value of attribute image_size.

# File 'lib/transformers/models/vit/configuration_vit.rb', line 20

def image_size
  @image_size
end

#initializer_range ⇒ Object (readonly)

Returns the value of attribute initializer_range.

# File 'lib/transformers/models/vit/configuration_vit.rb', line 20

def initializer_range
  @initializer_range
end

#intermediate_size ⇒ Object (readonly)

Returns the value of attribute intermediate_size.

# File 'lib/transformers/models/vit/configuration_vit.rb', line 20

def intermediate_size
  @intermediate_size
end

#layer_norm_eps ⇒ Object (readonly)

Returns the value of attribute layer_norm_eps.

# File 'lib/transformers/models/vit/configuration_vit.rb', line 20

def layer_norm_eps
  @layer_norm_eps
end

#num_attention_heads ⇒ Object (readonly)

Returns the value of attribute num_attention_heads.

# File 'lib/transformers/models/vit/configuration_vit.rb', line 20

def num_attention_heads
  @num_attention_heads
end

#num_channels ⇒ Object (readonly)

Returns the value of attribute num_channels.

# File 'lib/transformers/models/vit/configuration_vit.rb', line 20

def num_channels
  @num_channels
end

#num_hidden_layers ⇒ Object (readonly)

Returns the value of attribute num_hidden_layers.

# File 'lib/transformers/models/vit/configuration_vit.rb', line 20

def num_hidden_layers
  @num_hidden_layers
end

#patch_size ⇒ Object (readonly)

Returns the value of attribute patch_size.

# File 'lib/transformers/models/vit/configuration_vit.rb', line 20

def patch_size
  @patch_size
end

#qkv_bias ⇒ Object (readonly)

Returns the value of attribute qkv_bias.

# File 'lib/transformers/models/vit/configuration_vit.rb', line 20

def qkv_bias
  @qkv_bias
end