Method List
- #[] Transformers::ClassInstantier
- #[] Transformers::PipelineDataset
- #[] Transformers::ModelOutput
- #[] Transformers::BatchFeature
- #[] Transformers::LazyConfigMapping
- #[] Transformers::LazyAutoMapping
- #[] Transformers::BatchEncoding
- #[] Transformers::PipelineIterator
- _as_int Transformers::HfHub
- #_attn_implementation Transformers::PretrainedConfig
- #_backward_compatibility_gradient_checkpointing Transformers::PreTrainedModel
- _cache_commit_hash_for_specific_revision Transformers::HfHub
- _check_disk_space Transformers::HfHub
- _chmod_and_move Transformers::HfHub
- #_commit_hash Transformers::PretrainedConfig
- #_convert_token_to_id Transformers::Bert::BertTokenizer
- #_convert_token_to_id Transformers::PreTrainedTokenizer
- #_convert_token_to_id Transformers::Distilbert::DistilBertTokenizer
- #_convert_token_to_id_with_added_voc Transformers::PreTrainedTokenizerFast
- #_convert_token_to_id_with_added_voc Transformers::PreTrainedTokenizer
- _create_symlink Transformers::HfHub
- #_dict Transformers::PretrainedConfig
- _download_to_tmp_and_move Transformers::HfHub
- #_encode_plus Transformers::PreTrainedTokenizer
- #_ensure_tensor_on_device Transformers::Pipeline
- #_eventual_warn_about_too_long_sequence Transformers::PreTrainedTokenizerBase
- #_forward Transformers::EmbeddingPipeline
- #_forward Transformers::FeatureExtractionPipeline
- #_forward Transformers::ImageClassificationPipeline
- #_forward Transformers::RerankingPipeline
- #_forward Transformers::ImageFeatureExtractionPipeline
- #_forward Transformers::QuestionAnsweringPipeline
- #_forward Transformers::TokenClassificationPipeline
- _from_pretrained Transformers::PreTrainedTokenizerBase
- #_gelu_python Transformers::GELUActivation
- _get_default_logging_level Transformers
- #_get_is_as_tensor_fns Transformers::BatchFeature
- _get_metadata_or_catch_error Transformers::HfHub
- _get_pointer_path Transformers::HfHub
- _hf_hub_download_to_cache_dir Transformers::HfHub
- _http_user_agent Transformers::HfHub
- #_init_weights Transformers::Mpnet::MPNetPreTrainedModel
- #_init_weights Transformers::Bert::BertPreTrainedModel
- #_init_weights Transformers::PreTrainedModel
- #_init_weights Transformers::XlmRoberta::XLMRobertaPreTrainedModel
- #_init_weights Transformers::DebertaV2::DebertaV2PreTrainedModel
- #_init_weights Transformers::Distilbert::DistilBertPreTrainedModel
- #_init_weights Transformers::Vit::ViTPreTrainedModel
- #_initialize_weights Transformers::PreTrainedModel
- _int_or_none Transformers::HfHub
- _is_numo Transformers::Utils
- _is_torch Transformers::Utils
- _is_torch_device Transformers::Utils
- _is_true Transformers::HfHub
- #_is_whitespace Transformers::SquadExample
- _normalize_etag Transformers::HfHub
- #_prune_heads Transformers::Vit::ViTModel
- #_prune_heads Transformers::Bert::BertModel
- #_prune_heads Transformers::Distilbert::DistilBertModel
- #_prune_heads Transformers::DebertaV2::DebertaV2Model
- #_prune_heads Transformers::XlmRoberta::XLMRobertaModel
- #_prune_heads Transformers::Mpnet::MPNetModel
- _raise_on_head_call_error Transformers::HfHub
- #_reorder_cache Transformers::XlmRoberta::XLMRobertaForCausalLM
- _request_wrapper Transformers::HfHub
- _rescale_for_pil_conversion Transformers::ImageTransforms
- #_sanitize_parameters Transformers::RerankingPipeline
- #_sanitize_parameters Transformers::QuestionAnsweringPipeline
- #_sanitize_parameters Transformers::ImageFeatureExtractionPipeline
- #_sanitize_parameters Transformers::FeatureExtractionPipeline
- #_sanitize_parameters Transformers::EmbeddingPipeline
- #_sanitize_parameters Transformers::ImageClassificationPipeline
- #_sanitize_parameters Transformers::TokenClassificationPipeline
- #_tie_weights Transformers::XlmRoberta::XLMRobertaLMHead
- #_tie_weights Transformers::Bert::BertLMPredictionHead
- #_tie_weights Transformers::Mpnet::MPNetLMHead
- #_tie_weights Transformers::DebertaV2::DebertaV2LMPredictionHead
- _validate_token_to_send Transformers::HfHub
- #activation Transformers::Distilbert::DistilBertConfig
- #add_cross_attention Transformers::PretrainedConfig
- #aggregate Transformers::TokenClassificationPipeline
- #aggregate_word Transformers::TokenClassificationPipeline
- #aggregate_words Transformers::TokenClassificationPipeline
- apply Transformers::DebertaV2::XDropout
- apply Transformers::DebertaV2::XSoftmax
- apply_chunking_to_forward Transformers::TorchUtils
- #architectures Transformers::PretrainedConfig
- #attention_dropout Transformers::Distilbert::DistilBertConfig
- #attention_probs_dropout_prob Transformers::Bert::BertConfig
- #attention_probs_dropout_prob Transformers::DebertaV2::DebertaV2Config
- #attention_probs_dropout_prob Transformers::Vit::ViTConfig
- #attention_probs_dropout_prob Transformers::XlmRoberta::XLMRobertaConfig
- #attention_probs_dropout_prob Transformers::Mpnet::MPNetConfig
- attribute Transformers::ModelOutput
- attributes Transformers::ModelOutput
- #backend_tokenizer Transformers::PreTrainedTokenizerFast
- #base_model Transformers::PreTrainedModel
- #basic_tokenizer Transformers::Distilbert::DistilBertTokenizer
- #basic_tokenizer Transformers::Bert::BertTokenizer
- #bos_token_id Transformers::SpecialTokensMixin
- #bos_token_id Transformers::XlmRoberta::XLMRobertaConfig
- #bos_token_id Transformers::Mpnet::MPNetConfig
- build_hf_headers Transformers::HfHub
- #build_inputs_with_special_tokens Transformers::DebertaV2::DebertaV2TokenizerFast
- #build_inputs_with_special_tokens Transformers::XlmRoberta::XLMRobertaTokenizerFast
- #build_inputs_with_special_tokens Transformers::Distilbert::DistilBertTokenizerFast
- #build_inputs_with_special_tokens Transformers::Mpnet::MPNetTokenizerFast
- build_relative_position Transformers::DebertaV2
- c2p_dynamic_expand Transformers::DebertaV2
- cached_file Transformers::Utils::Hub
- #call Transformers::RerankingPipeline
- #call Transformers::QuestionAnsweringPipeline
- #call Transformers::Pipeline
- #call Transformers::PreTrainedTokenizerBase
- #call Transformers::QuestionAnsweringArgumentHandler
- #call Transformers::BaseImageProcessor
- #can_generate Transformers::PreTrainedModel
- #can_save_slow_tokenizer Transformers::DebertaV2::DebertaV2TokenizerFast
- #can_save_slow_tokenizer Transformers::XlmRoberta::XLMRobertaTokenizerFast
- #check_model_type Transformers::Pipeline
- #check_task Transformers::PipelineRegistry
- #chunk_size_feed_forward Transformers::PretrainedConfig
- #class_attribute Transformers::ClassAttribute
- #classifier_dropout Transformers::Bert::BertConfig
- #classifier_dropout Transformers::XlmRoberta::XLMRobertaConfig
- #clear_context Transformers::DebertaV2::StableDropout
- #cls_token_id Transformers::SpecialTokensMixin
- #commit_hash Transformers::HfHub::HfFileMetadata
- #compute_position_bias Transformers::Mpnet::MPNetEncoder
- #config Transformers::PreTrainedModel
- #context_text Transformers::SquadExample
- #convert_ids_to_tokens Transformers::PreTrainedTokenizerFast
- convert_slow_tokenizer Transformers::ConvertSlowTokenizer
- #convert_to_tensors Transformers::BatchEncoding
- #convert_to_tensors Transformers::BatchFeature
- #convert_tokens_to_ids Transformers::PreTrainedTokenizerFast
- #convert_tokens_to_ids Transformers::PreTrainedTokenizer
- #convert_tokens_to_string Transformers::PreTrainedTokenizerFast
- #converted Transformers::ConvertSlowTokenizer::BertConverter
- #converted Transformers::ConvertSlowTokenizer::Converter
- #create_position_ids_from_input_ids Transformers::Mpnet::MPNetEmbeddings
- #create_position_ids_from_input_ids Transformers::XlmRoberta::XLMRobertaEmbeddings
- #create_position_ids_from_inputs_embeds Transformers::Mpnet::MPNetEmbeddings
- #create_position_ids_from_inputs_embeds Transformers::XlmRoberta::XLMRobertaEmbeddings
- create_sample Transformers::QuestionAnsweringPipeline
- #create_token_type_ids_from_sequences Transformers::Mpnet::MPNetTokenizerFast
- #create_token_type_ids_from_sequences Transformers::Distilbert::DistilBertTokenizerFast
- #create_token_type_ids_from_sequences Transformers::DebertaV2::DebertaV2TokenizerFast
- #create_token_type_ids_from_sequences Transformers::XlmRoberta::XLMRobertaTokenizerFast
- #decode_spans Transformers::QuestionAnsweringPipeline
- deepcopy Transformers::Copy
- #delete Transformers::BatchEncoding
- #dequantize Transformers::PreTrainedModel
- #device Transformers::ModuleUtilsMixin
- #dim Transformers::Distilbert::DistilBertConfig
- #disentangled_attention_bias Transformers::DebertaV2::DisentangledSelfAttention
- display_progress Transformers::HfHub
- #do_lower_case Transformers::Distilbert::DistilBertTokenizer::BasicTokenizer
- #do_lower_case Transformers::Bert::BertTokenizer::BasicTokenizer
- #do_split_on_punc Transformers::Bert::BertTokenizer::BasicTokenizer
- #dropout Transformers::Distilbert::DistilBertConfig
- #dummy_inputs Transformers::PreTrainedModel
- #each Transformers::PipelineIterator
- #encode Transformers::SentenceTransformer
- #encoder_stride Transformers::Vit::ViTConfig
- #encodings Transformers::BatchEncoding
- #eos_token_id Transformers::XlmRoberta::XLMRobertaConfig
- #eos_token_id Transformers::SpecialTokensMixin
- #eos_token_id Transformers::Mpnet::MPNetConfig
- #etag Transformers::HfHub::HfFileMetadata
- extract_commit_hash Transformers::Utils::Hub
- fast_init Transformers
- #feed_forward_chunk Transformers::Bert::BertLayer
- #feed_forward_chunk Transformers::XlmRoberta::XLMRobertaLayer
- #ff_chunk Transformers::Distilbert::FFN
- #forward Transformers::Bert::BertOnlyMLMHead
- #forward Transformers::Mpnet::MPNetOutput
- #forward Transformers::DebertaV2::ConvLayer
- #forward Transformers::XlmRoberta::XLMRobertaForCausalLM
- #forward Transformers::Mpnet::MPNetModel
- #forward Transformers::DebertaV2::DebertaV2Layer
- #forward Transformers::Bert::BertEncoder
- #forward Transformers::Bert::BertForTokenClassification
- #forward Transformers::DebertaV2::DebertaV2PredictionHeadTransform
- #forward Transformers::XlmRoberta::XLMRobertaOutput
- #forward Transformers::Mpnet::MPNetForSequenceClassification
- #forward Transformers::DebertaV2::DebertaV2LMPredictionHead
- #forward Transformers::XlmRoberta::XLMRobertaLayer
- #forward Transformers::XlmRoberta::XLMRobertaForMaskedLM
- #forward Transformers::Distilbert::TransformerBlock
- #forward Transformers::Mpnet::MPNetIntermediate
- #forward Transformers::Mpnet::MPNetForTokenClassification
- #forward Transformers::XlmRoberta::XLMRobertaForSequenceClassification
- #forward Transformers::DebertaV2::DebertaV2Intermediate
- #forward Transformers::Distilbert::DistilBertForSequenceClassification
- #forward Transformers::XlmRoberta::XLMRobertaSelfAttention
- #forward Transformers::XlmRoberta::XLMRobertaIntermediate
- #forward Transformers::XlmRoberta::XLMRobertaAttention
- #forward Transformers::DebertaV2::ContextPooler
- #forward Transformers::DebertaV2::DebertaV2Output
- #forward Transformers::XlmRoberta::XLMRobertaSelfOutput
- #forward Transformers::DebertaV2::DebertaV2SelfOutput
- #forward Transformers::XlmRoberta::XLMRobertaModel
- #forward Transformers::Distilbert::DistilBertModel
- #forward Transformers::Mpnet::MPNetPooler
- #forward Transformers::Mpnet::MPNetAttention
- #forward Transformers::Bert::BertSelfAttention
- #forward Transformers::Mpnet::MPNetSelfAttention
- #forward Transformers::XlmRoberta::XLMRobertaClassificationHead
- #forward Transformers::Bert::BertOutput
- #forward Transformers::Bert::BertSelfOutput
- #forward Transformers::DebertaV2::DebertaV2Model
- #forward Transformers::Bert::BertLayer
- #forward Transformers::Bert::BertPredictionHeadTransform
- #forward Transformers::Distilbert::Embeddings
- #forward Transformers::DebertaV2::DebertaV2ForMultipleChoice
- #forward Transformers::Mpnet::MPNetForQuestionAnswering
- #forward Transformers::Distilbert::DistilBertForQuestionAnswering
- #forward Transformers::DebertaV2::DebertaV2ForQuestionAnswering
- #forward Transformers::DebertaV2::DebertaV2OnlyMLMHead
- #forward Transformers::DebertaV2::StableDropout
- #forward Transformers::DebertaV2::DebertaV2Attention
- #forward Transformers::Bert::BertPooler
- #forward Transformers::DebertaV2::DebertaV2Encoder
- #forward Transformers::Bert::BertIntermediate
- #forward Transformers::Bert::BertModel
- #forward Transformers::Bert::BertLMPredictionHead
- #forward Transformers::GELUActivation
- #forward Transformers::DebertaV2::DebertaV2Embeddings
- #forward Transformers::Mpnet::MPNetLMHead
- #forward Transformers::XlmRoberta::XLMRobertaForMultipleChoice
- #forward Transformers::Bert::BertForMaskedLM
- #forward Transformers::Distilbert::DistilBertForMaskedLM
- #forward Transformers::Mpnet::MPNetClassificationHead
- #forward Transformers::Distilbert::Transformer
- #forward Transformers::Vit::ViTForImageClassification
- #forward Transformers::Vit::ViTPooler
- #forward Transformers::Vit::ViTModel
- #forward Transformers::Vit::ViTEncoder
- #forward Transformers::Vit::ViTLayer
- #forward Transformers::XlmRoberta::XLMRobertaPooler
- #forward Transformers::Mpnet::MPNetLayer
- #forward Transformers::Mpnet::MPNetEncoder
- #forward Transformers::Distilbert::FFN
- #forward Transformers::Vit::ViTAttention
- #forward Transformers::Vit::ViTIntermediate
- #forward Transformers::Vit::ViTOutput
- #forward Transformers::XlmRoberta::XLMRobertaForTokenClassification
- #forward Transformers::Vit::ViTSelfAttention
- #forward Transformers::Vit::ViTSelfOutput
- #forward Transformers::Vit::ViTPatchEmbeddings
- #forward Transformers::Vit::ViTEmbeddings
- #forward Transformers::XlmRoberta::XLMRobertaSdpaSelfAttention
- #forward Transformers::DebertaV2::DebertaV2ForSequenceClassification
- #forward Transformers::Mpnet::MPNetForMultipleChoice
- #forward Transformers::Bert::BertForSequenceClassification
- #forward Transformers::Mpnet::MPNetEmbeddings
- #forward Transformers::DebertaV2::DisentangledSelfAttention
- #forward Transformers::DebertaV2::DebertaV2ForMaskedLM
- #forward Transformers::XlmRoberta::XLMRobertaForQuestionAnswering
- #forward Transformers::Bert::BertEmbeddings
- #forward Transformers::Mpnet::MPNetForMaskedLM
- #forward Transformers::Distilbert::MultiHeadSelfAttention
- #forward Transformers::XlmRoberta::XLMRobertaLMHead
- #forward Transformers::XlmRoberta::XLMRobertaEncoder
- #forward Transformers::XlmRoberta::XLMRobertaEmbeddings
- #forward Transformers::Bert::BertAttention
- #forward Transformers::DebertaV2::DebertaV2ForTokenClassification
- #framework Transformers::PreTrainedModel
- from_dict Transformers::PretrainedConfig
- from_dict Transformers::ImageProcessingMixin
- from_pretrained Transformers::PretrainedConfig
- from_pretrained Transformers::ImageProcessingMixin
- from_pretrained Transformers::AutoConfig
- from_pretrained Transformers::PreTrainedModel
- from_pretrained Transformers::AutoImageProcessor
- from_pretrained Transformers::AutoTokenizer
- from_pretrained Transformers::PreTrainedTokenizerBase
- from_pretrained Transformers::BaseAutoModelClass
- #gather_pre_entities Transformers::TokenClassificationPipeline
- get_activation Transformers::Activations
- #get_attention_mask Transformers::DebertaV2::DebertaV2Encoder
- get_channel_dimension_axis Transformers::ImageUtils
- get_config_dict Transformers::PretrainedConfig
- #get_context Transformers::DebertaV2::StableDropout
- get_default_model_and_revision Transformers::Pipelines
- #get_extended_attention_mask Transformers::ModuleUtilsMixin
- #get_head_mask Transformers::ModuleUtilsMixin
- get_hf_file_metadata Transformers::HfHub
- get_image_processor_dict Transformers::ImageProcessingMixin
- #get_indices Transformers::QuestionAnsweringPipeline
- #get_input_embeddings Transformers::Distilbert::DistilBertModel
- #get_input_embeddings Transformers::DebertaV2::DebertaV2Model
- #get_input_embeddings Transformers::XlmRoberta::XLMRobertaModel
- #get_input_embeddings Transformers::PreTrainedModel
- #get_input_embeddings Transformers::Mpnet::MPNetModel
- #get_input_embeddings Transformers::DebertaV2::DebertaV2ForMultipleChoice
- #get_input_embeddings Transformers::DebertaV2::DebertaV2ForSequenceClassification
- #get_iterator Transformers::Pipeline
- #get_mask Transformers::DebertaV2
- #get_output_embeddings Transformers::XlmRoberta::XLMRobertaForCausalLM
- #get_output_embeddings Transformers::XlmRoberta::XLMRobertaForMaskedLM
- #get_output_embeddings Transformers::Mpnet::MPNetForMaskedLM
- #get_output_embeddings Transformers::PreTrainedModel
- #get_output_embeddings Transformers::DebertaV2::DebertaV2ForMaskedLM
- #get_position_embeddings Transformers::Distilbert::DistilBertModel
- #get_rel_embedding Transformers::DebertaV2::DebertaV2Encoder
- #get_rel_pos Transformers::DebertaV2::DebertaV2Encoder
- get_size_dict Transformers::ImageProcessingUtils
- #get_special_tokens_mask Transformers::DebertaV2::DebertaV2TokenizerFast
- #get_supported_tasks Transformers::PipelineRegistry
- #get_tag Transformers::TokenClassificationPipeline
- get_token_to_send Transformers::HfHub
- #get_vocab Transformers::PreTrainedTokenizerFast
- #getattr Transformers::PretrainedConfig
- #group_entities Transformers::TokenClassificationPipeline
- #group_sub_entities Transformers::TokenClassificationPipeline
- has_file Transformers::Utils::Hub
- #hasattr Transformers::PretrainedConfig
- hf_hub_download Transformers::HfHub
- hf_hub_url Transformers::HfHub
- hf_raise_for_status Transformers::HfHub
- #hidden_act Transformers::XlmRoberta::XLMRobertaConfig
- #hidden_act Transformers::Vit::ViTConfig
- #hidden_act Transformers::Bert::BertConfig
- #hidden_act Transformers::DebertaV2::DebertaV2Config
- #hidden_act Transformers::Mpnet::MPNetConfig
- #hidden_dim Transformers::Distilbert::DistilBertConfig
- #hidden_dropout_prob Transformers::Bert::BertConfig
- #hidden_dropout_prob Transformers::Mpnet::MPNetConfig
- #hidden_dropout_prob Transformers::Vit::ViTConfig
- #hidden_dropout_prob Transformers::DebertaV2::DebertaV2Config
- #hidden_dropout_prob Transformers::XlmRoberta::XLMRobertaConfig
- #hidden_size Transformers::Mpnet::MPNetConfig
- #hidden_size Transformers::Bert::BertConfig
- #hidden_size Transformers::Vit::ViTConfig
- #hidden_size Transformers::XlmRoberta::XLMRobertaConfig
- #hidden_size Transformers::DebertaV2::DebertaV2Config
- http_get Transformers::HfHub
- http_user_agent Transformers::Utils::Hub
- #id2label Transformers::PretrainedConfig
- #image_size Transformers::Vit::ViTConfig
- #include? Transformers::LazyAutoMapping
- #include? Transformers::BatchEncoding
- infer_channel_dimension_format Transformers::ImageUtils
- infer_framework Transformers::Utils
- infer_framework_load_model Transformers::Pipelines
- #init_context Transformers::DebertaV2::StableDropout
- #init_kwargs Transformers::PreTrainedTokenizerBase
- #init_weights Transformers::PreTrainedModel
- #initialize Transformers::Bert::BertSelfOutput
- #initialize Transformers::Bert::BertSelfAttention
- #initialize Transformers::Bert::BertAttention
- #initialize Transformers::Bert::BertEmbeddings
- #initialize Transformers::LazyAutoMapping
- #initialize Transformers::DebertaV2::DebertaV2ForMaskedLM
- #initialize Transformers::Bert::BertIntermediate
- #initialize Transformers::BatchFeature
- #initialize Transformers::PreTrainedTokenizerFast
- #initialize Transformers::PreTrainedTokenizerBase
- #initialize Transformers::DebertaV2::DebertaV2ForMultipleChoice
- #initialize Transformers::DebertaV2::DebertaV2Model
- #initialize Transformers::SpecialTokensMixin
- #initialize Transformers::BatchEncoding
- #initialize Transformers::Vit::ViTForImageClassification
- #initialize Transformers::Vit::ViTModel
- #initialize Transformers::Vit::ViTLayer
- #initialize Transformers::Vit::ViTEncoder
- #initialize Transformers::Vit::ViTPooler
- #initialize Transformers::Distilbert::DistilBertForQuestionAnswering
- #initialize Transformers::Vit::ViTIntermediate
- #initialize Transformers::Vit::ViTAttention
- #initialize Transformers::Vit::ViTSelfOutput
- #initialize Transformers::Vit::ViTPatchEmbeddings
- #initialize Transformers::Vit::ViTSelfAttention
- #initialize Transformers::Vit::ViTOutput
- #initialize Transformers::XlmRoberta::XLMRobertaClassificationHead
- #initialize Transformers::Vit::ViTEmbeddings
- #initialize Transformers::Distilbert::DistilBertTokenizer::WordpieceTokenizer
- #initialize Transformers::BaseImageProcessor
- #initialize Transformers::XlmRoberta::XLMRobertaForSequenceClassification
- #initialize Transformers::SquadFeatures
- #initialize Transformers::SentenceTransformer
- #initialize Transformers::SquadExample
- #initialize Transformers::ConvertSlowTokenizer::Converter
- #initialize Transformers::Distilbert::DistilBertForSequenceClassification
- #initialize Transformers::DebertaV2::DebertaV2Layer
- #initialize Transformers::HfHub::HfFileMetadata
- #initialize Transformers::DebertaV2::DebertaV2OnlyMLMHead
- #initialize Transformers::Distilbert::DistilBertTokenizer::BasicTokenizer
- #initialize Transformers::XlmRoberta::XLMRobertaForMultipleChoice
- #initialize Transformers::DebertaV2::DebertaV2Config
- #initialize Transformers::XlmRoberta::XLMRobertaForTokenClassification
- #initialize Transformers::HfHub::HfHubHTTPError
- #initialize Transformers::XlmRoberta::XLMRobertaEncoder
- #initialize Transformers::XlmRoberta::XLMRobertaIntermediate
- #initialize Transformers::PretrainedConfig
- #initialize Transformers::DebertaV2::DebertaV2TokenizerFast
- #initialize Transformers::XlmRoberta::XLMRobertaSelfAttention
- #initialize Transformers::XlmRoberta::XLMRobertaLayer
- #initialize Transformers::Distilbert::DistilBertConfig
- #initialize Transformers::DebertaV2::DebertaV2Embeddings
- #initialize Transformers::DebertaV2::DisentangledSelfAttention
- #initialize Transformers::XlmRoberta::XLMRobertaEmbeddings
- #initialize Transformers::DebertaV2::DebertaV2ForTokenClassification
- #initialize Transformers::DebertaV2::DebertaV2PredictionHeadTransform
- #initialize Transformers::PreTrainedTokenizer
- #initialize Transformers::Distilbert::DistilBertTokenizer
- #initialize Transformers::Distilbert::MultiHeadSelfAttention
- #initialize Transformers::PipelineDataset
- #initialize Transformers::QuestionAnsweringPipeline
- #initialize Transformers::Vit::ViTConfig
- #initialize Transformers::XlmRoberta::XLMRobertaSdpaSelfAttention
- #initialize Transformers::XlmRoberta::XLMRobertaOutput
- #initialize Transformers::Distilbert::DistilBertModel
- #initialize Transformers::XlmRoberta::XLMRobertaModel
- #initialize Transformers::Mpnet::MPNetClassificationHead
- #initialize Transformers::Mpnet::MPNetForQuestionAnswering
- #initialize Transformers::Mpnet::MPNetForSequenceClassification
- #initialize Transformers::Mpnet::MPNetForMultipleChoice
- #initialize Transformers::Mpnet::MPNetLMHead
- #initialize Transformers::Mpnet::MPNetForTokenClassification
- #initialize Transformers::Mpnet::MPNetForMaskedLM
- #initialize Transformers::Mpnet::MPNetPooler
- #initialize Transformers::Mpnet::MPNetModel
- #initialize Transformers::Mpnet::MPNetLayer
- #initialize Transformers::Mpnet::MPNetEncoder
- #initialize Transformers::Mpnet::MPNetIntermediate
- #initialize Transformers::Mpnet::MPNetAttention
- #initialize Transformers::Mpnet::MPNetSelfAttention
- #initialize Transformers::Mpnet::MPNetEmbeddings
- #initialize Transformers::Mpnet::MPNetOutput
- #initialize Transformers::Bert::BertForMaskedLM
- #initialize Transformers::Bert::BertForTokenClassification
- #initialize Transformers::Bert::BertModel
- #initialize Transformers::Bert::BertPredictionHeadTransform
- #initialize Transformers::Bert::BertLMPredictionHead
- #initialize Transformers::Bert::BertEncoder
- #initialize Transformers::Bert::BertPooler
- #initialize Transformers::Bert::BertOutput
- #initialize Transformers::Bert::BertLayer
- #initialize Transformers::Bert::BertOnlyMLMHead
- #initialize Transformers::Distilbert::DistilBertTokenizerFast
- #initialize Transformers::XlmRoberta::XLMRobertaForMaskedLM
- #initialize Transformers::ImageClassificationPipeline
- #initialize Transformers::Bert::BertConfig
- #initialize Transformers::TokenClassificationPipeline
- #initialize Transformers::XlmRoberta::XLMRobertaSelfOutput
- #initialize Transformers::Distilbert::DistilBertForMaskedLM
- #initialize Transformers::Distilbert::TransformerBlock
- #initialize Transformers::XlmRoberta::XLMRobertaTokenizerFast
- #initialize Transformers::Bert::BertTokenizer
- #initialize Transformers::TextClassificationPipeline
- #initialize Transformers::Bert::BertTokenizer::BasicTokenizer
- #initialize Transformers::Bert::BertTokenizer::WordpieceTokenizer
- #initialize Transformers::LazyConfigMapping
- #initialize Transformers::Bert::BertTokenizerFast
- #initialize Transformers::Vit::ViTImageProcessor
- #initialize Transformers::XlmRoberta::XLMRobertaConfig
- #initialize Transformers::Mpnet::MPNetConfig
- #initialize Transformers::Distilbert::Transformer
- #initialize Transformers::DebertaV2::DebertaV2LMPredictionHead
- #initialize Transformers::DebertaV2::DebertaV2Output
- #initialize Transformers::Bert::BertForSequenceClassification
- #initialize Transformers::XlmRoberta::XLMRobertaPooler
- #initialize Transformers::PipelineRegistry
- #initialize Transformers::XlmRoberta::XLMRobertaForCausalLM
- #initialize Transformers::Pipeline
- #initialize Transformers::PipelineIterator
- #initialize Transformers::DebertaV2::DebertaV2ForSequenceClassification
- #initialize Transformers::Distilbert::Embeddings
- #initialize Transformers::PreTrainedModel
- #initialize Transformers::XlmRoberta::XLMRobertaLMHead
- #initialize Transformers::ModelOutput
- #initialize Transformers::ExplicitEnum
- #initialize Transformers::XlmRoberta::XLMRobertaAttention
- #initialize Transformers::DebertaV2::DebertaV2ForQuestionAnswering
- #initialize Transformers::GELUActivation
- #initialize Transformers::ClassInstantier
- #initialize Transformers::DebertaV2::ConvLayer
- #initialize Transformers::DebertaV2::DebertaV2Encoder
- #initialize Transformers::DebertaV2::DropoutContext
- #initialize Transformers::XlmRoberta::XLMRobertaForQuestionAnswering
- #initialize Transformers::DebertaV2::ContextPooler
- #initialize Transformers::DebertaV2::StableDropout
- #initialize Transformers::Mpnet::MPNetTokenizerFast
- #initialize Transformers::DebertaV2::DebertaV2SelfOutput
- #initialize Transformers::DebertaV2::DebertaV2Attention
- #initialize Transformers::DebertaV2::DebertaV2Intermediate
- #initialize Transformers::Distilbert::FFN
- #initializer_range Transformers::Mpnet::MPNetConfig
- #initializer_range Transformers::Bert::BertConfig
- #initializer_range Transformers::XlmRoberta::XLMRobertaConfig
- #initializer_range Transformers::Vit::ViTConfig
- #initializer_range Transformers::Distilbert::DistilBertConfig
- #initializer_range Transformers::DebertaV2::DebertaV2Config
- #intermediate_size Transformers::Mpnet::MPNetConfig
- #intermediate_size Transformers::Bert::BertConfig
- #intermediate_size Transformers::Vit::ViTConfig
- #intermediate_size Transformers::DebertaV2::DebertaV2Config
- #intermediate_size Transformers::XlmRoberta::XLMRobertaConfig
- #is_decoder Transformers::PretrainedConfig
- #is_encoder_decoder Transformers::PretrainedConfig
- #is_fast Transformers::PreTrainedTokenizerFast
- #is_fast Transformers::PreTrainedTokenizer
- is_numo_array Transformers::Utils
- is_offline_mode Transformers::Utils::Hub
- is_remote_url Transformers::Utils::Hub
- is_scaled_image Transformers::ImageUtils
- is_torch_device Transformers::Utils
- is_torch_tensor Transformers::Utils
- is_valid_image Transformers::ImageUtils
- is_vips_image Transformers::ImageUtils
- is_vision_available Transformers::Utils
- #isin Transformers::QuestionAnsweringPipeline
- #items Transformers::BatchFeature
- #items Transformers::BatchEncoding
- #keys Transformers::BatchFeature
- #layer_norm_eps Transformers::Mpnet::MPNetConfig
- #layer_norm_eps Transformers::Bert::BertConfig
- #layer_norm_eps Transformers::Vit::ViTConfig
- #layer_norm_eps Transformers::DebertaV2::DebertaV2Config
- #layer_norm_eps Transformers::XlmRoberta::XLMRobertaConfig
- load_image Transformers::ImageUtils
- #location Transformers::HfHub::HfFileMetadata
- logger Transformers
- make_list_of_images Transformers::ImageUtils
- make_log_bucket_position Transformers::DebertaV2
- #mask_token Transformers::Mpnet::MPNetTokenizerFast
- #mask_token= Transformers::Mpnet::MPNetTokenizerFast
- #max_position_embeddings Transformers::Mpnet::MPNetConfig
- #max_position_embeddings Transformers::Bert::BertConfig
- #max_position_embeddings Transformers::Distilbert::DistilBertConfig
- #max_position_embeddings Transformers::DebertaV2::DebertaV2Config
- #max_position_embeddings Transformers::XlmRoberta::XLMRobertaConfig
- #max_relative_positions Transformers::DebertaV2::DebertaV2Config
- #message Transformers::Todo
- #method_missing Transformers::PretrainedConfig
- #model_max_length Transformers::PreTrainedTokenizerBase
- #model_type_to_module_name Transformers::LazyConfigMapping
- #n_heads Transformers::Distilbert::DistilBertConfig
- #n_layers Transformers::Distilbert::DistilBertConfig
- #name_or_path Transformers::PretrainedConfig
- #name_or_path= Transformers::PretrainedConfig
- netloc Transformers::HfHub
- #never_split Transformers::Bert::BertTokenizer::BasicTokenizer
- no_init_weights Transformers::ModelingUtils
- #normalize Transformers::QuestionAnsweringArgumentHandler
- #normalize Transformers::BaseImageProcessor
- normalize Transformers::ImageTransforms
- #num_attention_heads Transformers::Mpnet::MPNetConfig
- #num_attention_heads Transformers::Bert::BertConfig
- #num_attention_heads Transformers::Vit::ViTConfig
- #num_attention_heads Transformers::XlmRoberta::XLMRobertaConfig
- #num_attention_heads Transformers::DebertaV2::DebertaV2Config
- #num_channels Transformers::Vit::ViTConfig
- #num_hidden_layers Transformers::Mpnet::MPNetConfig
- #num_hidden_layers Transformers::DebertaV2::DebertaV2Config
- #num_hidden_layers Transformers::Bert::BertConfig
- #num_hidden_layers Transformers::Vit::ViTConfig
- #num_hidden_layers Transformers::XlmRoberta::XLMRobertaConfig
- #num_labels Transformers::PretrainedConfig
- #num_labels= Transformers::PretrainedConfig
- #num_patches Transformers::Vit::ViTPatchEmbeddings
- #output_attentions Transformers::PretrainedConfig
- #output_dim Transformers::DebertaV2::ContextPooler
- #output_hidden_states Transformers::PretrainedConfig
- p2c_dynamic_expand Transformers::DebertaV2
- #pad_token_id Transformers::Mpnet::MPNetConfig
- #pad_token_id Transformers::SpecialTokensMixin
- #pad_token_id Transformers::PretrainedConfig
- #pad_token_id Transformers::Distilbert::DistilBertConfig
- #pad_token_id Transformers::DebertaV2::DebertaV2Config
- #pad_token_id Transformers::XlmRoberta::XLMRobertaConfig
- parents Transformers::HfHub
- #patch_size Transformers::Vit::ViTConfig
- pipeline Transformers
- #pooler_dropout Transformers::DebertaV2::DebertaV2Config
- #pooler_hidden_act Transformers::DebertaV2::DebertaV2Config
- #pooler_hidden_size Transformers::DebertaV2::DebertaV2Config
- #pos_att_type Transformers::DebertaV2::DebertaV2Config
- pos_dynamic_expand Transformers::DebertaV2
- #position_biased_input Transformers::DebertaV2::DebertaV2Config
- #position_embedding_type Transformers::Bert::BertConfig
- #position_embedding_type Transformers::XlmRoberta::XLMRobertaConfig
- #post_init Transformers::PreTrainedModel
- #postprocess Transformers::TokenClassificationPipeline
- #postprocess Transformers::ImageClassificationPipeline
- #postprocess Transformers::QuestionAnsweringPipeline
- #postprocess Transformers::FeatureExtractionPipeline
- #postprocess Transformers::RerankingPipeline
- #postprocess Transformers::EmbeddingPipeline
- #postprocess Transformers::ImageFeatureExtractionPipeline
- #prepare_inputs_for_generation Transformers::XlmRoberta::XLMRobertaForCausalLM
- #preprocess Transformers::Vit::ViTImageProcessor
- #preprocess Transformers::ImageFeatureExtractionPipeline
- #preprocess Transformers::TokenClassificationPipeline
- #preprocess Transformers::ImageClassificationPipeline
- #preprocess Transformers::QuestionAnsweringPipeline
- #preprocess Transformers::FeatureExtractionPipeline
- #preprocess Transformers::BaseImageProcessor
- #preprocess Transformers::RerankingPipeline
- #preprocess Transformers::EmbeddingPipeline
- #problem_type Transformers::PretrainedConfig
- #prune_heads Transformers::Mpnet::MPNetAttention
- #prune_heads Transformers::XlmRoberta::XLMRobertaAttention
- #prune_heads Transformers::Vit::ViTAttention
- #prune_heads Transformers::PreTrainedModel
- #prune_heads Transformers::Distilbert::MultiHeadSelfAttention
- #pruned_heads Transformers::PretrainedConfig
- #qa_dropout Transformers::Distilbert::DistilBertConfig
- #qkv_bias Transformers::Vit::ViTConfig
- #question_text Transformers::SquadExample
- #relative_attention Transformers::DebertaV2::DebertaV2Config
- #relative_attention_num_buckets Transformers::Mpnet::MPNetConfig
- relative_position_bucket Transformers::Mpnet::MPNetEncoder
- repo_folder_name Transformers::HfHub
- requires_backends Transformers::Utils
- #rescale Transformers::BaseImageProcessor
- rescale Transformers::ImageTransforms
- #resize Transformers::Vit::ViTImageProcessor
- resize Transformers::ImageTransforms
- resolve_trust_remote_code Transformers::DynamicModuleUtils
- #respond_to_missing? Transformers::PretrainedConfig
- #run_single Transformers::ChunkPipeline
- #save_vocabulary Transformers::Distilbert::DistilBertTokenizerFast
- #save_vocabulary Transformers::Mpnet::MPNetTokenizerFast
- #select_starts_ends Transformers::QuestionAnsweringPipeline
- #sep_token_id Transformers::SpecialTokensMixin
- #seq_classif_dropout Transformers::Distilbert::DistilBertConfig
- #sequence_ids Transformers::BatchEncoding
- #set_input_embeddings Transformers::Mpnet::MPNetModel
- #set_input_embeddings Transformers::PreTrainedModel
- #set_input_embeddings Transformers::DebertaV2::DebertaV2ForMultipleChoice
- #set_input_embeddings Transformers::DebertaV2::DebertaV2Model
- #set_input_embeddings Transformers::DebertaV2::DebertaV2ForSequenceClassification
- #set_input_embeddings Transformers::XlmRoberta::XLMRobertaModel
- #set_output_embeddings Transformers::Mpnet::MPNetForMaskedLM
- #set_output_embeddings Transformers::DebertaV2::DebertaV2ForMaskedLM
- #set_output_embeddings Transformers::XlmRoberta::XLMRobertaForMaskedLM
- #set_output_embeddings Transformers::XlmRoberta::XLMRobertaForCausalLM
- #sinusoidal_pos_embds Transformers::Distilbert::DistilBertConfig
- #size Transformers::HfHub::HfFileMetadata
- #size Transformers::PipelineIterator
- #size Transformers::PipelineDataset
- #special_tokens_map Transformers::SpecialTokensMixin
- #strip_accents Transformers::Bert::BertTokenizer::BasicTokenizer
- #strip_accents Transformers::Distilbert::DistilBertTokenizer::BasicTokenizer
- #tie_encoder_decoder Transformers::PretrainedConfig
- #tie_weights Transformers::PreTrainedModel
- #tie_word_embeddings Transformers::PretrainedConfig
- #to Transformers::BatchFeature
- to_channel_dimension_format Transformers::ImageTransforms
- #to_dict Transformers::PretrainedConfig
- #to_diff_dict Transformers::PretrainedConfig
- #to_h Transformers::BatchFeature
- #to_h Transformers::BatchEncoding
- #to_json_string Transformers::PretrainedConfig
- to_numo_array Transformers::ImageUtils
- to_pil_image Transformers::ImageTransforms
- #to_s Transformers::PretrainedConfig
- #to_s Transformers::ExplicitEnum
- #to_tuple Transformers::ModelOutput
- #tokenize Transformers::PreTrainedTokenizer
- #tokenize_chinese_chars Transformers::Bert::BertTokenizer::BasicTokenizer
- #tokenize_chinese_chars Transformers::Distilbert::DistilBertTokenizer::BasicTokenizer
- #tokenizer_class Transformers::PretrainedConfig
- #torch_dtype Transformers::Pipeline
- #transpose_for_scores Transformers::Mpnet::MPNetSelfAttention
- #transpose_for_scores Transformers::Bert::BertSelfAttention
- #transpose_for_scores Transformers::Vit::ViTSelfAttention
- #transpose_for_scores Transformers::XlmRoberta::XLMRobertaSelfAttention
- #transpose_for_scores Transformers::DebertaV2::DisentangledSelfAttention
- try_to_load_from_cache Transformers::HfHub
- #type_vocab_size Transformers::Bert::BertConfig
- #type_vocab_size Transformers::XlmRoberta::XLMRobertaConfig
- #type_vocab_size Transformers::DebertaV2::DebertaV2Config
- #unk_token_id Transformers::SpecialTokensMixin
- #unravel_index Transformers::QuestionAnsweringPipeline
- #use_cache Transformers::Bert::BertConfig
- #use_cache Transformers::XlmRoberta::XLMRobertaConfig
- #use_return_dict Transformers::PretrainedConfig
- valid_images Transformers::ImageUtils
- validate_kwargs Transformers::ImageUtils
- validate_preprocess_arguments Transformers::ImageUtils
- #values Transformers::BatchFeature
- #vocab Transformers::Bert::BertTokenizer
- #vocab Transformers::PreTrainedTokenizerFast
- #vocab Transformers::Distilbert::DistilBertTokenizer
- #vocab_size Transformers::Mpnet::MPNetConfig
- #vocab_size Transformers::Bert::BertConfig
- #vocab_size Transformers::PreTrainedTokenizer
- #vocab_size Transformers::DebertaV2::DebertaV2Config
- #vocab_size Transformers::XlmRoberta::XLMRobertaConfig
- #vocab_size Transformers::Distilbert::DistilBertConfig
- #warn_if_padding_and_no_attention_mask Transformers::PreTrainedModel
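For orientation, a minimal sketch of how a couple of the entries above fit together: the module-level `pipeline Transformers` builds a task pipeline, and pipelines are invoked via `#call Transformers::Pipeline`. The require name, task string, and model id below are illustrative assumptions, not part of this index:

```ruby
# Assumes the transformers-rb gem is installed; names below are illustrative.
require "transformers-rb"

# Build an embedding pipeline with the module-level helper
# (listed above as `pipeline Transformers`).
embedder = Transformers.pipeline("embedding", "sentence-transformers/all-MiniLM-L6-v2")

# Pipelines are invoked with #call (listed above as `#call Transformers::Pipeline`);
# `.()` is Ruby shorthand for `.call(...)`.
embeddings = embedder.(["Ruby is fun", "Transformers in Ruby"])
```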