- Tokenizers::CharBPETokenizer#decode
- Tokenizers::CharBPETokenizer#encode
- Tokenizers::PreTokenizers::Punctuation.new
- Tokenizers::Trainers::WordPieceTrainer.new
- Tokenizers::Models::BPE.new
- Tokenizers::Decoders::CTC.new
- Tokenizers::AddedToken.new
- Tokenizers::Decoders::Strip.new
- Tokenizers::Models::Unigram.new
- Tokenizers::Models::WordLevel.new
- Tokenizers::Processors::TemplateProcessing.new
- Tokenizers::Models::WordPiece.new
- Tokenizers::Processors::RobertaProcessing.new
- Tokenizers::Normalizers::BertNormalizer.new
- Tokenizers::Trainers::WordLevelTrainer.new
- Tokenizers::Normalizers::Strip.new
- Tokenizers::Decoders::Metaspace.new
- Tokenizers::Decoders::WordPiece.new
- Tokenizers::Normalizers::Prepend.new
- Tokenizers::Decoders::BPEDecoder.new
- Tokenizers::PreTokenizers::Split.new
- Tokenizers::Trainers::BpeTrainer.new
- Tokenizers::PreTokenizers::Digits.new
- Tokenizers::Processors::ByteLevel.new
- Tokenizers::PreTokenizers::Metaspace.new
- Tokenizers::Trainers::UnigramTrainer.new
- Tokenizers::PreTokenizers::ByteLevel.new
- Tokenizers::Tokenizer#save
- Tokenizers::Tokenizer#to_s
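The constructors and methods above come from the tokenizers Ruby gem (bindings to Hugging Face Tokenizers). A minimal sketch of how a few of them fit together follows; it uses only classes listed above, but the keyword arguments (unk_token:, vocab_size:, special_tokens:) and the file paths are assumptions based on the upstream Hugging Face Tokenizers API rather than anything confirmed by this index.

```ruby
require "tokenizers"

# Build a Tokenizer around a BPE model, attach a pre-tokenizer, train it, and save it.
# Tokenizers::Models::BPE, Tokenizers::PreTokenizers::Punctuation,
# Tokenizers::Trainers::BpeTrainer, and Tokenizers::Tokenizer#save all appear in the
# list above; keyword arguments and file paths below are assumptions.
tokenizer = Tokenizers::Tokenizer.new(Tokenizers::Models::BPE.new(unk_token: "[UNK]"))
tokenizer.pre_tokenizer = Tokenizers::PreTokenizers::Punctuation.new

trainer = Tokenizers::Trainers::BpeTrainer.new(
  vocab_size: 30_000,
  special_tokens: ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
)
tokenizer.train(["data/corpus.txt"], trainer) # placeholder corpus path
tokenizer.save("tokenizer.json")

# CharBPETokenizer#encode and #decode (the first two entries above) wrap a
# pretrained character-level BPE vocabulary; vocab.json and merges.txt are
# placeholder paths.
char_bpe = Tokenizers::CharBPETokenizer.new("vocab.json", "merges.txt")
encoding = char_bpe.encode("a minimal example sentence")
puts encoding.tokens.inspect
puts char_bpe.decode(encoding.ids)
```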