Class: Regexp::Lexer
- Inherits: Object
- Defined in: lib/regexp_parser/lexer.rb
Overview
A very thin wrapper around the scanner that breaks quantified literal runs, collects emitted tokens into an array, calculates their nesting depth, normalizes tokens for the parser, and checks whether they are implemented by the given syntax flavor.
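A minimal usage sketch (the type, token and text accessors follow the gem's Regexp::Token struct; the exact tokens emitted may vary between gem versions):

require 'regexp_parser'

tokens = Regexp::Lexer.lex(/a+(b|c)/)
tokens.each do |tok|
  # each element is a Regexp::Token carrying the token type, name,
  # matched text, character offsets and nesting levels
  puts format('%-12s %-14s %p', tok.type, tok.token, tok.text)
end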
Constant Summary
- OPENING_TOKENS =
%i[ capture passive lookahead nlookahead lookbehind nlookbehind atomic options options_switch named absence open ].freeze
- CLOSING_TOKENS =
%i[close].freeze
- CONDITION_TOKENS =
%i[condition condition_close].freeze
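As a rough illustration, the opening and closing token lists drive the nesting depth recorded on each emitted token (a sketch assuming the level accessor of Regexp::Token; the output shown is indicative):

Regexp::Lexer.lex('(a(b))').map { |t| [t.text, t.level] }
# => roughly [["(", 0], ["a", 1], ["(", 1], ["b", 2], [")", 1], [")", 0]]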
Class Method Summary
- .lex(input, syntax = nil, options: nil, collect_tokens: true, &block) ⇒ Object
Instance Method Summary
- #emit(token) ⇒ Object
- #lex(input, syntax = nil, options: nil, collect_tokens: true, &block) ⇒ Object
Class Method Details
.lex(input, syntax = nil, options: nil, collect_tokens: true, &block) ⇒ Object Also known as: scan
# File 'lib/regexp_parser/lexer.rb', line 16

def self.lex(input, syntax = nil, options: nil, collect_tokens: true, &block)
  new.lex(input, syntax, options: options, collect_tokens: collect_tokens, &block)
end
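A minimal sketch of the streaming form via the scan alias: with a block and collect_tokens: false, each token is yielded as it is emitted, no array is built, and the method returns nil (output format here is illustrative only):

Regexp::Lexer.scan('ab|cd', collect_tokens: false) do |token|
  # token is a Regexp::Token yielded in source order
  puts "#{token.type}:#{token.token} #{token.text.inspect}"
end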
Instance Method Details
#emit(token) ⇒ Object
# File 'lib/regexp_parser/lexer.rb', line 71

def emit(token)
  if block
    # TODO: in v3.0.0, remove `collect_tokens:` kwarg and only collect w/o block
    res = block.call(token)
    tokens << res if collect_tokens
  else
    tokens << token
  end
end
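Given the branches above, a block combined with the default collect_tokens: true collects the block's return values rather than the raw tokens, so lex can act as a map over the token stream (a sketch; the exact tokens depend on the input and syntax in use):

Regexp::Lexer.lex('ab+') { |tok| tok.token }
# => e.g. [:literal, :literal, :one_or_more]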
#lex(input, syntax = nil, options: nil, collect_tokens: true, &block) ⇒ Object
# File 'lib/regexp_parser/lexer.rb', line 20

def lex(input, syntax = nil, options: nil, collect_tokens: true, &block)
  syntax = syntax ? Regexp::Syntax.for(syntax) : Regexp::Syntax::CURRENT

  self.block = block
  self.collect_tokens = collect_tokens
  self.tokens = []
  self.prev_token = nil
  self.preprev_token = nil
  self.nesting = 0
  self.set_nesting = 0
  self.conditional_nesting = 0
  self.shift = 0

  Regexp::Scanner.scan(input, options: options, collect_tokens: false) do |type, token, text, ts, te|
    type, token = *syntax.normalize(type, token)
    syntax.check! type, token

    ascend(type, token)

    if (last = prev_token) &&
       type == :quantifier &&
       (
         (last.type == :literal && (parts = break_literal(last))) ||
         (last.token == :codepoint_list && (parts = break_codepoint_list(last)))
       )
      emit(parts[0])
      last = parts[1]
    end

    current = Regexp::Token.new(type, token, text, ts + shift, te + shift,
                                nesting, set_nesting, conditional_nesting)

    if type == :conditional && CONDITION_TOKENS.include?(token)
      current = merge_condition(current, last)
    elsif last
      last.next = current
      current.previous = last
      emit(last)
    end

    self.preprev_token = last
    self.prev_token = current

    descend(type, token)
  end

  emit(prev_token) if prev_token

  collect_tokens ? tokens : nil
end
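The break_literal branch above is what splits a quantified literal run so the quantifier binds only to its last character, as described in the overview. A sketch of the observable effect (the exact split texts are indicative):

Regexp::Lexer.lex('abc+').map { |t| t.text }
# => roughly ["ab", "c", "+"]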