Class: RubyLLM::StreamAccumulator

Inherits:
Object
Defined in:
lib/ruby_llm/stream_accumulator.rb

Overview

Assembles streaming responses from LLMs into complete messages.

Instance Attribute Summary collapse

Instance Method Summary collapse

Constructor Details

#initialize ⇒ StreamAccumulator

Returns a new instance of StreamAccumulator.



8
9
10
11
12
13
14
15
16
17
18
19
20
21
# File 'lib/ruby_llm/stream_accumulator.rb', line 8

# Sets up an empty accumulator: mutable text buffers, an empty tool-call
# registry, unset token counters, and cleared <think>-tag parser state.
def initialize
  # Mutable (unfrozen) string buffers that streaming chunks append into.
  @content = +''
  @thinking_text = +''
  @pending_think_tag = +''

  # Tool calls keyed by id; tracks the most recently started call.
  @tool_calls = {}
  @latest_tool_call_id = nil

  # Token counters stay nil until the stream reports usage.
  @input_tokens = @output_tokens = nil
  @cached_tokens = @cache_creation_tokens = @thinking_tokens = nil

  # Parser state for inline <think> tags in the content stream.
  @inside_think_tag = false

  # Optional metadata captured from the first chunk.
  @thinking_signature = nil
end

Instance Attribute Details

#content ⇒ Object (readonly)

Returns the value of attribute content.



6
7
8
# File 'lib/ruby_llm/stream_accumulator.rb', line 6

# Accumulated assistant text received so far (read-only).
def content = @content

#model_id ⇒ Object (readonly)

Returns the value of attribute model_id.



6
7
8
# File 'lib/ruby_llm/stream_accumulator.rb', line 6

# Model identifier captured from the first streamed chunk (read-only).
def model_id = @model_id

#tool_calls ⇒ Object (readonly)

Returns the value of attribute tool_calls.



6
7
8
# File 'lib/ruby_llm/stream_accumulator.rb', line 6

# Tool calls assembled from the stream, keyed by call id (read-only).
def tool_calls = @tool_calls

Instance Method Details

#add(chunk) ⇒ Object



23
24
25
26
27
28
29
30
31
# File 'lib/ruby_llm/stream_accumulator.rb', line 23

# Folds one streaming chunk into the accumulator: records the model id on
# first sight, then merges the chunk's content, thinking output and token
# counts into the running state.
def add(chunk)
  # Dump the raw chunk before merging when stream debugging is enabled.
  RubyLLM.logger.debug(chunk.inspect) if RubyLLM.config.log_stream_debug

  # The first chunk that carries a model id wins.
  @model_id ||= chunk.model_id

  handle_chunk_content(chunk)
  append_thinking_from_chunk(chunk)
  count_tokens(chunk)

  # Dump the accumulator's state after merging, for the same debug flag.
  RubyLLM.logger.debug(inspect) if RubyLLM.config.log_stream_debug
end

#to_message(response) ⇒ Object



33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
# File 'lib/ruby_llm/stream_accumulator.rb', line 33

# Finalizes the accumulated stream into a complete assistant Message.
# Empty text buffers become nil so downstream code can distinguish
# "no content" from "empty string". `response` is attached verbatim as raw.
def to_message(response)
  thinking = Thinking.build(
    text: @thinking_text.empty? ? nil : @thinking_text,
    signature: @thinking_signature
  )
  tokens = Tokens.build(
    input: @input_tokens,
    output: @output_tokens,
    cached: @cached_tokens,
    cache_creation: @cache_creation_tokens,
    thinking: @thinking_tokens
  )

  Message.new(
    role: :assistant,
    content: content.empty? ? nil : content,
    thinking: thinking,
    tokens: tokens,
    model_id: model_id,
    tool_calls: tool_calls_from_stream,
    raw: response
  )
end