Class: Langchain::Assistant

Inherits:
Object
Defined in:
lib/langchain/assistant.rb,
lib/langchain/assistant/llm/adapter.rb,
lib/langchain/assistant/messages/base.rb,
lib/langchain/assistant/llm/adapters/base.rb,
lib/langchain/assistant/llm/adapters/ollama.rb,
lib/langchain/assistant/llm/adapters/openai.rb,
lib/langchain/assistant/llm/adapters/anthropic.rb,
lib/langchain/assistant/llm/adapters/mistral_ai.rb,
lib/langchain/assistant/messages/ollama_message.rb,
lib/langchain/assistant/messages/openai_message.rb,
lib/langchain/assistant/llm/adapters/google_gemini.rb,
lib/langchain/assistant/messages/anthropic_message.rb,
lib/langchain/assistant/messages/mistral_ai_message.rb,
lib/langchain/assistant/messages/google_gemini_message.rb,
lib/langchain/assistant/llm/adapters/aws_bedrock_anthropic.rb

Overview

Assistants are Agent-like objects that leverage helpful instructions, LLMs, tools and knowledge to respond to user queries. Assistants can be configured with an LLM of your choice and any vector search database, and can easily be extended with additional tools.

Usage:

llm = Langchain::LLM::GoogleGemini.new(api_key: ENV["GOOGLE_GEMINI_API_KEY"])
assistant = Langchain::Assistant.new(
  llm: llm,
  instructions: "You're a News Reporter AI",
  tools: [Langchain::Tool::NewsRetriever.new(api_key: ENV["NEWS_API_KEY"])]
)
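
A typical follow-up call might look like the sketch below (a minimal example; it assumes the returned message objects expose a #content reader):

assistant.add_message_and_run!(content: "What's the latest news about AI?")
assistant.messages.last.content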

Defined Under Namespace

Modules: LLM, Messages

Instance Attribute Summary

Instance Method Summary

Constructor Details

#initialize(llm:, tools: [], instructions: nil, tool_choice: "auto", parallel_tool_calls: true, messages: [], add_message_callback: nil, tool_execution_callback: nil, &block) ⇒ Assistant

Create a new assistant

Parameters:

  • llm (Langchain::LLM::Base)

    LLM instance that the assistant will use

  • tools (Array<Langchain::Tool::Base>) (defaults to: [])

    Tools that the assistant has access to

  • instructions (String) (defaults to: nil)

    The system instructions

  • tool_choice (String) (defaults to: "auto")

    Specify how tools should be selected. Options: “auto”, “any”, “none”, or <specific function name>

  • parallel_tool_calls (Boolean) (defaults to: true)

    Whether or not to run tools in parallel

  • messages (Array<Langchain::Assistant::Messages::Base>) (defaults to: [])

    The messages

  • add_message_callback (Proc) (defaults to: nil)

    A callback function (Proc or lambda) that is called when any message is added to the conversation

  • tool_execution_callback (Proc) (defaults to: nil)

    A callback function (Proc or lambda) that is called right before a tool function is executed



# File 'lib/langchain/assistant.rb', line 40

def initialize(
  llm:,
  tools: [],
  instructions: nil,
  tool_choice: "auto",
  parallel_tool_calls: true,
  messages: [],
  # Callbacks
  add_message_callback: nil,
  tool_execution_callback: nil,
  &block
)
  unless tools.is_a?(Array) && tools.all? { |tool| tool.class.singleton_class.included_modules.include?(Langchain::ToolDefinition) }
    raise ArgumentError, "Tools must be an array of objects extending Langchain::ToolDefinition"
  end

  @llm = llm
  @llm_adapter = LLM::Adapter.build(llm)

  @add_message_callback = add_message_callback if validate_callback!("add_message_callback", add_message_callback)
  @tool_execution_callback = tool_execution_callback if validate_callback!("tool_execution_callback", tool_execution_callback)

  self.messages = messages
  @tools = tools
  @parallel_tool_calls = parallel_tool_calls
  self.tool_choice = tool_choice
  self.instructions = instructions
  @block = block
  @state = :ready

  @total_prompt_tokens = 0
  @total_completion_tokens = 0
  @total_tokens = 0
end
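
A minimal sketch of wiring the optional callbacks and a non-default tool_choice (the callback argument lists beyond the single message argument are assumptions for illustration, not guaranteed by this reference):

assistant = Langchain::Assistant.new(
  llm: llm,
  instructions: "You're a News Reporter AI",
  tools: [Langchain::Tool::NewsRetriever.new(api_key: ENV["NEWS_API_KEY"])],
  tool_choice: "auto", # or "any", "none", or a specific function name
  add_message_callback: ->(message) { puts "Message added: #{message.role}" },        # called whenever a message is added
  tool_execution_callback: ->(*args) { puts "About to execute a tool: #{args.inspect}" } # arity left open; exact arguments are assumed
)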

Instance Attribute Details

#add_message_callback ⇒ Object

Returns the value of attribute add_message_callback.



# File 'lib/langchain/assistant.rb', line 25

def add_message_callback
  @add_message_callback
end

#instructions ⇒ Object

Returns the value of attribute instructions.



# File 'lib/langchain/assistant.rb', line 15

def instructions
  @instructions
end

#llm ⇒ Object (readonly)

Returns the value of attribute llm.



# File 'lib/langchain/assistant.rb', line 15

def llm
  @llm
end

#llm_adapter ⇒ Object (readonly)

Returns the value of attribute llm_adapter.



# File 'lib/langchain/assistant.rb', line 15

def llm_adapter
  @llm_adapter
end

#messages ⇒ Object

Returns the value of attribute messages.



# File 'lib/langchain/assistant.rb', line 15

def messages
  @messages
end

#parallel_tool_calls ⇒ Object

Returns the value of attribute parallel_tool_calls.



# File 'lib/langchain/assistant.rb', line 25

def parallel_tool_calls
  @parallel_tool_calls
end

#state ⇒ Object (readonly)

Returns the value of attribute state.



# File 'lib/langchain/assistant.rb', line 15

def state
  @state
end

#tool_choice ⇒ Object

Returns the value of attribute tool_choice.



# File 'lib/langchain/assistant.rb', line 15

def tool_choice
  @tool_choice
end

#tool_execution_callback ⇒ Object

Returns the value of attribute tool_execution_callback.



# File 'lib/langchain/assistant.rb', line 25

def tool_execution_callback
  @tool_execution_callback
end

#tools ⇒ Object

Returns the value of attribute tools.



# File 'lib/langchain/assistant.rb', line 25

def tools
  @tools
end

#total_completion_tokens ⇒ Object (readonly)

Returns the value of attribute total_completion_tokens.



# File 'lib/langchain/assistant.rb', line 15

def total_completion_tokens
  @total_completion_tokens
end

#total_prompt_tokens ⇒ Object (readonly)

Returns the value of attribute total_prompt_tokens.



# File 'lib/langchain/assistant.rb', line 15

def total_prompt_tokens
  @total_prompt_tokens
end

#total_tokens ⇒ Object (readonly)

Returns the value of attribute total_tokens.



# File 'lib/langchain/assistant.rb', line 15

def total_tokens
  @total_tokens
end

Instance Method Details

#add_message(role: "user", content: nil, image_url: nil, tool_calls: [], tool_call_id: nil) ⇒ Array<Langchain::Message>

Add a message to the messages array (the role defaults to "user")

Parameters:

  • role (String) (defaults to: "user")

    The role attribute of the message. Default: “user”

  • content (String) (defaults to: nil)

    The content of the message

  • image_url (String) (defaults to: nil)

    The URL of the image to include in the message

  • tool_calls (Array<Hash>) (defaults to: [])

    The tool calls to include in the message

  • tool_call_id (String) (defaults to: nil)

    The ID of the tool call to include in the message

Returns:

  • (Array<Langchain::Message>)

    The messages



# File 'lib/langchain/assistant.rb', line 83

def add_message(role: "user", content: nil, image_url: nil, tool_calls: [], tool_call_id: nil)
  message = build_message(role: role, content: content, image_url: image_url, tool_calls: tool_calls, tool_call_id: tool_call_id)

  # Call the callback with the message
  add_message_callback.call(message) if add_message_callback # rubocop:disable Style/SafeNavigation

  # Append the message to the messages array
  messages << message

  @state = :ready

  messages
end
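
A couple of illustrative calls (hypothetical content; the role values mirror the ones accepted above):

# Queue a user message without running the assistant yet
assistant.add_message(content: "Summarize today's top headlines")

# Restore an earlier assistant reply, e.g. when rehydrating a conversation
assistant.add_message(role: "assistant", content: "Here are the latest headlines...")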

#add_message_and_run(content: nil, image_url: nil, auto_tool_execution: false) ⇒ Array<Langchain::Message>

Add a user message and run the assistant

Parameters:

  • content (String) (defaults to: nil)

    The content of the message

  • image_url (String) (defaults to: nil)

    The URL of the image to include in the message

  • auto_tool_execution (Boolean) (defaults to: false)

    Whether or not to automatically run tools

Returns:

  • (Array<Langchain::Message>)

    The messages



# File 'lib/langchain/assistant.rb', line 160

def add_message_and_run(content: nil, image_url: nil, auto_tool_execution: false)
  add_message(content: content, image_url: image_url, role: "user")
  run(auto_tool_execution: auto_tool_execution)
end
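
For example (a sketch with hypothetical content; tool calls requested by the LLM are not executed unless auto_tool_execution is true):

assistant.add_message_and_run(content: "Any news on electric vehicles today?")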

#add_message_and_run!(content: nil, image_url: nil) ⇒ Array<Langchain::Message>

Add a user message and run the assistant with automatic tool execution

Parameters:

  • content (String) (defaults to: nil)

    The content of the message

  • image_url (String) (defaults to: nil)

    The URL of the image to include in the message

Returns:

  • (Array<Langchain::Message>)

    The messages



# File 'lib/langchain/assistant.rb', line 169

def add_message_and_run!(content: nil, image_url: nil)
  add_message_and_run(content: content, image_url: image_url, auto_tool_execution: true)
end

#add_messages(messages:) ⇒ Array<Langchain::Message>

Add multiple messages

Parameters:

  • messages (Array<Hash>)

    The messages to add

Returns:

  • (Array<Langchain::Message>)

    The messages



# File 'lib/langchain/assistant.rb', line 125

def add_messages(messages:)
  messages.each do |message_hash|
    add_message(**message_hash.slice(:content, :role, :tool_calls, :tool_call_id))
  end
end
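
For example, restoring a prior conversation from hashes (hypothetical content; only the :content, :role, :tool_calls and :tool_call_id keys are used):

assistant.add_messages(messages: [
  {role: "user", content: "Find news about electric vehicles"},
  {role: "assistant", content: "Here are the latest EV headlines..."}
])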

#array_of_message_hashes ⇒ Array<Hash>

Convert messages to an LLM API-compatible array of hashes

Returns:

  • (Array<Hash>)

    Messages as an OpenAI API-compatible array of hashes



# File 'lib/langchain/assistant.rb', line 100

def array_of_message_hashes
  messages
    .map(&:to_hash)
    .compact
end
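
For example (the exact hash shape is produced by the message class for the configured LLM; the one below is an assumed OpenAI-style shape):

assistant.array_of_message_hashes
# => [{role: "user", content: "Find news about electric vehicles"}, ...]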

#clear_messages! ⇒ Array

Delete all messages

Returns:

  • (Array)

    Empty messages array



# File 'lib/langchain/assistant.rb', line 186

def clear_messages!
  # TODO: Is this a bug? Should we keep the "system" message?
  @messages = []
end

#prompt_of_concatenated_messages ⇒ Object

Only used by the Assistant when it calls the LLM#complete() method



# File 'lib/langchain/assistant.rb', line 107

def prompt_of_concatenated_messages
  messages.map(&:to_s).join
end

#run(auto_tool_execution: false) ⇒ Array<Langchain::Message>

Run the assistant

Parameters:

  • auto_tool_execution (Boolean) (defaults to: false)

    Whether or not to automatically run tools

Returns:

  • (Array<Langchain::Message>)

    The messages



# File 'lib/langchain/assistant.rb', line 135

def run(auto_tool_execution: false)
  if messages.empty?
    Langchain.logger.warn("#{self.class} - No messages to process")
    @state = :completed
    return
  end

  @state = :in_progress
  @state = handle_state until run_finished?(auto_tool_execution)

  messages
end
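
For example, running without automatic tool execution and inspecting the resulting state (the :requires_action state name is an assumption for illustration):

assistant.add_message(content: "Fetch the latest AI headlines")
assistant.run # auto_tool_execution defaults to false
assistant.state # => :requires_action if the LLM requested a tool call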

#run! ⇒ Array<Langchain::Message>

Run the assistant with automatic tool execution

Returns:

  • (Array<Langchain::Message>)

    The messages



# File 'lib/langchain/assistant.rb', line 151

def run!
  run(auto_tool_execution: true)
end

#submit_tool_output(tool_call_id:, output:) ⇒ Array<Langchain::Message>

Submit tool output

Parameters:

  • tool_call_id (String)

    The ID of the tool call to submit output for

  • output (String)

    The output of the tool

Returns:

  • (Array<Langchain::Message>)

    The messages



# File 'lib/langchain/assistant.rb', line 178

def submit_tool_output(tool_call_id:, output:)
  # TODO: Validate that `tool_call_id` is valid by scanning messages and checking if this tool call ID was invoked
  add_message(role: @llm_adapter.tool_role, content: output, tool_call_id: tool_call_id)
end
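
A sketch of the manual tool-execution loop (it assumes the last message exposes #tool_calls and that each tool call hash carries an "id" key, which is not guaranteed by this reference):

assistant.add_message(content: "What are today's top technology headlines?")
assistant.run # tool calls are not executed automatically

tool_call = assistant.messages.last.tool_calls.first
output = "..." # execute the tool yourself and capture its output
assistant.submit_tool_output(tool_call_id: tool_call["id"], output: output)
assistant.run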