Class: Langchain::LLM::Anthropic

Inherits:
Base
  • Object
Defined in:
lib/langchain/llm/anthropic.rb

Overview

Wrapper around Anthropic APIs.

Gem requirements:

gem "anthropic", "~> 0.3.2"

Usage:

llm = Langchain::LLM::Anthropic.new(api_key: ENV["ANTHROPIC_API_KEY"])

Constant Summary

DEFAULTS =
{
  temperature: 0.0,
  completion_model: "claude-2.1",
  chat_model: "claude-3-5-sonnet-20240620",
  max_tokens: 256
}.freeze

Instance Attribute Summary

Attributes inherited from Base

#client, #defaults

Instance Method Summary

Methods inherited from Base

#chat_parameters, #default_dimension, #default_dimensions, #embed, #summarize

Methods included from DependencyHelper

#depends_on

Constructor Details

#initialize(api_key:, llm_options: {}, default_options: {}) ⇒ Langchain::LLM::Anthropic

Initialize an Anthropic LLM instance

Parameters:

  • api_key (String)

    The API key to use

  • llm_options (Hash) (defaults to: {})

    Options to pass to the Anthropic client

  • default_options (Hash) (defaults to: {})

    Default options to use on every call to the LLM, e.g.: { temperature:, completion_model:, chat_model:, max_tokens: }



# File 'lib/langchain/llm/anthropic.rb', line 27

def initialize(api_key:, llm_options: {}, default_options: {})
  depends_on "anthropic"

  @client = ::Anthropic::Client.new(access_token: api_key, **llm_options)
  @defaults = DEFAULTS.merge(default_options)
  chat_parameters.update(
    model: {default: @defaults[:chat_model]},
    temperature: {default: @defaults[:temperature]},
    max_tokens: {default: @defaults[:max_tokens]},
    metadata: {},
    system: {}
  )
  chat_parameters.ignore(:n, :user)
  chat_parameters.remap(stop: :stop_sequences)
end
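
For example, the defaults can be overridden at construction time (a minimal sketch; the option values below are illustrative):

llm = Langchain::LLM::Anthropic.new(
  api_key: ENV["ANTHROPIC_API_KEY"],
  default_options: {
    chat_model: "claude-3-5-sonnet-20240620", # same as DEFAULTS; shown for illustration
    temperature: 0.2,
    max_tokens: 512
  }
)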

Instance Method Details

#chat(params = {}, &block) ⇒ Langchain::LLM::AnthropicResponse

Generate a chat completion for given messages

Parameters:

  • params (Hash) (defaults to: {})

    unified chat parameters from [Langchain::LLM::Parameters::Chat::SCHEMA]

Options Hash (params):

  • :messages (Array<Hash>)

    Input messages

  • :model (String)

    The model that will complete your prompt

  • :max_tokens (Integer)

    Maximum number of tokens to generate before stopping

  • :metadata (Hash)

    Object describing metadata about the request

  • :stop_sequences (Array<String>)

    Custom text sequences that will cause the model to stop generating

  • :stream (Boolean)

    Whether to incrementally stream the response using server-sent events

  • :system (String)

    System prompt

  • :temperature (Float)

    Amount of randomness injected into the response

  • :tools (Array<String>)

    Definitions of tools that the model may use

  • :top_k (Integer)

    Only sample from the top K options for each subsequent token

  • :top_p (Float)

    Use nucleus sampling.

Returns:

  • (Langchain::LLM::AnthropicResponse)

Raises:

  • (ArgumentError)


# File 'lib/langchain/llm/anthropic.rb', line 103

def chat(params = {}, &block)
  set_extra_headers! if params[:tools]

  parameters = chat_parameters.to_params(params)

  raise ArgumentError.new("messages argument is required") if Array(parameters[:messages]).empty?
  raise ArgumentError.new("model argument is required") if parameters[:model].empty?
  raise ArgumentError.new("max_tokens argument is required") if parameters[:max_tokens].nil?

  if block
    @response_chunks = []
    parameters[:stream] = proc do |chunk|
      @response_chunks << chunk
      yield chunk
    end
  end

  response = client.messages(parameters: parameters)

  response = response_from_chunks if block
  reset_response_chunks

  Langchain::LLM::AnthropicResponse.new(response)
end
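
A minimal usage sketch (the message content is illustrative, and the chat_completion accessor is assumed from the AnthropicResponse wrapper):

messages = [{role: "user", content: "Hello, Claude!"}]

response = llm.chat(messages: messages)
response.chat_completion # the assistant's reply text (accessor assumed)

# Streaming: pass a block to receive each server-sent chunk as it arrives;
# the chunk shape follows Anthropic's streaming events (e.g. content_block_delta).
llm.chat(messages: messages) do |chunk|
  print chunk.dig("delta", "text")
end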

#complete(prompt:, model: @defaults[:completion_model], max_tokens: @defaults[:max_tokens], stop_sequences: nil, temperature: @defaults[:temperature], top_p: nil, top_k: nil, metadata: nil, stream: nil) ⇒ Langchain::LLM::AnthropicResponse

Generate a completion for a given prompt

Parameters:

  • prompt (String)

    Prompt to generate a completion for

  • model (String) (defaults to: @defaults[:completion_model])

    The model to use

  • max_tokens (Integer) (defaults to: @defaults[:max_tokens])

    The maximum number of tokens to sample

  • stop_sequences (Array<String>) (defaults to: nil)

    The stop sequences to use

  • temperature (Float) (defaults to: @defaults[:temperature])

    The temperature to use

  • top_p (Float) (defaults to: nil)

    The top p value to use

  • top_k (Integer) (defaults to: nil)

    The top k value to use

  • metadata (Hash) (defaults to: nil)

    The metadata to use

  • stream (Boolean) (defaults to: nil)

    Whether to stream the response

Returns:

  • (Langchain::LLM::AnthropicResponse)

Raises:

  • (ArgumentError)


# File 'lib/langchain/llm/anthropic.rb', line 55

def complete(
  prompt:,
  model: @defaults[:completion_model],
  max_tokens: @defaults[:max_tokens],
  stop_sequences: nil,
  temperature: @defaults[:temperature],
  top_p: nil,
  top_k: nil,
  metadata: nil,
  stream: nil
)
  raise ArgumentError.new("model argument is required") if model.empty?
  raise ArgumentError.new("max_tokens argument is required") if max_tokens.nil?

  parameters = {
    model: model,
    prompt: prompt,
    max_tokens_to_sample: max_tokens,
    temperature: temperature
  }
  parameters[:stop_sequences] = stop_sequences if stop_sequences
  parameters[:top_p] = top_p if top_p
  parameters[:top_k] = top_k if top_k
  parameters[:metadata] = metadata if metadata
  parameters[:stream] = stream if stream

  response = with_api_error_handling do
    client.complete(parameters: parameters)
  end

  Langchain::LLM::AnthropicResponse.new(response)
end
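
A minimal usage sketch (the legacy Text Completions prompt format is assumed, and the completion accessor is assumed from the AnthropicResponse wrapper):

response = llm.complete(
  prompt: "\n\nHuman: Tell me a joke.\n\nAssistant:",
  max_tokens: 128
)
response.completion # the generated text (accessor assumed)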

#response_from_chunks ⇒ Object



# File 'lib/langchain/llm/anthropic.rb', line 137

def response_from_chunks
  grouped_chunks = @response_chunks.group_by { |chunk| chunk["index"] }.except(nil)

  usage = @response_chunks.find { |chunk| chunk["type"] == "message_delta" }&.dig("usage")
  stop_reason = @response_chunks.find { |chunk| chunk["type"] == "message_delta" }&.dig("delta", "stop_reason")

  content = grouped_chunks.map do |_index, chunks|
    text = chunks.map { |chunk| chunk.dig("delta", "text") }.join
    if !text.nil? && !text.empty?
      {"type" => "text", "text" => text}
    else
      tool_calls_from_choice_chunks(chunks)
    end
  end.flatten

  @response_chunks.first&.slice("id", "object", "created", "model")
    &.merge!(
      {
        "content" => content,
        "usage" => usage,
        "role" => "assistant",
        "stop_reason" => stop_reason
      }
    )
end
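
As an illustration, chunks sharing an "index" are grouped, their delta text is joined into a single content block, and usage and stop_reason are read from the message_delta event. The chunk shapes below are abbreviated and assumed from Anthropic's streaming format:

# Assumed, abbreviated chunk stream for a plain text reply:
chunks = [
  {"index" => 0, "type" => "content_block_delta", "delta" => {"type" => "text_delta", "text" => "Hel"}},
  {"index" => 0, "type" => "content_block_delta", "delta" => {"type" => "text_delta", "text" => "lo"}},
  {"type" => "message_delta", "delta" => {"stop_reason" => "end_turn"}, "usage" => {"output_tokens" => 2}}
]
# The two index-0 deltas collapse into {"type" => "text", "text" => "Hello"},
# while usage and stop_reason come from the message_delta chunk.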

#tool_calls_from_choice_chunks(chunks) ⇒ Object



# File 'lib/langchain/llm/anthropic.rb', line 163

def tool_calls_from_choice_chunks(chunks)
  return unless (first_block = chunks.find { |chunk| chunk.dig("content_block", "type") == "tool_use" })

  chunks.group_by { |chunk| chunk["index"] }.map do |index, chunks|
    input = chunks.select { |chunk| chunk.dig("delta", "partial_json") }
      .map! { |chunk| chunk.dig("delta", "partial_json") }.join
    {
      "id" => first_block.dig("content_block", "id"),
      "type" => "tool_use",
      "name" => first_block.dig("content_block", "name"),
      "input" => JSON.parse(input).transform_keys(&:to_sym)
    }
  end.compact
end
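
Similarly, a tool call arrives as a content_block_start of type tool_use followed by partial_json deltas, which are concatenated and parsed into a single tool_use hash (chunk shapes below are abbreviated and assumed from Anthropic's streaming format):

# Assumed, abbreviated chunks for one tool call:
#   {"index" => 1, "type" => "content_block_start",
#    "content_block" => {"type" => "tool_use", "id" => "toolu_...", "name" => "get_weather"}}
#   {"index" => 1, "type" => "content_block_delta", "delta" => {"partial_json" => "{\"city\":"}}
#   {"index" => 1, "type" => "content_block_delta", "delta" => {"partial_json" => "\"Paris\"}"}}
# These roughly produce:
#   {"id" => "toolu_...", "type" => "tool_use", "name" => "get_weather", "input" => {city: "Paris"}}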

#with_api_error_handling ⇒ Object



# File 'lib/langchain/llm/anthropic.rb', line 128

def with_api_error_handling
  response = yield
  return if response.empty?

  raise Langchain::LLM::ApiError.new "Anthropic API error: #{response.dig("error", "message")}" if response&.dig("error")

  response
end