Class: Langchain::LLM::Anthropic
- Defined in:
- lib/langchain/llm/anthropic.rb
Overview
Constant Summary
- DEFAULTS =
{ temperature: 0.0, completion_model: "claude-2.1", chat_model: "claude-3-5-sonnet-20240620", max_tokens: 256 }.freeze
Instance Attribute Summary
Attributes inherited from Base
Instance Method Summary
- #chat(params = {}, &block) ⇒ Langchain::LLM::AnthropicResponse
  Generate a chat completion for the given messages.
- #complete(prompt:, model: @defaults[:completion_model], max_tokens: @defaults[:max_tokens], stop_sequences: nil, temperature: @defaults[:temperature], top_p: nil, top_k: nil, metadata: nil, stream: nil) ⇒ Langchain::LLM::AnthropicResponse
  Generate a completion for a given prompt.
- #initialize(api_key:, llm_options: {}, default_options: {}) ⇒ Langchain::LLM::Anthropic (constructor)
  Initialize an Anthropic LLM instance.
- #response_from_chunks ⇒ Object
- #tool_calls_from_choice_chunks(chunks) ⇒ Object
- #with_api_error_handling ⇒ Object
Methods inherited from Base
#chat_parameters, #default_dimension, #default_dimensions, #embed, #summarize
Methods included from DependencyHelper
Constructor Details
#initialize(api_key:, llm_options: {}, default_options: {}) ⇒ Langchain::LLM::Anthropic
Initialize an Anthropic LLM instance
# File 'lib/langchain/llm/anthropic.rb', line 27

def initialize(api_key:, llm_options: {}, default_options: {})
  depends_on "anthropic"

  @client = ::Anthropic::Client.new(access_token: api_key, **llm_options)
  @defaults = DEFAULTS.merge(default_options)
  chat_parameters.update(
    model: {default: @defaults[:chat_model]},
    temperature: {default: @defaults[:temperature]},
    max_tokens: {default: @defaults[:max_tokens]},
    metadata: {},
    system: {}
  )
  chat_parameters.ignore(:n, :user)
  chat_parameters.remap(stop: :stop_sequences)
end
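Example (not part of the generated docs): a minimal setup sketch. It assumes the API key is stored in ENV["ANTHROPIC_API_KEY"] and shows default_options being merged over the DEFAULTS constant above; the override values are illustrative.

require "langchain"

llm = Langchain::LLM::Anthropic.new(
  api_key: ENV["ANTHROPIC_API_KEY"], # assumed env var name
  default_options: {
    temperature: 0.2,                # merged over DEFAULTS via DEFAULTS.merge(default_options)
    max_tokens: 512
  }
)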
Instance Method Details
#chat(params = {}, &block) ⇒ Langchain::LLM::AnthropicResponse
Generate a chat completion for the given messages
# File 'lib/langchain/llm/anthropic.rb', line 103

def chat(params = {}, &block)
  set_extra_headers! if params[:tools]

  parameters = chat_parameters.to_params(params)

  raise ArgumentError.new("messages argument is required") if Array(parameters[:messages]).empty?
  raise ArgumentError.new("model argument is required") if parameters[:model].empty?
  raise ArgumentError.new("max_tokens argument is required") if parameters[:max_tokens].nil?

  if block
    @response_chunks = []
    parameters[:stream] = proc do |chunk|
      @response_chunks << chunk
      yield chunk
    end
  end

  response = client.messages(parameters: parameters)

  response = response_from_chunks if block
  reset_response_chunks

  Langchain::LLM::AnthropicResponse.new(response)
end
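Example (illustrative, not from the source): calling #chat directly and with a streaming block. The message content is made up, and chat_completion is assumed to be the reader exposed by Langchain::LLM::AnthropicResponse for the generated text.

# Blocking call: messages use the Anthropic "user"/"assistant" roles.
response = llm.chat(
  messages: [{role: "user", content: "Say hello in one word."}]
)
puts response.chat_completion # assumed AnthropicResponse reader

# Streaming call: each raw chunk is yielded to the block while also being
# collected in @response_chunks and reassembled via #response_from_chunks.
llm.chat(messages: [{role: "user", content: "Tell me a short joke."}]) do |chunk|
  print chunk.dig("delta", "text") # text deltas only; other event types print nothing
end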
#complete(prompt:, model: @defaults[:completion_model], max_tokens: @defaults[:max_tokens], stop_sequences: nil, temperature: @defaults[:temperature], top_p: nil, top_k: nil, metadata: nil, stream: nil) ⇒ Langchain::LLM::AnthropicResponse
Generate a completion for a given prompt
# File 'lib/langchain/llm/anthropic.rb', line 55

def complete(
  prompt:,
  model: @defaults[:completion_model],
  max_tokens: @defaults[:max_tokens],
  stop_sequences: nil,
  temperature: @defaults[:temperature],
  top_p: nil,
  top_k: nil,
  metadata: nil,
  stream: nil
)
  raise ArgumentError.new("model argument is required") if model.empty?
  raise ArgumentError.new("max_tokens argument is required") if max_tokens.nil?

  parameters = {
    model: model,
    prompt: prompt,
    max_tokens_to_sample: max_tokens,
    temperature: temperature
  }
  parameters[:stop_sequences] = stop_sequences if stop_sequences
  parameters[:top_p] = top_p if top_p
  parameters[:top_k] = top_k if top_k
  parameters[:metadata] = metadata if metadata
  parameters[:stream] = stream if stream

  response = with_api_error_handling do
    client.complete(parameters: parameters)
  end

  Langchain::LLM::AnthropicResponse.new(response)
end
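Example (illustrative): calling #complete with the legacy prompt format expected by Anthropic's completions endpoint; the prompt text and stop sequence are made up, and completion is assumed to be the AnthropicResponse reader for the result.

response = llm.complete(
  prompt: "\n\nHuman: What is the capital of France?\n\nAssistant:",
  max_tokens: 100,
  stop_sequences: ["\n\nHuman:"]
)
puts response.completion # assumed AnthropicResponse reader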
#response_from_chunks ⇒ Object
# File 'lib/langchain/llm/anthropic.rb', line 137

def response_from_chunks
  grouped_chunks = @response_chunks.group_by { |chunk| chunk["index"] }.except(nil)

  usage = @response_chunks.find { |chunk| chunk["type"] == "message_delta" }&.dig("usage")
  stop_reason = @response_chunks.find { |chunk| chunk["type"] == "message_delta" }&.dig("delta", "stop_reason")

  content = grouped_chunks.map do |_index, chunks|
    text = chunks.map { |chunk| chunk.dig("delta", "text") }.join
    if !text.nil? && !text.empty?
      {"type" => "text", "text" => text}
    else
      tool_calls_from_choice_chunks(chunks)
    end
  end.flatten

  @response_chunks.first&.slice("id", "object", "created", "model")
    &.merge!(
      {
        "content" => content,
        "usage" => usage,
        "role" => "assistant",
        "stop_reason" => stop_reason
      }
    )
end
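To make the reassembly concrete, here is a hypothetical chunk stream shaped to match the dig/slice calls above (real Anthropic streaming events may nest some of these fields differently), together with the hash #response_from_chunks would build from it:

# Hypothetical @response_chunks contents:
#   {"id" => "msg_123", "model" => "claude-3-5-sonnet-20240620", "type" => "message_start"}
#   {"type" => "content_block_delta", "index" => 0, "delta" => {"type" => "text_delta", "text" => "Hel"}}
#   {"type" => "content_block_delta", "index" => 0, "delta" => {"type" => "text_delta", "text" => "lo"}}
#   {"type" => "message_delta", "delta" => {"stop_reason" => "end_turn"}, "usage" => {"output_tokens" => 4}}
#
# response_from_chunks would then return roughly:
#   {
#     "id" => "msg_123",
#     "model" => "claude-3-5-sonnet-20240620",
#     "content" => [{"type" => "text", "text" => "Hello"}],
#     "usage" => {"output_tokens" => 4},
#     "role" => "assistant",
#     "stop_reason" => "end_turn"
#   }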
#tool_calls_from_choice_chunks(chunks) ⇒ Object
# File 'lib/langchain/llm/anthropic.rb', line 163

def tool_calls_from_choice_chunks(chunks)
  return unless (first_block = chunks.find { |chunk| chunk.dig("content_block", "type") == "tool_use" })

  chunks.group_by { |chunk| chunk["index"] }.map do |index, chunks|
    input = chunks.select { |chunk| chunk.dig("delta", "partial_json") }
      .map! { |chunk| chunk.dig("delta", "partial_json") }.join
    {
      "id" => first_block.dig("content_block", "id"),
      "type" => "tool_use",
      "name" => first_block.dig("content_block", "name"),
      "input" => JSON.parse(input).transform_keys(&:to_sym)
    }
  end.compact
end
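Similarly, a hypothetical set of tool-use chunks shaped to match the dig calls above, and what #tool_calls_from_choice_chunks derives from them (the tool name and JSON are made up):

chunks = [
  {"index" => 1, "content_block" => {"type" => "tool_use", "id" => "toolu_01", "name" => "get_weather"}},
  {"index" => 1, "delta" => {"partial_json" => "{\"location\":"}},
  {"index" => 1, "delta" => {"partial_json" => "\"Paris\"}"}}
]

# tool_calls_from_choice_chunks(chunks) would return roughly:
#   [{"id" => "toolu_01", "type" => "tool_use", "name" => "get_weather", "input" => {location: "Paris"}}]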
#with_api_error_handling ⇒ Object
# File 'lib/langchain/llm/anthropic.rb', line 128

def with_api_error_handling
  response = yield
  return if response.empty?

  raise Langchain::LLM::ApiError.new "Anthropic API error: #{response.dig("error", "message")}" if response&.dig("error")

  response
end
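A brief sketch of the error path (the payload is illustrative): when the block's return value contains an "error" key, the helper raises Langchain::LLM::ApiError with the error message interpolated.

begin
  llm.with_api_error_handling do
    {"error" => {"type" => "rate_limit_error", "message" => "Rate limited"}} # hypothetical API response
  end
rescue Langchain::LLM::ApiError => e
  puts e.message # => "Anthropic API error: Rate limited"
end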