Class: Boxcars::Anthropic
- Defined in:
- lib/boxcars/engine/anthropic.rb
Overview
An engine that uses Anthropic’s API.
Constant Summary collapse
- DEFAULT_PARAMS =
The default parameters to use when asking the engine.
{ model: "claude-3-5-sonnet-20240620", max_tokens: 4096, temperature: 0.1 }.freeze
- DEFAULT_NAME =
the default name of the engine
"Anthropic engine"
- DEFAULT_DESCRIPTION =
the default description of the engine
"useful for when you need to use Anthropic AI to answer questions. " \ "You should ask targeted questions"
Instance Attribute Summary collapse
-
#batch_size ⇒ Object
readonly
Returns the value of attribute batch_size.
-
#llm_params ⇒ Object
readonly
Returns the value of attribute llm_params.
-
#model_kwargs ⇒ Object
readonly
Returns the value of attribute model_kwargs.
-
#prompts ⇒ Object
readonly
Returns the value of attribute prompts.
Instance Method Summary collapse
- #anthropic_client(anthropic_api_key: nil) ⇒ Object
-
#check_response(response, must_haves: %w[completion]) ⇒ Object
make sure we got a valid response.
-
#client(prompt:, inputs: {}, **kwargs) ⇒ Object
Get an answer from the engine.
- #combine_assistant(params) ⇒ Object
-
#combine_assistant_entries(hashes) ⇒ Object
if we have multiple assistant entries in a row, we need to combine them.
- #conversation_model?(model) ⇒ Boolean
-
#convert_to_anthropic(params) ⇒ Object
convert generic parameters to Anthropic specific ones.
-
#default_params ⇒ Object
Get the default parameters for the engine.
- #default_prefixes ⇒ Object
-
#engine_type ⇒ Object
the engine type.
- #extract_model_version(model_string) ⇒ Object
-
#generate(prompts:, stop: nil) ⇒ EngineResult
Call out to Anthropic’s endpoint with k unique prompts.
-
#generation_info(sub_choices) ⇒ Array<Generation>
Get generation information.
-
#get_num_tokens(text:) ⇒ Object
calculate the number of tokens used.
-
#initialize(name: DEFAULT_NAME, description: DEFAULT_DESCRIPTION, prompts: [], **kwargs) ⇒ Anthropic
constructor
An engine is the driver for a single tool to run.
-
#max_tokens_for_prompt(prompt_text) ⇒ Integer
Calculate the maximum number of tokens possible to generate for a prompt.
-
#modelname_to_contextsize(_modelname) ⇒ Object
lookup the context size for a model by name.
-
#run(question, **kwargs) ⇒ Object
get an answer from the engine for a question.
Constructor Details
#initialize(name: DEFAULT_NAME, description: DEFAULT_DESCRIPTION, prompts: [], **kwargs) ⇒ Anthropic
An engine is the driver for a single tool to run.
28 29 30 31 32 33 |
# File 'lib/boxcars/engine/anthropic.rb', line 28
# An engine is the driver for a single tool to run.
# @param name [String] name of the engine
# @param description [String] what the engine is useful for
# @param prompts [Array] prompts (or templates) the engine may use
# @param kwargs [Hash] extra LLM parameters, merged over DEFAULT_PARAMS
def initialize(name: DEFAULT_NAME, description: DEFAULT_DESCRIPTION, prompts: [], **kwargs)
  @llm_params = DEFAULT_PARAMS.merge(kwargs)
  @prompts = prompts
  @batch_size = 20
  super(description: description, name: name)
end
Instance Attribute Details
#batch_size ⇒ Object (readonly)
Returns the value of attribute batch_size.
8 9 10 |
# File 'lib/boxcars/engine/anthropic.rb', line 8 def batch_size @batch_size end |
#llm_params ⇒ Object (readonly)
Returns the value of attribute llm_params.
8 9 10 |
# File 'lib/boxcars/engine/anthropic.rb', line 8 def llm_params @llm_params end |
#model_kwargs ⇒ Object (readonly)
Returns the value of attribute model_kwargs.
8 9 10 |
# File 'lib/boxcars/engine/anthropic.rb', line 8 def model_kwargs @model_kwargs end |
#prompts ⇒ Object (readonly)
Returns the value of attribute prompts.
8 9 10 |
# File 'lib/boxcars/engine/anthropic.rb', line 8 def prompts @prompts end |
Instance Method Details
#anthropic_client(anthropic_api_key: nil) ⇒ Object
39 40 41 |
# File 'lib/boxcars/engine/anthropic.rb', line 39 def anthropic_client(anthropic_api_key: nil) ::Anthropic::Client.new(access_token: anthropic_api_key) end |
#check_response(response, must_haves: %w[completion]) ⇒ Object
make sure we got a valid response
117 118 119 120 121 122 123 124 125 126 127 128 129 |
# File 'lib/boxcars/engine/anthropic.rb', line 117
# Make sure we got a valid response.
# @param response [Hash] the parsed API response
# @param must_haves [Array<String>] keys that must be present in the response
# @raise [KeyError] when the API reports an invalid API key
# @raise [ValueError] for any other API error, or when a required key is missing
def check_response(response, must_haves: %w[completion])
  if response['error']
    code = response.dig('error', 'code')
    msg = response.dig('error', 'message') || 'unknown error'
    # Fixed typo in the error message: was "ANTHOPIC_API_KEY".
    raise KeyError, "ANTHROPIC_API_KEY not valid" if code == 'invalid_api_key'

    raise ValueError, "Anthropic error: #{msg}"
  end

  must_haves.each do |key|
    raise ValueError, "Expecting key #{key} in response" unless response.key?(key)
  end
end
#client(prompt:, inputs: {}, **kwargs) ⇒ Object
Get an answer from the engine. rubocop:disable Metrics/PerceivedComplexity
49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 |
# File 'lib/boxcars/engine/anthropic.rb', line 49 def client(prompt:, inputs: {}, **kwargs) model_params = llm_params.merge(kwargs) api_key = Boxcars.configuration.anthropic_api_key(**kwargs) aclient = anthropic_client(anthropic_api_key: api_key) prompt = prompt.first if prompt.is_a?(Array) if conversation_model?(model_params[:model]) params = convert_to_anthropic(prompt.(inputs).merge(model_params)) if Boxcars.configuration.log_prompts if params[:messages].length < 2 && params[:system].present? Boxcars.debug(">>>>>> Role: system <<<<<<\n#{params[:system]}") end Boxcars.debug(params[:messages].last(2).map { |p| ">>>>>> Role: #{p[:role]} <<<<<<\n#{p[:content]}" }.join("\n"), :cyan) end response = aclient.(parameters: params) response['completion'] = response.dig('content', 0, 'text') response.delete('content') response else params = prompt.as_prompt(inputs: inputs, prefixes: default_prefixes, show_roles: true).merge(model_params) params[:prompt] = "\n\n#{params[:prompt]}" unless params[:prompt].start_with?("\n\n") params[:stop_sequences] = params.delete(:stop) if params.key?(:stop) Boxcars.debug("Prompt after formatting:#{params[:prompt]}", :cyan) if Boxcars.configuration.log_prompts aclient.complete(parameters: params) end end |
#combine_assistant(params) ⇒ Object
208 209 210 211 212 |
# File 'lib/boxcars/engine/anthropic.rb', line 208
# Merge consecutive assistant messages and strip trailing whitespace from a
# final assistant message.
# @param params [Hash] request params containing :messages (mutated in place)
# @return [Hash] the same params hash
def combine_assistant(params)
  messages = combine_assistant_entries(params[:messages])
  params[:messages] = messages
  final = messages.last
  final[:content].rstrip! if final[:role] == :assistant
  params
end
#combine_assistant_entries(hashes) ⇒ Object
if we have multiple assistant entries in a row, we need to combine them
215 216 217 218 219 220 221 222 223 224 225 |
# File 'lib/boxcars/engine/anthropic.rb', line 215
# If we have multiple assistant entries in a row, we need to combine them.
# @param hashes [Array<Hash>] message hashes with :role and :content
# @return [Array<Hash>] messages with consecutive :assistant entries merged
def combine_assistant_entries(hashes)
  hashes.each_with_object([]) do |entry, merged|
    if !merged.empty? && merged.last[:role] == :assistant && entry[:role] == :assistant
      # Fold this entry into the previous assistant message (mutates its string).
      merged.last[:content] << "\n" << entry[:content].rstrip
    else
      merged << entry
    end
  end
end
#conversation_model?(model) ⇒ Boolean
35 36 37 |
# File 'lib/boxcars/engine/anthropic.rb', line 35
# Is this a conversation (messages-API) model? True for Claude versions above 3.49
# (i.e. 3.5 and later, per extract_model_version's major + minor/10 encoding).
# @param model [String] the model name, e.g. "claude-3-5-sonnet-20240620"
# @return [Boolean]
def conversation_model?(model)
  # Fixed: the previous `@conversation_model ||= ...` memoized the answer for the
  # first model ever queried and returned that stale value for every different
  # model passed later (and never cached a false result anyway). The check is a
  # cheap regex parse, so just compute it each time.
  extract_model_version(model) > 3.49
end
#convert_to_anthropic(params) ⇒ Object
convert generic parameters to Anthropic specific ones
201 202 203 204 205 206 |
# File 'lib/boxcars/engine/anthropic.rb', line 201 def convert_to_anthropic(params) params[:stop_sequences] = params.delete(:stop) if params.key?(:stop) params[:system] = params[:messages].shift[:content] if params.dig(:messages, 0, :role) == :system params[:messages].pop if params[:messages].last[:content].blank? combine_assistant(params) end |
#default_params ⇒ Object
Get the default parameters for the engine.
93 94 95 |
# File 'lib/boxcars/engine/anthropic.rb', line 93 def default_params llm_params end |
#default_prefixes ⇒ Object
227 228 229 |
# File 'lib/boxcars/engine/anthropic.rb', line 227
# Role prefixes used when rendering a prompt as flat text for the completion API.
# @return [Hash] role => prefix string (:history maps to the :history placeholder)
def default_prefixes
  {
    system: "Human: ",
    user: "Human: ",
    assistant: "Assistant: ",
    history: :history
  }
end
#engine_type ⇒ Object
the engine type
161 162 163 |
# File 'lib/boxcars/engine/anthropic.rb', line 161
# The engine type.
# @return [String] always "claude"
def engine_type
  "claude"
end
#extract_model_version(model_string) ⇒ Object
187 188 189 190 191 192 193 194 195 196 197 198 |
# File 'lib/boxcars/engine/anthropic.rb', line 187
# Parse the version number out of a Claude model name.
# @param model_string [String] e.g. "claude-3-5-sonnet-20240620"
# @return [Float] major plus minor/10 (e.g. 3.5); minor defaults to 0
# @raise [ArgumentError] when no "claude-<version>" pattern is present
def extract_model_version(model_string)
  # Named captures instead of positional groups; nil minor coerces to 0.
  version = model_string.match(/claude-(?<major>\d+)(?:-(?<minor>\d+))?/)
  raise ArgumentError, "No version number found in model string: #{model_string}" unless version

  version[:major].to_i + (version[:minor].to_i / 10.0)
end
#generate(prompts:, stop: nil) ⇒ EngineResult
Call out to Anthropic’s endpoint with k unique prompts.
136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 |
# File 'lib/boxcars/engine/anthropic.rb', line 136
# Call out to Anthropic's endpoint with k unique prompts.
# @param prompts [Array] prompt / inputs pairs to run
# @param stop [Array<String>, nil] optional stop sequences
# @return [EngineResult] one generation group per prompt
def generate(prompts:, stop: nil)
  params = {}
  params[:stop] = stop if stop
  choices = []
  # Get the token usage from the response.
  # Includes prompt, completion, and total tokens used.
  prompts.each_slice(batch_size) do |sub_prompts|
    sub_prompts.each do |sprompts, inputs|
      response = client(prompt: sprompts, inputs: inputs, **params)
      check_response(response)
      choices << response
    end
  end
  n = params.fetch(:n, 1)
  generations = []
  prompts.each_with_index do |_prompt, i|
    # Fixed slice bug: Array#[](start, length) takes a LENGTH as its second
    # argument. The old `choices[i * n, (i + 1) * n]` grabbed an ever-growing,
    # overlapping window for every prompt after the first; each prompt owns
    # exactly n consecutive choices.
    sub_choices = choices[i * n, n]
    generations.push(generation_info(sub_choices))
  end
  EngineResult.new(generations: generations, engine_output: { token_usage: {} })
end
#generation_info(sub_choices) ⇒ Array<Generation>
Get generation information
100 101 102 103 104 105 106 107 108 109 110 |
# File 'lib/boxcars/engine/anthropic.rb', line 100 def generation_info(sub_choices) sub_choices.map do |choice| Generation.new( text: choice["completion"], generation_info: { finish_reason: choice.fetch("stop_reason", nil), logprobs: choice.fetch("logprobs", nil) } ) end end |
#get_num_tokens(text:) ⇒ Object
calculate the number of tokens used
166 167 168 |
# File 'lib/boxcars/engine/anthropic.rb', line 166
# Calculate the number of tokens used.
# A rough whitespace-word approximation, not a real tokenizer.
# @param text [String] the text to measure
# @return [Integer] approximate token count
def get_num_tokens(text:)
  # TODO: hook up to token counting gem
  text.split.size
end
#max_tokens_for_prompt(prompt_text) ⇒ Integer
Calculate the maximum number of tokens possible to generate for a prompt.
179 180 181 182 183 184 185 |
# File 'lib/boxcars/engine/anthropic.rb', line 179
# Calculate the maximum number of tokens possible to generate for a prompt.
# @param prompt_text [String] the prompt text
# @return [Integer] the model's context size minus the tokens the prompt consumes
def max_tokens_for_prompt(prompt_text)
  # Fixed: get_num_tokens only accepts a `text:` keyword argument; the old
  # positional call `get_num_tokens(prompt_text)` raised ArgumentError at runtime.
  num_tokens = get_num_tokens(text: prompt_text)

  # get max context size for model by name
  # NOTE(review): `model_name` is not defined in this file's visible code —
  # verify it is provided by the parent Engine class.
  max_size = modelname_to_contextsize(model_name)
  max_size - num_tokens
end
#modelname_to_contextsize(_modelname) ⇒ Object
lookup the context size for a model by name
172 173 174 |
# File 'lib/boxcars/engine/anthropic.rb', line 172
# Lookup the context size for a model by name.
# @param _modelname [String] unused — every model gets the same size here
# @return [Integer] the context window size in tokens
def modelname_to_contextsize(_modelname)
  100_000
end
#run(question, **kwargs) ⇒ Object
get an answer from the engine for a question.
80 81 82 83 84 85 86 87 88 89 90 |
# File 'lib/boxcars/engine/anthropic.rb', line 80 def run(question, **kwargs) prompt = Prompt.new(template: question) response = client(prompt: prompt, **kwargs) raise Error, "Anthropic: No response from API" unless response raise Error, "Anthropic: #{response['error']}" if response['error'] answer = response['completion'] Boxcars.debug(response, :yellow) answer end |