Class: BxBuilderChain::Llm::OpenAi

Inherits:
Base
  • Object
show all
Defined in:
lib/bx_builder_chain/llm/open_ai.rb

Overview

LLM interface for OpenAI APIs: platform.openai.com/overview

Gem requirements:

gem "ruby-openai", "~> 4.0.0"

Usage:

openai = BxBuilderChain::Llm::OpenAi.new(api_key:, llm_options: {})

Constant Summary collapse

DEFAULTS =
{
  temperature: 0.0,
  completion_model_name: "text-davinci-003",
  chat_completion_model_name: "gpt-3.5-turbo",
  embeddings_model_name: "text-embedding-ada-002",
  dimension: 1536
}.freeze
LENGTH_VALIDATOR =
BxBuilderChain::Utils::TokenLength::OpenAiValidator
ROLE_MAPPING =
{
  "ai" => "assistant",
  "human" => "user"
}

Instance Attribute Summary collapse

Attributes inherited from Base

#client

Instance Method Summary collapse

Methods inherited from Base

#count_tokens, #default_dimension, #summarize

Methods included from DependencyHelper

#depends_on

Constructor Details

#initialize(api_key: BxBuilderChain.configuration.openai_api_key, llm_options: {}, default_options: {}) ⇒ OpenAi

Returns a new instance of OpenAi.



28
29
30
31
32
33
34
# File 'lib/bx_builder_chain/llm/open_ai.rb', line 28

# Build an OpenAI-backed LLM client.
#
# @param api_key [String] OpenAI API key (defaults to the configured key)
# @param llm_options [Hash] extra options forwarded to OpenAI::Client.new
# @param default_options [Hash] overrides merged over DEFAULTS
def initialize(api_key: BxBuilderChain.configuration.openai_api_key, llm_options: {}, default_options: {})
  depends_on "ruby-openai"
  require "openai"

  # Merge caller overrides over the class-level defaults first,
  # then construct the underlying API client.
  @defaults = DEFAULTS.merge(default_options)
  @client = ::OpenAI::Client.new(access_token: api_key, **llm_options)
end

Instance Attribute Details

#functionsObject

Returns the value of attribute functions.



26
27
28
# File 'lib/bx_builder_chain/llm/open_ai.rb', line 26

# Reader for the optional function-calling definitions passed to the
# chat endpoint; when set, #chat forwards them instead of computing
# a max_tokens budget.
def functions
  @functions
end

Instance Method Details

#chat(prompt: "", messages: [], context: "", examples: [], **options) {|AIMessage| ... } ⇒ AIMessage

Generate a chat completion for a given prompt or messages.

Examples

# simplest case, just give a prompt
openai.chat prompt: "When was Ruby first released?"

# prompt plus some context about how to respond
openai.chat context: "You are RubyGPT, a helpful chat bot for helping people learn Ruby", prompt: "Does Ruby have a REPL like IPython?"

# full control over messages that get sent, equivalent to the above
openai.chat messages: [
  {
    role: "system",
    content: "You are RubyGPT, a helpful chat bot for helping people learn Ruby"
  },
  {
    role: "user",
    content: "When was Ruby first released?"
  }
]

# few-shot prompting with examples
openai.chat prompt: "When was factory_bot released?",
  examples: [
    {
      role: "user",
      content: "When was Ruby on Rails released?"
    },
    {
      role: "assistant",
      content: "2004"
    },
  ]

Parameters:

  • prompt (HumanMessage) (defaults to: "")

    The prompt to generate a chat completion for

  • messages (Array<AIMessage|HumanMessage>) (defaults to: [])

    The messages that have been sent in the conversation

  • context (SystemMessage) (defaults to: "")

    An initial context to provide as a system message, ie “You are RubyGPT, a helpful chat bot for helping people learn Ruby”

  • examples (Array<AIMessage|HumanMessage>) (defaults to: [])

    Examples of messages to provide to the model. Useful for Few-Shot Prompting

  • options (Hash)

    extra parameters passed to OpenAI::Client#chat

Yields:

  • (AIMessage)

    Stream responses back one String at a time

Returns:

  • (AIMessage)

    The chat completion

Raises:

  • (ArgumentError)


119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
# File 'lib/bx_builder_chain/llm/open_ai.rb', line 119

# Generate a chat completion for a given prompt or message history.
#
# @param prompt [String] single human prompt (used when :messages is empty)
# @param messages [Array<Hash>] full conversation history to send
# @param context [String] initial system-message context
# @param examples [Array<Hash>] few-shot example messages
# @param options [Hash] extra parameters forwarded to OpenAI::Client#chat
# @return [String, nil] the completion content, or nil if the response
#   carries no choices (e.g. an API error payload)
# @raise [ArgumentError] when neither :prompt nor :messages is supplied
def chat(prompt: "", messages: [], context: "", examples: [], **options)
  # Idiomatic raise form: class plus message, not ArgumentError.new.
  raise ArgumentError, ":prompt or :messages argument is expected" if prompt.empty? && messages.empty?

  parameters = compose_parameters @defaults[:chat_completion_model_name], options
  parameters[:messages] = compose_chat_messages(prompt: prompt, messages: messages, context: context, examples: examples)

  if functions
    # Function-calling requests pass the definitions through and let the
    # API manage the completion budget itself.
    parameters[:functions] = functions
  else
    # Otherwise cap max_tokens to what the model's context window allows.
    parameters[:max_tokens] = validate_max_tokens(parameters[:messages], parameters[:model])
  end

  response = client.chat(parameters: parameters)

  response.dig("choices", 0, "message", "content")
end

#complete(prompt:, **params) ⇒ String

Generate a completion for a given prompt

Parameters:

  • prompt (String)

    The prompt to generate a completion for

  • params

    extra parameters passed to OpenAI::Client#complete

Returns:

  • (String)

    The completion



65
66
67
68
69
70
71
72
73
# File 'lib/bx_builder_chain/llm/open_ai.rb', line 65

# Generate a text completion for a single prompt.
#
# @param prompt [String] the prompt to complete
# @param params [Hash] extra parameters forwarded to the completions endpoint
# @return [String, nil] the completion text from the first choice
def complete(prompt:, **params)
  request = compose_parameters(@defaults[:completion_model_name], params)

  # Attach the prompt and a token budget validated against the model limit.
  request[:prompt] = prompt
  request[:max_tokens] = validate_max_tokens(prompt, request[:model])

  api_response = client.completions(parameters: request)
  api_response.dig("choices", 0, "text")
end

#embed(text:, **params) ⇒ Array

Generate an embedding for a given text

Parameters:

  • text (String)

    The text to generate an embedding for

  • params

    extra parameters passed to OpenAI::Client#embeddings

Returns:

  • (Array)

    The embedding



43
44
45
46
47
48
49
50
51
52
53
54
55
56
# File 'lib/bx_builder_chain/llm/open_ai.rb', line 43

# Generate an embedding vector for the given text.
#
# @param text [String] the text to embed
# @param params [Hash] extra parameters forwarded to OpenAI::Client#embeddings
# @return [Array<Float>] the embedding vector
# @raise [RuntimeError] when the API response contains no embedding
def embed(text:, **params)
  parameters = {model: @defaults[:embeddings_model_name], input: text}

  validate_max_tokens(text, parameters[:model])

  response = client.embeddings(parameters: parameters.merge(params))
  embedding = response.dig("data", 0, "embedding")

  return embedding if embedding

  # Removed stray debug `puts response`; surface the API's error payload
  # (previously the message interpolated response["data"], which is nil
  # exactly when the request fails).
  raise "Error: #{response["error"] || response}"
end