Class: Langchain::LLM::OpenAI

Inherits:
Base
  • Object
Defined in:
lib/langchain/llm/openai.rb

Overview

LLM interface for OpenAI APIs: platform.openai.com/overview

Gem requirements:

gem "ruby-openai", "~> 6.3.0"

Usage:

llm = Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  llm_options: {}, # Available options: https://github.com/alexrudall/ruby-openai/blob/main/lib/openai/client.rb#L5-L13
  default_options: {}
)

Direct Known Subclasses

Azure

Constant Summary

DEFAULTS =
{
  n: 1,
  temperature: 0.0,
  chat_model: "gpt-4o-mini",
  embedding_model: "text-embedding-3-small"
}.freeze
EMBEDDING_SIZES =
{
  "text-embedding-ada-002" => 1536,
  "text-embedding-3-large" => 3072,
  "text-embedding-3-small" => 1536
}.freeze

Instance Attribute Summary

Attributes inherited from Base

#client, #defaults

Instance Method Summary

Methods inherited from Base

#chat_parameters, #default_dimension

Methods included from DependencyHelper

#depends_on

Constructor Details

#initialize(api_key:, llm_options: {}, default_options: {}) ⇒ OpenAI

Initialize an OpenAI LLM instance

Parameters:

  • api_key (String)

    The API key to use

  • llm_options (Hash)

    Options to pass to the OpenAI::Client constructor

  • default_options (Hash)

    Default options merged over DEFAULTS (e.g. chat_model, temperature, embedding_model)



# File 'lib/langchain/llm/openai.rb', line 33

def initialize(api_key:, llm_options: {}, default_options: {})
  depends_on "ruby-openai", req: "openai"

  llm_options[:log_errors] = Langchain.logger.debug? unless llm_options.key?(:log_errors)

  @client = ::OpenAI::Client.new(access_token: api_key, **llm_options) do |f|
    f.response :logger, Langchain.logger, {headers: true, bodies: true, errors: true}
  end

  @defaults = DEFAULTS.merge(default_options)
  chat_parameters.update(
    model: {default: @defaults[:chat_model]},
    logprobs: {},
    top_logprobs: {},
    n: {default: @defaults[:n]},
    temperature: {default: @defaults[:temperature]},
    user: {},
    response_format: {default: @defaults[:response_format]}
  )
  chat_parameters.ignore(:top_k)
end
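
For example, a client with project-wide defaults might be constructed like this (a minimal sketch; the model names are illustrative, and the keys correspond to DEFAULTS above):

llm = Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  default_options: {
    temperature: 0.7,
    chat_model: "gpt-4o",
    embedding_model: "text-embedding-3-large"
  }
)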

Instance Method Details

#chat(params = {}, &block) ⇒ Langchain::LLM::OpenAIResponse

Generate a chat completion for given messages.

Parameters:

  • params (Hash) (defaults to: {})

    unified chat parameters from [Langchain::LLM::Parameters::Chat::SCHEMA]

Options Hash (params):

  • :messages (Array<Hash>)

    List of messages comprising the conversation so far

  • :model (String)

    ID of the model to use

Raises:

  • (ArgumentError)


# File 'lib/langchain/llm/openai.rb', line 120

def chat(params = {}, &block)
  parameters = chat_parameters.to_params(params)

  raise ArgumentError.new("messages argument is required") if Array(parameters[:messages]).empty?
  raise ArgumentError.new("model argument is required") if parameters[:model].to_s.empty?
  if parameters[:tool_choice] && Array(parameters[:tools]).empty?
    raise ArgumentError.new("'tool_choice' is only allowed when 'tools' are specified.")
  end

  if block
    @response_chunks = []
    parameters[:stream_options] = {include_usage: true}
    parameters[:stream] = proc do |chunk, _bytesize|
      chunk_content = chunk.dig("choices", 0) || {}
      @response_chunks << chunk
      yield chunk_content
    end
  end

  response = with_api_error_handling do
    client.chat(parameters: parameters)
  end

  response = response_from_chunks if block
  reset_response_chunks

  Langchain::LLM::OpenAIResponse.new(response)
end
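
A sketch of typical usage, assuming the llm instance from the Usage section above; the chat_completion reader on Langchain::LLM::OpenAIResponse returns the assistant's text, and the yielded chunk hash follows OpenAI's streaming format:

messages = [
  {role: "system", content: "You are a helpful assistant."},
  {role: "user", content: "What is an embedding?"}
]

response = llm.chat(messages: messages)
response.chat_completion # => the assistant's reply as a String

# Passing a block streams the response. Each yielded chunk is
# choices[0] of a streaming event, so the text lives under "delta".
llm.chat(messages: messages) do |chunk|
  print chunk.dig("delta", "content")
end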

#complete(prompt:, **params) ⇒ Langchain::LLM::OpenAIResponse

Generate a completion for a given prompt

Parameters:

  • prompt (String)

    The prompt to generate a completion for

  • params (Hash)

    The parameters to pass to the `chat()` method

Returns:

  • (Langchain::LLM::OpenAIResponse)

    The completion response

# File 'lib/langchain/llm/openai.rb', line 102

def complete(prompt:, **params)
  Langchain.logger.warn "DEPRECATED: `Langchain::LLM::OpenAI#complete` is deprecated, and will be removed in the next major version. Use `Langchain::LLM::OpenAI#chat` instead."

  if params[:stop_sequences]
    params[:stop] = params.delete(:stop_sequences)
  end
  # Should we still accept the `messages: []` parameter here?
  messages = [{role: "user", content: prompt}]
  chat(messages: messages, **params)
end
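
Because complete wraps the prompt in a single user message and delegates to chat(), usage is a one-liner (a sketch; it also emits the deprecation warning shown above):

response = llm.complete(prompt: "Translate 'hello' into French")
response.chat_completion # => the completion text, via the chat API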

#default_dimensions ⇒ Object



# File 'lib/langchain/llm/openai.rb', line 162

def default_dimensions
  @defaults[:dimensions] || EMBEDDING_SIZES.fetch(defaults[:embedding_model])
end
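
With no dimensions override in the defaults, the size is looked up in EMBEDDING_SIZES for the configured embedding model. For example:

llm.default_dimensions # => 1536 for the default "text-embedding-3-small"

Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  default_options: {embedding_model: "text-embedding-3-large"}
).default_dimensions # => 3072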

#embed(text:, model: defaults[:embedding_model], encoding_format: nil, user: nil, dimensions: @defaults[:dimensions]) ⇒ Langchain::LLM::OpenAIResponse

Generate an embedding for a given text

Parameters:

  • text (String)

    The text to generate an embedding for

  • model (String) (defaults to: defaults[:embedding_model])

    ID of the model to use

  • encoding_format (String) (defaults to: nil)

    The format to return the embeddings in. Can be either float or base64.

  • user (String) (defaults to: nil)

    A unique identifier representing your end-user

  • dimensions (Integer) (defaults to: @defaults[:dimensions])

    The number of dimensions the resulting embedding should have (supported by the text-embedding-3 models)

Returns:

  • (Langchain::LLM::OpenAIResponse)

    The embedding response
Raises:

  • (ArgumentError)


# File 'lib/langchain/llm/openai.rb', line 62

def embed(
  text:,
  model: defaults[:embedding_model],
  encoding_format: nil,
  user: nil,
  dimensions: @defaults[:dimensions]
)
  raise ArgumentError.new("text argument is required") if text.empty?
  raise ArgumentError.new("model argument is required") if model.empty?
  raise ArgumentError.new("encoding_format must be either float or base64") if encoding_format && %w[float base64].include?(encoding_format)

  parameters = {
    input: text,
    model: model
  }
  parameters[:encoding_format] = encoding_format if encoding_format
  parameters[:user] = user if user

  if dimensions
    parameters[:dimensions] = dimensions
  elsif EMBEDDING_SIZES.key?(model)
    parameters[:dimensions] = EMBEDDING_SIZES[model]
  end

  # dimensions parameter not supported by text-embedding-ada-002 model
  parameters.delete(:dimensions) if model == "text-embedding-ada-002"

  response = with_api_error_handling do
    client.embeddings(parameters: parameters)
  end

  Langchain::LLM::OpenAIResponse.new(response)
end
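
A sketch of typical usage, again assuming the llm instance from the Usage section; the embedding reader on Langchain::LLM::OpenAIResponse returns the vector:

response = llm.embed(text: "Ruby is a programmer's best friend")
response.embedding # => [0.0123, -0.0456, ...] (1536 floats by default)

# The text-embedding-3 models accept a dimensions parameter to
# request a shorter vector:
llm.embed(text: "Ruby", model: "text-embedding-3-large", dimensions: 256)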

#summarize(text:) ⇒ Langchain::LLM::OpenAIResponse

Generate a summary for a given text

Parameters:

  • text (String)

    The text to generate a summary for

Returns:

  • (Langchain::LLM::OpenAIResponse)

    The response wrapping the generated summary (summarize delegates to complete, which returns a response object)



# File 'lib/langchain/llm/openai.rb', line 153

def summarize(text:)
  prompt_template = Langchain::Prompt.load_from_path(
    file_path: Langchain.root.join("langchain/llm/prompts/summarize_template.yaml")
  )
  prompt = prompt_template.format(text: text)

  complete(prompt: prompt)
end
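
A sketch of usage; since summarize delegates to complete (and thus chat), the summary text is read off the response object:

response = llm.summarize(text: long_article_text) # long_article_text: any String to summarize (hypothetical variable)
response.chat_completion # => the generated summary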