Method: Langchain::LLM::GoogleGemini#chat

Defined in:
lib/langchain/llm/google_gemini.rb

#chat(params = {}) ⇒ Object

Generate a chat completion for the given conversation messages

Parameters:

  • messages (Array<Hash>)

    List of messages comprising the conversation so far

  • model (String)

    The model to use

  • tools (Array<Hash>)

    A list of Tools the model may use to generate the next response (see the example after this list)

  • tool_choice (String)

    Specifies the mode in which function calling executes. Defaults to AUTO when unspecified. Possible values: AUTO, ANY, NONE

  • system (String)

    Developer-set system instruction

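Example:

A minimal sketch of a tool-calling request. The llm instance and the get_weather declaration below are hypothetical, and the messages hash is assumed to follow Gemini's native role/parts content format:

  llm = Langchain::LLM::GoogleGemini.new(api_key: ENV["GOOGLE_GEMINI_API_KEY"])

  response = llm.chat(
    messages: [{role: "user", parts: [{text: "What is the weather in Boston?"}]}],
    system: "You are a concise weather assistant.",
    tools: [
      {
        name: "get_weather",
        description: "Returns the current weather for a city",
        parameters: {
          type: "object",
          properties: {city: {type: "string"}},
          required: ["city"]
        }
      }
    ],
    tool_choice: "AUTO"
  )
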
Raises:

  • (ArgumentError)

    Raised when the messages argument is missing or empty

# File 'lib/langchain/llm/google_gemini.rb', line 39

def chat(params = {})
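  # Rewrap the plain-text system instruction and the tool declarations into the structures Gemini expects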
  params[:system] = {parts: [{text: params[:system]}]} if params[:system]
  params[:tools] = {function_declarations: params[:tools]} if params[:tools]

  raise ArgumentError.new("messages argument is required") if Array(params[:messages]).empty?

  parameters = chat_parameters.to_params(params)
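  # Gemini nests sampling options under generation_config, so hoist any top-level values there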
  parameters[:generation_config] ||= {}
  parameters[:generation_config][:temperature] ||= parameters[:temperature] if parameters[:temperature]
  parameters.delete(:temperature)
  parameters[:generation_config][:top_p] ||= parameters[:top_p] if parameters[:top_p]
  parameters.delete(:top_p)
  parameters[:generation_config][:top_k] ||= parameters[:top_k] if parameters[:top_k]
  parameters.delete(:top_k)
  parameters[:generation_config][:max_output_tokens] ||= parameters[:max_tokens] if parameters[:max_tokens]
  parameters.delete(:max_tokens)
  parameters[:generation_config][:response_mime_type] ||= parameters[:response_format] if parameters[:response_format]
  parameters.delete(:response_format)
  parameters[:generation_config][:stop_sequences] ||= parameters[:stop] if parameters[:stop]
  parameters.delete(:stop)

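  # POST to the v1beta generateContent endpoint for the chosen model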
  uri = URI("https://generativelanguage.googleapis.com/v1beta/models/#{parameters[:model]}:generateContent?key=#{api_key}")

  parsed_response = http_post(uri, parameters)

  wrapped_response = Langchain::LLM::GoogleGeminiResponse.new(parsed_response, model: parameters[:model])

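  # Return the wrapped response only if it holds a completion or tool calls; otherwise surface the raw payload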
  if wrapped_response.chat_completion || Array(wrapped_response.tool_calls).any?
    wrapped_response
  else
    raise StandardError.new(parsed_response)
  end
end
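
As the body above shows, top-level sampling options (temperature, top_p, top_k, max_tokens, response_format, stop) are hoisted into Gemini's generation_config before the request is sent. A sketch with illustrative values:

  llm.chat(
    messages: [{role: "user", parts: [{text: "Hello"}]}],
    temperature: 0.2,
    top_p: 0.9,
    max_tokens: 256,
    stop: ["DONE"]
  )
  # the request payload then carries:
  # generation_config: {temperature: 0.2, top_p: 0.9, max_output_tokens: 256, stop_sequences: ["DONE"]}

The returned Langchain::LLM::GoogleGeminiResponse exposes chat_completion for text answers and tool_calls when the model requests a function invocation; a response containing neither raises StandardError with the parsed payload.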