Class: LLM::Gemini

Inherits:
Provider
Defined in:
lib/llm/providers/gemini.rb,
lib/llm/providers/gemini/audio.rb,
lib/llm/providers/gemini/files.rb,
lib/llm/providers/gemini/images.rb,
lib/llm/providers/gemini/models.rb,
lib/llm/providers/gemini/error_handler.rb,
lib/llm/providers/gemini/stream_parser.rb,
lib/llm/providers/gemini/request_adapter.rb,
lib/llm/providers/gemini/response_adapter.rb

Overview

The Gemini class implements a provider for Gemini. The Gemini provider can accept multiple kinds of input (text, images, audio, and video). Inputs can be provided inline through the prompt for files under 20MB, or through the Gemini Files API for files over 20MB.

Examples:

#!/usr/bin/env ruby
require "llm"

llm = LLM.gemini(key: ENV["KEY"])
bot = LLM::Bot.new(llm)
bot.chat ["Tell me about this photo", File.open("/images/horse.jpg", "rb")]
bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
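
Files over 20MB go through the Files API instead of being inlined. A minimal sketch, assuming LLM::Gemini::Files#create accepts a file: path (see #files below):

#!/usr/bin/env ruby
require "llm"

llm = LLM.gemini(key: ENV["KEY"])
# Upload once via the Files API, then reference the file in a chat message.
# create(file:) is an assumption; see LLM::Gemini::Files for the exact interface.
file = llm.files.create(file: "/videos/lecture.mp4")
bot = LLM::Bot.new(llm)
bot.chat ["Summarize this lecture", file]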

Defined Under Namespace

Classes: Audio, Files, Images, Models

Constant Summary

HOST =
"generativelanguage.googleapis.com"

Instance Method Summary

Methods inherited from Provider

#chat, clients, #inspect, #moderations, #respond, #responses, #schema, #server_tool, #vector_stores, #with

Constructor Details

#initialize ⇒ Gemini

Returns a new instance of Gemini.

Parameters:

  • key (String, nil)

    The secret key for authentication



# File 'lib/llm/providers/gemini.rb', line 36

def initialize(**)
  super(host: HOST, **)
end

Instance Method Details

#assistant_role ⇒ String

Returns the role of the assistant in the conversation. Usually "assistant" or "model"

Returns:

  • (String)

    Returns the role of the assistant in the conversation. Usually "assistant" or "model"



# File 'lib/llm/providers/gemini.rb', line 161

def assistant_role
  "model"
end

#audio ⇒ LLM::Gemini::Audio

Provides an interface to Gemini's audio API

Returns:

  • (LLM::Gemini::Audio)



# File 'lib/llm/providers/gemini.rb', line 78

def audio
  LLM::Gemini::Audio.new(self)
end
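
A usage sketch for transcription; the create_transcription method and the text accessor are assumptions, so check LLM::Gemini::Audio for the exact interface:

llm = LLM.gemini(key: ENV["KEY"])
# create_transcription(file:) and res.text are assumed names, not confirmed API.
res = llm.audio.create_transcription(file: "/audio/interview.mp3")
print res.text, "\n"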

#complete(prompt, params = {}) ⇒ LLM::Response

Provides an interface to the chat completions API

Examples:

llm = LLM.gemini(key: ENV["KEY"])
messages = [{role: "system", content: "Your task is to answer all of my questions"}]
res = llm.complete("5 + 2 ?", messages:)
print "[#{res.choices[0].role}]", res.choices[0].content, "\n"

Parameters:

  • prompt (String)

    The input prompt to be completed

  • params (Hash) (defaults to: {})

    The parameters to maintain throughout the conversation. Any parameter the provider supports may be included, not only those listed here.

Returns:

  • (LLM::Response)



# File 'lib/llm/providers/gemini.rb', line 66

def complete(prompt, params = {})
  params, stream, tools, role, model = normalize_complete_params(params)
  req = build_complete_request(prompt, params, role, model, stream)
  res = execute(request: req, stream: stream)
  ResponseAdapter.adapt(res, type: :completion)
    .extend(Module.new { define_method(:__tools__) { tools } })
end
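
The stream parameter is extracted from params (see normalize_complete_params above). A minimal sketch, assuming stream: accepts an IO that receives the response as it arrives:

llm = LLM.gemini(key: ENV["KEY"])
# stream: is pulled out of params by normalize_complete_params; an IO sink is assumed here.
llm.complete("Tell me a short story", stream: $stdout)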

#default_model ⇒ String

Returns the default model for chat completions

Returns:

  • (String)




# File 'lib/llm/providers/gemini.rb', line 110

def default_model
  "gemini-2.5-flash"
end

#developer_role ⇒ Symbol

Returns the provider's developer role

Returns:

  • (Symbol)


# File 'lib/llm/providers/gemini.rb', line 155

def developer_role
  :user
end

#embed(input, model: "text-embedding-004", **params) ⇒ LLM::Response

Provides an embedding

Parameters:

  • input (String, Array<String>)

    The input to embed

  • model (String) (defaults to: "text-embedding-004")

    The embedding model to use

  • params (Hash)

    Other embedding parameters

Returns:

  • (LLM::Response)



# File 'lib/llm/providers/gemini.rb', line 47

def embed(input, model: "text-embedding-004", **params)
  model = model.respond_to?(:id) ? model.id : model
  path = ["/v1beta/models/#{model}", "embedContent?key=#{@key}"].join(":")
  req = Net::HTTP::Post.new(path, headers)
  req.body = LLM.json.dump({content: {parts: [{text: input}]}})
  res = execute(request: req)
  ResponseAdapter.adapt(res, type: :embedding)
end
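
A usage sketch; the request mirrors Gemini's embedContent endpoint shown above, and the embeddings accessor on the response is an assumption:

llm = LLM.gemini(key: ENV["KEY"])
res = llm.embed("The quick brown fox")
# res.embeddings is an assumed accessor for the embedContent response body.
print res.embeddings[0].size, "\n"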

#files ⇒ LLM::Gemini::Files

Provides an interface to Gemini's file management API

Returns:

  • (LLM::Gemini::Files)



# File 'lib/llm/providers/gemini.rb', line 94

def files
  LLM::Gemini::Files.new(self)
end
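
A usage sketch for enumerating uploaded files; the all method and the name accessor are assumptions, so see LLM::Gemini::Files for the exact interface:

llm = LLM.gemini(key: ENV["KEY"])
# all and name are assumed names, not confirmed API.
llm.files.all.each { print _1.name, "\n" }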

#images ⇒ LLM::Gemini::Images

Provides an interface to Gemini's image generation API

Returns:

  • (LLM::Gemini::Images)



# File 'lib/llm/providers/gemini.rb', line 86

def images
  LLM::Gemini::Images.new(self)
end
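
A usage sketch; create(prompt:) is an assumption, so see LLM::Gemini::Images for the exact interface:

llm = LLM.gemini(key: ENV["KEY"])
# create(prompt:) is an assumed entry point for image generation.
res = llm.images.create(prompt: "A dog on a rocket to the moon")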

#models ⇒ LLM::Gemini::Models

Provides an interface to Gemini's models API

Returns:

  • (LLM::Gemini::Models)



# File 'lib/llm/providers/gemini.rb', line 102

def models
  LLM::Gemini::Models.new(self)
end
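
A usage sketch, assuming LLM::Gemini::Models exposes an all method that enumerates available models:

llm = LLM.gemini(key: ENV["KEY"])
# all and id are assumed names; see LLM::Gemini::Models for the exact interface.
llm.models.all.each { print _1.id, "\n" }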

#server_tools ⇒ Hash{String => LLM::ServerTool}

Note:

Some of the tools returned by this method require configuration through options that are easier to set with the LLM::Provider#server_tool method.

Returns:

  • (Hash{String => LLM::ServerTool})



# File 'lib/llm/providers/gemini.rb', line 121

def server_tools
  {
    google_search: server_tool(:google_search),
    code_execution: server_tool(:code_execution),
    url_context: server_tool(:url_context)
  }
end
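
Each entry can be passed to #complete through the tools: parameter; this is exactly what #web_search does below:

llm = LLM.gemini(key: ENV["KEY"])
res = llm.complete("Summarize today's top tech headlines",
                   tools: [llm.server_tools[:google_search]])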

#system_role ⇒ Symbol

Returns the provider's system role

Returns:

  • (Symbol)

    Returns the provider's system role



# File 'lib/llm/providers/gemini.rb', line 148

def system_role
  :user
end

#user_role ⇒ Symbol

Returns the provider's user role

Returns:

  • (Symbol)

    Returns the provider's user role



# File 'lib/llm/providers/gemini.rb', line 141

def user_role
  :user
end

#web_search(query:) ⇒ LLM::Response

A convenience method for performing a web search using the Google Search tool.

Parameters:

  • query (String)

    The search query.

Returns:

  • (LLM::Response)



# File 'lib/llm/providers/gemini.rb', line 134

def web_search(query:)
  ResponseAdapter.adapt(complete(query, tools: [server_tools[:google_search]]), type: :web_search)
end
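
Since this wraps #complete with the google_search server tool (see the body above), usage is a single call:

llm = LLM.gemini(key: ENV["KEY"])
res = llm.web_search(query: "Latest FreeBSD release")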