Module: RubyLLM::Providers::VertexAI::Models

Included in:
RubyLLM::Providers::VertexAI
Defined in:
lib/ruby_llm/providers/vertexai/models.rb

Overview

Models methods for the Vertex AI integration
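
This module is mixed into the Vertex AI provider and supplies its model-listing behavior. A minimal usage sketch follows; it assumes the gem's top-level RubyLLM.configure block and a RubyLLM.models.refresh! registry call (the refresh! call is an assumption here; only vertexai_project_id appears in this file, and other Vertex AI settings such as location or credentials may also be required):

RubyLLM.configure do |config|
  # Project ID sent as the x-goog-user-project header in #list_models below
  config.vertexai_project_id = ENV['GOOGLE_CLOUD_PROJECT']
end

# Refreshing the model registry would invoke each configured provider's
# list_models, including the implementation documented on this page.
# (refresh! is assumed; it is not defined in this file.)
RubyLLM.models.refresh!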

Constant Summary

KNOWN_GOOGLE_MODELS =

Gemini and other Google models that aren’t returned by the API

%w[
  gemini-2.5-flash-lite
  gemini-2.5-pro
  gemini-2.5-flash
  gemini-2.0-flash-lite-001
  gemini-2.0-flash-001
  gemini-2.0-flash
  gemini-2.0-flash-exp
  gemini-1.5-pro-002
  gemini-1.5-pro
  gemini-1.5-flash-002
  gemini-1.5-flash
  gemini-1.5-flash-8b
  gemini-pro
  gemini-pro-vision
  gemini-exp-1206
  gemini-exp-1121
  gemini-embedding-001
  text-embedding-005
  text-embedding-004
  text-multilingual-embedding-002
].freeze
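
These IDs act as a fallback and supplement: #list_models merges them in via build_known_models, whose implementation is not shown on this page. A minimal sketch of what such a helper could look like, assuming a hypothetical Struct-based record rather than the gem's real model classes:

# Hypothetical record type used only for this sketch; the gem's actual
# build_known_models and model info class are not shown here.
ModelRecord = Struct.new(:id, :provider, keyword_init: true)

def build_known_models
  KNOWN_GOOGLE_MODELS.map do |model_id|
    ModelRecord.new(id: model_id, provider: 'vertexai')
  end
end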

Instance Method Summary

Instance Method Details

#list_models ⇒ Object

Fetches the publisher models available from the Vertex AI API, skips deprecated entries, merges in the KNOWN_GOOGLE_MODELS fallback list, and returns the combined set. If the API request fails, it falls back to the known models alone.

# File 'lib/ruby_llm/providers/vertexai/models.rb', line 32

def list_models
  all_models = []
  page_token = nil

  all_models.concat(build_known_models)

  loop do
    response = @connection.get('publishers/google/models') do |req|
      req.headers['x-goog-user-project'] = @config.vertexai_project_id
      req.params = { pageSize: 100 }
      req.params[:pageToken] = page_token if page_token
    end

    publisher_models = response.body['publisherModels'] || []
    publisher_models.each do |model_data|
      next if model_data['launchStage'] == 'DEPRECATED'

      model_id = extract_model_id_from_path(model_data['name'])
      all_models << build_model_from_api_data(model_data, model_id)
    end

    page_token = response.body['nextPageToken']
    break unless page_token
  end

  all_models
rescue StandardError => e
  RubyLLM.logger.debug "Error fetching Vertex AI models: #{e.message}"
  build_known_models
end
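
The method pages through publishers/google/models using pageSize and pageToken, filters out models whose launchStage is DEPRECATED, and logs and falls back to the known list on any error. The same pagination pattern can be reproduced outside the gem; below is a standalone sketch using Faraday directly, in which authentication is omitted and the base URL, API version, and project ID are illustrative assumptions (the gem's @connection handles these concerns):

require 'faraday'

# Illustrative values; real code would take these from RubyLLM's configuration.
project_id = ENV.fetch('GOOGLE_CLOUD_PROJECT', 'my-project')

conn = Faraday.new(url: 'https://us-central1-aiplatform.googleapis.com/v1beta1/') do |f|
  f.response :json # parse JSON response bodies into Hashes
end

models = []
page_token = nil

loop do
  response = conn.get('publishers/google/models') do |req|
    req.headers['x-goog-user-project'] = project_id
    req.params = { pageSize: 100 }
    req.params[:pageToken] = page_token if page_token
  end

  models.concat(response.body.fetch('publisherModels', []))

  # Continue until the API stops returning a nextPageToken.
  page_token = response.body['nextPageToken']
  break unless page_token
end

puts "Fetched #{models.size} publisher models"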