Class: Gpt4all::ConversationalAI
- Inherits: Object (class hierarchy: Object → Gpt4all::ConversationalAI)
- Defined in:
- lib/gpt4all/conversational_ai.rb
Overview
rubocop:disable Metrics/ClassLength
Constant Summary collapse
- OSX_INTEL_URL =
'https://github.com/nomic-ai/gpt4all/blob/main/chat/gpt4all-lora-quantized-OSX-intel?raw=true'
- OSX_M1_URL =
'https://github.com/nomic-ai/gpt4all/blob/main/chat/gpt4all-lora-quantized-OSX-m1?raw=true'
- LINUX_URL =
'https://github.com/nomic-ai/gpt4all/blob/main/chat/gpt4all-lora-quantized-linux-x86?raw=true'
- WINDOWS_URL =
'https://github.com/nomic-ai/gpt4all/blob/main/chat/gpt4all-lora-quantized-win64.exe?raw=true'
Instance Attribute Summary collapse
-
#decoder_config ⇒ Object
Returns the value of attribute decoder_config.
-
#executable_path ⇒ Object
Returns the value of attribute executable_path.
-
#force_download ⇒ Object
Returns the value of attribute force_download.
-
#model ⇒ Object
Returns the value of attribute model.
-
#model_path ⇒ Object
Returns the value of attribute model_path.
-
#test_mode ⇒ Object
Returns the value of attribute test_mode.
Instance Method Summary collapse
-
#initialize(model: 'gpt4all-lora-quantized', force_download: false, decoder_config: {}) ⇒ ConversationalAI
constructor
rubocop:disable Metrics/MethodLength.
-
#prepare_resources(force_download: false) ⇒ Object
rubocop:enable Metrics/MethodLength.
- #prompt(input) ⇒ Object
- #restart_bot ⇒ Object
- #start_bot ⇒ Object
- #stop_bot ⇒ Object
Constructor Details
#initialize(model: 'gpt4all-lora-quantized', force_download: false, decoder_config: {}) ⇒ ConversationalAI
rubocop:disable Metrics/MethodLength
21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 |
# Builds a conversational-AI wrapper around the locally installed gpt4all CLI.
#
# model::           model name; must be one of the two supported quantized models.
# force_download::  stored on the instance; when true, resources are re-fetched.
# decoder_config::  hash of decoder options, later passed as "--key value" flags.
#
# Raises a RuntimeError when +model+ is not a supported model name.
# rubocop:disable Metrics/MethodLength
def initialize(model: 'gpt4all-lora-quantized', force_download: false, decoder_config: {})
  @bot = nil
  @model = model
  @decoder_config = decoder_config
  @executable_path = "#{Dir.home}/.nomic/gpt4all"
  @model_path = "#{Dir.home}/.nomic/#{model}.bin"
  @force_download = force_download
  @test_mode = false

  supported = %w[gpt4all-lora-quantized gpt4all-lora-unfiltered-quantized]
  return if supported.include?(model)

  raise "Model #{model} is not supported. Current models supported are: gpt4all-lora-quantized gpt4all-lora-unfiltered-quantized"
end
Instance Attribute Details
#decoder_config ⇒ Object
Returns the value of attribute decoder_config.
12 13 14 |
# Reader: decoder options hash; each entry is forwarded to the gpt4all
# executable as a "--key value" CLI flag when the bot starts.
def decoder_config
  @decoder_config
end
#executable_path ⇒ Object
Returns the value of attribute executable_path.
12 13 14 |
# Reader: filesystem path of the gpt4all chat binary
# (defaults to "#{Dir.home}/.nomic/gpt4all").
def executable_path
  @executable_path
end
#force_download ⇒ Object
Returns the value of attribute force_download.
12 13 14 |
# Reader: whether downloads should be forced even when files already exist.
def force_download
  @force_download
end
#model ⇒ Object
Returns the value of attribute model.
12 13 14 |
# Reader: name of the selected gpt4all model.
def model
  @model
end
#model_path ⇒ Object
Returns the value of attribute model_path.
12 13 14 |
# Reader: filesystem path of the model weights file
# (defaults to "#{Dir.home}/.nomic/<model>.bin").
def model_path
  @model_path
end
#test_mode ⇒ Object
Returns the value of attribute test_mode.
12 13 14 |
# Reader: test-mode flag (initialized to false in the constructor; its
# consumers are not visible in this file excerpt).
def test_mode
  @test_mode
end
Instance Method Details
#prepare_resources(force_download: false) ⇒ Object
rubocop:enable Metrics/MethodLength
38 39 40 41 42 43 44 45 |
# Ensures both the chat executable and the model weights are present locally.
# A download task is queued for each missing artifact — or for every artifact
# when +force_download+ is true — and the queued tasks are then run in order.
# rubocop:enable Metrics/MethodLength
def prepare_resources(force_download: false)
  tasks = []
  tasks << download_executable unless File.exist?(executable_path) && !force_download
  tasks << download_model unless File.exist?(model_path) && !force_download

  tasks.compact.each(&:call)
end
#prompt(input) ⇒ Object
77 78 79 80 81 82 83 84 85 86 87 88 89 90 |
# Sends +input+ to the running bot's stdin and returns the bot's textual reply.
#
# On any StandardError (e.g. broken pipe after the child dies, read failure)
# the error message is logged, the bot is restarted, and the prompt is retried
# recursively.
#
# Fix: the previous version interpolated "#{e.}" — a malformed expression that
# dropped the exception text; it now reports e.message.
# NOTE(review): the retry is unbounded — a persistently failing bot recurses
# until the stack overflows; consider a bounded retry counter.
def prompt(input)
  ensure_bot_is_ready

  begin
    bot.first.puts(input)
    response = read_from_bot
  rescue StandardError => e
    puts "Error during prompt: #{e.message}"
    restart_bot
    response = prompt(input)
  end

  response
end
#restart_bot ⇒ Object
72 73 74 75 |
# Recycles the bot: tears down any running child process, then boots a fresh
# one. Returns whatever start_bot returns.
def restart_bot
  stop_bot
  start_bot
end
#start_bot ⇒ Object
47 48 49 50 51 52 53 54 55 56 57 58 59 60 |
# Launches the gpt4all executable as a child process with stdout and stderr
# merged (Open3.popen2e), passing the model path plus every decoder_config
# entry as "--key value" flags. Any already-running bot is stopped first, and
# wait_for_bot_ready (defined elsewhere) is invoked before returning.
def start_bot
  stop_bot if bot

  argv = [executable_path, '--model', model_path]
  decoder_config.each { |flag, setting| argv << "--#{flag}" << setting.to_s }

  @bot = Open3.popen2e(*argv)
  @bot_pid = @bot.last.pid

  wait_for_bot_ready
end
#stop_bot ⇒ Object
62 63 64 65 66 67 68 69 70 |
# Shuts down the bot child process, closes its pipes, and clears the handles.
# No-op when no bot is running.
#
# Fix: the previous version only called bot[2].exit, which kills the
# Process::Waiter *thread* but leaves the child process alive and unreaped
# (zombie/leak). We now TERM the child and join the waiter so it is reaped.
def stop_bot
  return unless bot

  bot[0].close unless bot[0].closed?
  bot[1].close unless bot[1].closed?

  begin
    Process.kill('TERM', @bot_pid) if @bot_pid
  rescue Errno::ESRCH
    # child already exited on its own
  end

  bot[2].join # reap the child so it does not linger as a zombie

  @bot = nil
  @bot_pid = nil
end