Class: LlmLib::OpenAIClient
- Inherits: Object
- Hierarchy: Object → LlmLib::OpenAIClient
- Defined in:
- lib/llm_lib.rb
Instance Method Summary collapse
- #chat_gpt_call(prompt, max_tokens, temperature = 0, top_p = 1, n = 1, stream = false, stop = "\n") ⇒ Object
- #gpt4_call(prompt, max_tokens, temperature = 0, top_p = 1, n = 1, stream = false, stop = "\n") ⇒ Object
-
#initialize(apikey) ⇒ OpenAIClient
constructor
# Example API call: prompt = "Once upon a time"; max_tokens = 100; response = client.chat_gpt_call(prompt, max_tokens); puts response
Constructor Details
#initialize(apikey) ⇒ OpenAIClient
# Example API call: prompt = "Once upon a time"; max_tokens = 100; response = client.chat_gpt_call(prompt, max_tokens); puts response
14 15 16 |
# File 'lib/llm_lib.rb', line 14

# Creates a client bound to the given OpenAI API key; the key is kept
# in an instance variable and used by the call methods below.
#
# @param apikey [String] the OpenAI API key
def initialize(apikey)
  @apikey = apikey
end
Instance Method Details
#chat_gpt_call(prompt, max_tokens, temperature = 0, top_p = 1, n = 1, stream = false, stop = "\n") ⇒ Object
18 19 20 21 22 23 24 25 26 27 28 29 30 31 |
# File 'lib/llm_lib.rb', line 18

# Sends a completion request to the "gpt-3.5-turbo" model via the
# project's OpenAI wrapper, forwarding the stored API key.
#
# @param prompt [String] text sent to the model
# @param max_tokens [Integer] maximum number of tokens to generate
# @param temperature [Numeric] sampling temperature (default 0)
# @param top_p [Numeric] nucleus-sampling cutoff (default 1)
# @param n [Integer] number of completions requested (default 1)
# @param stream [Boolean] whether to request a streamed response (default false)
# @param stop [String] stop sequence (default "\n")
# @return [Object] whatever OpenAI.send returns — presumably the API response; verify against the wrapper
def chat_gpt_call(prompt, max_tokens, temperature = 0, top_p = 1, n = 1, stream = false, stop = "\n")
  model = "gpt-3.5-turbo"
  # NOTE(review): OpenAI.send is assumed to be a project-defined class method,
  # but the name shadows Ruby's Kernel#send; consider renaming it (e.g.
  # OpenAI.request) to avoid accidental metaprogramming dispatch — confirm.
  # Redundant `response = …; response` removed: the last expression is the
  # return value in Ruby.
  OpenAI.send(@apikey, model, prompt, max_tokens, temperature, top_p, n, stream, stop)
end
#gpt4_call(prompt, max_tokens, temperature = 0, top_p = 1, n = 1, stream = false, stop = "\n") ⇒ Object
33 34 35 36 37 38 39 40 41 42 43 44 45 46 |
# File 'lib/llm_lib.rb', line 33

# Sends a completion request to the "gpt-4" model via the project's
# OpenAI wrapper, forwarding the stored API key.
#
# @param prompt [String] text sent to the model
# @param max_tokens [Integer] maximum number of tokens to generate
# @param temperature [Numeric] sampling temperature (default 0)
# @param top_p [Numeric] nucleus-sampling cutoff (default 1)
# @param n [Integer] number of completions requested (default 1)
# @param stream [Boolean] whether to request a streamed response (default false)
# @param stop [String] stop sequence (default "\n")
# @return [Object] whatever OpenAI.send returns — presumably the API response; verify against the wrapper
def gpt4_call(prompt, max_tokens, temperature = 0, top_p = 1, n = 1, stream = false, stop = "\n")
  model = "gpt-4"
  # NOTE(review): OpenAI.send is assumed to be a project-defined class method,
  # but the name shadows Ruby's Kernel#send; consider renaming it (e.g.
  # OpenAI.request) to avoid accidental metaprogramming dispatch — confirm.
  # Redundant `response = …; response` removed: the last expression is the
  # return value in Ruby.
  OpenAI.send(@apikey, model, prompt, max_tokens, temperature, top_p, n, stream, stop)
end