# File 'lib/llm_memory/broca.rb', line 23
def respond(args)
  final_prompt = generate_prompt(args)
  # Record the rendered prompt as a user message, then trim history to the token budget.
  @messages.push({role: "user", content: final_prompt})
  adjust_token_count
  begin
    response = client.chat(
      parameters: {
        model: @model,
        messages: @messages,
        temperature: @temperature
      }
    )
    LlmMemory.logger.debug(response)
    response_content = response.dig("choices", 0, "message", "content")
    # Keep the reply in the conversation history (the gem stores it under the :system role).
    @messages.push({role: "system", content: response_content}) unless response_content.nil?
    response_content
  rescue => e
    # On any API failure, log the error and return nil rather than raising.
    LlmMemory.logger.info(e.inspect)
    nil
  end
end
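A minimal usage sketch, not part of the documented source: it assumes an OpenAI access token is already configured for the gem, and that generate_prompt interpolates the hash passed to #respond into an ERB prompt template. The question key and the prompt string here are illustrative and must match each other.

require "llm_memory"

# Hypothetical template; "question" is an arbitrary key chosen for this example.
broca = LlmMemory::Broca.new(
  prompt: "Answer the following question: <%= question %>",
  model: "gpt-3.5-turbo"
)

reply = broca.respond(question: "What does LlmMemory::Broca do?")
puts reply # the assistant's text, or nil if the API call raised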