Class: Langchain::LLM::OllamaResponse
Instance Attribute Summary
Attributes inherited from BaseResponse
#context, #model, #raw_response
Instance Method Summary
collapse
#chat_completion
Constructor Details
#initialize(raw_response, model: nil, prompt_tokens: nil) ⇒ OllamaResponse
Returns a new instance of OllamaResponse.
5
6
7
8
|
# File 'lib/langchain/llm/response/ollama_response.rb', line 5
# Builds a response wrapper around a raw Ollama API payload.
#
# @param raw_response [Hash] parsed JSON body returned by the Ollama API
# @param model [String, nil] name of the model that produced the response
# @param prompt_tokens [Integer, nil] externally supplied prompt token count
#
# NOTE(review): @prompt_tokens is not read by the #prompt_tokens method visible
# in this file (it re-derives the count from raw_response) — confirm the ivar
# is used elsewhere before relying on it.
def initialize(raw_response, model: nil, prompt_tokens: nil)
  # Stash the caller-supplied count before delegating to BaseResponse, which
  # (per the inherited attribute list) holds raw_response and model.
  @prompt_tokens = prompt_tokens
  super(raw_response, model: model)
end
|
Instance Method Details
#chat_completion ⇒ Object
14
15
16
|
# File 'lib/langchain/llm/response/ollama_response.rb', line 14
# Extracts the assistant message text from a chat-style response.
#
# @return [String, nil] the message content, or nil when absent
def chat_completion
  message = raw_response["message"]
  message&.dig("content")
end
|
#completion ⇒ Object
18
19
20
|
# File 'lib/langchain/llm/response/ollama_response.rb', line 18
# Extracts the generated text from a completion-style (non-chat) response.
#
# @return [String, nil] the "response" field, or nil when absent
def completion
  raw_response["response"]
end
|
#completion_tokens ⇒ Object
42
43
44
|
# File 'lib/langchain/llm/response/ollama_response.rb', line 42
# Number of tokens generated for the completion, reported only once the
# response is finished (Ollama includes eval_count in the final chunk).
#
# @return [Integer, nil] the eval_count, or nil while streaming / when absent
def completion_tokens
  return unless done?
  raw_response["eval_count"]
end
|
#completions ⇒ Object
22
23
24
|
# File 'lib/langchain/llm/response/ollama_response.rb', line 22
# Wraps the single completion in an array for API parity with multi-choice
# providers.
#
# @return [Array<String>] one-element array, or empty when there is no completion
def completions
  single = completion
  single.nil? ? [] : [single]
end
|
#created_at ⇒ Object
10
11
12
|
# File 'lib/langchain/llm/response/ollama_response.rb', line 10
# Timestamp at which Ollama created the response.
#
# The original looked up "created_at" twice (once for the guard, once for the
# parse); hoist it into a local so the hash is read only once.
#
# @return [Time, nil] parsed creation time, or nil when the field is absent
def created_at
  timestamp = raw_response["created_at"]
  Time.parse(timestamp) if timestamp
end
|
#embedding ⇒ Object
26
27
28
|
# File 'lib/langchain/llm/response/ollama_response.rb', line 26
# Convenience accessor for the first (typically only) embedding vector.
#
# @return [Array<Float>, nil] the first embedding, or nil when none exist
def embedding
  embeddings[0]
end
|
#embeddings ⇒ Object
30
31
32
|
# File 'lib/langchain/llm/response/ollama_response.rb', line 30
# All embedding vectors from an embeddings-endpoint response.
#
# @return [Array<Array<Float>>] the vectors, or [] when the response is nil
#   or has no "embeddings" field
def embeddings
  return [] if raw_response.nil?
  raw_response["embeddings"] || []
end
|
#prompt_tokens ⇒ Object
38
39
40
|
# File 'lib/langchain/llm/response/ollama_response.rb', line 38
# Number of tokens in the prompt, reported only once the response is finished.
# Defaults to 0 when Ollama omits prompt_eval_count from a finished response.
#
# @return [Integer, nil] prompt token count, or nil while streaming
def prompt_tokens
  return unless done?
  raw_response.fetch("prompt_eval_count", 0)
end
|
#role ⇒ Object
34
35
36
|
# File 'lib/langchain/llm/response/ollama_response.rb', line 34
# Ollama responses are always authored by the model, so the chat role is
# a fixed value.
#
# @return [String] always "assistant"
def role
  "assistant"
end
|
#tool_calls ⇒ Object
50
51
52
|
# File 'lib/langchain/llm/response/ollama_response.rb', line 50
# Tool/function calls requested by the model in a chat response.
#
# @return [Array<Hash>] the tool calls, or [] when the message has none
def tool_calls
  raw_calls = raw_response.dig("message", "tool_calls")
  # Array() normalizes nil to [] while leaving an existing array untouched.
  Array(raw_calls)
end
|
#total_tokens ⇒ Object
46
47
48
|
# File 'lib/langchain/llm/response/ollama_response.rb', line 46
# Total token usage (prompt + completion), available once the response is done.
#
# Fix: #completion_tokens returns nil when a finished response lacks
# "eval_count" (unlike #prompt_tokens, which defaults to 0 via fetch), so the
# original `prompt_tokens + completion_tokens` could raise TypeError on nil.
# Coerce both sides with #to_i — a no-op on Integers — for consistency.
#
# @return [Integer, nil] total tokens, or nil while the response is streaming
def total_tokens
  return unless done?
  prompt_tokens.to_i + completion_tokens.to_i
end
|