Class: Langchain::LLM::MistralAIResponse
Instance Attribute Summary
Attributes inherited from BaseResponse
#context, #raw_response
Instance Method Summary
collapse
#completion, #completions, #embeddings, #initialize
Instance Method Details
#chat_completion ⇒ Object
9
10
11
|
# File 'lib/langchain/llm/response/mistral_ai_response.rb', line 9
# Text content of the first chat choice's message, or nil when absent.
def chat_completion
  message = chat_completions.dig(0, "message")
  message && message["content"]
end
|
#chat_completions ⇒ Object
13
14
15
|
# File 'lib/langchain/llm/response/mistral_ai_response.rb', line 13
# Array of choice hashes from the raw API payload ("choices"), or nil
# when the key is missing.
#
# Single-argument `dig` is equivalent to `[]` on a Hash and flagged by
# RuboCop (Style/SingleArgumentDig), so use plain indexing.
def chat_completions
  raw_response["choices"]
end
|
#completion_tokens ⇒ Object
37
38
39
|
# File 'lib/langchain/llm/response/mistral_ai_response.rb', line 37
# Token count of the generated completion, taken from the "usage" block.
def completion_tokens
  usage = raw_response["usage"]
  usage && usage["completion_tokens"]
end
|
#created_at ⇒ Object
41
42
43
44
45
|
# File 'lib/langchain/llm/response/mistral_ai_response.rb', line 41
# Creation time of the response as a Time object, or nil when the
# "created_at" field is absent from the raw payload.
#
# The original looked the key up twice via single-argument `dig`; fetch
# it once into a local instead.
def created_at
  timestamp = raw_response["created_at"]
  Time.at(timestamp) if timestamp
end
|
#embedding ⇒ Object
25
26
27
|
# File 'lib/langchain/llm/response/mistral_ai_response.rb', line 25
# Embedding vector of the first entry in the "data" array, or nil.
def embedding
  first_datum = raw_response.dig("data", 0)
  first_datum && first_datum["embedding"]
end
|
#model ⇒ Object
5
6
7
|
# File 'lib/langchain/llm/response/mistral_ai_response.rb', line 5
# Identifier of the Mistral model that produced this response.
def model
  raw_response.fetch("model", nil)
end
|
#prompt_tokens ⇒ Object
29
30
31
|
# File 'lib/langchain/llm/response/mistral_ai_response.rb', line 29
# Token count of the prompt, taken from the "usage" block.
def prompt_tokens
  (raw_response["usage"] || {})["prompt_tokens"]
end
|
#role ⇒ Object
21
22
23
|
# File 'lib/langchain/llm/response/mistral_ai_response.rb', line 21
# Role of the first chat choice's message (e.g. "assistant").
#
# Goes through the `chat_completions` accessor for consistency with
# `chat_completion` and `tool_calls`, instead of digging into
# `raw_response` directly; `chat_completions` returns the same
# "choices" array, so behavior is unchanged.
def role
  chat_completions.dig(0, "message", "role")
end
|
#tool_calls ⇒ Object
17
18
19
|
# File 'lib/langchain/llm/response/mistral_ai_response.rb', line 17
# Tool calls requested by the first chat choice; empty array when none.
def tool_calls
  requested = chat_completions.dig(0, "message", "tool_calls")
  requested || []
end
|
#total_tokens ⇒ Object
33
34
35
|
# File 'lib/langchain/llm/response/mistral_ai_response.rb', line 33
# Combined prompt + completion token count from the "usage" block.
def total_tokens
  usage_stats = raw_response["usage"]
  usage_stats && usage_stats["total_tokens"]
end
|