Class: AssemblyAI::AsyncLemurClient

Inherits:
Object
Defined in:
lib/assemblyai/lemur/client.rb

Instance Attribute Summary collapse

Instance Method Summary collapse

Constructor Details

#initialize(request_client:) ⇒ AssemblyAI::AsyncLemurClient

Parameters:



279
280
281
# File 'lib/assemblyai/lemur/client.rb', line 279

def initialize(request_client:)
  @request_client = request_client
end

Instance Attribute Details

#request_clientAssemblyAI::AsyncRequestClient (readonly)



275
276
277
# File 'lib/assemblyai/lemur/client.rb', line 275

def request_client
  @request_client
end

Instance Method Details

#action_items(transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil, temperature: nil, answer_format: nil, request_options: nil) ⇒ AssemblyAI::Lemur::LemurActionItemsResponse

Use LeMUR to generate a list of action items from a transcript

Examples:

api = AssemblyAI::Client.new(
  environment: AssemblyAI::Environment::DEFAULT,
  base_url: "https://api.example.com",
  api_key: "YOUR_API_KEY"
)
api.lemur.action_items(answer_format: "Bullet Points")

Parameters:

  • transcript_ids (Array<String>) (defaults to: nil)

    A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower. Use either transcript_ids or input_text as input into LeMUR.

  • input_text (String) (defaults to: nil)

    Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000. Use either transcript_ids or input_text as input into LeMUR.

  • context (String, Hash{String => Object}) (defaults to: nil)

    Context to provide the model. This can be a string or a free-form JSON value.

  • final_model (AssemblyAI::Lemur::LemurModel) (defaults to: nil)

    The model that is used for the final prompt after compression is performed.

  • max_output_size (Integer) (defaults to: nil)

    Max output size in tokens, up to 4000

  • temperature (Float) (defaults to: nil)

    The temperature to use for the model. Higher values result in answers that are more creative, lower values are more conservative. Can be any value between 0.0 and 1.0 inclusive.

  • answer_format (String) (defaults to: nil)

    How you want the action items to be returned. This can be any text. Defaults to “Bullet Points”.

  • request_options (AssemblyAI::RequestOptions) (defaults to: nil)

Returns:



466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
# File 'lib/assemblyai/lemur/client.rb', line 466

def action_items(transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
                 temperature: nil, answer_format: nil, request_options: nil)
  Async do
    response = @request_client.conn.post do |req|
      req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
      req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
      req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
      req.body = {
        **(request_options&.additional_body_parameters || {}),
        transcript_ids: transcript_ids,
        input_text: input_text,
        context: context,
        final_model: final_model,
        max_output_size: max_output_size,
        temperature: temperature,
        answer_format: answer_format
      }.compact
      req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/generate/action-items"
    end
    AssemblyAI::Lemur::LemurActionItemsResponse.from_json(json_object: response.body)
  end
end

#get_response(request_id:, request_options: nil) ⇒ AssemblyAI::Lemur::LemurStringResponse, AssemblyAI::Lemur::LemurQuestionAnswerResponse

Retrieve a LeMUR response that was previously generated.

Examples:

api = AssemblyAI::Client.new(
  environment: AssemblyAI::Environment::DEFAULT,
  base_url: "https://api.example.com",
  api_key: "YOUR_API_KEY"
)
api.lemur.get_response(request_id: "request_id")

Parameters:

  • request_id (String)

    The ID of the LeMUR request you previously made. This would be found in the response of the original request.

  • request_options (AssemblyAI::RequestOptions) (defaults to: nil)

Returns:



502
503
504
505
506
507
508
509
510
511
512
# File 'lib/assemblyai/lemur/client.rb', line 502

def get_response(request_id:, request_options: nil)
  Async do
    response = @request_client.conn.get do |req|
      req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
      req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
      req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
      req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/#{request_id}"
    end
    AssemblyAI::Lemur::LemurResponse.from_json(json_object: response.body)
  end
end

#purge_request_data(request_id:, request_options: nil) ⇒ AssemblyAI::Lemur::PurgeLemurRequestDataResponse

Delete the data for a previously submitted LeMUR request.

The LLM response data, as well as any context provided in the original request
will be removed.

Examples:

api = AssemblyAI::Client.new(
  environment: AssemblyAI::Environment::DEFAULT,
  base_url: "https://api.example.com",
  api_key: "YOUR_API_KEY"
)
api.lemur.purge_request_data(request_id: "request_id")

Parameters:

  • request_id (String)

    The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.

  • request_options (AssemblyAI::RequestOptions) (defaults to: nil)

Returns:



529
530
531
532
533
534
535
536
537
538
539
# File 'lib/assemblyai/lemur/client.rb', line 529

def purge_request_data(request_id:, request_options: nil)
  Async do
    response = @request_client.conn.delete do |req|
      req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
      req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
      req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
      req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/#{request_id}"
    end
    AssemblyAI::Lemur::PurgeLemurRequestDataResponse.from_json(json_object: response.body)
  end
end

#question_answer(questions:, transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil, temperature: nil, request_options: nil) ⇒ AssemblyAI::Lemur::LemurQuestionAnswerResponse

Question & Answer allows you to ask free-form questions about a single
transcript or a group of transcripts.
The questions can be any whose answers you find useful, such as judging whether
a caller is likely to become a customer or whether all items on a meeting's
agenda were covered.

Examples:

api = AssemblyAI::Client.new(
  environment: AssemblyAI::Environment::DEFAULT,
  base_url: "https://api.example.com",
  api_key: "YOUR_API_KEY"
)
api.lemur.question_answer(questions: [{ question: "Where are there wildfires?", answer_format: "List of countries in ISO 3166-1 alpha-2 format", answer_options: ["US", "CA"] }, { question: "Is global warming affecting wildfires?", answer_options: ["yes", "no"] }])

Parameters:

  • transcript_ids (Array<String>) (defaults to: nil)

    A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower. Use either transcript_ids or input_text as input into LeMUR.

  • input_text (String) (defaults to: nil)

    Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000. Use either transcript_ids or input_text as input into LeMUR.

  • context (String, Hash{String => Object}) (defaults to: nil)

    Context to provide the model. This can be a string or a free-form JSON value.

  • final_model (AssemblyAI::Lemur::LemurModel) (defaults to: nil)

    The model that is used for the final prompt after compression is performed.

  • max_output_size (Integer) (defaults to: nil)

    Max output size in tokens, up to 4000

  • temperature (Float) (defaults to: nil)

    The temperature to use for the model. Higher values result in answers that are more creative, lower values are more conservative. Can be any value between 0.0 and 1.0 inclusive.

  • questions (Array<Hash>)

    A list of questions to ask. Request of type Array<AssemblyAI::Lemur::LemurQuestion>, as a Hash

    • :question (String)

    • :context (Hash)

    • :answer_format (String)

    • :answer_options (Array<String>)

  • request_options (AssemblyAI::RequestOptions) (defaults to: nil)

Returns:



417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
# File 'lib/assemblyai/lemur/client.rb', line 417

def question_answer(questions:, transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
                    temperature: nil, request_options: nil)
  Async do
    response = @request_client.conn.post do |req|
      req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
      req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
      req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
      req.body = {
        **(request_options&.additional_body_parameters || {}),
        transcript_ids: transcript_ids,
        input_text: input_text,
        context: context,
        final_model: final_model,
        max_output_size: max_output_size,
        temperature: temperature,
        questions: questions
      }.compact
      req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/generate/question-answer"
    end
    AssemblyAI::Lemur::LemurQuestionAnswerResponse.from_json(json_object: response.body)
  end
end

#summary(transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil, temperature: nil, answer_format: nil, request_options: nil) ⇒ AssemblyAI::Lemur::LemurSummaryResponse

Custom Summary allows you to distill a piece of audio into a few impactful
sentences.
You can give the model context to obtain more targeted results while outputting
the results in a variety of formats described in human language.

Examples:

api = AssemblyAI::Client.new(
  environment: AssemblyAI::Environment::DEFAULT,
  base_url: "https://api.example.com",
  api_key: "YOUR_API_KEY"
)
api.lemur.summary

Parameters:

  • transcript_ids (Array<String>) (defaults to: nil)

    A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower. Use either transcript_ids or input_text as input into LeMUR.

  • input_text (String) (defaults to: nil)

    Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000. Use either transcript_ids or input_text as input into LeMUR.

  • context (String, Hash{String => Object}) (defaults to: nil)

    Context to provide the model. This can be a string or a free-form JSON value.

  • final_model (AssemblyAI::Lemur::LemurModel) (defaults to: nil)

    The model that is used for the final prompt after compression is performed.

  • max_output_size (Integer) (defaults to: nil)

    Max output size in tokens, up to 4000

  • temperature (Float) (defaults to: nil)

    The temperature to use for the model. Higher values result in answers that are more creative, lower values are more conservative. Can be any value between 0.0 and 1.0 inclusive.

  • answer_format (String) (defaults to: nil)

    How you want the summary to be returned. This can be any text. Examples: “TLDR”, “bullet points”

  • request_options (AssemblyAI::RequestOptions) (defaults to: nil)

Returns:



361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
# File 'lib/assemblyai/lemur/client.rb', line 361

def summary(transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
            temperature: nil, answer_format: nil, request_options: nil)
  Async do
    response = @request_client.conn.post do |req|
      req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
      req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
      req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
      req.body = {
        **(request_options&.additional_body_parameters || {}),
        transcript_ids: transcript_ids,
        input_text: input_text,
        context: context,
        final_model: final_model,
        max_output_size: max_output_size,
        temperature: temperature,
        answer_format: answer_format
      }.compact
      req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/generate/summary"
    end
    AssemblyAI::Lemur::LemurSummaryResponse.from_json(json_object: response.body)
  end
end

#task(prompt:, transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil, temperature: nil, request_options: nil) ⇒ AssemblyAI::Lemur::LemurTaskResponse

Use the LeMUR task endpoint to input your own LLM prompt.

Examples:

api = AssemblyAI::Client.new(
  environment: AssemblyAI::Environment::DEFAULT,
  base_url: "https://api.example.com",
  api_key: "YOUR_API_KEY"
)
api.lemur.task(prompt: "List all the locations affected by wildfires.")

Parameters:

  • transcript_ids (Array<String>) (defaults to: nil)

    A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower. Use either transcript_ids or input_text as input into LeMUR.

  • input_text (String) (defaults to: nil)

    Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000. Use either transcript_ids or input_text as input into LeMUR.

  • context (String, Hash{String => Object}) (defaults to: nil)

    Context to provide the model. This can be a string or a free-form JSON value.

  • final_model (AssemblyAI::Lemur::LemurModel) (defaults to: nil)

    The model that is used for the final prompt after compression is performed.

  • max_output_size (Integer) (defaults to: nil)

    Max output size in tokens, up to 4000

  • temperature (Float) (defaults to: nil)

    The temperature to use for the model. Higher values result in answers that are more creative, lower values are more conservative. Can be any value between 0.0 and 1.0 inclusive.

  • prompt (String)

    Your text to prompt the model to produce a desired output, including any context you want to pass into the model.

  • request_options (AssemblyAI::RequestOptions) (defaults to: nil)

Returns:



309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
# File 'lib/assemblyai/lemur/client.rb', line 309

def task(prompt:, transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
         temperature: nil, request_options: nil)
  Async do
    response = @request_client.conn.post do |req|
      req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
      req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
      req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
      req.body = {
        **(request_options&.additional_body_parameters || {}),
        transcript_ids: transcript_ids,
        input_text: input_text,
        context: context,
        final_model: final_model,
        max_output_size: max_output_size,
        temperature: temperature,
        prompt: prompt
      }.compact
      req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/generate/task"
    end
    AssemblyAI::Lemur::LemurTaskResponse.from_json(json_object: response.body)
  end
end