Module: ElasticsearchServerless::API::MachineLearning::Actions
- Defined in:
- lib/elasticsearch-serverless/api/machine_learning/put_job.rb,
lib/elasticsearch-serverless/api/machine_learning/get_jobs.rb,
lib/elasticsearch-serverless/api/machine_learning/open_job.rb,
lib/elasticsearch-serverless/api/machine_learning/close_job.rb,
lib/elasticsearch-serverless/api/machine_learning/flush_job.rb,
lib/elasticsearch-serverless/api/machine_learning/reset_job.rb,
lib/elasticsearch-serverless/api/machine_learning/delete_job.rb,
lib/elasticsearch-serverless/api/machine_learning/put_filter.rb,
lib/elasticsearch-serverless/api/machine_learning/update_job.rb,
lib/elasticsearch-serverless/api/machine_learning/get_filters.rb,
lib/elasticsearch-serverless/api/machine_learning/put_calendar.rb,
lib/elasticsearch-serverless/api/machine_learning/put_datafeed.rb,
lib/elasticsearch-serverless/api/machine_learning/delete_filter.rb,
lib/elasticsearch-serverless/api/machine_learning/get_calendars.rb,
lib/elasticsearch-serverless/api/machine_learning/get_datafeeds.rb,
lib/elasticsearch-serverless/api/machine_learning/get_job_stats.rb,
lib/elasticsearch-serverless/api/machine_learning/stop_datafeed.rb,
lib/elasticsearch-serverless/api/machine_learning/update_filter.rb,
lib/elasticsearch-serverless/api/machine_learning/start_datafeed.rb,
lib/elasticsearch-serverless/api/machine_learning/delete_calendar.rb,
lib/elasticsearch-serverless/api/machine_learning/delete_datafeed.rb,
lib/elasticsearch-serverless/api/machine_learning/update_datafeed.rb,
lib/elasticsearch-serverless/api/machine_learning/preview_datafeed.rb,
lib/elasticsearch-serverless/api/machine_learning/put_calendar_job.rb,
lib/elasticsearch-serverless/api/machine_learning/put_trained_model.rb,
lib/elasticsearch-serverless/api/machine_learning/get_datafeed_stats.rb,
lib/elasticsearch-serverless/api/machine_learning/get_trained_models.rb,
lib/elasticsearch-serverless/api/machine_learning/delete_calendar_job.rb,
lib/elasticsearch-serverless/api/machine_learning/evaluate_data_frame.rb,
lib/elasticsearch-serverless/api/machine_learning/get_calendar_events.rb,
lib/elasticsearch-serverless/api/machine_learning/get_overall_buckets.rb,
lib/elasticsearch-serverless/api/machine_learning/infer_trained_model.rb,
lib/elasticsearch-serverless/api/machine_learning/delete_trained_model.rb,
lib/elasticsearch-serverless/api/machine_learning/post_calendar_events.rb,
lib/elasticsearch-serverless/api/machine_learning/delete_calendar_event.rb,
lib/elasticsearch-serverless/api/machine_learning/estimate_model_memory.rb,
lib/elasticsearch-serverless/api/machine_learning/put_trained_model_alias.rb,
lib/elasticsearch-serverless/api/machine_learning/get_data_frame_analytics.rb,
lib/elasticsearch-serverless/api/machine_learning/get_trained_models_stats.rb,
lib/elasticsearch-serverless/api/machine_learning/put_data_frame_analytics.rb,
lib/elasticsearch-serverless/api/machine_learning/stop_data_frame_analytics.rb,
lib/elasticsearch-serverless/api/machine_learning/delete_trained_model_alias.rb,
lib/elasticsearch-serverless/api/machine_learning/start_data_frame_analytics.rb,
lib/elasticsearch-serverless/api/machine_learning/delete_data_frame_analytics.rb,
lib/elasticsearch-serverless/api/machine_learning/update_data_frame_analytics.rb,
lib/elasticsearch-serverless/api/machine_learning/preview_data_frame_analytics.rb,
lib/elasticsearch-serverless/api/machine_learning/put_trained_model_vocabulary.rb,
lib/elasticsearch-serverless/api/machine_learning/stop_trained_model_deployment.rb,
lib/elasticsearch-serverless/api/machine_learning/get_data_frame_analytics_stats.rb,
lib/elasticsearch-serverless/api/machine_learning/start_trained_model_deployment.rb,
lib/elasticsearch-serverless/api/machine_learning/update_trained_model_deployment.rb,
lib/elasticsearch-serverless/api/machine_learning/put_trained_model_definition_part.rb
Instance Method Summary
-
#close_job(arguments = {}) ⇒ Object
Close anomaly detection jobs.
-
#delete_calendar(arguments = {}) ⇒ Object
Delete a calendar.
-
#delete_calendar_event(arguments = {}) ⇒ Object
Delete events from a calendar.
-
#delete_calendar_job(arguments = {}) ⇒ Object
Delete anomaly jobs from a calendar.
-
#delete_data_frame_analytics(arguments = {}) ⇒ Object
Delete a data frame analytics job.
-
#delete_datafeed(arguments = {}) ⇒ Object
Delete a datafeed.
-
#delete_filter(arguments = {}) ⇒ Object
Delete a filter.
-
#delete_job(arguments = {}) ⇒ Object
Delete an anomaly detection job.
-
#delete_trained_model(arguments = {}) ⇒ Object
Delete an unreferenced trained model.
-
#delete_trained_model_alias(arguments = {}) ⇒ Object
Delete a trained model alias.
-
#estimate_model_memory(arguments = {}) ⇒ Object
Estimate job model memory usage.
-
#evaluate_data_frame(arguments = {}) ⇒ Object
Evaluate data frame analytics.
-
#flush_job(arguments = {}) ⇒ Object
Force buffered data to be processed.
-
#get_calendar_events(arguments = {}) ⇒ Object
Get info about events in calendars.
-
#get_calendars(arguments = {}) ⇒ Object
Get calendar configuration info.
-
#get_data_frame_analytics(arguments = {}) ⇒ Object
Get data frame analytics job configuration info.
-
#get_data_frame_analytics_stats(arguments = {}) ⇒ Object
Get data frame analytics jobs usage info.
-
#get_datafeed_stats(arguments = {}) ⇒ Object
Get datafeeds usage info.
-
#get_datafeeds(arguments = {}) ⇒ Object
Get datafeeds configuration info.
-
#get_filters(arguments = {}) ⇒ Object
Get filters.
-
#get_job_stats(arguments = {}) ⇒ Object
Get anomaly detection jobs usage info.
-
#get_jobs(arguments = {}) ⇒ Object
Get anomaly detection jobs configuration info.
-
#get_overall_buckets(arguments = {}) ⇒ Object
Get overall bucket results.
-
#get_trained_models(arguments = {}) ⇒ Object
Get trained model configuration info.
-
#get_trained_models_stats(arguments = {}) ⇒ Object
Get trained models usage info.
-
#infer_trained_model(arguments = {}) ⇒ Object
Evaluate a trained model.
-
#open_job(arguments = {}) ⇒ Object
Open anomaly detection jobs.
-
#post_calendar_events(arguments = {}) ⇒ Object
Add scheduled events to the calendar.
-
#preview_data_frame_analytics(arguments = {}) ⇒ Object
Preview features used by data frame analytics.
-
#preview_datafeed(arguments = {}) ⇒ Object
Preview a datafeed.
-
#put_calendar(arguments = {}) ⇒ Object
Create a calendar.
-
#put_calendar_job(arguments = {}) ⇒ Object
Add an anomaly detection job to a calendar.
-
#put_data_frame_analytics(arguments = {}) ⇒ Object
Create a data frame analytics job.
-
#put_datafeed(arguments = {}) ⇒ Object
Create a datafeed.
-
#put_filter(arguments = {}) ⇒ Object
Create a filter.
-
#put_job(arguments = {}) ⇒ Object
Create an anomaly detection job.
-
#put_trained_model(arguments = {}) ⇒ Object
Create a trained model.
-
#put_trained_model_alias(arguments = {}) ⇒ Object
Create or update a trained model alias.
-
#put_trained_model_definition_part(arguments = {}) ⇒ Object
Create part of a trained model definition.
-
#put_trained_model_vocabulary(arguments = {}) ⇒ Object
Create a trained model vocabulary.
-
#reset_job(arguments = {}) ⇒ Object
Reset an anomaly detection job.
-
#start_data_frame_analytics(arguments = {}) ⇒ Object
Start a data frame analytics job.
-
#start_datafeed(arguments = {}) ⇒ Object
Start datafeeds.
-
#start_trained_model_deployment(arguments = {}) ⇒ Object
Start a trained model deployment.
-
#stop_data_frame_analytics(arguments = {}) ⇒ Object
Stop data frame analytics jobs.
-
#stop_datafeed(arguments = {}) ⇒ Object
Stop datafeeds.
-
#stop_trained_model_deployment(arguments = {}) ⇒ Object
Stop a trained model deployment.
-
#update_data_frame_analytics(arguments = {}) ⇒ Object
Update a data frame analytics job.
-
#update_datafeed(arguments = {}) ⇒ Object
Update a datafeed.
-
#update_filter(arguments = {}) ⇒ Object
Update a filter.
-
#update_job(arguments = {}) ⇒ Object
Update an anomaly detection job.
-
#update_trained_model_deployment(arguments = {}) ⇒ Object
Update a trained model deployment.
Instance Method Details
#close_job(arguments = {}) ⇒ Object
Close anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results, and persisting the model snapshots. Depending on the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its metadata. Therefore it is a best practice to close jobs that are no longer required to process data. If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling the stop datafeed API with the same timeout and force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job.
# File 'lib/elasticsearch-serverless/api/machine_learning/close_job.rb', line 42

def close_job(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.close_job" }
  defined_params = [:job_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'job_id' missing" unless arguments[:job_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)
  _job_id = arguments.delete(:job_id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/anomaly_detectors/#{Utils.listify(_job_id)}/_close"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
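A minimal usage sketch (not from the library docs), assuming a configured ElasticsearchServerless::Client instance named client that exposes this module as client.machine_learning; the job ID and timeout are illustrative:

# Close one anomaly detection job, waiting up to 30 seconds for it to finish.
response = client.machine_learning.close_job(
  job_id: "my-anomaly-job",
  timeout: "30s"
)
puts response.body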
#delete_calendar(arguments = {}) ⇒ Object
Delete a calendar. Removes all scheduled events from a calendar, then deletes it.
# File 'lib/elasticsearch-serverless/api/machine_learning/delete_calendar.rb', line 33

def delete_calendar(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.delete_calendar" }
  defined_params = [:calendar_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'calendar_id' missing" unless arguments[:calendar_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _calendar_id = arguments.delete(:calendar_id)

  method = ElasticsearchServerless::API::HTTP_DELETE
  path = "_ml/calendars/#{Utils.listify(_calendar_id)}"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#delete_calendar_event(arguments = {}) ⇒ Object
Delete events from a calendar.
# File 'lib/elasticsearch-serverless/api/machine_learning/delete_calendar_event.rb', line 34

def delete_calendar_event(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.delete_calendar_event" }
  defined_params = [:calendar_id, :event_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'calendar_id' missing" unless arguments[:calendar_id]
  raise ArgumentError, "Required argument 'event_id' missing" unless arguments[:event_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _calendar_id = arguments.delete(:calendar_id)
  _event_id = arguments.delete(:event_id)

  method = ElasticsearchServerless::API::HTTP_DELETE
  path = "_ml/calendars/#{Utils.listify(_calendar_id)}/events/#{Utils.listify(_event_id)}"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#delete_calendar_job(arguments = {}) ⇒ Object
Delete anomaly jobs from a calendar.
# File 'lib/elasticsearch-serverless/api/machine_learning/delete_calendar_job.rb', line 34

def delete_calendar_job(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.delete_calendar_job" }
  defined_params = [:calendar_id, :job_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'calendar_id' missing" unless arguments[:calendar_id]
  raise ArgumentError, "Required argument 'job_id' missing" unless arguments[:job_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _calendar_id = arguments.delete(:calendar_id)
  _job_id = arguments.delete(:job_id)

  method = ElasticsearchServerless::API::HTTP_DELETE
  path = "_ml/calendars/#{Utils.listify(_calendar_id)}/jobs/#{Utils.listify(_job_id)}"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#delete_data_frame_analytics(arguments = {}) ⇒ Object
Delete a data frame analytics job.
# File 'lib/elasticsearch-serverless/api/machine_learning/delete_data_frame_analytics.rb', line 34

def delete_data_frame_analytics(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.delete_data_frame_analytics" }
  defined_params = [:id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'id' missing" unless arguments[:id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _id = arguments.delete(:id)

  method = ElasticsearchServerless::API::HTTP_DELETE
  path = "_ml/data_frame/analytics/#{Utils.listify(_id)}"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#delete_datafeed(arguments = {}) ⇒ Object
Delete a datafeed.
# File 'lib/elasticsearch-serverless/api/machine_learning/delete_datafeed.rb', line 37

def delete_datafeed(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.delete_datafeed" }
  defined_params = [:datafeed_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'datafeed_id' missing" unless arguments[:datafeed_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _datafeed_id = arguments.delete(:datafeed_id)

  method = ElasticsearchServerless::API::HTTP_DELETE
  path = "_ml/datafeeds/#{Utils.listify(_datafeed_id)}"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#delete_filter(arguments = {}) ⇒ Object
Delete a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter.
# File 'lib/elasticsearch-serverless/api/machine_learning/delete_filter.rb', line 34

def delete_filter(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.delete_filter" }
  defined_params = [:filter_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'filter_id' missing" unless arguments[:filter_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _filter_id = arguments.delete(:filter_id)

  method = ElasticsearchServerless::API::HTTP_DELETE
  path = "_ml/filters/#{Utils.listify(_filter_id)}"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#delete_job(arguments = {}) ⇒ Object
Delete an anomaly detection job. All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request.
# File 'lib/elasticsearch-serverless/api/machine_learning/delete_job.rb', line 45

def delete_job(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.delete_job" }
  defined_params = [:job_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'job_id' missing" unless arguments[:job_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _job_id = arguments.delete(:job_id)

  method = ElasticsearchServerless::API::HTTP_DELETE
  path = "_ml/anomaly_detectors/#{Utils.listify(_job_id)}"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
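A hedged example call, assuming the same client setup as in the close_job sketch above; the job ID and the wait_for_completion flag are illustrative:

# Delete the job and wait for the deletion to finish before returning.
client.machine_learning.delete_job(
  job_id: "my-anomaly-job",
  wait_for_completion: true
)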
#delete_trained_model(arguments = {}) ⇒ Object
Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline.
# File 'lib/elasticsearch-serverless/api/machine_learning/delete_trained_model.rb', line 34

def delete_trained_model(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.delete_trained_model" }
  defined_params = [:model_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'model_id' missing" unless arguments[:model_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _model_id = arguments.delete(:model_id)

  method = ElasticsearchServerless::API::HTTP_DELETE
  path = "_ml/trained_models/#{Utils.listify(_model_id)}"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#delete_trained_model_alias(arguments = {}) ⇒ Object
Delete a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the model_id, this API returns an error.
# File 'lib/elasticsearch-serverless/api/machine_learning/delete_trained_model_alias.rb', line 36

def delete_trained_model_alias(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.delete_trained_model_alias" }
  defined_params = [:model_id, :model_alias].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'model_id' missing" unless arguments[:model_id]
  raise ArgumentError, "Required argument 'model_alias' missing" unless arguments[:model_alias]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _model_alias = arguments.delete(:model_alias)
  _model_id = arguments.delete(:model_id)

  method = ElasticsearchServerless::API::HTTP_DELETE
  path = "_ml/trained_models/#{Utils.listify(_model_id)}/model_aliases/#{Utils.listify(_model_alias)}"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#estimate_model_memory(arguments = {}) ⇒ Object
Estimate job model memory usage. Estimates the memory an anomaly detection job model is likely to require, based on analysis configuration details for the job and cardinality estimates for the fields it references.
# File 'lib/elasticsearch-serverless/api/machine_learning/estimate_model_memory.rb', line 35

def estimate_model_memory(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.estimate_model_memory" }

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/anomaly_detectors/_estimate_model_memory"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
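A sketch of a request body, assuming the client setup from the close_job example above; the analysis configuration and cardinality figures are made up for illustration:

client.machine_learning.estimate_model_memory(
  body: {
    analysis_config: {
      bucket_span: "15m",
      detectors: [{ function: "mean", field_name: "responsetime", by_field_name: "airline" }]
    },
    overall_cardinality: { airline: 50 }
  }
)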
#evaluate_data_frame(arguments = {}) ⇒ Object
Evaluate data frame analytics. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present.
# File 'lib/elasticsearch-serverless/api/machine_learning/evaluate_data_frame.rb', line 36

def evaluate_data_frame(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.evaluate_data_frame" }

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/data_frame/_evaluate"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
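A hedged sketch for an outlier detection evaluation, assuming the client setup above; the destination index and field names are placeholders:

client.machine_learning.evaluate_data_frame(
  body: {
    index: "my-analytics-dest-index",
    evaluation: {
      outlier_detection: {
        actual_field: "is_outlier",
        predicted_probability_field: "ml.outlier_score"
      }
    }
  }
)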
#flush_job(arguments = {}) ⇒ Object
Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, it might additionally calculate new results. The flush and close operations are similar; however, flush is more efficient if you expect to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk, and the job must be opened again before analyzing further data.
# File 'lib/elasticsearch-serverless/api/machine_learning/flush_job.rb', line 51

def flush_job(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.flush_job" }
  defined_params = [:job_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'job_id' missing" unless arguments[:job_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)
  _job_id = arguments.delete(:job_id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/anomaly_detectors/#{Utils.listify(_job_id)}/_flush"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
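A minimal sketch, assuming the client setup above; calc_interim asks the job to also calculate interim results for the most recent buckets:

client.machine_learning.flush_job(
  job_id: "my-anomaly-job",
  calc_interim: true
)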
#get_calendar_events(arguments = {}) ⇒ Object
Get info about events in calendars.
# File 'lib/elasticsearch-serverless/api/machine_learning/get_calendar_events.rb', line 37

def get_calendar_events(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.get_calendar_events" }
  defined_params = [:calendar_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'calendar_id' missing" unless arguments[:calendar_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _calendar_id = arguments.delete(:calendar_id)

  method = ElasticsearchServerless::API::HTTP_GET
  path = "_ml/calendars/#{Utils.listify(_calendar_id)}/events"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#get_calendars(arguments = {}) ⇒ Object
Get calendar configuration info.
# File 'lib/elasticsearch-serverless/api/machine_learning/get_calendars.rb', line 35

def get_calendars(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.get_calendars" }
  defined_params = [:calendar_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)
  _calendar_id = arguments.delete(:calendar_id)

  method = if body
             ElasticsearchServerless::API::HTTP_POST
           else
             ElasticsearchServerless::API::HTTP_GET
           end

  path = if _calendar_id
           "_ml/calendars/#{Utils.listify(_calendar_id)}"
         else
           "_ml/calendars"
         end
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#get_data_frame_analytics(arguments = {}) ⇒ Object
Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression.
# File 'lib/elasticsearch-serverless/api/machine_learning/get_data_frame_analytics.rb', line 51

def get_data_frame_analytics(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.get_data_frame_analytics" }
  defined_params = [:id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _id = arguments.delete(:id)

  method = ElasticsearchServerless::API::HTTP_GET
  path = if _id
           "_ml/data_frame/analytics/#{Utils.listify(_id)}"
         else
           "_ml/data_frame/analytics"
         end
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#get_data_frame_analytics_stats(arguments = {}) ⇒ Object
Get data frame analytics jobs usage info.
# File 'lib/elasticsearch-serverless/api/machine_learning/get_data_frame_analytics_stats.rb', line 46

def get_data_frame_analytics_stats(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.get_data_frame_analytics_stats" }
  defined_params = [:id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _id = arguments.delete(:id)

  method = ElasticsearchServerless::API::HTTP_GET
  path = if _id
           "_ml/data_frame/analytics/#{Utils.listify(_id)}/_stats"
         else
           "_ml/data_frame/analytics/_stats"
         end
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#get_datafeed_stats(arguments = {}) ⇒ Object
Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using _all, by specifying * as the <feed_id>, or by omitting the <feed_id>. If the datafeed is stopped, the only information you receive is the datafeed_id and the state. This API returns a maximum of 10,000 datafeeds.
# File 'lib/elasticsearch-serverless/api/machine_learning/get_datafeed_stats.rb', line 48

def get_datafeed_stats(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.get_datafeed_stats" }
  defined_params = [:datafeed_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _datafeed_id = arguments.delete(:datafeed_id)

  method = ElasticsearchServerless::API::HTTP_GET
  path = if _datafeed_id
           "_ml/datafeeds/#{Utils.listify(_datafeed_id)}/_stats"
         else
           "_ml/datafeeds/_stats"
         end
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#get_datafeeds(arguments = {}) ⇒ Object
Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using _all, by specifying * as the <feed_id>, or by omitting the <feed_id>. This API returns a maximum of 10,000 datafeeds.
# File 'lib/elasticsearch-serverless/api/machine_learning/get_datafeeds.rb', line 50

def get_datafeeds(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.get_datafeeds" }
  defined_params = [:datafeed_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _datafeed_id = arguments.delete(:datafeed_id)

  method = ElasticsearchServerless::API::HTTP_GET
  path = if _datafeed_id
           "_ml/datafeeds/#{Utils.listify(_datafeed_id)}"
         else
           "_ml/datafeeds"
         end
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#get_filters(arguments = {}) ⇒ Object
Get filters. You can get a single filter or all filters.
# File 'lib/elasticsearch-serverless/api/machine_learning/get_filters.rb', line 35

def get_filters(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.get_filters" }
  defined_params = [:filter_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _filter_id = arguments.delete(:filter_id)

  method = ElasticsearchServerless::API::HTTP_GET
  path = if _filter_id
           "_ml/filters/#{Utils.listify(_filter_id)}"
         else
           "_ml/filters"
         end
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#get_job_stats(arguments = {}) ⇒ Object
Get anomaly detection jobs usage info.
# File 'lib/elasticsearch-serverless/api/machine_learning/get_job_stats.rb', line 43

def get_job_stats(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.get_job_stats" }
  defined_params = [:job_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _job_id = arguments.delete(:job_id)

  method = ElasticsearchServerless::API::HTTP_GET
  path = if _job_id
           "_ml/anomaly_detectors/#{Utils.listify(_job_id)}/_stats"
         else
           "_ml/anomaly_detectors/_stats"
         end
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#get_jobs(arguments = {}) ⇒ Object
Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using _all, by specifying * as the <job_id>, or by omitting the <job_id>.
# File 'lib/elasticsearch-serverless/api/machine_learning/get_jobs.rb', line 49

def get_jobs(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.get_jobs" }
  defined_params = [:job_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _job_id = arguments.delete(:job_id)

  method = ElasticsearchServerless::API::HTTP_GET
  path = if _job_id
           "_ml/anomaly_detectors/#{Utils.listify(_job_id)}"
         else
           "_ml/anomaly_detectors"
         end
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#get_overall_buckets(arguments = {}) ⇒ Object
Get overall bucket results. Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The overall_score is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum anomaly_score per anomaly detection job in the overall bucket is calculated. Then the top_n of those scores are averaged to result in the overall_score. This means that you can fine-tune the overall_score so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set top_n to 1, the overall_score is the maximum bucket score in the overall bucket. Alternatively, if you set top_n to the number of jobs, the overall_score is high only when all jobs detect anomalies in that overall bucket. If you set the bucket_span parameter (to a value greater than its default), the overall_score is the maximum overall_score of the overall buckets that have a span equal to the jobs’ largest bucket span.
# File 'lib/elasticsearch-serverless/api/machine_learning/get_overall_buckets.rb', line 72

def get_overall_buckets(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.get_overall_buckets" }
  defined_params = [:job_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'job_id' missing" unless arguments[:job_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)
  _job_id = arguments.delete(:job_id)

  method = if body
             ElasticsearchServerless::API::HTTP_POST
           else
             ElasticsearchServerless::API::HTTP_GET
           end

  path = "_ml/anomaly_detectors/#{Utils.listify(_job_id)}/results/overall_buckets"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
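A hedged example that averages the top two job scores per overall bucket, assuming the client setup above; the job IDs and threshold are illustrative:

client.machine_learning.get_overall_buckets(
  job_id: "job-1,job-2",
  top_n: 2,
  overall_score: 50.0,
  exclude_interim: true
)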
#get_trained_models(arguments = {}) ⇒ Object
Get trained model configuration info.
# File 'lib/elasticsearch-serverless/api/machine_learning/get_trained_models.rb', line 52

def get_trained_models(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.get_trained_models" }
  defined_params = [:model_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _model_id = arguments.delete(:model_id)

  method = ElasticsearchServerless::API::HTTP_GET
  path = if _model_id
           "_ml/trained_models/#{Utils.listify(_model_id)}"
         else
           "_ml/trained_models"
         end
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#get_trained_models_stats(arguments = {}) ⇒ Object
Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression.
# File 'lib/elasticsearch-serverless/api/machine_learning/get_trained_models_stats.rb', line 43

def get_trained_models_stats(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.get_trained_models_stats" }
  defined_params = [:model_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _model_id = arguments.delete(:model_id)

  method = ElasticsearchServerless::API::HTTP_GET
  path = if _model_id
           "_ml/trained_models/#{Utils.listify(_model_id)}/_stats"
         else
           "_ml/trained_models/_stats"
         end
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#infer_trained_model(arguments = {}) ⇒ Object
Evaluate a trained model.
*Deprecation notice*: /_ml/trained_models/model_id/deployment/_infer is deprecated. Use /_ml/trained_models/model_id/_infer instead. Deprecated since version 8.3.0.
# File 'lib/elasticsearch-serverless/api/machine_learning/infer_trained_model.rb', line 39

def infer_trained_model(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.infer_trained_model" }
  defined_params = [:model_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'model_id' missing" unless arguments[:model_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)
  _model_id = arguments.delete(:model_id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/trained_models/#{Utils.listify(_model_id)}/_infer"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
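A minimal sketch, assuming the client setup above; the model ID is a placeholder and the document field name must match the input field expected by that model:

client.machine_learning.infer_trained_model(
  model_id: "my-trained-model",
  body: { docs: [{ text_field: "The quick brown fox jumps over the lazy dog" }] }
)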
#open_job(arguments = {}) ⇒ Object
Open anomaly detection jobs. An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received.
# File 'lib/elasticsearch-serverless/api/machine_learning/open_job.rb', line 40

def open_job(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.open_job" }
  defined_params = [:job_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'job_id' missing" unless arguments[:job_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)
  _job_id = arguments.delete(:job_id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/anomaly_detectors/#{Utils.listify(_job_id)}/_open"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
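A minimal sketch, assuming the client setup above; the job ID and timeout are illustrative:

client.machine_learning.open_job(
  job_id: "my-anomaly-job",
  timeout: "30m"
)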
#post_calendar_events(arguments = {}) ⇒ Object
Add scheduled events to the calendar.
# File 'lib/elasticsearch-serverless/api/machine_learning/post_calendar_events.rb', line 33

def post_calendar_events(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.post_calendar_events" }
  defined_params = [:calendar_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'calendar_id' missing" unless arguments[:calendar_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)
  _calendar_id = arguments.delete(:calendar_id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/calendars/#{Utils.listify(_calendar_id)}/events"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#preview_data_frame_analytics(arguments = {}) ⇒ Object
Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config.
# File 'lib/elasticsearch-serverless/api/machine_learning/preview_data_frame_analytics.rb', line 34

def preview_data_frame_analytics(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.preview_data_frame_analytics" }
  defined_params = [:id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)
  _id = arguments.delete(:id)

  method = if body
             ElasticsearchServerless::API::HTTP_POST
           else
             ElasticsearchServerless::API::HTTP_GET
           end

  path = if _id
           "_ml/data_frame/analytics/#{Utils.listify(_id)}/_preview"
         else
           "_ml/data_frame/analytics/_preview"
         end
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#preview_datafeed(arguments = {}) ⇒ Object
Preview a datafeed. This API returns the first “page” of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials.
# File 'lib/elasticsearch-serverless/api/machine_learning/preview_datafeed.rb', line 46

def preview_datafeed(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.preview_datafeed" }
  defined_params = [:datafeed_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)
  _datafeed_id = arguments.delete(:datafeed_id)

  method = if body
             ElasticsearchServerless::API::HTTP_POST
           else
             ElasticsearchServerless::API::HTTP_GET
           end

  path = if _datafeed_id
           "_ml/datafeeds/#{Utils.listify(_datafeed_id)}/_preview"
         else
           "_ml/datafeeds/_preview"
         end
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
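A hedged example, assuming the client setup above; previewing an existing datafeed only needs its ID, and the start parameter shown here is an illustrative way to restrict the preview window:

client.machine_learning.preview_datafeed(
  datafeed_id: "datafeed-my-anomaly-job",
  start: "2024-01-01T00:00:00Z"
)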
#put_calendar(arguments = {}) ⇒ Object
Create a calendar.
# File 'lib/elasticsearch-serverless/api/machine_learning/put_calendar.rb', line 33

def put_calendar(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.put_calendar" }
  defined_params = [:calendar_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'calendar_id' missing" unless arguments[:calendar_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)
  _calendar_id = arguments.delete(:calendar_id)

  method = ElasticsearchServerless::API::HTTP_PUT
  path = "_ml/calendars/#{Utils.listify(_calendar_id)}"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#put_calendar_job(arguments = {}) ⇒ Object
Add an anomaly detection job to a calendar.
# File 'lib/elasticsearch-serverless/api/machine_learning/put_calendar_job.rb', line 33

def put_calendar_job(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.put_calendar_job" }
  defined_params = [:calendar_id, :job_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'calendar_id' missing" unless arguments[:calendar_id]
  raise ArgumentError, "Required argument 'job_id' missing" unless arguments[:job_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = nil
  _calendar_id = arguments.delete(:calendar_id)
  _job_id = arguments.delete(:job_id)

  method = ElasticsearchServerless::API::HTTP_PUT
  path = "_ml/calendars/#{Utils.listify(_calendar_id)}/jobs/#{Utils.listify(_job_id)}"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#put_data_frame_analytics(arguments = {}) ⇒ Object
Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index.
# File 'lib/elasticsearch-serverless/api/machine_learning/put_data_frame_analytics.rb', line 37

def put_data_frame_analytics(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.put_data_frame_analytics" }
  defined_params = [:id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'id' missing" unless arguments[:id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)
  _id = arguments.delete(:id)

  method = ElasticsearchServerless::API::HTTP_PUT
  path = "_ml/data_frame/analytics/#{Utils.listify(_id)}"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#put_datafeed(arguments = {}) ⇒ Object
Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (frequency). If you are concerned about delayed data, you can add a delay (query_delay) at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the .ml-config index. Do not give users write privileges on the .ml-config index.
# File 'lib/elasticsearch-serverless/api/machine_learning/put_datafeed.rb', line 50

def put_datafeed(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.put_datafeed" }
  defined_params = [:datafeed_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'datafeed_id' missing" unless arguments[:datafeed_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)
  _datafeed_id = arguments.delete(:datafeed_id)

  method = ElasticsearchServerless::API::HTTP_PUT
  path = "_ml/datafeeds/#{Utils.listify(_datafeed_id)}"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
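A hedged configuration sketch, assuming the client setup above; the datafeed ID, job ID, index pattern, and intervals are placeholders:

client.machine_learning.put_datafeed(
  datafeed_id: "datafeed-my-anomaly-job",
  body: {
    job_id: "my-anomaly-job",
    indices: ["my-metrics-*"],
    query: { match_all: {} },
    frequency: "150s",
    query_delay: "60s"
  }
)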
#put_filter(arguments = {}) ⇒ Object
Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the custom_rules property of detector configuration objects.
# File 'lib/elasticsearch-serverless/api/machine_learning/put_filter.rb', line 35

def put_filter(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.put_filter" }
  defined_params = [:filter_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'filter_id' missing" unless arguments[:filter_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)
  _filter_id = arguments.delete(:filter_id)

  method = ElasticsearchServerless::API::HTTP_PUT
  path = "_ml/filters/#{Utils.listify(_filter_id)}"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
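A minimal sketch, assuming the client setup above; the filter ID and items are illustrative:

client.machine_learning.put_filter(
  filter_id: "safe_domains",
  body: {
    description: "Domains excluded from anomaly rules",
    items: ["*.elastic.co", "wikipedia.org"]
  }
)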
#put_job(arguments = {}) ⇒ Object
Create an anomaly detection job. If you include a datafeed_config, you must have read index privileges on the source index.
# File 'lib/elasticsearch-serverless/api/machine_learning/put_job.rb', line 34

def put_job(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.put_job" }
  defined_params = [:job_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'job_id' missing" unless arguments[:job_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)
  _job_id = arguments.delete(:job_id)

  method = ElasticsearchServerless::API::HTTP_PUT
  path = "_ml/anomaly_detectors/#{Utils.listify(_job_id)}"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
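A hedged sketch of a minimal job definition, assuming the client setup above; the field names and bucket span are placeholders:

client.machine_learning.put_job(
  job_id: "my-anomaly-job",
  body: {
    analysis_config: {
      bucket_span: "15m",
      detectors: [{ function: "mean", field_name: "responsetime" }]
    },
    data_description: { time_field: "timestamp", time_format: "epoch_ms" }
  }
)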
#put_trained_model(arguments = {}) ⇒ Object
Create a trained model. Enables you to supply a trained model that is not created by data frame analytics.
# File 'lib/elasticsearch-serverless/api/machine_learning/put_trained_model.rb', line 39

def put_trained_model(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.put_trained_model" }
  defined_params = [:model_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'model_id' missing" unless arguments[:model_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}
  body = arguments.delete(:body)
  _model_id = arguments.delete(:model_id)

  method = ElasticsearchServerless::API::HTTP_PUT
  path = "_ml/trained_models/#{Utils.listify(_model_id)}"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#put_trained_model_alias(arguments = {}) ⇒ Object
Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning.
# File 'lib/elasticsearch-serverless/api/machine_learning/put_trained_model_alias.rb', line 52

def put_trained_model_alias(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.put_trained_model_alias" }

  defined_params = [:model_id, :model_alias].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'model_id' missing" unless arguments[:model_id]
  raise ArgumentError, "Required argument 'model_alias' missing" unless arguments[:model_alias]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = nil

  _model_alias = arguments.delete(:model_alias)

  _model_id = arguments.delete(:model_id)

  method = ElasticsearchServerless::API::HTTP_PUT
  path = "_ml/trained_models/#{Utils.listify(_model_id)}/model_aliases/#{Utils.listify(_model_alias)}"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
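For instance, pointing an existing alias at a newer model version (the names are illustrative; extra keyword arguments such as reassign are forwarded as query parameters by Utils.process_params):

client.machine_learning.put_trained_model_alias(
  model_alias: "my-regression-model",
  model_id: "my-regression-model-v2",
  reassign: true  # needed when the alias currently refers to a different model
)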
#put_trained_model_definition_part(arguments = {}) ⇒ Object
Create part of a trained model definition.
# File 'lib/elasticsearch-serverless/api/machine_learning/put_trained_model_definition_part.rb', line 35

def put_trained_model_definition_part(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.put_trained_model_definition_part" }

  defined_params = [:model_id, :part].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'model_id' missing" unless arguments[:model_id]
  raise ArgumentError, "Required argument 'part' missing" unless arguments[:part]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _model_id = arguments.delete(:model_id)

  _part = arguments.delete(:part)

  method = ElasticsearchServerless::API::HTTP_PUT
  path = "_ml/trained_models/#{Utils.listify(_model_id)}/definition/#{Utils.listify(_part)}"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#put_trained_model_vocabulary(arguments = {}) ⇒ Object
Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in inference_config.*.vocabulary of the trained model definition.
# File 'lib/elasticsearch-serverless/api/machine_learning/put_trained_model_vocabulary.rb', line 35

def put_trained_model_vocabulary(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.put_trained_model_vocabulary" }

  defined_params = [:model_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'model_id' missing" unless arguments[:model_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _model_id = arguments.delete(:model_id)

  method = ElasticsearchServerless::API::HTTP_PUT
  path = "_ml/trained_models/#{Utils.listify(_model_id)}/vocabulary"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
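A minimal sketch for an NLP model (the model ID and token entries are placeholders):

client.machine_learning.put_trained_model_vocabulary(
  model_id: "my-nlp-model",
  body: {
    vocabulary: ["[PAD]", "[UNK]", "the", "quick", "brown", "fox"]
  }
)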
#reset_job(arguments = {}) ⇒ Object
Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma-separated list.
# File 'lib/elasticsearch-serverless/api/machine_learning/reset_job.rb', line 41

def reset_job(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.reset_job" }

  defined_params = [:job_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'job_id' missing" unless arguments[:job_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = nil

  _job_id = arguments.delete(:job_id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/anomaly_detectors/#{Utils.listify(_job_id)}/_reset"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
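For example (the job ID is illustrative; wait_for_completion is forwarded as a query parameter):

client.machine_learning.reset_job(
  job_id: "my-anomaly-job",
  wait_for_completion: true
)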
#start_data_frame_analytics(arguments = {}) ⇒ Object
Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The index.number_of_shards and index.number_of_replicas settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings.
# File 'lib/elasticsearch-serverless/api/machine_learning/start_data_frame_analytics.rb', line 47

def start_data_frame_analytics(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.start_data_frame_analytics" }

  defined_params = [:id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'id' missing" unless arguments[:id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = nil

  _id = arguments.delete(:id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/data_frame/analytics/#{Utils.listify(_id)}/_start"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
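For example (the job ID and timeout value are illustrative):

client.machine_learning.start_data_frame_analytics(
  id: "my-dfa-job",
  timeout: "30s"  # how long to wait for the job to start
)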
#start_datafeed(arguments = {}) ⇒ Object
Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead.
# File 'lib/elasticsearch-serverless/api/machine_learning/start_datafeed.rb', line 59

def start_datafeed(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.start_datafeed" }

  defined_params = [:datafeed_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'datafeed_id' missing" unless arguments[:datafeed_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _datafeed_id = arguments.delete(:datafeed_id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/datafeeds/#{Utils.listify(_datafeed_id)}/_start"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
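For example, starting a datafeed from a given timestamp (the datafeed ID and time are illustrative; start can also be passed as a query parameter instead of in the body):

client.machine_learning.start_datafeed(
  datafeed_id: "datafeed-my-anomaly-job",
  body: { start: "2024-01-01T00:00:00Z" }
)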
#start_trained_model_deployment(arguments = {}) ⇒ Object
Start a trained model deployment. It allocates the model to every machine learning node.
# File 'lib/elasticsearch-serverless/api/machine_learning/start_trained_model_deployment.rb', line 53

def start_trained_model_deployment(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.start_trained_model_deployment" }

  defined_params = [:model_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'model_id' missing" unless arguments[:model_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = nil

  _model_id = arguments.delete(:model_id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/trained_models/#{Utils.listify(_model_id)}/deployment/_start"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
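A sketch (the model ID is illustrative; the extra keyword arguments become query parameters):

client.machine_learning.start_trained_model_deployment(
  model_id: "my-nlp-model",
  wait_for: "started",  # block until the deployment has started
  timeout: "1m"
)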
#stop_data_frame_analytics(arguments = {}) ⇒ Object
Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle.
# File 'lib/elasticsearch-serverless/api/machine_learning/stop_data_frame_analytics.rb', line 48

def stop_data_frame_analytics(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.stop_data_frame_analytics" }

  defined_params = [:id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'id' missing" unless arguments[:id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = nil

  _id = arguments.delete(:id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/data_frame/analytics/#{Utils.listify(_id)}/_stop"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#stop_datafeed(arguments = {}) ⇒ Object
Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle.
# File 'lib/elasticsearch-serverless/api/machine_learning/stop_datafeed.rb', line 46

def stop_datafeed(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.stop_datafeed" }

  defined_params = [:datafeed_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'datafeed_id' missing" unless arguments[:datafeed_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _datafeed_id = arguments.delete(:datafeed_id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/datafeeds/#{Utils.listify(_datafeed_id)}/_stop"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
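For example (the datafeed ID is illustrative; force and timeout are forwarded as query parameters):

client.machine_learning.stop_datafeed(
  datafeed_id: "datafeed-my-anomaly-job",
  force: false,   # set to true to stop a datafeed that is not responding
  timeout: "30s"
)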
#stop_trained_model_deployment(arguments = {}) ⇒ Object
Stop a trained model deployment.
# File 'lib/elasticsearch-serverless/api/machine_learning/stop_trained_model_deployment.rb', line 38

def stop_trained_model_deployment(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.stop_trained_model_deployment" }

  defined_params = [:model_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'model_id' missing" unless arguments[:model_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = nil

  _model_id = arguments.delete(:model_id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/trained_models/#{Utils.listify(_model_id)}/deployment/_stop"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#update_data_frame_analytics(arguments = {}) ⇒ Object
Update a data frame analytics job.
# File 'lib/elasticsearch-serverless/api/machine_learning/update_data_frame_analytics.rb', line 35

def update_data_frame_analytics(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.update_data_frame_analytics" }

  defined_params = [:id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'id' missing" unless arguments[:id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _id = arguments.delete(:id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/data_frame/analytics/#{Utils.listify(_id)}/_update"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
#update_datafeed(arguments = {}) ⇒ Object
Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead.
# File 'lib/elasticsearch-serverless/api/machine_learning/update_datafeed.rb', line 50

def update_datafeed(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.update_datafeed" }

  defined_params = [:datafeed_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'datafeed_id' missing" unless arguments[:datafeed_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _datafeed_id = arguments.delete(:datafeed_id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/datafeeds/#{Utils.listify(_datafeed_id)}/_update"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
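For example, narrowing the datafeed query (the datafeed ID and query are illustrative; remember to stop and restart the datafeed afterwards for the change to take effect):

client.machine_learning.update_datafeed(
  datafeed_id: "datafeed-my-anomaly-job",
  body: {
    query: { term: { environment: "production" } },
    scroll_size: 1000
  }
)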
#update_filter(arguments = {}) ⇒ Object
Update a filter. Updates the description of a filter, adds items, or removes items from the list.
# File 'lib/elasticsearch-serverless/api/machine_learning/update_filter.rb', line 34

def update_filter(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.update_filter" }

  defined_params = [:filter_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'filter_id' missing" unless arguments[:filter_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _filter_id = arguments.delete(:filter_id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/filters/#{Utils.listify(_filter_id)}/_update"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
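For example (the filter ID and items are illustrative):

client.machine_learning.update_filter(
  filter_id: "safe_domains",
  body: {
    description: "Updated list of safe domains",
    add_items: ["*.myorg.com"],
    remove_items: ["wikipedia.org"]
  }
)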
#update_job(arguments = {}) ⇒ Object
Update an anomaly detection job. Updates certain properties of an existing anomaly detection job.
# File 'lib/elasticsearch-serverless/api/machine_learning/update_job.rb', line 34

def update_job(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.update_job" }

  defined_params = [:job_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'job_id' missing" unless arguments[:job_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _job_id = arguments.delete(:job_id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/anomaly_detectors/#{Utils.listify(_job_id)}/_update"
  params = {}

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
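For example, raising the model memory limit of a job (the job ID and limit are illustrative):

client.machine_learning.update_job(
  job_id: "my-anomaly-job",
  body: {
    description: "Response time anomaly detection",
    analysis_limits: { model_memory_limit: "1024mb" }
  }
)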
#update_trained_model_deployment(arguments = {}) ⇒ Object
Update a trained model deployment.
# File 'lib/elasticsearch-serverless/api/machine_learning/update_trained_model_deployment.rb', line 39

def update_trained_model_deployment(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || "ml.update_trained_model_deployment" }

  defined_params = [:model_id].inject({}) do |set_variables, variable|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
    set_variables
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'model_id' missing" unless arguments[:model_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _model_id = arguments.delete(:model_id)

  method = ElasticsearchServerless::API::HTTP_POST
  path = "_ml/trained_models/#{Utils.listify(_model_id)}/deployment/_update"
  params = Utils.process_params(arguments)

  ElasticsearchServerless::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end