Class: Google::Cloud::Asset::V1::IamPolicyAnalysisOutputConfig

Inherits:
Object
Extended by:
Protobuf::MessageExts::ClassMethods
Includes:
Protobuf::MessageExts
Defined in:
proto_docs/google/cloud/asset/v1/asset_service.rb

Overview

Output configuration for the IAM policy analysis export destination.

Defined Under Namespace

Classes: BigQueryDestination, GcsDestination

Instance Attribute Summary

#bigquery_destination ⇒ ::Google::Cloud::Asset::V1::IamPolicyAnalysisOutputConfig::BigQueryDestination
  Destination on BigQuery.

#gcs_destination ⇒ ::Google::Cloud::Asset::V1::IamPolicyAnalysisOutputConfig::GcsDestination
  Destination on Cloud Storage.

Instance Attribute Details

#bigquery_destination ⇒ ::Google::Cloud::Asset::V1::IamPolicyAnalysisOutputConfig::BigQueryDestination

Returns Destination on BigQuery.
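
A minimal sketch of building this output configuration for a BigQuery destination; the project, dataset, and table prefix below are placeholder values:

require "google/cloud/asset/v1"

# Export analysis results to BigQuery, partitioned by request time and
# replacing any existing table contents.
output_config = Google::Cloud::Asset::V1::IamPolicyAnalysisOutputConfig.new(
  bigquery_destination: Google::Cloud::Asset::V1::IamPolicyAnalysisOutputConfig::BigQueryDestination.new(
    dataset: "projects/my-project/datasets/my_dataset", # placeholder dataset
    table_prefix: "iam_analysis", # creates iam_analysis_analysis and iam_analysis_analysis_result
    partition_key: :REQUEST_TIME,
    write_disposition: "WRITE_TRUNCATE"
  )
)

A config like this would typically be passed as the output_config of an AssetService analyze_iam_policy_longrunning request.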



# File 'proto_docs/google/cloud/asset/v1/asset_service.rb', line 863

class IamPolicyAnalysisOutputConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # A Cloud Storage location.
  # @!attribute [rw] uri
  #   @return [::String]
  #     Required. The URI of the Cloud Storage object. It's the same URI that is used by
  #     gsutil. For example: "gs://bucket_name/object_name". See
  #     [Quickstart: Using the gsutil tool]
  #     (https://cloud.google.com/storage/docs/quickstart-gsutil) for examples.
  class GcsDestination
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # A BigQuery destination.
  # @!attribute [rw] dataset
  #   @return [::String]
  #     Required. The BigQuery dataset in the format "projects/projectId/datasets/datasetId",
  #     to which the analysis results should be exported. If this dataset does
  #     not exist, the export call will return an INVALID_ARGUMENT error.
  # @!attribute [rw] table_prefix
  #   @return [::String]
  #     Required. The prefix of the BigQuery tables to which the analysis results will be
  #     written. Tables will be created based on this table_prefix if they do not exist:
  #     * <table_prefix>_analysis table will contain export operation's metadata.
  #     * <table_prefix>_analysis_result will contain all the
  #       {::Google::Cloud::Asset::V1::IamPolicyAnalysisResult IamPolicyAnalysisResult}.
  #     When [partition_key] is specified, both tables will be partitioned based
  #     on the [partition_key].
  # @!attribute [rw] partition_key
  #   @return [::Google::Cloud::Asset::V1::IamPolicyAnalysisOutputConfig::BigQueryDestination::PartitionKey]
  #     The partition key for BigQuery partitioned table.
  # @!attribute [rw] write_disposition
  #   @return [::String]
  #     Optional. Specifies the action that occurs if the destination table or partition
  #     already exists. The following values are supported:
  #
  #     * WRITE_TRUNCATE: If the table or partition already exists, BigQuery
  #     overwrites the entire table or all the partitions' data.
  #     * WRITE_APPEND: If the table or partition already exists, BigQuery
  #     appends the data to the table or the latest partition.
  #     * WRITE_EMPTY: If the table already exists and contains data, an error is
  #     returned.
  #
  #     The default value is WRITE_APPEND. Each action is atomic and only occurs
  #     if BigQuery is able to complete the job successfully. Details are at
  #     https://cloud.google.com/bigquery/docs/loading-data-local#appending_to_or_overwriting_a_table_using_a_local_file.
  class BigQueryDestination
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # This enum determines the partition key column for the BigQuery tables.
    # Partitioning can improve query performance and reduce query cost by
    # filtering partitions. Refer to
    # https://cloud.google.com/bigquery/docs/partitioned-tables for details.
    module PartitionKey
      # Unspecified partition key. Tables won't be partitioned using this
      # option.
      PARTITION_KEY_UNSPECIFIED = 0

      # The time when the request is received. If specified as the partition
      # key, the result table(s) will be partitioned by the RequestTime column,
      # an additional timestamp column representing when the request was
      # received.
      REQUEST_TIME = 1
    end
  end
end

#gcs_destination ⇒ ::Google::Cloud::Asset::V1::IamPolicyAnalysisOutputConfig::GcsDestination

Returns Destination on Cloud Storage.
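
A minimal sketch for a Cloud Storage destination; the bucket and object name are placeholders:

require "google/cloud/asset/v1"

# Write the analysis results to a single Cloud Storage object.
output_config = Google::Cloud::Asset::V1::IamPolicyAnalysisOutputConfig.new(
  gcs_destination: Google::Cloud::Asset::V1::IamPolicyAnalysisOutputConfig::GcsDestination.new(
    uri: "gs://my-bucket/iam_analysis_result" # placeholder URI
  )
)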



# File 'proto_docs/google/cloud/asset/v1/asset_service.rb', line 863
