Class: Google::Cloud::Dlp::V2::StorageConfig

Inherits:
Object
Extended by:
Protobuf::MessageExts::ClassMethods
Includes:
Protobuf::MessageExts
Defined in:
proto_docs/google/privacy/dlp/v2/storage.rb

Overview

Shared message indicating Cloud storage type.

Defined Under Namespace

Classes: TimespanConfig

Instance Attribute Summary

  #big_query_options ⇒ ::Google::Cloud::Dlp::V2::BigQueryOptions
    BigQuery options.
  #cloud_storage_options ⇒ ::Google::Cloud::Dlp::V2::CloudStorageOptions
    Cloud Storage options.
  #datastore_options ⇒ ::Google::Cloud::Dlp::V2::DatastoreOptions
    Google Cloud Datastore options.
  #hybrid_options ⇒ ::Google::Cloud::Dlp::V2::HybridOptions
    Hybrid inspection options.
  #timespan_config ⇒ ::Google::Cloud::Dlp::V2::StorageConfig::TimespanConfig
    Configuration of the timespan of the items to include in scanning.

Instance Attribute Details

#big_query_options ⇒ ::Google::Cloud::Dlp::V2::BigQueryOptions

Returns BigQuery options.

Returns:

(::Google::Cloud::Dlp::V2::BigQueryOptions)

# File 'proto_docs/google/privacy/dlp/v2/storage.rb', line 600

class StorageConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Configuration of the timespan of the items to include in scanning.
  # Currently only supported when inspecting Cloud Storage and BigQuery.
  # @!attribute [rw] start_time
  #   @return [::Google::Protobuf::Timestamp]
  #     Exclude files, tables, or rows older than this value.
  #     If not set, no lower time limit is applied.
  # @!attribute [rw] end_time
  #   @return [::Google::Protobuf::Timestamp]
  #     Exclude files, tables, or rows newer than this value.
  #     If not set, no upper time limit is applied.
  # @!attribute [rw] timestamp_field
  #   @return [::Google::Cloud::Dlp::V2::FieldId]
  #     Specification of the field containing the timestamp of scanned items.
  #     Used for data sources like Datastore and BigQuery.
  #
  #     **For BigQuery**
  #
  #     If this value is not specified and the table was modified between the
  #     given start and end times, the entire table will be scanned. If this
  #     value is specified, then rows are filtered based on the given start and
  #     end times. Rows with a `NULL` value in the provided BigQuery column are
  #     skipped.
  #     Valid data types of the provided BigQuery column are: `INTEGER`, `DATE`,
  #     `TIMESTAMP`, and `DATETIME`.
  #
  #     If your BigQuery table is [partitioned at ingestion
  #     time](https://cloud.google.com/bigquery/docs/partitioned-tables#ingestion_time),
  #     you can use any of the following pseudo-columns as your timestamp field.
  #     When used with Cloud DLP, these pseudo-column names are case sensitive.
  #
  #     - `_PARTITIONTIME`
  #     - `_PARTITIONDATE`
  #     - `_PARTITION_LOAD_TIME`
  #
  #     **For Datastore**
  #
  #     If this value is specified, then entities are filtered based on the given
  #     start and end times. If an entity does not contain the provided timestamp
  #     property or contains empty or invalid values, then it is included.
  #     Valid data types of the provided timestamp property are: `TIMESTAMP`.
  #
  #     See the
  #     [known
  #     issue](https://cloud.google.com/sensitive-data-protection/docs/known-issues#bq-timespan)
  #     related to this operation.
  # @!attribute [rw] enable_auto_population_of_timespan_config
  #   @return [::Boolean]
  #     When the job is started by a JobTrigger we will automatically figure out
  #     a valid start_time to avoid scanning files that have not been modified
  #     since the last time the JobTrigger executed. This will be based on the
  #     time of the execution of the last run of the JobTrigger or the timespan
  #     end_time used in the last run of the JobTrigger.
  #
  #     **For BigQuery**
  #
  #     Inspect jobs triggered by automatic population will scan data that is at
  #     least three hours old when the job starts. This is because streaming
  #     buffer rows are not read during inspection and reading up to the current
  #     timestamp will result in skipped rows.
  #
  #     See the [known
  #     issue](https://cloud.google.com/sensitive-data-protection/docs/known-issues#recently-streamed-data)
  #     related to this operation.
  class TimespanConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end
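
A minimal usage sketch for this attribute, assuming the generated BigQueryOptions and BigQueryTable messages expose the table_reference, project_id, dataset_id, and table_id fields shown; the identifiers are placeholders, not values from this page.

require "google/cloud/dlp/v2"

# Build a StorageConfig that scans a single BigQuery table.
# Project, dataset, and table names are illustrative only.
storage_config = Google::Cloud::Dlp::V2::StorageConfig.new(
  big_query_options: Google::Cloud::Dlp::V2::BigQueryOptions.new(
    table_reference: Google::Cloud::Dlp::V2::BigQueryTable.new(
      project_id: "my-project",
      dataset_id: "my_dataset",
      table_id:   "my_table"
    )
  )
)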

#cloud_storage_options ⇒ ::Google::Cloud::Dlp::V2::CloudStorageOptions

Returns Cloud Storage options.

Returns:

(::Google::Cloud::Dlp::V2::CloudStorageOptions)

# File 'proto_docs/google/privacy/dlp/v2/storage.rb', line 600

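
A minimal sketch for this attribute, assuming CloudStorageOptions exposes a nested FileSet message with a url field; the bucket URL below is a placeholder.

require "google/cloud/dlp/v2"

# Build a StorageConfig that scans all objects under a bucket prefix.
storage_config = Google::Cloud::Dlp::V2::StorageConfig.new(
  cloud_storage_options: Google::Cloud::Dlp::V2::CloudStorageOptions.new(
    file_set: Google::Cloud::Dlp::V2::CloudStorageOptions::FileSet.new(
      url: "gs://example-bucket/**"
    )
  )
)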

#datastore_options ⇒ ::Google::Cloud::Dlp::V2::DatastoreOptions

Returns Google Cloud Datastore options.

Returns:

(::Google::Cloud::Dlp::V2::DatastoreOptions)

# File 'proto_docs/google/privacy/dlp/v2/storage.rb', line 600

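
A minimal sketch for this attribute, assuming DatastoreOptions takes a PartitionId and a KindExpression as shown; the project and kind names are placeholders.

require "google/cloud/dlp/v2"

# Build a StorageConfig that scans Datastore entities of one kind.
storage_config = Google::Cloud::Dlp::V2::StorageConfig.new(
  datastore_options: Google::Cloud::Dlp::V2::DatastoreOptions.new(
    partition_id: Google::Cloud::Dlp::V2::PartitionId.new(project_id: "my-project"),
    kind:         Google::Cloud::Dlp::V2::KindExpression.new(name: "Person")
  )
)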

#hybrid_options ⇒ ::Google::Cloud::Dlp::V2::HybridOptions

Returns Hybrid inspection options.

Returns:

(::Google::Cloud::Dlp::V2::HybridOptions)

# File 'proto_docs/google/privacy/dlp/v2/storage.rb', line 600

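
A minimal sketch for this attribute, assuming HybridOptions exposes the description, required_finding_label_keys, and labels fields shown; all values are placeholders.

require "google/cloud/dlp/v2"

# Build a StorageConfig for hybrid inspection, where content is supplied
# to the job externally rather than pulled from a storage source.
storage_config = Google::Cloud::Dlp::V2::StorageConfig.new(
  hybrid_options: Google::Cloud::Dlp::V2::HybridOptions.new(
    description: "Findings forwarded from my application",
    required_finding_label_keys: ["env"],
    labels: { "app" => "example" }
  )
)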

#timespan_config ⇒ ::Google::Cloud::Dlp::V2::StorageConfig::TimespanConfig

Returns Configuration of the timespan of the items to include in scanning.

Returns:

(::Google::Cloud::Dlp::V2::StorageConfig::TimespanConfig)

# File 'proto_docs/google/privacy/dlp/v2/storage.rb', line 600

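
A minimal sketch combining this attribute with big_query_options, assuming the message and field names shown; the BigQueryTable reference and the update_time column are placeholders. It restricts the scan to rows modified in the last 24 hours.

require "google/cloud/dlp/v2"

now = Time.now.utc
timespan = Google::Cloud::Dlp::V2::StorageConfig::TimespanConfig.new(
  # Scan only rows whose timestamp falls within the last 24 hours.
  start_time: Google::Protobuf::Timestamp.new(seconds: (now - 24 * 60 * 60).to_i),
  end_time:   Google::Protobuf::Timestamp.new(seconds: now.to_i),
  # BigQuery column holding each row's timestamp; rows with NULL values
  # in this column are skipped.
  timestamp_field: Google::Cloud::Dlp::V2::FieldId.new(name: "update_time"),
  enable_auto_population_of_timespan_config: false
)

storage_config = Google::Cloud::Dlp::V2::StorageConfig.new(
  big_query_options: Google::Cloud::Dlp::V2::BigQueryOptions.new(
    table_reference: Google::Cloud::Dlp::V2::BigQueryTable.new(
      project_id: "my-project", dataset_id: "my_dataset", table_id: "my_table"
    )
  ),
  timespan_config: timespan
)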