Module: Polars::IO
Included in: Polars

Defined in:
  lib/polars/io/csv.rb
  lib/polars/io/ipc.rb
  lib/polars/io/avro.rb
  lib/polars/io/json.rb
  lib/polars/io/ndjson.rb
  lib/polars/io/parquet.rb
  lib/polars/io/database.rb
Instance Method Summary
-
#read_avro(source, columns: nil, n_rows: nil) ⇒ DataFrame
Read into a DataFrame from Apache Avro format.
-
#read_csv(source, has_header: true, columns: nil, new_columns: nil, sep: ",", comment_char: nil, quote_char: '"', skip_rows: 0, dtypes: nil, null_values: nil, ignore_errors: false, parse_dates: false, n_threads: nil, infer_schema_length: N_INFER_DEFAULT, batch_size: 8192, n_rows: nil, encoding: "utf8", low_memory: false, rechunk: true, storage_options: nil, skip_rows_after_header: 0, row_count_name: nil, row_count_offset: 0, eol_char: "\n", truncate_ragged_lines: false) ⇒ DataFrame
Read a CSV file into a DataFrame.
-
#read_csv_batched(source, has_header: true, columns: nil, new_columns: nil, sep: ",", comment_char: nil, quote_char: '"', skip_rows: 0, dtypes: nil, null_values: nil, missing_utf8_is_empty_string: false, ignore_errors: false, parse_dates: false, n_threads: nil, infer_schema_length: N_INFER_DEFAULT, batch_size: 50_000, n_rows: nil, encoding: "utf8", low_memory: false, rechunk: true, skip_rows_after_header: 0, row_count_name: nil, row_count_offset: 0, eol_char: "\n", raise_if_empty: true, truncate_ragged_lines: false, decimal_comma: false) ⇒ BatchedCsvReader
Read a CSV file in batches.
-
#read_database(query, schema_overrides: nil) ⇒ DataFrame
(also: #read_sql)
Read a SQL query into a DataFrame.
-
#read_ipc(source, columns: nil, n_rows: nil, memory_map: true, storage_options: nil, row_count_name: nil, row_count_offset: 0, rechunk: true) ⇒ DataFrame
Read into a DataFrame from an Arrow IPC (Feather v2) file.
-
#read_ipc_schema(source) ⇒ Hash
Get a schema of the IPC file without reading data.
-
#read_ipc_stream(source, columns: nil, n_rows: nil, storage_options: nil, row_index_name: nil, row_index_offset: 0, rechunk: true) ⇒ DataFrame
Read into a DataFrame from an Arrow IPC record batch stream.
-
#read_json(source, schema: nil, schema_overrides: nil, infer_schema_length: N_INFER_DEFAULT) ⇒ DataFrame
Read into a DataFrame from a JSON file.
-
#read_ndjson(source, schema: nil, schema_overrides: nil, ignore_errors: false) ⇒ DataFrame
Read into a DataFrame from a newline delimited JSON file.
-
#read_parquet(source, columns: nil, n_rows: nil, row_count_name: nil, row_count_offset: 0, parallel: "auto", use_statistics: true, hive_partitioning: nil, glob: true, schema: nil, hive_schema: nil, try_parse_hive_dates: true, rechunk: false, low_memory: false, storage_options: nil, credential_provider: nil, retries: 2, include_file_paths: nil, allow_missing_columns: false) ⇒ DataFrame
Read into a DataFrame from a parquet file.
-
#read_parquet_schema(source) ⇒ Hash
Get a schema of the Parquet file without reading data.
-
#scan_csv(source, has_header: true, sep: ",", comment_char: nil, quote_char: '"', skip_rows: 0, dtypes: nil, null_values: nil, missing_utf8_is_empty_string: false, ignore_errors: false, cache: true, with_column_names: nil, infer_schema_length: N_INFER_DEFAULT, n_rows: nil, encoding: "utf8", low_memory: false, rechunk: true, skip_rows_after_header: 0, row_count_name: nil, row_count_offset: 0, parse_dates: false, eol_char: "\n", raise_if_empty: true, truncate_ragged_lines: false, decimal_comma: false, glob: true) ⇒ LazyFrame
Lazily read from a CSV file or multiple files via glob patterns.
-
#scan_ipc(source, n_rows: nil, cache: true, rechunk: true, row_count_name: nil, row_count_offset: 0, storage_options: nil, hive_partitioning: nil, hive_schema: nil, try_parse_hive_dates: true, include_file_paths: nil) ⇒ LazyFrame
Lazily read from an Arrow IPC (Feather v2) file or multiple files via glob patterns.
-
#scan_ndjson(source, infer_schema_length: N_INFER_DEFAULT, batch_size: 1024, n_rows: nil, low_memory: false, rechunk: true, row_count_name: nil, row_count_offset: 0) ⇒ LazyFrame
Lazily read from a newline delimited JSON file.
-
#scan_parquet(source, n_rows: nil, row_count_name: nil, row_count_offset: 0, parallel: "auto", use_statistics: true, hive_partitioning: nil, glob: true, schema: nil, hive_schema: nil, try_parse_hive_dates: true, rechunk: false, low_memory: false, cache: true, storage_options: nil, credential_provider: nil, retries: 2, include_file_paths: nil, allow_missing_columns: false) ⇒ LazyFrame
Lazily read from a parquet file or multiple files via glob patterns.
Instance Method Details
#read_avro(source, columns: nil, n_rows: nil) ⇒ DataFrame
Read into a DataFrame from Apache Avro format.
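A minimal usage sketch; the file path and column names are hypothetical:

  df = Polars.read_avro("events.avro", columns: ["id", "ts"], n_rows: 1_000)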
# File 'lib/polars/io/avro.rb', line 14

def read_avro(source, columns: nil, n_rows: nil)
  if Utils.pathlike?(source)
    source = Utils.normalize_filepath(source)
  end

  projection, column_names = Utils.handle_projection_columns(columns)
  rbdf = RbDataFrame.read_avro(source, column_names, projection, n_rows)
  Utils.wrap_df(rbdf)
end
#read_csv(source, has_header: true, columns: nil, new_columns: nil, sep: ",", comment_char: nil, quote_char: '"', skip_rows: 0, dtypes: nil, null_values: nil, ignore_errors: false, parse_dates: false, n_threads: nil, infer_schema_length: N_INFER_DEFAULT, batch_size: 8192, n_rows: nil, encoding: "utf8", low_memory: false, rechunk: true, storage_options: nil, skip_rows_after_header: 0, row_count_name: nil, row_count_offset: 0, eol_char: "\n", truncate_ragged_lines: false) ⇒ DataFrame
Note: this operation defaults to a rechunk operation at the end, meaning that all data will be stored contiguously in memory. Set rechunk: false if you are benchmarking the CSV reader; a rechunk is an expensive operation.
Read a CSV file into a DataFrame.
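For example, reading a subset of columns with an explicit dtype override (the path, column names, and dtype mapping are hypothetical):

  df = Polars.read_csv(
    "data.csv",
    columns: ["id", "amount"],
    dtypes: {"amount" => Polars::Float64},
    rechunk: false  # skip the final rechunk when benchmarking the reader
  )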
# File 'lib/polars/io/csv.rb', line 90

def read_csv(
  source,
  has_header: true,
  columns: nil,
  new_columns: nil,
  sep: ",",
  comment_char: nil,
  quote_char: '"',
  skip_rows: 0,
  dtypes: nil,
  null_values: nil,
  ignore_errors: false,
  parse_dates: false,
  n_threads: nil,
  infer_schema_length: N_INFER_DEFAULT,
  batch_size: 8192,
  n_rows: nil,
  encoding: "utf8",
  low_memory: false,
  rechunk: true,
  storage_options: nil,
  skip_rows_after_header: 0,
  row_count_name: nil,
  row_count_offset: 0,
  eol_char: "\n",
  truncate_ragged_lines: false
)
  Utils._check_arg_is_1byte("sep", sep, false)
  Utils._check_arg_is_1byte("comment_char", comment_char, false)
  Utils._check_arg_is_1byte("quote_char", quote_char, true)
  Utils._check_arg_is_1byte("eol_char", eol_char, false)

  projection, columns = Utils.handle_projection_columns(columns)

  storage_options ||= {}

  if columns && !has_header
    columns.each do |column|
      if !column.start_with?("column_")
        raise ArgumentError, "Specified column names do not start with \"column_\", but autogenerated header names were requested."
      end
    end
  end

  if projection || new_columns
    raise Todo
  end

  df = nil
  _prepare_file_arg(source) do |data|
    df = _read_csv_impl(
      data,
      has_header: has_header,
      columns: columns || projection,
      sep: sep,
      comment_char: comment_char,
      quote_char: quote_char,
      skip_rows: skip_rows,
      dtypes: dtypes,
      null_values: null_values,
      ignore_errors: ignore_errors,
      parse_dates: parse_dates,
      n_threads: n_threads,
      infer_schema_length: infer_schema_length,
      batch_size: batch_size,
      n_rows: n_rows,
      encoding: encoding == "utf8-lossy" ? encoding : "utf8",
      low_memory: low_memory,
      rechunk: rechunk,
      skip_rows_after_header: skip_rows_after_header,
      row_count_name: row_count_name,
      row_count_offset: row_count_offset,
      eol_char: eol_char,
      truncate_ragged_lines: truncate_ragged_lines
    )
  end

  if new_columns
    Utils._update_columns(df, new_columns)
  else
    df
  end
end
#read_csv_batched(source, has_header: true, columns: nil, new_columns: nil, sep: ",", comment_char: nil, quote_char: '"', skip_rows: 0, dtypes: nil, null_values: nil, missing_utf8_is_empty_string: false, ignore_errors: false, parse_dates: false, n_threads: nil, infer_schema_length: N_INFER_DEFAULT, batch_size: 50_000, n_rows: nil, encoding: "utf8", low_memory: false, rechunk: true, skip_rows_after_header: 0, row_count_name: nil, row_count_offset: 0, eol_char: "\n", raise_if_empty: true, truncate_ragged_lines: false, decimal_comma: false) ⇒ BatchedCsvReader
Read a CSV file in batches.
Upon creation of the BatchedCsvReader, Polars will gather statistics and determine the file chunks. After that, work will only be done if next_batches is called.
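A sketch of batch-wise processing, assuming next_batches returns nil once the file is exhausted; the path and batch sizes are illustrative:

  reader = Polars.read_csv_batched("big.csv", batch_size: 100_000)
  while (batches = reader.next_batches(5))
    batches.each { |df| puts df.shape.inspect }  # replace with your own per-batch handling
  end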
# File 'lib/polars/io/csv.rb', line 400

def read_csv_batched(
  source,
  has_header: true,
  columns: nil,
  new_columns: nil,
  sep: ",",
  comment_char: nil,
  quote_char: '"',
  skip_rows: 0,
  dtypes: nil,
  null_values: nil,
  missing_utf8_is_empty_string: false,
  ignore_errors: false,
  parse_dates: false,
  n_threads: nil,
  infer_schema_length: N_INFER_DEFAULT,
  batch_size: 50_000,
  n_rows: nil,
  encoding: "utf8",
  low_memory: false,
  rechunk: true,
  skip_rows_after_header: 0,
  row_count_name: nil,
  row_count_offset: 0,
  eol_char: "\n",
  raise_if_empty: true,
  truncate_ragged_lines: false,
  decimal_comma: false
)
  projection, columns = Utils.handle_projection_columns(columns)

  if columns && !has_header
    columns.each do |column|
      if !column.start_with?("column_")
        raise ArgumentError, "Specified column names do not start with \"column_\", but autogenerated header names were requested."
      end
    end
  end

  if projection || new_columns
    raise Todo
  end

  BatchedCsvReader.new(
    source,
    has_header: has_header,
    columns: columns || projection,
    sep: sep,
    comment_char: comment_char,
    quote_char: quote_char,
    skip_rows: skip_rows,
    dtypes: dtypes,
    null_values: null_values,
    missing_utf8_is_empty_string: missing_utf8_is_empty_string,
    ignore_errors: ignore_errors,
    parse_dates: parse_dates,
    n_threads: n_threads,
    infer_schema_length: infer_schema_length,
    batch_size: batch_size,
    n_rows: n_rows,
    encoding: encoding == "utf8-lossy" ? encoding : "utf8",
    low_memory: low_memory,
    rechunk: rechunk,
    skip_rows_after_header: skip_rows_after_header,
    row_count_name: row_count_name,
    row_count_offset: row_count_offset,
    eol_char: eol_char,
    new_columns: new_columns,
    raise_if_empty: raise_if_empty,
    truncate_ragged_lines: truncate_ragged_lines,
    decimal_comma: decimal_comma
  )
end
#read_database(query, schema_overrides: nil) ⇒ DataFrame Also known as: read_sql
Read a SQL query into a DataFrame.
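For example, with an Active Record relation or a raw SQL string; the User model and table are hypothetical:

  active_users = Polars.read_database(User.where(active: true))
  recent_rows  = Polars.read_database("SELECT id, created_at FROM users")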
# File 'lib/polars/io/database.rb', line 12

def read_database(query, schema_overrides: nil)
  if !defined?(ActiveRecord)
    raise Error, "Active Record not available"
  end

  result =
    if query.is_a?(ActiveRecord::Result)
      query
    elsif query.is_a?(ActiveRecord::Relation)
      query.connection_pool.with_connection { |c| c.select_all(query.to_sql) }
    elsif query.is_a?(::String)
      ActiveRecord::Base.connection_pool.with_connection { |c| c.select_all(query) }
    else
      raise ArgumentError, "Expected ActiveRecord::Relation, ActiveRecord::Result, or String"
    end

  data = {}
  schema_overrides = (schema_overrides || {}).transform_keys(&:to_s)

  result.columns.each_with_index do |k, i|
    column_type = result.column_types[i]

    data[k] =
      if column_type
        result.rows.map { |r| column_type.deserialize(r[i]) }
      else
        result.rows.map { |r| r[i] }
      end

    polars_type =
      case column_type&.type
      when :binary
        Binary
      when :boolean
        Boolean
      when :date
        Date
      when :datetime, :timestamp
        Datetime
      when :decimal
        Decimal
      when :float
        Float64
      when :integer
        Int64
      when :string, :text
        String
      when :time
        Time
      # TODO fix issue with null
      # when :json, :jsonb
      #   Struct
      end

    schema_overrides[k] ||= polars_type if polars_type
  end

  DataFrame.new(data, schema_overrides: schema_overrides)
end
#read_ipc(source, columns: nil, n_rows: nil, memory_map: true, storage_options: nil, row_count_name: nil, row_count_offset: 0, rechunk: true) ⇒ DataFrame
Read into a DataFrame from an Arrow IPC (Feather v2) file.
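Basic usage with an illustrative path:

  df = Polars.read_ipc("data.arrow", n_rows: 500, memory_map: false)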
# File 'lib/polars/io/ipc.rb', line 27

def read_ipc(
  source,
  columns: nil,
  n_rows: nil,
  memory_map: true,
  storage_options: nil,
  row_count_name: nil,
  row_count_offset: 0,
  rechunk: true
)
  storage_options ||= {}
  _prepare_file_arg(source, **storage_options) do |data|
    _read_ipc_impl(
      data,
      columns: columns,
      n_rows: n_rows,
      row_count_name: row_count_name,
      row_count_offset: row_count_offset,
      rechunk: rechunk,
      memory_map: memory_map
    )
  end
end
#read_ipc_schema(source) ⇒ Hash
Get a schema of the IPC file without reading data.
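A one-line sketch with a hypothetical path:

  schema = Polars.read_ipc_schema("data.arrow")  # hash of column name => dtype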
# File 'lib/polars/io/ipc.rb', line 164

def read_ipc_schema(source)
  if Utils.pathlike?(source)
    source = Utils.normalize_filepath(source)
  end

  Plr.ipc_schema(source)
end
#read_ipc_stream(source, columns: nil, n_rows: nil, storage_options: nil, row_index_name: nil, row_index_offset: 0, rechunk: true) ⇒ DataFrame
Read into a DataFrame from an Arrow IPC record batch stream.
See "Streaming format" on https://arrow.apache.org/docs/python/ipc.html.
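Basic usage with an illustrative path:

  df = Polars.read_ipc_stream("stream.arrows", rechunk: false)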
# File 'lib/polars/io/ipc.rb', line 108

def read_ipc_stream(
  source,
  columns: nil,
  n_rows: nil,
  storage_options: nil,
  row_index_name: nil,
  row_index_offset: 0,
  rechunk: true
)
  storage_options ||= {}
  _prepare_file_arg(source, **storage_options) do |data|
    _read_ipc_stream_impl(
      data,
      columns: columns,
      n_rows: n_rows,
      row_index_name: row_index_name,
      row_index_offset: row_index_offset,
      rechunk: rechunk
    )
  end
end
#read_json(source, schema: nil, schema_overrides: nil, infer_schema_length: N_INFER_DEFAULT) ⇒ DataFrame
Read into a DataFrame from a JSON file.
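A minimal example with a hypothetical path:

  df = Polars.read_json("records.json", infer_schema_length: 200)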
# File 'lib/polars/io/json.rb', line 9

def read_json(
  source,
  schema: nil,
  schema_overrides: nil,
  infer_schema_length: N_INFER_DEFAULT
)
  if Utils.pathlike?(source)
    source = Utils.normalize_filepath(source)
  end

  rbdf =
    RbDataFrame.read_json(
      source,
      infer_schema_length,
      schema,
      schema_overrides
    )
  Utils.wrap_df(rbdf)
end
#read_ndjson(source, schema: nil, schema_overrides: nil, ignore_errors: false) ⇒ DataFrame
Read into a DataFrame from a newline delimited JSON file.
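A minimal example with a hypothetical path:

  df = Polars.read_ndjson("logs.ndjson", ignore_errors: true)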
# File 'lib/polars/io/ndjson.rb', line 9

def read_ndjson(
  source,
  schema: nil,
  schema_overrides: nil,
  ignore_errors: false
)
  if Utils.pathlike?(source)
    source = Utils.normalize_filepath(source)
  end

  rbdf =
    RbDataFrame.read_ndjson(
      source,
      ignore_errors,
      schema,
      schema_overrides
    )
  Utils.wrap_df(rbdf)
end
#read_parquet(source, columns: nil, n_rows: nil, row_count_name: nil, row_count_offset: 0, parallel: "auto", use_statistics: true, hive_partitioning: nil, glob: true, schema: nil, hive_schema: nil, try_parse_hive_dates: true, rechunk: false, low_memory: false, storage_options: nil, credential_provider: nil, retries: 2, include_file_paths: nil, allow_missing_columns: false) ⇒ DataFrame
Read into a DataFrame from a parquet file.
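Because this reads through scan_parquet and then collects, a column selection can be pushed into the scan; the path and column names are hypothetical:

  df = Polars.read_parquet("data.parquet", columns: ["id", "amount"], n_rows: 10_000)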
# File 'lib/polars/io/parquet.rb', line 54

def read_parquet(
  source,
  columns: nil,
  n_rows: nil,
  row_count_name: nil,
  row_count_offset: 0,
  parallel: "auto",
  use_statistics: true,
  hive_partitioning: nil,
  glob: true,
  schema: nil,
  hive_schema: nil,
  try_parse_hive_dates: true,
  rechunk: false,
  low_memory: false,
  storage_options: nil,
  credential_provider: nil,
  retries: 2,
  include_file_paths: nil,
  allow_missing_columns: false
)
  lf =
    scan_parquet(
      source,
      n_rows: n_rows,
      row_count_name: row_count_name,
      row_count_offset: row_count_offset,
      parallel: parallel,
      use_statistics: use_statistics,
      hive_partitioning: hive_partitioning,
      schema: schema,
      hive_schema: hive_schema,
      try_parse_hive_dates: try_parse_hive_dates,
      rechunk: rechunk,
      low_memory: low_memory,
      cache: false,
      storage_options: storage_options,
      credential_provider: credential_provider,
      retries: retries,
      glob: glob,
      include_file_paths: include_file_paths,
      allow_missing_columns: allow_missing_columns
    )

  if !columns.nil?
    if Utils.is_int_sequence(columns)
      lf = lf.select(F.nth(columns))
    else
      lf = lf.select(columns)
    end
  end

  lf.collect
end
#read_parquet_schema(source) ⇒ Hash
Get a schema of the Parquet file without reading data.
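A one-line sketch with a hypothetical path:

  schema = Polars.read_parquet_schema("data.parquet")  # hash of column name => dtype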
# File 'lib/polars/io/parquet.rb', line 115

def read_parquet_schema(source)
  if Utils.pathlike?(source)
    source = Utils.normalize_filepath(source)
  end

  Plr.parquet_schema(source)
end
#scan_csv(source, has_header: true, sep: ",", comment_char: nil, quote_char: '"', skip_rows: 0, dtypes: nil, null_values: nil, missing_utf8_is_empty_string: false, ignore_errors: false, cache: true, with_column_names: nil, infer_schema_length: N_INFER_DEFAULT, n_rows: nil, encoding: "utf8", low_memory: false, rechunk: true, skip_rows_after_header: 0, row_count_name: nil, row_count_offset: 0, parse_dates: false, eol_char: "\n", raise_if_empty: true, truncate_ragged_lines: false, decimal_comma: false, glob: true) ⇒ LazyFrame
Lazily read from a CSV file or multiple files via glob patterns.
This allows the query optimizer to push down predicates and projections to the scan level, thereby potentially reducing memory overhead.
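A sketch of a lazy query whose filter and projection can be pushed down to the scan; the glob pattern and column names are hypothetical:

  df = Polars.scan_csv("data/*.csv")
    .filter(Polars.col("amount") > 0)
    .select(["id", "amount"])
    .collect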
# File 'lib/polars/io/csv.rb', line 545

def scan_csv(
  source,
  has_header: true,
  sep: ",",
  comment_char: nil,
  quote_char: '"',
  skip_rows: 0,
  dtypes: nil,
  null_values: nil,
  missing_utf8_is_empty_string: false,
  ignore_errors: false,
  cache: true,
  with_column_names: nil,
  infer_schema_length: N_INFER_DEFAULT,
  n_rows: nil,
  encoding: "utf8",
  low_memory: false,
  rechunk: true,
  skip_rows_after_header: 0,
  row_count_name: nil,
  row_count_offset: 0,
  parse_dates: false,
  eol_char: "\n",
  raise_if_empty: true,
  truncate_ragged_lines: false,
  decimal_comma: false,
  glob: true
)
  Utils._check_arg_is_1byte("sep", sep, false)
  Utils._check_arg_is_1byte("comment_char", comment_char, false)
  Utils._check_arg_is_1byte("quote_char", quote_char, true)

  if Utils.pathlike?(source)
    source = Utils.normalize_filepath(source)
  end

  _scan_csv_impl(
    source,
    has_header: has_header,
    sep: sep,
    comment_char: comment_char,
    quote_char: quote_char,
    skip_rows: skip_rows,
    dtypes: dtypes,
    null_values: null_values,
    ignore_errors: ignore_errors,
    cache: cache,
    with_column_names: with_column_names,
    infer_schema_length: infer_schema_length,
    n_rows: n_rows,
    low_memory: low_memory,
    rechunk: rechunk,
    skip_rows_after_header: skip_rows_after_header,
    encoding: encoding,
    row_count_name: row_count_name,
    row_count_offset: row_count_offset,
    parse_dates: parse_dates,
    eol_char: eol_char,
    truncate_ragged_lines: truncate_ragged_lines
  )
end
#scan_ipc(source, n_rows: nil, cache: true, rechunk: true, row_count_name: nil, row_count_offset: 0, storage_options: nil, hive_partitioning: nil, hive_schema: nil, try_parse_hive_dates: true, include_file_paths: nil) ⇒ LazyFrame
Lazily read from an Arrow IPC (Feather v2) file or multiple files via glob patterns.
This allows the query optimizer to push down predicates and projections to the scan level, thereby potentially reducing memory overhead.
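A short sketch with a hypothetical glob pattern and column names:

  lf = Polars.scan_ipc("data/*.arrow", rechunk: false)
  df = lf.select(["id", "ts"]).collect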
# File 'lib/polars/io/ipc.rb', line 206

def scan_ipc(
  source,
  n_rows: nil,
  cache: true,
  rechunk: true,
  row_count_name: nil,
  row_count_offset: 0,
  storage_options: nil,
  hive_partitioning: nil,
  hive_schema: nil,
  try_parse_hive_dates: true,
  include_file_paths: nil
)
  _scan_ipc_impl(
    source,
    n_rows: n_rows,
    cache: cache,
    rechunk: rechunk,
    row_count_name: row_count_name,
    row_count_offset: row_count_offset,
    storage_options: storage_options,
    hive_partitioning: hive_partitioning,
    hive_schema: hive_schema,
    try_parse_hive_dates: try_parse_hive_dates,
    include_file_paths: include_file_paths
  )
end
#scan_ndjson(source, infer_schema_length: N_INFER_DEFAULT, batch_size: 1024, n_rows: nil, low_memory: false, rechunk: true, row_count_name: nil, row_count_offset: 0) ⇒ LazyFrame
Lazily read from a newline delimited JSON file.
This allows the query optimizer to push down predicates and projections to the scan level, thereby potentially reducing memory overhead.
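A sketch of a lazy NDJSON scan; the path and column name are hypothetical:

  df = Polars.scan_ndjson("logs.ndjson", batch_size: 4096)
    .filter(Polars.col("status_code") >= 500)
    .collect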
# File 'lib/polars/io/ndjson.rb', line 53

def scan_ndjson(
  source,
  infer_schema_length: N_INFER_DEFAULT,
  batch_size: 1024,
  n_rows: nil,
  low_memory: false,
  rechunk: true,
  row_count_name: nil,
  row_count_offset: 0
)
  sources = []
  if Utils.pathlike?(source)
    source = Utils.normalize_filepath(source)
  elsif source.is_a?(::Array)
    if Utils.is_path_or_str_sequence(source)
      sources = source.map { |s| Utils.normalize_filepath(s) }
    else
      sources = source
    end
    source = nil
  end

  rblf =
    RbLazyFrame.new_from_ndjson(
      source,
      sources,
      infer_schema_length,
      batch_size,
      n_rows,
      low_memory,
      rechunk,
      Utils.parse_row_index_args(row_count_name, row_count_offset)
    )
  Utils.wrap_ldf(rblf)
end
#scan_parquet(source, n_rows: nil, row_count_name: nil, row_count_offset: 0, parallel: "auto", use_statistics: true, hive_partitioning: nil, glob: true, schema: nil, hive_schema: nil, try_parse_hive_dates: true, rechunk: false, low_memory: false, cache: true, storage_options: nil, credential_provider: nil, retries: 2, include_file_paths: nil, allow_missing_columns: false) ⇒ LazyFrame
Lazily read from a parquet file or multiple files via glob patterns.
This allows the query optimizer to push down predicates and projections to the scan level, thereby potentially reducing memory overhead.
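A sketch of lazily scanning a hive-partitioned dataset; the glob pattern, partition column, and selected columns are hypothetical:

  df = Polars.scan_parquet("data/**/*.parquet", hive_partitioning: true)
    .filter(Polars.col("year") >= 2024)
    .select(["id", "amount"])
    .collect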
# File 'lib/polars/io/parquet.rb', line 176

def scan_parquet(
  source,
  n_rows: nil,
  row_count_name: nil,
  row_count_offset: 0,
  parallel: "auto",
  use_statistics: true,
  hive_partitioning: nil,
  glob: true,
  schema: nil,
  hive_schema: nil,
  try_parse_hive_dates: true,
  rechunk: false,
  low_memory: false,
  cache: true,
  storage_options: nil,
  credential_provider: nil,
  retries: 2,
  include_file_paths: nil,
  allow_missing_columns: false
)
  if Utils.pathlike?(source)
    source = Utils.normalize_filepath(source, check_not_directory: false)
  elsif Utils.is_path_or_str_sequence(source)
    source = source.map { |s| Utils.normalize_filepath(s, check_not_directory: false) }
  end

  if credential_provider
    raise Todo
  end

  _scan_parquet_impl(
    source,
    n_rows: n_rows,
    cache: cache,
    parallel: parallel,
    rechunk: rechunk,
    row_index_name: row_count_name,
    row_index_offset: row_count_offset,
    storage_options: storage_options,
    credential_provider: credential_provider,
    low_memory: low_memory,
    use_statistics: use_statistics,
    hive_partitioning: hive_partitioning,
    schema: schema,
    hive_schema: hive_schema,
    try_parse_hive_dates: try_parse_hive_dates,
    retries: retries,
    glob: glob,
    include_file_paths: include_file_paths,
    allow_missing_columns: allow_missing_columns
  )
end