Module: PgHaMigrations::SafeStatements

Defined in:
lib/pg_ha_migrations/safe_statements.rb

Instance Method Summary collapse

Instance Method Details

#_check_postgres_adapter!Object



549
550
551
552
553
# File 'lib/pg_ha_migrations/safe_statements.rb', line 549

# Guard: this gem emits PostgreSQL-specific SQL, so fail fast when the
# current ActiveRecord connection uses any other database adapter.
#
# Raises PgHaMigrations::UnsupportedAdapter otherwise.
def _check_postgres_adapter!
  adapter_in_use = ActiveRecord::Base.connection.adapter_name
  return if adapter_in_use == "PostgreSQL"

  raise PgHaMigrations::UnsupportedAdapter, "This gem only works with the PostgreSQL adapter, found #{adapter_in_use} instead"
end

#_fully_qualified_table_name_for_partman(table) ⇒ Object



535
536
537
538
539
540
541
542
543
# File 'lib/pg_ha_migrations/safe_statements.rb', line 535

# Resolves +table+ to a "schema.table" string suitable for passing to
# pg_partman, which only tolerates lowercase/underscore identifiers.
#
# Raises PgHaMigrations::InvalidMigrationError when either identifier
# contains characters partman cannot handle.
def _fully_qualified_table_name_for_partman(table)
  table = PgHaMigrations::Table.from_table_name(table)

  identifiers = [table.schema, table.name]
  identifiers.each do |identifier|
    unless identifier.to_s =~ /^[a-z_][a-z_\d]*$/
      raise PgHaMigrations::InvalidMigrationError, "Partman requires schema / table names to be lowercase with underscores"
    end
  end

  identifiers.join(".")
end

#_per_migration_callerObject



545
546
547
# File 'lib/pg_ha_migrations/safe_statements.rb', line 545

# Memoizes the Ruby call stack captured the first time this is invoked
# during a migration run; subsequent calls return the same snapshot.
def _per_migration_caller
  @_per_migration_caller ||= Kernel.caller
end

#_quoted_partman_schemaObject



522
523
524
525
526
527
528
529
530
531
532
533
# File 'lib/pg_ha_migrations/safe_statements.rb', line 522

# Looks up the schema hosting the pg_partman extension and returns it
# quoted for safe interpolation into SQL.
#
# Raises PgHaMigrations::InvalidMigrationError when pg_partman is not
# installed in the connected database.
def _quoted_partman_schema
  lookup_sql = <<~SQL
    SELECT nspname
    FROM pg_namespace JOIN pg_extension
      ON pg_namespace.oid = pg_extension.extnamespace
    WHERE pg_extension.extname = 'pg_partman'
  SQL

  schema = connection.select_value(lookup_sql)

  unless schema.present?
    raise PgHaMigrations::InvalidMigrationError, "The pg_partman extension is not installed"
  end

  connection.quote_schema_name(schema)
end

#_type_is_enum(type) ⇒ Object



555
556
557
# File 'lib/pg_ha_migrations/safe_statements.rb', line 555

# True when +type+ names a user-defined enum type in the connected
# database (checked against pg_type joined to pg_enum).
def _type_is_enum(type)
  enum_type_names = ActiveRecord::Base.connection.select_values(
    "SELECT typname FROM pg_type JOIN pg_enum ON pg_type.oid = pg_enum.enumtypid"
  )

  enum_type_names.include?(type.to_s)
end

#adjust_lock_timeout(timeout_seconds = PgHaMigrations::LOCK_TIMEOUT_SECONDS, &block) ⇒ Object



661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
# File 'lib/pg_ha_migrations/safe_statements.rb', line 661

# Runs the given block with Postgres's `lock_timeout` set to
# +timeout_seconds+, restoring the prior value afterwards.
#
# NOTE(review): the original-timeout parsing strips a trailing "s" and
# multiplies by 1000 — this assumes `SHOW lock_timeout` reports whole
# seconds (e.g. "5s"); a millisecond-valued setting (e.g. "100ms") would
# be misparsed. Confirm against the environments this gem supports.
def adjust_lock_timeout(timeout_seconds = PgHaMigrations::LOCK_TIMEOUT_SECONDS, &block)
  _check_postgres_adapter!
  original_timeout = ActiveRecord::Base.value_from_sql("SHOW lock_timeout").sub(/s\Z/, '').to_i * 1000
  begin
    # Timeout is escaped and expressed in milliseconds.
    connection.execute("SET lock_timeout = #{PG::Connection.escape_string((timeout_seconds * 1000).to_s)};")
    block.call
  ensure
    begin
      connection.execute("SET lock_timeout = #{original_timeout};")
    rescue ActiveRecord::StatementInvalid => e
      if e.message =~ /PG::InFailedSqlTransaction/
        # If we're in a failed transaction the `SET lock_timeout` will be rolled back,
        # so we don't need to worry about cleaning up, and we can't execute SQL anyway.
      else
        raise e
      end
    end
  end
end

#adjust_statement_timeout(timeout_seconds, &block) ⇒ Object



681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
# File 'lib/pg_ha_migrations/safe_statements.rb', line 681

# Runs the given block with Postgres's `statement_timeout` set to
# +timeout_seconds+, restoring the prior value afterwards.
#
# NOTE(review): as with adjust_lock_timeout, the parsing of the original
# value assumes `SHOW statement_timeout` reports whole seconds — verify.
def adjust_statement_timeout(timeout_seconds, &block)
  _check_postgres_adapter!
  original_timeout = ActiveRecord::Base.value_from_sql("SHOW statement_timeout").sub(/s\Z/, '').to_i * 1000
  begin
    # Timeout is escaped and expressed in milliseconds.
    connection.execute("SET statement_timeout = #{PG::Connection.escape_string((timeout_seconds * 1000).to_s)};")
    block.call
  ensure
    begin
      connection.execute("SET statement_timeout = #{original_timeout};")
    rescue ActiveRecord::StatementInvalid => e
      if e.message =~ /PG::InFailedSqlTransaction/
        # If we're in a failed transaction the `SET statement_timeout` will be rolled back,
        # so we don't need to worry about cleaning up, and we can't execute SQL anyway.
      else
        raise e
      end
    end
  end
end

#ensure_small_table!(table, empty: false, threshold: PgHaMigrations::SMALL_TABLE_THRESHOLD_BYTES) ⇒ Object



701
702
703
704
705
706
707
708
709
710
711
# File 'lib/pg_ha_migrations/safe_statements.rb', line 701

# Asserts that +table+ is small enough (and, optionally, empty) for an
# operation that briefly scans or locks it.
#
# table     - table name (resolved via PgHaMigrations::Table)
# empty:    - when true, also require the table to contain zero rows
# threshold: - max allowed on-disk size in bytes
#
# Raises PgHaMigrations::InvalidMigrationError when either check fails.
def ensure_small_table!(table, empty: false, threshold: PgHaMigrations::SMALL_TABLE_THRESHOLD_BYTES)
  table = PgHaMigrations::Table.from_table_name(table)

  raise PgHaMigrations::InvalidMigrationError, "Table #{table.inspect} has rows" if empty && table.has_rows?

  raise PgHaMigrations::InvalidMigrationError, "Table #{table.inspect} is larger than #{threshold} bytes" if table.total_bytes > threshold
end

#exec_migration(conn, direction) ⇒ Object



567
568
569
570
# File 'lib/pg_ha_migrations/safe_statements.rb', line 567

# Hook into Rails's migration execution: verify we're talking to Postgres
# before letting the standard migration machinery run.
def exec_migration(conn, direction)
  _check_postgres_adapter!
  super(conn, direction)
end

#migrate(direction) ⇒ Object



559
560
561
562
563
564
565
# File 'lib/pg_ha_migrations/safe_statements.rb', line 559

# Rejects migrations written with `#change` (Rails's reversible style):
# automated rollback inference isn't supported by this gem, so authors
# must write explicit `#up` (and `#down`) methods instead.
def migrate(direction)
  if respond_to?(:change)
    raise PgHaMigrations::UnsupportedMigrationError, "Tracking changes for automated rollback is not supported; use explicit #up instead."
  end

  super(direction)
end

#safe_add_column(table, column, type, **options) ⇒ Object



32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
# File 'lib/pg_ha_migrations/safe_statements.rb', line 32

# Safely adds a column, rejecting option combinations that would force a
# long-held lock or full-table rewrite:
# - :default is only allowed on Postgres 11+ and only for non-volatile
#   values (string defaults are allowed only for text-like/binary/enum
#   columns, where they can't be interpreted as expressions).
# - :null => false without a default is always rejected.
# Columns added without a default are recorded so that a later
# safe_change_column_default can suggest the single-step form instead.
def safe_add_column(table, column, type, **options)
  # Note: we don't believe we need to consider the odd case where
  # `:default => nil` or `:default => -> { null }` (or similar) is
  # passed because:
  # - It's OK to exclude that case with an "unnecessary" `raise`
  #   below as it doesn't make semantic sense anyway.
  # - If `:null => false` is also passed we are assuming Postgres's
  #   seq scan of the table (to verify the NOT NULL constraint) will
  #   short-circuit (though we have not confirmed that).
  if options.has_key?(:default)
    if ActiveRecord::Base.connection.postgresql_version < 11_00_00
      raise PgHaMigrations::UnsafeMigrationError.new(":default is NOT SAFE! Use safe_change_column_default afterwards then backfill the data to prevent locking the table")
    elsif options[:default].is_a?(Proc) || (options[:default].is_a?(String) && !([:string, :text, :binary].include?(type.to_sym) || _type_is_enum(type)))
      raise PgHaMigrations::UnsafeMigrationError.new(":default is not safe if the default value is volatile. Use safe_change_column_default afterwards then backfill the data to prevent locking the table")
    end
  elsif options[:null] == false
    raise PgHaMigrations::UnsafeMigrationError.new(":null => false is NOT SAFE if the table has data! If you want to do this, use safe_make_column_not_nullable")
  end

  unless options.has_key?(:default)
    self.safe_added_columns_without_default_value << [table.to_s, column.to_s]
  end

  unsafe_add_column(table, column, type, **options)
end

#safe_add_concurrent_index(table, columns, **options) ⇒ Object



237
238
239
240
241
242
243
244
# File 'lib/pg_ha_migrations/safe_statements.rb', line 237

# Adds an index using CREATE INDEX CONCURRENTLY, which avoids blocking
# writes on the table while the index builds.
#
# Raises PgHaMigrations::InvalidMigrationError if :nulls_not_distinct is
# requested on a server older than PostgreSQL 15 (where it is unsupported).
def safe_add_concurrent_index(table, columns, **options)
  # :nulls_not_distinct is a PostgreSQL 15+ feature; reject it early on
  # older servers rather than letting the DDL fail mid-migration.
  if options[:nulls_not_distinct] && ActiveRecord::Base.connection.postgresql_version < 15_00_00
    raise PgHaMigrations::InvalidMigrationError, "nulls_not_distinct option requires PostgreSQL 15 or higher"
  end

  unsafe_add_index(table, columns, **options, algorithm: :concurrently)
end

#safe_add_concurrent_partitioned_index(table, columns, name: nil, if_not_exists: nil, using: nil, unique: nil, where: nil, comment: nil, nulls_not_distinct: nil) ⇒ Object



258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
# File 'lib/pg_ha_migrations/safe_statements.rb', line 258

# Concurrently builds an index across a natively-partitioned table:
#   1. create an (initially invalid) index on ONLY the parent table,
#   2. create matching indexes on each child partition — recursing for
#      sub-partitions, CONCURRENTLY for leaf tables,
#   3. ATTACH each child index to the parent under a single table lock,
# after which Postgres marks the parent index valid.
#
# Requires Postgres 11+ (and 15+ for :nulls_not_distinct).
def safe_add_concurrent_partitioned_index(
  table,
  columns,
  name: nil,
  if_not_exists: nil,
  using: nil,
  unique: nil,
  where: nil,
  comment: nil,
  nulls_not_distinct: nil
)
  # Check if nulls_not_distinct was provided but PostgreSQL version doesn't support it
  if !nulls_not_distinct.nil? && ActiveRecord::Base.connection.postgresql_version < 15_00_00
    raise PgHaMigrations::InvalidMigrationError, "nulls_not_distinct option requires PostgreSQL 15 or higher"
  end

  if ActiveRecord::Base.connection.postgresql_version < 11_00_00
    raise PgHaMigrations::InvalidMigrationError, "Concurrent partitioned index creation not supported on Postgres databases before version 11"
  end

  parent_table = PgHaMigrations::Table.from_table_name(table)

  raise PgHaMigrations::InvalidMigrationError, "Table #{parent_table.inspect} is not a partitioned table" unless parent_table.natively_partitioned?

  # Derive the parent index identity either from the caller-supplied name
  # or from the table + column list.
  parent_index = if name.present?
    PgHaMigrations::Index.new(name, parent_table)
  else
    PgHaMigrations::Index.from_table_and_columns(parent_table, columns)
  end

  # Short-circuit when if_not_exists: true and index already valid
  return if if_not_exists && parent_index.valid?

  child_indexes = parent_table.partitions.map do |child_table|
    PgHaMigrations::Index.from_table_and_columns(child_table, columns)
  end

  # CREATE INDEX ON ONLY parent_table
  unsafe_add_index(
    parent_table.fully_qualified_name,
    columns,
    name: parent_index.name,
    if_not_exists: if_not_exists,
    using: using,
    unique: unique,
    nulls_not_distinct: nulls_not_distinct,
    where: where,
    comment: comment,
    algorithm: :only, # see lib/pg_ha_migrations/hacks/add_index_on_only.rb
  )

  child_indexes.each do |child_index|
    # Sub-partitioned children recurse; leaf children build concurrently.
    add_index_method = if child_index.table.natively_partitioned?
      :safe_add_concurrent_partitioned_index
    else
      :safe_add_concurrent_index
    end

    send(
      add_index_method,
      child_index.table.fully_qualified_name,
      columns,
      name: child_index.name,
      if_not_exists: if_not_exists,
      using: using,
      unique: unique,
      nulls_not_distinct: nulls_not_distinct,
      where: where,
    )
  end

  # Avoid taking out an unnecessary lock if there are no child tables to attach
  if child_indexes.present?
    safely_acquire_lock_for_table(parent_table.fully_qualified_name) do
      child_indexes.each do |child_index|
        say_with_time "Attaching index #{child_index.inspect} to #{parent_index.inspect}" do
          connection.execute(<<~SQL)
            ALTER INDEX #{parent_index.fully_qualified_name}
            ATTACH PARTITION #{child_index.fully_qualified_name}
          SQL
        end
      end
    end
  end

  # Sanity check: once every partition's index is attached the parent index
  # should have been marked valid by Postgres.
  raise PgHaMigrations::InvalidMigrationError, "Unexpected state. Parent index #{parent_index.inspect} is invalid" unless parent_index.valid?
end

#safe_add_enum_value(name, value) ⇒ Object



28
29
30
# File 'lib/pg_ha_migrations/safe_statements.rb', line 28

# Appends a new value to an existing enum type. The type name is
# identifier-quoted and the value string-escaped before interpolation.
def safe_add_enum_value(name, value)
  raw_execute("ALTER TYPE #{PG::Connection.quote_ident(name.to_s)} ADD VALUE '#{PG::Connection.escape_string(value)}'")
end

#safe_add_index_on_empty_table(table, columns, **options) ⇒ Object



216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
# File 'lib/pg_ha_migrations/safe_statements.rb', line 216

# Adds a non-concurrent index, which is only safe when the table is empty.
# Emptiness is verified twice: once cheaply up front, and once more after
# acquiring a SHARE lock so no rows can sneak in between check and DDL.
#
# Raises ArgumentError when :algorithm => :concurrently is passed (use
# safe_add_concurrent_index for that), and
# PgHaMigrations::InvalidMigrationError for unsupported options or a
# non-empty table.
def safe_add_index_on_empty_table(table, columns, **options)
  raise ArgumentError, "Cannot call safe_add_index_on_empty_table with :algorithm => :concurrently" if options[:algorithm] == :concurrently

  # :nulls_not_distinct is a PostgreSQL 15+ feature.
  if options[:nulls_not_distinct] && ActiveRecord::Base.connection.postgresql_version < 15_00_00
    raise PgHaMigrations::InvalidMigrationError, "nulls_not_distinct option requires PostgreSQL 15 or higher"
  end

  # Cheap pre-check: avoids taking out an unnecessary SHARE lock if the
  # table already has data.
  ensure_small_table!(table, empty: true)

  safely_acquire_lock_for_table(table, mode: :share) do
    # Re-verify under the lock in case data was written in the split
    # second after the first check.
    ensure_small_table!(table, empty: true)

    unsafe_add_index(table, columns, **options)
  end
end

#safe_add_unvalidated_check_constraint(table, expression, name:) ⇒ Object



350
351
352
# File 'lib/pg_ha_migrations/safe_statements.rb', line 350

# Adds a CHECK constraint with NOT VALID, which only requires a brief lock;
# existing rows can be validated later via safe_validate_check_constraint.
def safe_add_unvalidated_check_constraint(table, expression, name:)
  unsafe_add_check_constraint(table, expression, name: name, validate: false)
end

#safe_added_columns_without_default_valueObject



2
3
4
# File 'lib/pg_ha_migrations/safe_statements.rb', line 2

# Accumulates [table_name, column_name] string pairs for columns added via
# safe_add_column without a :default; consulted by
# safe_change_column_default to suggest the single-step form on PG 11+.
def safe_added_columns_without_default_value
  @safe_added_columns_without_default_value ||= []
end

#safe_change_column_default(table_name, column_name, default_value) ⇒ Object



58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
# File 'lib/pg_ha_migrations/safe_statements.rb', line 58

# Safely changes a column's default under a carefully-acquired lock.
#
# Rejects ambiguous string-literal defaults on non-text-like columns (see
# the heuristics below) and, when configured, nudges authors toward the
# single-step add-column-with-default form available on Postgres 11+.
#
# Bug fix: the heuristic previously read `column..type`, which parses as a
# Range (`column..type`) over an undefined local `type` and raises at
# runtime; the intended call is `column.type`.
def safe_change_column_default(table_name, column_name, default_value)
  if PgHaMigrations.config.prefer_single_step_column_addition_with_default &&
      ActiveRecord::Base.connection.postgresql_version >= 11_00_00 &&
      self.safe_added_columns_without_default_value.include?([table_name.to_s, column_name.to_s])
    raise PgHaMigrations::BestPracticeError, "On Postgres 11+ it's safe to set a constant default value when adding a new column; please set the default value as part of the column addition"
  end

  column = connection.send(:column_for, table_name, column_name)

  # In 5.2 we have an edge whereby passing in a string literal with an expression
  # results in confusing behavior because instead of being executed in the database
  # that expression is turned into a Ruby nil before being sent to the database layer;
  # this seems to be an expected side effect of a change that was targeted at a use
  # case unrelated to migrations: https://github.com/rails/rails/commit/7b2dfdeab6e4ef096e4dc1fe313056f08ccf7dc5
  #
  # On the other hand, the behavior in 5.1 is also confusing because it quotes the
  # expression (instead of maintaining the string as-is), which results in Postgres
  # evaluating the expression once when executing the DDL and setting the default to
  # the constant result of that evaluation rather than setting the default to the
  # expression itself.
  #
  # Therefore we want to disallow passing in an expression directly as a string and
  # require the use of a Proc instead with specific quoting rules to determine exact
  # behavior. It's fairly difficult (without relying on something like the PgQuery gem
  # which requires native extensions built with the Postgres dev packages installed)
  # to determine if a string literal represent an expression or just a constant. So
  # instead of trying to parse the expression, we employ a set of heuristics:
  # - If the column is text-like or binary, then we can allow anything in the default
  #   value since a Ruby string there will always coerce directly to the equivalent
  #   text/binary value rather than being interpreted as a DDL-time expression.
  # - Custom enum types are a special case: they also are treated like strings by
  #   Rails, so we want to allow those as-is.
  # - Otherwise, disallow any Ruby string values and instead require the Ruby object
  #   type that maps to the column type.
  #
  # These heuristics eliminate (virtually?) all ambiguity. In theory there's a
  # possibility that some custom object could be coerced-Ruby side into a SQL string
  # that does something weird here, but that seems an odd enough case that we can
  # safely ignore it.
  if default_value.present? &&
     !default_value.is_a?(Proc) &&
     (
       connection.quote_default_expression(default_value, column) == "NULL" ||
       (
         ![:string, :text, :binary, :enum].include?(column.type) &&
         default_value.is_a?(String)
       )
     )
    raise PgHaMigrations::InvalidMigrationError, <<~ERROR
      Setting a default value to an expression using a string literal is ambiguous.

      If you want the default to be:
      * ...a constant scalar value, use the matching Ruby object type instead of a string if possible (e.g., `DateTime.new(...)`).
      * ...an expression evaluated at runtime for each row, then pass a Proc that returns the expression string (e.g., `-> { "NOW()" }`).
      * ...an expression evaluated at migration time, then pass a Proc that returns a quoted expression string (e.g., `-> { "'NOW()'" }`).
    ERROR
  end

  safely_acquire_lock_for_table(table_name) do
    unsafe_change_column_default(table_name, column_name, default_value)
  end
end

#safe_create_enum_type(name, values = nil) ⇒ Object



14
15
16
17
18
19
20
21
22
23
24
25
26
# File 'lib/pg_ha_migrations/safe_statements.rb', line 14

# Creates a new enum type with the given values. An explicit (possibly
# empty) array is required so a forgotten value list is caught early.
#
# Raises ArgumentError when +values+ is nil.
def safe_create_enum_type(name, values=nil)
  if values.nil?
    raise ArgumentError, "safe_create_enum_type expects a set of values; if you want an enum with no values please pass an empty array"
  end

  quoted_type_name = PG::Connection.quote_ident(name.to_s)
  quoted_values = values.map { |value| "'#{PG::Connection.escape_string(value.to_s)}'" }

  # An empty value list yields "AS ENUM ()", matching the dedicated
  # empty-array branch this replaces.
  raw_execute("CREATE TYPE #{quoted_type_name} AS ENUM (#{quoted_values.join(',')})")
end

#safe_create_partitioned_table(table, partition_key:, type:, infer_primary_key: nil, **options, &block) ⇒ Object

Raises:

  • (ArgumentError)


382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
# File 'lib/pg_ha_migrations/safe_statements.rb', line 382

# Creates a natively-partitioned table (range/list/hash per +type+),
# injecting the PARTITION BY clause via the :options create-table option
# and, when enabled, folding the partition key into the primary key.
#
# Requires Postgres 10+ (11+ for :hash partitioning).
def safe_create_partitioned_table(table, partition_key:, type:, infer_primary_key: nil, **options, &block)
  raise ArgumentError, "Expected <partition_key> to be present" unless partition_key.present?

  unless PgHaMigrations::PARTITION_TYPES.include?(type)
    raise ArgumentError, "Expected <type> to be symbol in #{PgHaMigrations::PARTITION_TYPES} but received #{type.inspect}"
  end

  if ActiveRecord::Base.connection.postgresql_version < 10_00_00
    raise PgHaMigrations::InvalidMigrationError, "Native partitioning not supported on Postgres databases before version 10"
  end

  if type == :hash && ActiveRecord::Base.connection.postgresql_version < 11_00_00
    raise PgHaMigrations::InvalidMigrationError, "Hash partitioning not supported on Postgres databases before version 11"
  end

  # Fall back to the gem-wide configuration when the caller didn't choose.
  if infer_primary_key.nil?
    infer_primary_key = PgHaMigrations.config.infer_primary_key_on_partitioned_tables
  end

  # Newer versions of Rails will set the primary key column to the type :primary_key.
  # This performs some extra logic that we can't easily undo which causes problems when
  # trying to inject the partition key into the PK. Now, it would be nice to lookup the
  # default primary key type instead of simply using :bigserial, but it doesn't appear
  # that we have access to the Rails configuration from within our migrations.
  if options[:id].nil? || options[:id] == :primary_key
    options[:id] = :bigserial
  end

  quoted_partition_key = if partition_key.is_a?(Proc)
    # Lambda syntax, like in other migration methods, implies an expression that
    # cannot be easily sanitized.
    #
    # e.g ->{ "(created_at::date)" }
    partition_key.call.to_s
  else
    # Otherwise, assume key is a column name or array of column names
    Array.wrap(partition_key).map { |col| connection.quote_column_name(col) }.join(",")
  end

  options[:options] = "PARTITION BY #{type.upcase} (#{quoted_partition_key})"

  safe_create_table(table, **options) do |td|
    block.call(td) if block

    next unless options[:id]

    # Collect the would-be primary key columns and unset their primary_key
    # flag so we can re-declare a composite PK that includes the partition
    # key (Postgres requires the partition key in any unique constraint).
    pk_columns = td.columns.each_with_object([]) do |col, arr|
      next unless col.options[:primary_key]

      col.options[:primary_key] = false

      arr << col.name
    end

    if infer_primary_key && !partition_key.is_a?(Proc) && ActiveRecord::Base.connection.postgresql_version >= 11_00_00
      td.primary_keys(pk_columns.concat(Array.wrap(partition_key)).map(&:to_s).uniq)
    end
  end
end

#safe_create_table(table, **options, &block) ⇒ Object



6
7
8
9
10
11
12
# File 'lib/pg_ha_migrations/safe_statements.rb', line 6

# Creates a table, rejecting the :force option: :force would silently DROP
# an existing table of the same name, which is never safe implicitly.
def safe_create_table(table, **options, &block)
  raise PgHaMigrations::UnsafeMigrationError.new(":force is NOT SAFE! Explicitly call unsafe_drop_table first if you want to recreate an existing table") if options[:force]

  unsafe_create_table(table, **options, &block)
end

#safe_make_column_not_nullable(table, column) ⇒ Object

Postgres 12+ can use a valid CHECK constraint to validate that no values of a column are null, avoiding a full table scan while holding an exclusive lock on the table when altering a column to NOT NULL

Source: dba.stackexchange.com/questions/267947/how-can-i-set-a-column-to-not-null-without-locking-the-table-during-a-table-scan/268128#268128 (archive.is/X55up)



136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
# File 'lib/pg_ha_migrations/safe_statements.rb', line 136

# Makes a column NOT NULL without a long-held exclusive lock by first
# adding and validating a temporary `IS NOT NULL` CHECK constraint
# (validation only takes a weak lock), then performing SET NOT NULL —
# which Postgres 12+ can satisfy from the proven constraint without a
# table scan — and finally dropping the temporary constraint.
def safe_make_column_not_nullable(table, column)
  if ActiveRecord::Base.connection.postgresql_version < 12_00_00
    raise PgHaMigrations::InvalidMigrationError, "Cannot safely make a column non-nullable before Postgres 12"
  end

  validated_table = PgHaMigrations::Table.from_table_name(table)
  # Deterministic name derived from the column so a crashed prior run is
  # detectable (and so the name fits identifier length limits).
  tmp_constraint_name = "tmp_not_null_constraint_#{OpenSSL::Digest::SHA256.hexdigest(column.to_s).first(7)}"

  if validated_table.check_constraints.any? { |c| c.name == tmp_constraint_name }
    raise PgHaMigrations::InvalidMigrationError, "A constraint #{tmp_constraint_name.inspect} already exists. " \
      "This implies that a previous invocation of this method failed and left behind a temporary constraint. " \
      "Please drop the constraint before attempting to run this method again."
  end

  safe_add_unvalidated_check_constraint(table, "#{connection.quote_column_name(column)} IS NOT NULL", name: tmp_constraint_name)
  safe_validate_check_constraint(table, name: tmp_constraint_name)

  # "Ordinarily this is checked during the ALTER TABLE by scanning the entire table; however, if a
  # valid CHECK constraint is found which proves no NULL can exist, then the table scan is
  # skipped."
  # See: https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-DESC-SET-DROP-NOT-NULL
  unsafe_make_column_not_nullable(table, column)
  unsafe_remove_constraint(table, name: tmp_constraint_name)
end

#safe_make_column_not_nullable_from_check_constraint(table, column, constraint_name:, drop_constraint: true) ⇒ Object

This method is a variant of safe_make_column_not_nullable that is expected to always be fast; i.e., it will not perform a full table scan to check for null values.



163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
# File 'lib/pg_ha_migrations/safe_statements.rb', line 163

# Makes a column NOT NULL using a pre-existing, already-validated CHECK
# constraint that proves no NULLs exist — always fast because Postgres 12+
# skips the table scan in that case. Optionally drops the constraint in
# the same lock acquisition.
#
# Raises when the constraint is missing, unvalidated, or its definition
# does not strictly match `CHECK (<column> IS NOT NULL)`.
def safe_make_column_not_nullable_from_check_constraint(table, column, constraint_name:, drop_constraint: true)
  unless ActiveRecord::Base.connection.postgresql_version >= 12_00_00
    raise PgHaMigrations::InvalidMigrationError, "Cannot safely make a column non-nullable before Postgres 12"
  end

  unless constraint_name
    raise ArgumentError, "Expected <constraint_name> to be present"
  end
  constraint_name = constraint_name.to_s

  quoted_table_name = connection.quote_table_name(table)
  quoted_column_name = connection.quote_column_name(column)

  validated_table = PgHaMigrations::Table.from_table_name(table)
  constraint = validated_table.check_constraints.find do |c|
    c.name == constraint_name
  end

  unless constraint
    raise PgHaMigrations::InvalidMigrationError, "The provided constraint does not exist"
  end

  unless constraint.validated
    raise PgHaMigrations::InvalidMigrationError, "The provided constraint is not validated"
  end

  # The constraint has to actually prove that no null values exist, so the
  # constraint condition can't simply include the `IS NOT NULL` check. We
  # don't try to handle all possible cases here. For example,
  # `a IS NOT NULL AND b IS NOT NULL` would prove what we need, but it would
  # be complicated to check. We must ensure, however, that we're not too
  # loose. For example, `a IS NOT NULL OR b IS NOT NULL` would not prove that
  # `a IS NOT NULL`.
  unless constraint.definition =~ /\ACHECK \(*(#{Regexp.escape(column.to_s)}|#{Regexp.escape(quoted_column_name)}) IS NOT NULL\)*\Z/i
    raise PgHaMigrations::InvalidMigrationError, "The provided constraint does not enforce non-null values for the column"
  end

  # We don't want to acquire an exclusive lock on the table twice, and we also don't want it to be
  # posssible to have the NOT NULL constraint addition succeed while the constraint removal fails,
  # so we acquire the lock once and do both operations in the same block.
  safely_acquire_lock_for_table(table) do
    # "Ordinarily this is checked during the ALTER TABLE by scanning the entire table; however, if a
    # valid CHECK constraint is found which proves no NULL can exist, then the table scan is
    # skipped."
    # See: https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-DESC-SET-DROP-NOT-NULL
    unsafe_make_column_not_nullable(table, column)

    if drop_constraint
      unsafe_remove_constraint(table, name: constraint_name)
    end
  end
end

#safe_make_column_nullable(table, column) ⇒ Object



121
122
123
124
125
126
127
128
# File 'lib/pg_ha_migrations/safe_statements.rb', line 121

# Drops a NOT NULL constraint from a column. DROP NOT NULL is quick, but we
# still route the ALTER TABLE through safely_acquire_lock_for_table so the
# exclusive lock is obtained with the gem's retry/timeout protections.
def safe_make_column_nullable(table, column)
  alter_sql = "ALTER TABLE #{connection.quote_table_name(table)} ALTER COLUMN #{connection.quote_column_name(column)} DROP NOT NULL"

  safely_acquire_lock_for_table(table) do
    raw_execute alter_sql
  end
end

#safe_partman_create_parent(table, partition_key:, interval:, infinite_time_partitions: true, inherit_privileges: true, premake: nil, start_partition: nil, template_table: nil, retention: nil, retention_keep_table: nil) ⇒ Object

Raises:

  • (ArgumentError)


442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
# File 'lib/pg_ha_migrations/safe_statements.rb', line 442

# Registers an existing partitioned table with pg_partman by calling
# partman's create_parent() with quoted, keyword-style arguments, then
# applies the remaining settings via unsafe_partman_update_config.
# Retention options are accepted here because they pass through the
# safe/unsafe split in safe_partman_update_config's caller contract.
#
# Requires Postgres 11+. +start_partition+ must be a Date/Time/DateTime.
def safe_partman_create_parent(
  table,
  partition_key:,
  interval:,
  infinite_time_partitions: true,
  inherit_privileges: true,
  premake: nil,
  start_partition: nil,
  template_table: nil,
  retention: nil,
  retention_keep_table: nil
)
  raise ArgumentError, "Expected <partition_key> to be present" unless partition_key.present?
  raise ArgumentError, "Expected <interval> to be present" unless interval.present?

  if ActiveRecord::Base.connection.postgresql_version < 11_00_00
    raise PgHaMigrations::InvalidMigrationError, "Native partitioning with partman not supported on Postgres databases before version 11"
  end

  formatted_start_partition = nil

  if start_partition.present?
    if !start_partition.is_a?(Date) && !start_partition.is_a?(Time) && !start_partition.is_a?(DateTime)
      raise PgHaMigrations::InvalidMigrationError, "Expected <start_partition> to be Date, Time, or DateTime object but received #{start_partition.class}"
    end

    # to_fs replaced to_s(:format) in newer Rails; support both.
    formatted_start_partition = if start_partition.respond_to?(:to_fs)
      start_partition.to_fs(:db)
    else
      start_partition.to_s(:db)
    end
  end

  # nil-valued options are dropped (compact) so partman's own defaults apply.
  create_parent_options = {
    parent_table: _fully_qualified_table_name_for_partman(table),
    template_table: template_table ? _fully_qualified_table_name_for_partman(template_table) : nil,
    control: partition_key,
    type: "native",
    interval: interval,
    premake: premake,
    start_partition: formatted_start_partition,
  }.compact

  # Rendered as named arguments (p_<key> := <quoted value>) for create_parent().
  create_parent_sql = create_parent_options.map { |k, v| "p_#{k} := #{connection.quote(v)}" }.join(", ")

  log_message = "partman_create_parent(#{table.inspect}, " \
    "partition_key: #{partition_key.inspect}, " \
    "interval: #{interval.inspect}, " \
    "premake: #{premake.inspect}, " \
    "start_partition: #{start_partition.inspect}, " \
    "template_table: #{template_table.inspect})"

  say_with_time(log_message) do
    connection.execute("SELECT #{_quoted_partman_schema}.create_parent(#{create_parent_sql})")
  end

  update_config_options = {
    infinite_time_partitions: infinite_time_partitions,
    inherit_privileges: inherit_privileges,
    retention: retention,
    retention_keep_table: retention_keep_table,
  }.compact

  unsafe_partman_update_config(table, **update_config_options)
end

#safe_partman_reapply_privileges(table) ⇒ Object



516
517
518
519
520
# File 'lib/pg_ha_migrations/safe_statements.rb', line 516

# Re-applies the parent table's privileges to all child partitions via
# partman's reapply_privileges() function.
def safe_partman_reapply_privileges(table)
  say_with_time "partman_reapply_privileges(#{table.inspect})" do
    connection.execute("SELECT #{_quoted_partman_schema}.reapply_privileges('#{_fully_qualified_table_name_for_partman(table)}')")
  end
end

#safe_partman_update_config(table, **options) ⇒ Object



508
509
510
511
512
513
514
# File 'lib/pg_ha_migrations/safe_statements.rb', line 508

# Updates partman's per-table configuration, refusing option values that
# could cause partitions (and their data) to be dropped: any :retention
# setting, or :retention_keep_table => false. Callers who really want
# those must use unsafe_partman_update_config explicitly.
def safe_partman_update_config(table, **options)
  data_loss_risk = options[:retention].present? || options[:retention_keep_table] == false

  if data_loss_risk
    raise PgHaMigrations::UnsafeMigrationError.new(":retention and/or :retention_keep_table => false can potentially result in data loss if misconfigured. Please use unsafe_partman_update_config if you want to set these options")
  end

  unsafe_partman_update_config(table, **options)
end

#safe_remove_concurrent_index(table, **options) ⇒ Object



246
247
248
249
250
251
252
253
254
255
256
# File 'lib/pg_ha_migrations/safe_statements.rb', line 246

# Drops an index using DROP INDEX CONCURRENTLY (no write-blocking lock),
# logging the index's on-disk size first. Requires the index :name so the
# size lookup and the drop target the same object.
def safe_remove_concurrent_index(table, **options)
  unless options.is_a?(Hash) && options.key?(:name)
    raise ArgumentError, "Expected safe_remove_concurrent_index to be called with arguments (table_name, :name => ...)"
  end
  # NOTE(review): the version check requires 9.6+ but the message refers to
  # "9.1 databases" — confirm which bound is intended and align the two.
  unless ActiveRecord::Base.connection.postgresql_version >= 9_06_00
    raise PgHaMigrations::InvalidMigrationError, "Removing an index concurrently is not supported on Postgres 9.1 databases"
  end
  index_size = select_value("SELECT pg_size_pretty(pg_relation_size('#{options[:name]}'))")
  say "Preparing to drop index #{options[:name]} which is #{index_size} on disk..."
  unsafe_remove_index(table, **options.merge(:algorithm => :concurrently))
end

#safe_rename_constraint(table, from:, to:) ⇒ Object

Raises:

  • (ArgumentError)


366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
# File 'lib/pg_ha_migrations/safe_statements.rb', line 366

# Renames a constraint on +table+ from +from+ to +to+. The rename itself
# is metadata-only, but it needs an exclusive lock, so it runs inside
# safely_acquire_lock_for_table.
#
# Raises ArgumentError when either name is blank.
def safe_rename_constraint(table, from:, to:)
  raise ArgumentError, "Expected <from> to be present" unless from.present?
  raise ArgumentError, "Expected <to> to be present" unless to.present?

  rename_sql = [
    "ALTER TABLE #{connection.quote_table_name(table)}",
    "RENAME CONSTRAINT #{connection.quote_table_name(from)}",
    "TO #{connection.quote_table_name(to)}",
  ].join(" ")

  safely_acquire_lock_for_table(table) do
    say_with_time "rename_constraint(#{table.inspect}, from: #{from.inspect}, to: #{to.inspect})" do
      connection.execute(rename_sql)
    end
  end
end

#safe_set_maintenance_work_mem_gb(gigabytes) ⇒ Object



346
347
348
# File 'lib/pg_ha_migrations/safe_statements.rb', line 346

# Sets maintenance_work_mem for the current session, expressed in gigabytes.
# The value is escaped before being embedded in the SET statement.
#
# @param gigabytes [Integer, #to_s] amount of memory in GB
def safe_set_maintenance_work_mem_gb(gigabytes)
  escaped_value = PG::Connection.escape_string(gigabytes.to_s)
  raw_execute("SET maintenance_work_mem = '#{escaped_value} GB'")
end

#safe_validate_check_constraint(table, name:) ⇒ Object

Raises:

  • (ArgumentError)


354
355
356
357
358
359
360
361
362
363
364
# File 'lib/pg_ha_migrations/safe_statements.rb', line 354

# Validates an existing (NOT VALID) check constraint on +table+. VALIDATE
# CONSTRAINT only takes a weak lock, so no safe lock acquisition is needed.
#
# @param table [String, Symbol] the table owning the constraint
# @param name [String, Symbol] the constraint to validate
# @raise [ArgumentError] when the name is blank
def safe_validate_check_constraint(table, name:)
  raise ArgumentError, "Expected <name> to be present" unless name.present?

  validate_sql = "ALTER TABLE #{connection.quote_table_name(table)} VALIDATE CONSTRAINT #{connection.quote_table_name(name)}"

  say_with_time "validate_check_constraint(#{table.inspect}, name: #{name.inspect})" do
    connection.execute(validate_sql)
  end
end

#safely_acquire_lock_for_table(*tables, mode: :access_exclusive, &block) ⇒ Object



572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
# File 'lib/pg_ha_migrations/safe_statements.rb', line 572

# Acquires a table lock (ACCESS EXCLUSIVE by default) on one or more tables,
# then yields the given block inside a transaction that holds that lock.
# Before attempting the lock it waits until no long-running transactions
# hold conflicting locks, and it retries (with a backoff sleep) if the lock
# attempt itself times out.
#
# Nested calls are allowed only when the outer call already covers every
# requested table at an equal-or-stricter lock mode; in that case the block
# runs immediately without taking a new lock.
#
# @param tables [Array<String, Symbol>] tables to lock
# @param mode [Symbol] lock mode, e.g. :access_exclusive
# @raise [PgHaMigrations::InvalidMigrationError] on nested-lock or
#   lock-escalation violations
# @raise [PgHaMigrations::UnsupportedAdapter] when not on PostgreSQL
def safely_acquire_lock_for_table(*tables, mode: :access_exclusive, &block)
  _check_postgres_adapter!

  target_tables = PgHaMigrations::TableCollection.from_table_names(tables, mode)

  # @parent_lock_tables is set while an outer safely_acquire_lock_for_table
  # block is executing; its presence means we are in a nested call.
  if @parent_lock_tables
    if !target_tables.subset?(@parent_lock_tables)
      raise PgHaMigrations::InvalidMigrationError,
        "Nested lock detected! Cannot acquire lock on #{target_tables.to_sql} " \
        "while #{@parent_lock_tables.to_sql} is locked."
    end

    if @parent_lock_tables.mode < target_tables.mode
      raise PgHaMigrations::InvalidMigrationError,
        "Lock escalation detected! Cannot change lock level from :#{@parent_lock_tables.mode} " \
        "to :#{target_tables.mode} for #{target_tables.to_sql}."
    end

    # If in a nested context and all of the above checks have passed,
    # we have already acquired the lock (or a lock at a higher level),
    # and can simply execute the block and short-circuit.
    block.call

    return
  end

  successfully_acquired_lock = false

  until successfully_acquired_lock
    # Inner loop: poll until no currently-running transaction holds a lock
    # that would conflict with the one we want, so our LOCK statement is
    # unlikely to queue behind (and block traffic behind) a long transaction.
    loop do
      blocking_transactions = PgHaMigrations::BlockingDatabaseTransactions.find_blocking_transactions("#{PgHaMigrations::LOCK_TIMEOUT_SECONDS} seconds")

      # Locking a partitioned table will also lock child tables (including sub-partitions),
      # so we need to check for blocking queries on those tables as well
      target_tables_with_partitions = target_tables.with_partitions

      break unless blocking_transactions.any? do |query|
        query.tables_with_locks.any? do |locked_table|
          target_tables_with_partitions.any? do |target_table|
            target_table.conflicts_with?(locked_table)
          end
        end
      end

      say "Waiting on blocking transactions:"
      blocking_transactions.each do |blocking_transaction|
        say blocking_transaction.description
      end
      sleep(PgHaMigrations::LOCK_TIMEOUT_SECONDS)
    end

    connection.transaction do
      begin
        # A lock timeout would apply to each individual table in the query,
        # so we made a conscious decision to use a statement timeout here
        # to keep behavior consistent in a multi-table lock scenario.
        adjust_statement_timeout(PgHaMigrations::LOCK_TIMEOUT_SECONDS) do
          connection.execute("LOCK #{target_tables.to_sql} IN #{target_tables.mode.to_sql} MODE;")
        end
        successfully_acquired_lock = true
      rescue ActiveRecord::StatementInvalid => e
        # It is still possible to hit a lock timeout if the session has
        # that value set to something less than LOCK_TIMEOUT_SECONDS.
        # We should retry when either of these exceptions are raised.
        if e.message =~ /PG::LockNotAvailable.+ lock timeout/ || e.message =~ /PG::QueryCanceled.+ statement timeout/
          sleep_seconds = PgHaMigrations::LOCK_FAILURE_RETRY_DELAY_MULTLIPLIER * PgHaMigrations::LOCK_TIMEOUT_SECONDS
          say "Timed out trying to acquire #{target_tables.mode.to_sql} lock on #{target_tables.to_sql}."
          say "Sleeping for #{sleep_seconds}s to allow potentially queued up queries to finish before continuing."
          sleep(sleep_seconds)

          # Rollback aborts only this transaction block; the outer `until`
          # loop then re-runs the blocking-transaction wait and retries.
          raise ActiveRecord::Rollback
        else
          raise e
        end
      end

      if successfully_acquired_lock
        # Record the held lock so nested calls can validate against it;
        # always cleared, even if the block raises.
        @parent_lock_tables = target_tables

        begin
          block.call
        ensure
          @parent_lock_tables = nil
        end
      end
    end
  end
end