Class: Delayed::Backend::ActiveRecord::Job

Inherits:
ActiveRecord::Base
Includes:
Base
Defined in:
lib/delayed/backend/active_record.rb

Overview

A job object that is persisted to the database. Contains the work object as a YAML field.

Direct Known Subclasses

Failed

Defined Under Namespace

Classes: Failed

Constant Summary

Constants included from Base

Base::ON_HOLD_COUNT, Base::ON_HOLD_LOCKED_BY

Class Method Summary

Instance Method Summary

Methods included from Base

#batch?, #expired?, #failed?, #full_name, #hold!, included, #initialize_defaults, #invoke_job, #locked?, #name, #on_hold?, #payload_object, #payload_object=, #permanent_failure, #reschedule, #reschedule_at, #unhold!, #unlock

Class Method Details

.all_available(queue = Delayed::Settings.queue, min_priority = nil, max_priority = nil, forced_latency: nil) ⇒ Object



# File 'lib/delayed/backend/active_record.rb', line 302

def self.all_available(queue = Delayed::Settings.queue,
                       min_priority = nil,
                       max_priority = nil,
                       forced_latency: nil)
  min_priority ||= Delayed::MIN_PRIORITY
  max_priority ||= Delayed::MAX_PRIORITY

  check_queue(queue)
  check_priorities(min_priority, max_priority)

  self.ready_to_run(forced_latency: forced_latency).
      where(:priority => min_priority..max_priority, :queue => queue).
      by_priority
end
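
For illustration, a minimal usage sketch (assuming the inst-jobs environment is loaded; the priority range below is an arbitrary example):

# Relation of all runnable jobs in the default queue, in priority order.
runnable = Delayed::Backend::ActiveRecord::Job.all_available
puts "#{runnable.count} jobs ready to run"

# Only jobs within an explicit priority window (0..10 is illustrative).
urgent = Delayed::Backend::ActiveRecord::Job.all_available(Delayed::Settings.queue, 0, 10)
urgent.each { |job| puts job.full_name }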

.bulk_update(action, opts) ⇒ Object

Perform a bulk update of a set of jobs. action is :hold, :unhold, or :destroy. To specify the jobs to act on, either pass opts[:ids] = [list of job ids], or opts[:flavor] = <some flavor> to act on all jobs of that flavor.



# File 'lib/delayed/backend/active_record.rb', line 148

def self.bulk_update(action, opts)
  raise("Can't #{action.to_s} failed jobs") if opts[:flavor].to_s == 'failed' && action.to_s != 'destroy'
  scope = if opts[:ids]
    if opts[:flavor] == 'failed'
      Delayed::Job::Failed.where(:id => opts[:ids])
    else
      self.where(:id => opts[:ids])
    end
  elsif opts[:flavor]

    self.scope_for_flavor(opts[:flavor], opts[:query])
  end

  return 0 unless scope

  case action.to_s
  when 'hold'
    scope = scope.where(locked_by: nil)
    scope.update_all(:locked_by => ON_HOLD_LOCKED_BY, :locked_at => db_time_now, :attempts => ON_HOLD_COUNT)
  when 'unhold'
    now = db_time_now
    scope = scope.where(locked_by: ON_HOLD_LOCKED_BY)
    scope.update_all(["locked_by = NULL, locked_at = NULL, attempts = 0, run_at = (CASE WHEN run_at > ? THEN run_at ELSE ? END), failed_at = NULL", now, now])
  when 'destroy'
    scope = scope.where("locked_by IS NULL OR locked_by=?", ON_HOLD_LOCKED_BY) unless opts[:flavor] == 'failed'
    scope.delete_all
  end
end
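
A hedged usage sketch of the three actions; the job IDs are placeholders, and Delayed::Job is assumed to be configured to use this ActiveRecord backend:

# Put two specific jobs on hold.
Delayed::Job.bulk_update(:hold, ids: [101, 102])

# Release every held job in the 'current' flavor of the default queue.
Delayed::Job.bulk_update(:unhold, flavor: 'current', query: Delayed::Settings.queue)

# Failed jobs can only be destroyed; :hold and :unhold raise for the 'failed' flavor.
Delayed::Job.bulk_update(:destroy, flavor: 'failed')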

.by_priorityObject



# File 'lib/delayed/backend/active_record.rb', line 79

def self.by_priority
  order(:priority, :run_at, :id)
end

.clear_locks!(worker_name) ⇒ Object

When a worker is exiting, make sure we don’t have any locked jobs.



# File 'lib/delayed/backend/active_record.rb', line 84

def self.clear_locks!(worker_name)
  where(:locked_by => worker_name).update_all(:locked_by => nil, :locked_at => nil)
end

.create_singleton(options) ⇒ Object

Create the job on the specified strand, but only if there aren't any other non-running jobs on that strand. (In other words, the job will still be created if there is another job on the strand, as long as that job is already running.)



# File 'lib/delayed/backend/active_record.rb', line 330

def self.create_singleton(options)
  strand = options[:strand]
  on_conflict = options.delete(:on_conflict) || :use_earliest
  transaction_for_singleton(strand) do
    job = self.where(:strand => strand, :locked_at => nil).order(:id).first
    new_job = new(options)
    if job
      new_job.initialize_defaults
      job.run_at =
        case on_conflict
        when :use_earliest
          [job.run_at, new_job.run_at].min
        when :overwrite
          new_job.run_at
        end
      job.save! if job.changed?
    else
      new_job.save!
    end
    job || new_job
  end
end
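
An illustrative sketch of the conflict handling; real callers typically reach create_singleton through the enqueue path, which also supplies the payload, so the attribute values below are placeholders:

options = { strand: 'cache:rebuild', run_at: 10.minutes.from_now, on_conflict: :use_earliest }
job = Delayed::Backend::ActiveRecord::Job.create_singleton(options)
# If a non-running job already exists on 'cache:rebuild', that row is returned with
# run_at set to the earlier of the two times; otherwise the newly built job is saved.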

.currentObject



# File 'lib/delayed/backend/active_record.rb', line 53

def self.current
  where("run_at<=?", db_time_now)
end

.failedObject



# File 'lib/delayed/backend/active_record.rb', line 61

def self.failed
  where("failed_at IS NOT NULL")
end

.find_available(limit, queue = Delayed::Settings.queue, min_priority = nil, max_priority = nil) ⇒ Object



# File 'lib/delayed/backend/active_record.rb', line 295

def self.find_available(limit,
                        queue = Delayed::Settings.queue,
                        min_priority = nil,
                        max_priority = nil)
  all_available(queue, min_priority, max_priority).limit(limit).to_a
end

.futureObject



# File 'lib/delayed/backend/active_record.rb', line 57

def self.future
  where("run_at>?", db_time_now)
end

.get_and_lock_next_available(worker_names, queue = Delayed::Settings.queue, min_priority = nil, max_priority = nil, prefetch: 0, prefetch_owner: nil, forced_latency: nil) ⇒ Object



# File 'lib/delayed/backend/active_record.rb', line 203

def self.get_and_lock_next_available(worker_names,
                                     queue = Delayed::Settings.queue,
                                     min_priority = nil,
                                     max_priority = nil,
                                     prefetch: 0,
                                     prefetch_owner: nil,
                                     forced_latency: nil)

  check_queue(queue)
  check_priorities(min_priority, max_priority)

  loop do
    jobs = maybe_silence_periodic_log do
      if connection.adapter_name == 'PostgreSQL' && !Settings.select_random_from_batch
        # In Postgres, we can lock a job and return which row was locked in a single
        # query by using RETURNING. Combine that with the ROW_NUMBER() window function
        # to assign a distinct locked_at value to each job locked, when doing multiple
        # jobs in a single query.
        effective_worker_names = Array(worker_names)

        target_jobs = all_available(queue,
                                    min_priority,
                                    max_priority,
                                    forced_latency: forced_latency).
            limit(effective_worker_names.length + prefetch).
            lock
        jobs_with_row_number = all.from(target_jobs).
            select("id, ROW_NUMBER() OVER () AS row_number")
        updates = "locked_by = CASE row_number "
        effective_worker_names.each_with_index do |worker, i|
          updates << "WHEN #{i + 1} THEN #{connection.quote(worker)} "
        end
        if prefetch_owner
          updates << "ELSE #{connection.quote(prefetch_owner)} "
        end
        updates << "END, locked_at = #{connection.quote(db_time_now)}"

        # Originally this was done with a subquery, but this allows the query planner to
        # side-step the LIMIT. We use a CTE here to force the subquery to be materialized
        # before running the UPDATE.
        #
        # For more details, see:
        #  * https://dba.stackexchange.com/a/69497/55285
        #  * https://github.com/feikesteenbergen/demos/blob/b7ecee8b2a79bf04cbcd74972e6bfb81903aee5d/bugs/update_limit_bug.txt
        query = "WITH limited_jobs AS (#{jobs_with_row_number.to_sql}) " \
                "UPDATE #{quoted_table_name} SET #{updates} FROM limited_jobs WHERE limited_jobs.id=#{quoted_table_name}.id " \
                "RETURNING #{quoted_table_name}.*"

        jobs = find_by_sql(query)
        # because this is an atomic query, we don't have to return more jobs than we needed
        # to try and lock them, nor is there a possibility we need to try again because
        # all of the jobs we tried to lock had already been locked by someone else
        if worker_names.is_a?(Array)
          result = jobs.index_by(&:locked_by)
          # all of the prefetched jobs can come back as an array
          result[prefetch_owner] = jobs.select { |j| j.locked_by == prefetch_owner } if prefetch_owner
          return result
        else
          return jobs.first
        end
      else
        batch_size = Settings.fetch_batch_size
        batch_size *= worker_names.length if worker_names.is_a?(Array)
        find_available(batch_size, queue, min_priority, max_priority)
      end
    end
    if jobs.empty?
      return worker_names.is_a?(Array) ? {} : nil
    end
    if Settings.select_random_from_batch
      jobs = jobs.sort_by { rand }
    end
    if worker_names.is_a?(Array)
      result = {}
      jobs.each do |job|
        break if worker_names.empty?
        worker_name = worker_names.first
        if job.send(:lock_exclusively!, worker_name)
          result[worker_name] = job
          worker_names.shift
        end
      end
      return result
    else
      job = jobs.detect do |job|
        job.send(:lock_exclusively!, worker_names)
      end
      return job if job
    end
  end
end
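
A hedged sketch of the two calling conventions implied by the signature (worker names and the prefetch owner string are placeholders, and Delayed::Job is assumed to be this backend):

# Single worker name: returns one locked job, or nil if nothing is available.
if (job = Delayed::Job.get_and_lock_next_available('worker-host:1234'))
  job.invoke_job
end

# Array of worker names plus prefetch: returns a hash keyed by worker name; on the
# PostgreSQL path the prefetched jobs are grouped under prefetch_owner as an array.
locked = Delayed::Job.get_and_lock_next_available(
  ['worker-host:1', 'worker-host:2'],
  Delayed::Settings.queue,
  nil, nil,
  prefetch: 4,
  prefetch_owner: 'prefetch:worker-host'
)
locked.each { |owner, jobs| puts "#{owner} => #{Array(jobs).length} job(s)" }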

.jobs_count(flavor, query = nil) ⇒ Object

Get the total job count for the given flavor. See list_jobs for documentation on the arguments.



# File 'lib/delayed/backend/active_record.rb', line 138

def self.jobs_count(flavor,
                    query = nil)
  scope = self.scope_for_flavor(flavor, query)
  scope.count
end

.list_jobs(flavor, limit, offset = 0, query = nil) ⇒ Object

Get a list of jobs of the given flavor in the given queue. flavor is :current, :future, :failed, :strand, or :tag. Depending on the flavor, query has a different meaning: for :current and :future it is the queue name (defaults to Delayed::Settings.queue), for :strand it is the strand name, for :tag it is the tag name, and for :failed it is ignored.



# File 'lib/delayed/backend/active_record.rb', line 127

def self.list_jobs(flavor,
                   limit,
                   offset = 0,
                   query = nil)
  scope = self.scope_for_flavor(flavor, query)
  order = flavor.to_s == 'future' ? 'run_at' : 'id desc'
  scope.order(order).limit(limit).offset(offset).to_a
end
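
For illustration (the strand name is a placeholder), list_jobs pairs naturally with jobs_count when paginating:

# First 50 future jobs in the default queue, ordered by run_at.
page = Delayed::Job.list_jobs(:future, 50, 0, Delayed::Settings.queue)

# Jobs on a particular strand, 20 at a time, newest first, plus the matching total.
strand_page = Delayed::Job.list_jobs(:strand, 20, 0, 'my_strand')
total = Delayed::Job.jobs_count(:strand, 'my_strand')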

.maybe_silence_periodic_log(&block) ⇒ Object



# File 'lib/delayed/backend/active_record.rb', line 195

def self.maybe_silence_periodic_log(&block)
  if Settings.silence_periodic_log
    ::ActiveRecord::Base.logger.silence(&block)
  else
    block.call
  end
end

.n_strand_options(strand_name, num_strands) ⇒ Object

This overrides the previous behavior: rather than changing the strand and balancing at queue time, it keeps the strand intact and uses triggers to limit the number of jobs running concurrently.



# File 'lib/delayed/backend/active_record.rb', line 49

def self.n_strand_options(strand_name, num_strands)
  {:strand => strand_name, :max_concurrent => num_strands}
end
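
For illustration (the strand name and concurrency limit are placeholders), the returned hash is simply a set of job attributes to be merged in at enqueue time:

Delayed::Backend::ActiveRecord::Job.n_strand_options('external_api', 5)
# => { :strand => 'external_api', :max_concurrent => 5 }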

.processes_locked_locally(name: nil) ⇒ Object



# File 'lib/delayed/backend/active_record.rb', line 353

def self.processes_locked_locally(name: nil)
  name ||= Socket.gethostname rescue nil
  where("locked_by LIKE ?", "#{name}:%").pluck(:locked_by).map { |locked_by| locked_by.split(":").last.to_i }
end

.ready_to_run(forced_latency: nil) ⇒ Object

A nice stress test:
10_000.times { |i| Kernel.send_later_enqueue_args(:system, { :strand => 's1', :run_at => (24.hours.ago + (rand(24.hours.to_i))) }, "echo #{i} >> test1.txt") }
500.times { |i| "ohai".send_later_enqueue_args(:reverse, { :run_at => (12.hours.ago + (rand(24.hours.to_i))) }) }
Then fire up your workers. You can check strand correctness with: diff test1.txt <(sort -n test1.txt)



# File 'lib/delayed/backend/active_record.rb', line 74

def self.ready_to_run(forced_latency: nil)
  now = db_time_now
  now -= forced_latency if forced_latency
  where("run_at<=? AND locked_at IS NULL AND next_in_strand=?", now, true)
end

.reconnect!Object



# File 'lib/delayed/backend/active_record.rb', line 20

def self.reconnect!
  clear_all_connections!
end

.runningObject



# File 'lib/delayed/backend/active_record.rb', line 65

def self.running
  where("locked_at IS NOT NULL AND locked_by<>'on hold'")
end

.running_jobsObject



# File 'lib/delayed/backend/active_record.rb', line 92

def self.running_jobs
  self.running.order(:locked_at)
end

.scope_for_flavor(flavor, query) ⇒ Object



# File 'lib/delayed/backend/active_record.rb', line 96

def self.scope_for_flavor(flavor, query)
  scope = case flavor.to_s
  when 'current'
    self.current
  when 'future'
    self.future
  when 'failed'
    Delayed::Job::Failed
  when 'strand'
    self.where(:strand => query)
  when 'tag'
    self.where(:tag => query)
  else
    raise ArgumentError, "invalid flavor: #{flavor.inspect}"
  end

  if %w(current future).include?(flavor.to_s)
    queue = query.presence || Delayed::Settings.queue
    scope = scope.where(:queue => queue)
  end

  scope
end
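
These scopes are what list_jobs and jobs_count build on; two illustrative calls (the queue name and tag are placeholders):

Delayed::Job.scope_for_flavor(:current, 'billing').count      # runnable jobs in the 'billing' queue
Delayed::Job.scope_for_flavor(:tag, 'MyMailer#send_welcome')  # relation of jobs with a specific tag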

.strand_size(strand) ⇒ Object



# File 'lib/delayed/backend/active_record.rb', line 88

def self.strand_size(strand)
  self.where(:strand => strand).count
end

.tag_counts(flavor, limit, offset = 0) ⇒ Object

Returns a list of hashes { :tag => tag_name, :count => current_count } in descending count order. flavor is :current or :all.

Raises:

  • (ArgumentError)


# File 'lib/delayed/backend/active_record.rb', line 180

def self.tag_counts(flavor,
                    limit,
                    offset = 0)
  raise(ArgumentError, "invalid flavor: #{flavor}") unless %w(current all).include?(flavor.to_s)
  scope = case flavor.to_s
    when 'current'
      self.current
    when 'all'
      self
    end

  scope = scope.group(:tag).offset(offset).limit(limit)
  scope.order(Arel.sql("COUNT(tag) DESC")).count.map { |t,c| { :tag => t, :count => c } }
end
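
A hedged example; the tag string in the result is illustrative:

# Ten most common tags among currently runnable jobs.
Delayed::Job.tag_counts(:current, 10)
# => [{ :tag => "MyMailer#send_welcome", :count => 42 }, ...]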

.transaction_for_singleton(strand) ⇒ Object

Used internally by create_singleton to take the appropriate lock, depending on the DB driver.



# File 'lib/delayed/backend/active_record.rb', line 319

def self.transaction_for_singleton(strand)
  self.transaction do
    connection.execute(sanitize_sql(["SELECT pg_advisory_xact_lock(#{connection.quote_table_name('half_md5_as_bigint')}(?))", strand]))
    yield
  end
end

.unlock(jobs) ⇒ Object



# File 'lib/delayed/backend/active_record.rb', line 363

def self.unlock(jobs)
  unlocked = where(id: jobs).update_all(locked_at: nil, locked_by: nil)
  jobs.each(&:unlock)
  unlocked
end
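
For illustration (the lock-owner string is a placeholder), unlock can release a batch of rows this process had claimed:

stale = Delayed::Job.where(locked_by: 'prefetch:worker-host')
Delayed::Job.unlock(stale)  # => number of rows whose locks were cleared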

.unlock_orphaned_prefetched_jobsObject



# File 'lib/delayed/backend/active_record.rb', line 358

def self.unlock_orphaned_prefetched_jobs
  horizon = db_time_now - Settings.parent_process[:prefetched_jobs_timeout] * 4
  where("locked_by LIKE 'prefetch:%' AND locked_at<?", horizon).update_all(locked_at: nil, locked_by: nil)
end

Instance Method Details

#create_and_lock!(worker) ⇒ Object



# File 'lib/delayed/backend/active_record.rb', line 413

def create_and_lock!(worker)
  raise "job already exists" unless new_record?
  self.locked_at = Delayed::Job.db_time_now
  self.locked_by = worker
  save!
end

#fail!Object



# File 'lib/delayed/backend/active_record.rb', line 420

def fail!
  attrs = self.attributes
  attrs['original_job_id'] = attrs.delete('id')
  attrs['failed_at'] ||= self.class.db_time_now
  attrs.delete('next_in_strand')
  attrs.delete('max_concurrent')
  self.class.transaction do
    failed_job = Failed.create(attrs)
    self.destroy
    failed_job
  end
rescue
  # we got an error while failing the job -- we need to at least get
  # the job out of the queue
  self.destroy
  # re-raise so the worker logs the error, at least
  raise
end
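
A simplified sketch of what fail! does to a row; this is not the worker's full retry logic (which reschedules before giving up), and the worker name is a placeholder:

if (job = Delayed::Job.get_and_lock_next_available('worker-host:1234'))
  begin
    job.invoke_job
  rescue StandardError
    job.fail!  # copies the attributes into Delayed::Job::Failed and destroys the original row
  end
end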

#lock_strand_on_createObject



# File 'lib/delayed/backend/active_record.rb', line 40

def lock_strand_on_create
  if strand.present?
    self.class.connection.execute("SELECT pg_advisory_xact_lock(#{self.class.connection.quote_table_name('half_md5_as_bigint')}(#{self.class.connection.quote(strand)}))")
  end
end

#transfer_lock!(from:, to:) ⇒ Object



# File 'lib/delayed/backend/active_record.rb', line 387

def transfer_lock!(from:, to:)
  now = self.class.db_time_now
  # We don't own this job so we will update the locked_by name and the locked_at
  affected_rows = self.class.where(id: self, locked_by: from).update_all(locked_at: now, locked_by: to)
  if affected_rows == 1
    mark_as_locked!(now, to)
    return true
  else
    return false
  end
end
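
A hedged sketch of handing a prefetched job to the worker that will actually run it; both lock-owner strings are placeholders:

# `job` is assumed to be a Job row previously locked by the prefetch owner.
if job.transfer_lock!(from: 'prefetch:worker-host', to: 'worker-host:1234')
  job.invoke_job
else
  # the row was no longer locked by the prefetch owner; another process got to it first
end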