Class: Delayed::Worker
- Inherits: Object
  - Object
  - Delayed::Worker
- Includes: Runnable
- Defined in: lib/delayed/worker.rb
Instance Attribute Summary
- #name ⇒ Object
  Every worker has a unique name which by default is the pid of the process.
- #name_prefix ⇒ Object
  name_prefix is ignored if name is set directly.
Class Method Summary
- .delay_job?(job) ⇒ Boolean
Instance Method Summary
- #failed(job) ⇒ Object
- #initialize ⇒ Worker constructor
  A new instance of Worker.
- #job_say(job, text, level = Delayed.default_log_level) ⇒ Object
- #max_attempts(job) ⇒ Object
- #max_run_time(job) ⇒ Object
- #on_exit! ⇒ Object
- #reschedule(job, time = nil) ⇒ Object
  Reschedule the job in the future (when a job fails).
- #run(job) ⇒ Object
- #run! ⇒ Object
- #run_thread_callbacks(job, &block) ⇒ Object
- #say(text, level = Delayed.default_log_level) ⇒ Object
- #work_off(num = 100) ⇒ Object
  Do num jobs and return stats on success/failure.
Methods included from Runnable
Constructor Details
#initialize ⇒ Worker
Returns a new instance of Worker.
# File 'lib/delayed/worker.rb', line 45

def initialize
  @failed_reserve_count = 0

  # Reset lifecycle on the offhand chance that something lazily
  # triggered its creation before all plugins had been registered.
  Delayed.setup_lifecycle
end
Instance Attribute Details
#name ⇒ Object
Every worker has a unique name, which by default is the pid of the process. There are some advantages to overriding this with something that survives worker restarts: a worker can then safely resume working on tasks it locked itself, on the assumption that its previous run crashed.
# File 'lib/delayed/worker.rb', line 57

def name
  return @name unless @name.nil?

  begin
    "#{@name_prefix}host:#{Socket.gethostname} pid:#{Process.pid}"
  rescue StandardError
    "#{@name_prefix}pid:#{Process.pid}"
  end
end
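For example (a sketch; the worker name used here is illustrative, and it assumes the attribute is writable as the summary above indicates), a stable name lets a restarted worker reclaim jobs it had locked before:

worker = Delayed::Worker.new
worker.name = 'billing-worker-1'   # survives restarts, unlike the default pid-based name
worker.name_prefix = 'ignored-'    # has no effect once #name has been set directly
worker.name                        # => "billing-worker-1"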
#name_prefix ⇒ Object
name_prefix is ignored if name is set directly.
# File 'lib/delayed/worker.rb', line 30

def name_prefix
  @name_prefix
end
Class Method Details
.delay_job?(job) ⇒ Boolean
# File 'lib/delayed/worker.rb', line 37

def self.delay_job?(job)
  if delay_jobs.is_a?(Proc)
    delay_jobs.arity == 1 ? delay_jobs.call(job) : delay_jobs.call
  else
    delay_jobs
  end
end
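As a configuration sketch (assuming a Delayed::Worker.delay_jobs= writer paired with the delay_jobs reader used above; the queue name is illustrative), delay_jobs can be a plain boolean or a Proc evaluated per job:

Delayed::Worker.delay_jobs = false                               # run every job inline (handy in tests)
Delayed::Worker.delay_jobs = ->(job) { job.queue != 'inline' }   # enqueue everything except the 'inline' queue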
Instance Method Details
#failed(job) ⇒ Object
# File 'lib/delayed/worker.rb', line 166

def failed(job)
  self.class.lifecycle.run_callbacks(:failure, self, job) do
    job.hook(:failure)
  rescue StandardError => e
    say "Error when running failure callback: #{e}", 'error'
    say e.backtrace.join("\n"), 'error'
  ensure
    job.destroy_failed_jobs? ? job.destroy : job.fail!
  end
end
#job_say(job, text, level = Delayed.default_log_level) ⇒ Object
# File 'lib/delayed/worker.rb', line 177

def job_say(job, text, level = Delayed.default_log_level)
  text = "Job #{job.name} (id=#{job.id})#{say_queue(job.queue)} #{text}"
  say text, level
end
#max_attempts(job) ⇒ Object
# File 'lib/delayed/worker.rb', line 187

def max_attempts(job)
  job.max_attempts || self.class.max_attempts
end
#max_run_time(job) ⇒ Object
# File 'lib/delayed/worker.rb', line 191

def max_run_time(job)
  job.max_run_time || self.class.max_run_time
end
#on_exit! ⇒ Object
# File 'lib/delayed/worker.rb', line 84

def on_exit!
  Delayed::Job.clear_locks!(name)
end
#reschedule(job, time = nil) ⇒ Object
Reschedule the job in the future (when a job fails). Uses an exponential scale depending on the number of failed attempts.
# File 'lib/delayed/worker.rb', line 154

def reschedule(job, time = nil)
  if (job.attempts += 1) < max_attempts(job)
    time ||= job.reschedule_at
    job.run_at = time
    job.unlock
    job.save!
  else
    job_say job, "FAILED permanently because of #{job.attempts} consecutive failures", 'error'
    failed(job)
  end
end
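A usage sketch (the job lookup and the ten-minute delay are illustrative): rescheduling bumps job.attempts, and when no explicit time is passed the worker falls back to job.reschedule_at, which grows with the attempt count.

worker = Delayed::Worker.new
job = Delayed::Job.last                        # illustrative lookup
worker.reschedule(job, Time.now + 600) if job  # retry in ten minutes, or fail permanently once max_attempts is reached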
#run(job) ⇒ Object
# File 'lib/delayed/worker.rb', line 119

def run(job)
  run_thread_callbacks(job) do
    metadata = {
      status: 'RUNNING',
      name: job.name,
      run_at: job.run_at,
      created_at: job.created_at,
      priority: job.priority,
      queue: job.queue,
      attempts: job.attempts,
      enqueued_for: (Time.current - job.created_at).round,
    }
    job_say job, metadata.to_json

    run_time = Benchmark.realtime do
      Timeout.timeout(max_run_time(job).to_i, WorkerTimeout) do
        job.invoke_job
      end
      job.destroy
    end
    job_say job, format('COMPLETED after %.4f seconds', run_time)
  end
  true # did work
rescue DeserializationError => e
  job_say job, "FAILED permanently with #{e.class.name}: #{e.message}", 'error'

  job.error = e
  failed(job)

  false # work failed
rescue Exception => e # rubocop:disable Lint/RescueException
  self.class.lifecycle.run_callbacks(:error, self, job) { handle_failed_job(job, e) }

  false # work failed
end
#run! ⇒ Object
# File 'lib/delayed/worker.rb', line 71

def run!
  @realtime = Benchmark.realtime do
    @result = work_off
  end

  count = @result[0] + @result[1]

  say format("#{count} jobs processed at %.4f j/s, %d failed", count / @realtime, @result.last) if count.positive?

  interruptable_sleep(self.class.sleep_delay) if count < max_claims
  reload! unless stop?
end
#run_thread_callbacks(job, &block) ⇒ Object
# File 'lib/delayed/worker.rb', line 115

def run_thread_callbacks(job, &block)
  self.class.lifecycle.run_callbacks(:thread, self, job, &block)
end
#say(text, level = Delayed.default_log_level) ⇒ Object
# File 'lib/delayed/worker.rb', line 182

def say(text, level = Delayed.default_log_level)
  text = "[Worker(#{name})] #{text}"
  Delayed.say("#{Time.now.strftime('%FT%T%z')}: #{text}", level)
end
#work_off(num = 100) ⇒ Object
Do num jobs and return stats on success/failure. Exit early if interrupted.
# File 'lib/delayed/worker.rb', line 90

def work_off(num = 100)
  success = Concurrent::AtomicFixnum.new(0)
  total = 0

  while total < num
    jobs = reserve_jobs
    break if jobs.empty?

    total += jobs.length
    pool = Concurrent::FixedThreadPool.new(jobs.length)
    jobs.each do |job|
      pool.post do
        success.increment if run_job(job)
      end
    end
    pool.shutdown
    pool.wait_for_termination

    break if stop? # leave if we're exiting
  end

  [success.value, total - success.value]
end
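A usage sketch showing the return value, a two-element array of counts:

worker = Delayed::Worker.new
successes, failures = worker.work_off(20)          # claim and run at most 20 jobs
puts "#{successes} succeeded, #{failures} failed"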