debian-mirror-gitlab/app/workers/background_migration_worker.rb

# frozen_string_literal: true

class BackgroundMigrationWorker # rubocop:disable Scalability/IdempotentWorker
  include ApplicationWorker

  data_consistency :always

  sidekiq_options retry: 3

  feature_category :database
  urgency :throttled
  loggable_arguments 0, 1

  # The minimum amount of time between processing two jobs of the same migration
  # class.
  #
  # This interval is set to 2 or 5 minutes so autovacuuming and other
  # maintenance related tasks have plenty of time to clean up after a migration
  # has been performed.
  def self.minimum_interval
    2.minutes.to_i
  end

  # Performs the background migration.
  #
  # See Gitlab::BackgroundMigration.perform for more information.
  #
  # class_name - The class name of the background migration to run.
  # arguments - The arguments to pass to the migration class.
  # lease_attempts - The number of times we will try to obtain an exclusive
  #   lease on the class before giving up. See the MR for more discussion:
  #   https://gitlab.com/gitlab-org/gitlab/-/merge_requests/45298#note_434304956
  def perform(class_name, arguments = [], lease_attempts = 5)
    with_context(caller_id: class_name.to_s) do
      attempts_left = lease_attempts - 1
      should_perform, ttl = perform_and_ttl(class_name, attempts_left)

      break if should_perform.nil?

      if should_perform
        Gitlab::BackgroundMigration.perform(class_name, arguments)
      else
        # If the lease could not be obtained, this means either another process
        # is running a migration of this class or we ran one recently. In this
        # case we'll reschedule the job in such a way that it is picked up again
        # around the time the lease expires.
        self.class
          .perform_in(ttl || self.class.minimum_interval, class_name, arguments, attempts_left)
      end
    end
  end
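
  # Example (illustrative; the migration class name and arguments are
  # hypothetical). Jobs are enqueued through the standard Sidekiq API
  # provided by ApplicationWorker:
  #
  #   BackgroundMigrationWorker.perform_async('MyBackfillMigration', [1, 1000])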
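
  # Decides whether the migration should run now. Returns a two-element array:
  # [true, nil] in the test environment, [nil, nil] when the job gives up after
  # its lease attempts are exhausted while the lease is still unavailable or the
  # database is unhealthy, and otherwise [perform, ttl], where ttl is the
  # remaining lifetime of the exclusive lease.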
  def perform_and_ttl(class_name, attempts_left)
    # In test environments `perform_in` will run right away. This can then
    # lead to stack level errors in the above `#perform`. To work around this
    # we'll just perform the migration right away in the test environment.
    return [true, nil] if always_perform?

    lease = lease_for(class_name)
    lease_obtained = !!lease.try_obtain
    healthy_db = healthy_database?
    perform = lease_obtained && healthy_db

    database_unhealthy_counter.increment if lease_obtained && !healthy_db

    # When the DB is unhealthy or the lease can't be obtained after several
    # tries, give up on the job and log a warning. Otherwise we could end up in
    # an infinite rescheduling loop. Jobs can be tracked in the database with
    # the use of Gitlab::Database::BackgroundMigrationJob.
    if !perform && attempts_left < 0
      msg = if !lease_obtained
              'Job could not get an exclusive lease after several tries. Giving up.'
            else
              'Database was unhealthy after several tries. Giving up.'
            end

      Sidekiq.logger.warn(class: class_name, message: msg, job_id: jid)

      return [nil, nil]
    end

    [perform, lease.ttl]
  end

  def lease_for(class_name)
    Gitlab::ExclusiveLease
      .new(lease_key_for(class_name), timeout: self.class.minimum_interval)
  end
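
  # Builds the lease key for a migration class, e.g. the (hypothetical) class
  # 'MyBackfillMigration' maps to "BackgroundMigrationWorker:MyBackfillMigration".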
  def lease_key_for(class_name)
    "#{self.class.name}:#{class_name}"
  end

  def always_perform?
    Rails.env.test?
  end

  # Returns true if the database is healthy enough to allow the migration to be
  # performed.
  def healthy_database?
    !Postgresql::ReplicationSlot.lag_too_great?
  end

  def database_unhealthy_counter
    Gitlab::Metrics.counter(
      :background_migration_database_health_reschedules,
      'The number of times a background migration is rescheduled because the database is unhealthy.'
    )
  end
end
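
# Illustrative scheduling sketch (hypothetical migration class and batches):
# a migration can stagger jobs so consecutive batches respect the minimum
# interval defined above.
#
#   delay = BackgroundMigrationWorker.minimum_interval
#   [[1, 1000], [1001, 2000]].each_with_index do |batch, index|
#     BackgroundMigrationWorker.perform_in(delay * (index + 1), 'MyBackfillMigration', batch)
#   end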