debian-mirror-gitlab/lib/gitlab/database/with_lock_retries.rb

# frozen_string_literal: true

module Gitlab
  module Database
    # This class provides a way to automatically execute code that relies on acquiring a database lock in a way
    # designed to minimize impact on a busy production database.
    #
    # A default timing configuration is provided that makes repeated attempts to acquire the necessary lock, with
    # varying lock_timeout settings, and also serves to limit the maximum number of attempts.
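    #
    # A minimal usage sketch (the logger choice and the locked statement below are
    # illustrative assumptions, not taken from this file):
    #
    #   Gitlab::Database::WithLockRetries.new(
    #     connection: ActiveRecord::Base.connection,
    #     klass: self,
    #     logger: Gitlab::AppLogger
    #   ).run do
    #     ActiveRecord::Base.connection.execute('LOCK TABLE projects IN ACCESS EXCLUSIVE MODE')
    #   end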
    class WithLockRetries
      AttemptsExhaustedError = Class.new(StandardError)

      NULL_LOGGER = Gitlab::JsonLogger.new('/dev/null')

      # Each element of the array represents a retry iteration.
      # - DEFAULT_TIMING_CONFIGURATION.size provides the iteration count.
      # - First element: DB lock_timeout
      # - Second element: sleep time after an unsuccessful lock attempt (LockWaitTimeout error raised)
      # - Worst case, this configuration would retry for about 40 minutes.
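      #
      # The worst-case figure can be sanity-checked in a console; a sketch assuming
      # ActiveSupport duration arithmetic:
      #
      #   DEFAULT_TIMING_CONFIGURATION.sum { |lock_timeout, sleep_time| lock_timeout + sleep_time }
      #   # => a little over 40 minutes, dominated by the longest sleep intervals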
      DEFAULT_TIMING_CONFIGURATION = [
        [0.1.seconds, 0.05.seconds], # short timings, lock_timeout: 100ms, sleep after LockWaitTimeout: 50ms
        [0.1.seconds, 0.05.seconds],
        [0.2.seconds, 0.05.seconds],
        [0.3.seconds, 0.10.seconds],
        [0.4.seconds, 0.15.seconds],
        [0.5.seconds, 2.seconds],
        [0.5.seconds, 2.seconds],
        [0.5.seconds, 2.seconds],
        [0.5.seconds, 2.seconds],
        [1.second, 5.seconds], # probably high traffic, increase timings
        [1.second, 1.minute],
        [0.1.seconds, 0.05.seconds],
        [0.1.seconds, 0.05.seconds],
        [0.2.seconds, 0.05.seconds],
        [0.3.seconds, 0.10.seconds],
        [0.4.seconds, 0.15.seconds],
        [0.5.seconds, 2.seconds],
        [0.5.seconds, 2.seconds],
        [0.5.seconds, 2.seconds],
        [3.seconds, 3.minutes], # probably high traffic or long locks, increase timings
        [0.1.seconds, 0.05.seconds],
        [0.1.seconds, 0.05.seconds],
        [0.5.seconds, 2.seconds],
        [0.5.seconds, 2.seconds],
        [5.seconds, 2.minutes],
        [0.5.seconds, 0.5.seconds],
        [0.5.seconds, 0.5.seconds],
        [7.seconds, 5.minutes],
        [0.5.seconds, 0.5.seconds],
        [0.5.seconds, 0.5.seconds],
        [7.seconds, 5.minutes],
        [0.5.seconds, 0.5.seconds],
        [0.5.seconds, 0.5.seconds],
        [7.seconds, 5.minutes],
        [0.1.seconds, 0.05.seconds],
        [0.1.seconds, 0.05.seconds],
        [0.5.seconds, 2.seconds],
        [10.seconds, 10.minutes],
        [0.1.seconds, 0.05.seconds],
        [0.5.seconds, 2.seconds],
        [10.seconds, 10.minutes]
      ].freeze

      def initialize(logger: NULL_LOGGER, allow_savepoints: true, timing_configuration: DEFAULT_TIMING_CONFIGURATION, klass: nil, env: ENV, connection:)
        @logger = logger
        @klass = klass
        @allow_savepoints = allow_savepoints
        @timing_configuration = timing_configuration
        @env = env
        @current_iteration = 1
        @log_params = { method: 'with_lock_retries', class: klass.to_s }
        @connection = connection
      end

      # Executes a block of code, retrying it whenever a database lock can't be acquired in time.
      #
      # When a database lock can't be acquired, ActiveRecord raises an ActiveRecord::LockWaitTimeout
      # exception, which we intercept and re-execute the block until it finishes or we reach the
      # maximum number of attempts. By default, when the attempts are exhausted a final attempt is made
      # with lock_timeout disabled; this behavior can be altered with the raise_on_exhaustion parameter.
      #
      # @see DEFAULT_TIMING_CONFIGURATION for the timings used when attempting a retry
      # @param [Boolean] raise_on_exhaustion whether to raise `AttemptsExhaustedError` when exhausting max attempts
      # @param [Proc] block the block of code that will be executed
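      #
      # A sketch of the strict mode (the table name and rescue handling are
      # illustrative assumptions):
      #
      #   retrier = Gitlab::Database::WithLockRetries.new(connection: connection, klass: self)
      #
      #   begin
      #     retrier.run(raise_on_exhaustion: true) do
      #       connection.execute('LOCK TABLE my_table IN ACCESS EXCLUSIVE MODE')
      #     end
      #   rescue Gitlab::Database::WithLockRetries::AttemptsExhaustedError
      #     # give up here instead of retrying without a lock_timeout
      #   end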
      def run(raise_on_exhaustion: false, &block)
        raise 'no block given' unless block

        @block = block

        if lock_retries_disabled?
          log(message: 'DISABLE_LOCK_RETRIES environment variable is true, executing the block without retry')

          return run_block
        end

        begin
          run_block_with_lock_timeout
        rescue ActiveRecord::LockWaitTimeout
          if retry_with_lock_timeout?
            disable_idle_in_transaction_timeout if connection.transaction_open?
            wait_until_next_retry
            reset_db_settings

            retry
          else
            reset_db_settings

            raise AttemptsExhaustedError, 'configured attempts to obtain locks are exhausted' if raise_on_exhaustion

            run_block_without_lock_timeout
          end
        ensure
          reset_db_settings
        end
      end

      private

      attr_reader :logger, :env, :block, :current_iteration, :log_params, :timing_configuration, :connection

      def run_block
        block.call
      end

      def run_block_with_lock_timeout
        raise "WithLockRetries should not run inside an already open transaction" if connection.transaction_open? && @allow_savepoints.blank?

        connection.transaction(requires_new: true) do # rubocop:disable Performance/ActiveRecordSubtransactions
          # SET LOCAL scopes the setting to the current transaction (PostgreSQL),
          # so lock_timeout reverts automatically when the transaction ends.
          execute("SET LOCAL lock_timeout TO '#{current_lock_timeout_in_ms}ms'")

          log(message: 'Lock timeout is set', current_iteration: current_iteration, lock_timeout_in_ms: current_lock_timeout_in_ms)

          run_block

          log(message: 'Migration finished', current_iteration: current_iteration, lock_timeout_in_ms: current_lock_timeout_in_ms)
        end
      end

      def retry_with_lock_timeout?
        current_iteration != retry_count
      end

      def wait_until_next_retry
        log(message: 'ActiveRecord::LockWaitTimeout error, retrying after sleep', current_iteration: current_iteration, sleep_time_in_seconds: current_sleep_time_in_seconds)

        sleep(current_sleep_time_in_seconds)

        @current_iteration += 1
      end

      def run_block_without_lock_timeout
        log(message: "Couldn't acquire lock to perform the migration", current_iteration: current_iteration)
        log(message: "Executing the migration without lock timeout", current_iteration: current_iteration)

        disable_lock_timeout if connection.transaction_open?

        run_block

        log(message: 'Migration finished', current_iteration: current_iteration)
      end

      def lock_retries_disabled?
        Gitlab::Utils.to_boolean(env['DISABLE_LOCK_RETRIES'])
      end

      def log(params)
        logger.info(log_params.merge(params))
      end

      def execute(statement)
        connection.execute(statement)
      end

      def retry_count
        timing_configuration.size
      end

      def current_lock_timeout_in_ms
        Integer(timing_configuration[current_iteration - 1][0].in_milliseconds)
      end

      def current_sleep_time_in_seconds
        timing_configuration[current_iteration - 1][1].to_f
      end

      def disable_idle_in_transaction_timeout
        execute("SET LOCAL idle_in_transaction_session_timeout TO '0'")
      end

      def disable_lock_timeout
        execute("SET LOCAL lock_timeout TO '0'")
      end

      def reset_db_settings
        execute('RESET idle_in_transaction_session_timeout; RESET lock_timeout')
      end
    end
  end
end