2020-04-08 14:13:33 +05:30
|
|
|
# frozen_string_literal: true
|
|
|
|
|
2022-01-26 12:08:38 +05:30
|
|
|
require_relative '../config/bundler_setup'
|
|
|
|
|
2020-04-08 14:13:33 +05:30
|
|
|
require 'optparse'
|
|
|
|
require 'logger'
|
|
|
|
require 'time'
|
|
|
|
|
2021-12-11 22:18:48 +05:30
|
|
|
# In environments where code is preloaded and cached such as `spring`,
|
|
|
|
# we may run into "already initialized" warnings, hence the check.
|
|
|
|
require_relative '../lib/gitlab' unless Object.const_defined?('Gitlab')
|
|
|
|
require_relative '../lib/gitlab/utils'
|
|
|
|
require_relative '../lib/gitlab/sidekiq_config/cli_methods'
|
|
|
|
require_relative '../lib/gitlab/sidekiq_config/worker_matcher'
|
|
|
|
require_relative '../lib/gitlab/sidekiq_logging/json_formatter'
|
2022-01-26 12:08:38 +05:30
|
|
|
require_relative '../lib/gitlab/process_management'
|
|
|
|
require_relative '../metrics_server/metrics_server'
|
2021-12-11 22:18:48 +05:30
|
|
|
require_relative 'sidekiq_cluster'
|
|
|
|
|
2020-04-08 14:13:33 +05:30
|
|
|
module Gitlab
|
|
|
|
module SidekiqCluster
|
|
|
|
class CLI
|
2022-01-26 12:08:38 +05:30
|
|
|
# Name assigned to the supervisor thread, used in logs to distinguish it
# from worker-management threads.
THREAD_NAME = 'supervisor'

# Signals that shut down the supervisor together with all of its workers.
TERMINATE_SIGNALS = %i[INT TERM].freeze

# Signals that are passed through to the workers without stopping the
# supervisor itself.
FORWARD_SIGNALS = %i[TTIN USR1 USR2 HUP].freeze

# Raised for invalid CLI usage (bad arguments, conflicting flags, no
# matching queues).
CommandError = Class.new(StandardError)
|
|
|
|
|
2021-09-04 01:27:46 +05:30
|
|
|
# Sets up default supervisor state; every default here can be overridden by
# a command-line flag parsed in #option_parser.
#
# @param log_output [IO] destination for JSON-formatted log lines (default: $stderr)
def initialize(log_output = $stderr)
  # As recommended by https://github.com/mperham/sidekiq/wiki/Advanced-Options#concurrency
  @max_concurrency = 50
  @min_concurrency = 0
  # Rails environment passed to the spawned Sidekiq processes.
  @environment = ENV['RAILS_ENV'] || 'development'
  # Directory where the Prometheus multiprocess files live; shared with the
  # metrics server spawned in #start_metrics_server.
  @metrics_dir = ENV["prometheus_multiproc_dir"] || File.absolute_path("tmp/prometheus_multiproc_dir/sidekiq")
  # Pidfile path (-P/--pidfile); nil means no pidfile is written.
  @pid = nil
  # Seconds between liveness checks in #start_loop.
  @interval = 5
  # Flipped to false by a terminate signal to stop the supervision loop.
  @alive = true
  # Process handles of the spawned Sidekiq workers (populated in #run).
  @processes = []
  @logger = Logger.new(log_output)
  @logger.formatter = ::Gitlab::SidekiqLogging::JSONFormatter.new
  # Root of the Rails application (-r/--require); defaults to the CWD.
  @rails_path = Dir.pwd
  @dryrun = false
  @list_queues = false
end
|
|
|
|
|
|
|
|
# Entry point: parses arguments, resolves the queue groups, optionally
# starts the metrics server, spawns one Sidekiq process per queue group, and
# then supervises them until shutdown.
#
# @param argv [Array<String>] queue lists / selector queries plus options
# @raise [CommandError] on empty argv, conflicting flags, newline-containing
#   queue arguments, or when no queues match
def run(argv = ARGV)
  Thread.current.name = THREAD_NAME

  if argv.empty?
    raise CommandError,
      'You must specify at least one queue to start a worker for'
  end

  option_parser.parse!(argv)

  # --list-queues only prints, --dryrun only prints commands; combining
  # them is ambiguous, so refuse.
  if @dryrun && @list_queues
    raise CommandError,
      'The --dryrun and --list-queues options are mutually exclusive'
  end

  worker_metadatas = SidekiqConfig::CliMethods.worker_metadatas(@rails_path)
  worker_queues = SidekiqConfig::CliMethods.worker_queues(@rails_path)

  # Each positional argument becomes one queue group, i.e. one Sidekiq
  # process serving the resolved set of queues.
  queue_groups = argv.map do |queues_or_query_string|
    if queues_or_query_string =~ /[\r\n]/
      raise CommandError,
        'The queue arguments cannot contain newlines'
    end

    # The wildcard argument means "all known queues" for this group.
    next worker_queues if queues_or_query_string == SidekiqConfig::WorkerMatcher::WILDCARD_MATCH

    # When using the queue query syntax, we treat each queue group
    # as a worker attribute query, and resolve the queues for the
    # queue group using this query.

    if @queue_selector
      SidekiqConfig::CliMethods.query_queues(queues_or_query_string, worker_metadatas)
    else
      SidekiqConfig::CliMethods.expand_queues(queues_or_query_string.split(','), worker_queues)
    end
  end

  # -n/--negate inverts each group: serve every queue EXCEPT the given ones.
  if @negate_queues
    queue_groups.map! { |queues| worker_queues - queues }
  end

  if queue_groups.all?(&:empty?)
    raise CommandError,
      'No queues found, you must select at least one queue'
  end

  # --list-queues: print the resolved groups and exit without starting anything.
  if @list_queues
    puts queue_groups.map(&:sort) # rubocop:disable Rails/Output

    return
  end

  unless @dryrun
    @logger.info("Starting cluster with #{queue_groups.length} processes")
  end

  # Fresh boot: wipe any stale multiprocess metrics files before spawning.
  start_metrics_server(wipe_metrics_dir: true)

  @processes = SidekiqCluster.start(
    queue_groups,
    env: @environment,
    directory: @rails_path,
    max_concurrency: @max_concurrency,
    min_concurrency: @min_concurrency,
    dryrun: @dryrun,
    timeout: soft_timeout_seconds
  )

  # In dry-run mode the commands were only printed; nothing to supervise.
  return if @dryrun

  write_pid
  trap_signals
  start_loop
end
|
|
|
|
|
|
|
|
# Writes the supervisor's PID to the pidfile configured via -P/--pidfile.
# No-op when no pidfile path was given.
def write_pid
  return unless @pid

  ProcessManagement.write_pid(@pid)
end
|
|
|
|
|
2020-04-22 19:07:51 +05:30
|
|
|
# Grace period (in seconds) workers get to finish their jobs on shutdown.
# Uses the -t/--timeout value when given, otherwise the cluster default.
def soft_timeout_seconds
  return @soft_timeout_seconds if @soft_timeout_seconds

  DEFAULT_SOFT_TIMEOUT_SECONDS
end
|
|
|
|
|
|
|
|
# The amount of time it'll wait for killing the alive Sidekiq processes:
# the soft (graceful) timeout plus an extra hard-kill allowance.
def hard_timeout_seconds
  DEFAULT_HARD_TIMEOUT_SECONDS + soft_timeout_seconds
end
|
|
|
|
|
2020-04-08 14:13:33 +05:30
|
|
|
# Current reading of the monotonic clock, in fractional seconds. Unlike
# Time.now this is immune to wall-clock adjustments, making it safe for
# computing shutdown deadlines.
def monotonic_time
  now = Process.clock_gettime(Process::CLOCK_MONOTONIC, :float_second)
  now
end
|
|
|
|
|
|
|
|
# Whether the shutdown wait loop should keep polling: true while at least
# one worker is still alive and the hard deadline has not passed.
def continue_waiting?(deadline)
  return false unless ProcessManagement.any_alive?(@processes)

  monotonic_time < deadline
end
|
|
|
|
|
|
|
|
# Forcefully kills any workers that are still alive after the graceful
# shutdown window ("-KILL" targets the process group, per the original call).
def hard_stop_stuck_pids
  stuck = ProcessManagement.pids_alive(@processes)
  ProcessManagement.signal_processes(stuck, "-KILL")
end
|
|
|
|
|
|
|
|
# Blocks until all workers have exited or the hard deadline passes, then
# force-kills whatever is still running.
def wait_for_termination
  deadline = monotonic_time + hard_timeout_seconds

  loop do
    break unless continue_waiting?(deadline)

    sleep(CHECK_TERMINATE_INTERVAL_SECONDS)
  end

  hard_stop_stuck_pids
end
|
|
|
|
|
|
|
|
# Installs the supervisor's signal handlers.
#
# Terminate signals stop the supervision loop, relay the signal to every
# worker, and wait (bounded) for them to exit. Forward signals are simply
# relayed so workers can react (e.g. TTIN thread dumps, HUP log rotation).
def trap_signals
  ProcessManagement.trap_signals(TERMINATE_SIGNALS) do |sig|
    @alive = false
    ProcessManagement.signal_processes(@processes, sig)
    wait_for_termination
  end

  ProcessManagement.trap_signals(FORWARD_SIGNALS) do |sig|
    ProcessManagement.signal_processes(@processes, sig)
  end
end
|
|
|
|
|
|
|
|
# Supervision loop: wakes every @interval seconds to (a) restart the metrics
# server if it died, and (b) shut the whole cluster down if any worker died.
# Runs until @alive is cleared by a terminate-signal handler.
def start_loop
  while @alive
    sleep(@interval)

    # The metrics server is restarted in place (without wiping the metrics
    # dir, so existing series survive the restart).
    if metrics_server_enabled? && ProcessManagement.process_died?(@metrics_server_pid)
      @logger.warn('Metrics server went away')
      start_metrics_server(wipe_metrics_dir: false)
    end

    unless ProcessManagement.all_alive?(@processes)
      # If a child process died we'll just terminate the whole cluster. It's up to
      # runit and such to then restart the cluster.
      @logger.info('A worker terminated, shutting down the cluster')

      stop_metrics_server
      ProcessManagement.signal_processes(@processes, :TERM)
      break
    end
  end
end
|
|
|
|
|
2022-01-26 12:08:38 +05:30
|
|
|
# Spawns the sidekiq metrics server as a separate process and records its
# PID so #start_loop can supervise it. No-op unless #metrics_server_enabled?.
#
# @param wipe_metrics_dir [Boolean] true on first boot to clear stale
#   Prometheus multiprocess files; false on supervised restarts
def start_metrics_server(wipe_metrics_dir: false)
  return unless metrics_server_enabled?

  @logger.info("Starting metrics server on port #{sidekiq_exporter_port}")
  @metrics_server_pid = MetricsServer.spawn(
    'sidekiq',
    metrics_dir: @metrics_dir,
    wipe_metrics_dir: wipe_metrics_dir,
    # The server must honor the same signal set as the cluster so it shuts
    # down/reacts alongside the workers.
    trapped_signals: TERMINATE_SIGNALS + FORWARD_SIGNALS
  )
end
|
|
|
|
|
|
|
|
# Whether the sidekiq metrics exporter is enabled in the application
# settings (presumably gitlab.yml's monitoring section — confirm against
# the Settings definition). Returns the raw setting value, which may be nil.
def sidekiq_exporter_enabled?
  ::Settings.dig('monitoring', 'sidekiq_exporter', 'enabled')
end
|
|
|
|
|
|
|
|
# Whether metrics and health checks are configured on different ports.
#
# https://gitlab.com/gitlab-org/gitlab/-/issues/345802 added settings for
# sidekiq_health_checks, which default to the sidekiq_exporter values for
# backwards compatibility. A health-check port that differs from the
# exporter port therefore means the user wants the two served by separate
# servers.
def exporter_has_a_unique_port?
  health_check_port = sidekiq_health_check_port
  exporter_port = sidekiq_exporter_port

  return false if health_check_port.nil? || exporter_port.nil?

  exporter_port != health_check_port
end
|
|
|
|
|
|
|
|
# Configured port for the sidekiq metrics exporter, or nil when unset.
def sidekiq_exporter_port
  ::Settings.dig('monitoring', 'sidekiq_exporter', 'port')
end
|
|
|
|
|
|
|
|
# Configured port for sidekiq health checks, or nil when unset.
def sidekiq_health_check_port
  ::Settings.dig('monitoring', 'sidekiq_health_checks', 'port')
end
|
|
|
|
|
|
|
|
# Whether a metrics server process should be spawned and supervised: never
# in dry-run mode, and only when the exporter is enabled on a port distinct
# from the health-check port.
def metrics_server_enabled?
  return false if @dryrun

  sidekiq_exporter_enabled? && exporter_has_a_unique_port?
end
|
|
|
|
|
|
|
|
# Gracefully terminates the metrics server, if one was ever spawned.
def stop_metrics_server
  pid = @metrics_server_pid
  return unless pid

  @logger.info("Stopping metrics server (PID #{pid})")
  ProcessManagement.signal(pid, :TERM)
end
|
|
|
|
|
2020-04-08 14:13:33 +05:30
|
|
|
# Builds the OptionParser for the CLI. Each handler stores its value in the
# corresponding instance variable consumed by #run.
#
# Fix: the -d/--dryrun and --list-queues handlers declared an unused block
# parameter misleadingly named `int`; both are bare switches, so the
# parameter is dropped.
#
# @return [OptionParser]
def option_parser
  OptionParser.new do |opt|
    opt.banner = "#{File.basename(__FILE__)} [QUEUE,QUEUE] [QUEUE] ... [OPTIONS]"

    opt.separator "\nOptions:\n"

    opt.on('-h', '--help', 'Shows this help message') do
      abort opt.to_s
    end

    opt.on('-m', '--max-concurrency INT', 'Maximum threads to use with Sidekiq (default: 50, 0 to disable)') do |int|
      @max_concurrency = int.to_i
    end

    opt.on('--min-concurrency INT', 'Minimum threads to use with Sidekiq (default: 0)') do |int|
      @min_concurrency = int.to_i
    end

    opt.on('-e', '--environment ENV', 'The application environment') do |env|
      @environment = env
    end

    opt.on('-P', '--pidfile PATH', 'Path to the PID file') do |pid|
      @pid = pid
    end

    opt.on('-r', '--require PATH', 'Location of the Rails application') do |path|
      @rails_path = path
    end

    opt.on('--queue-selector', 'Run workers based on the provided selector') do |queue_selector|
      @queue_selector = queue_selector
    end

    opt.on('-n', '--negate', 'Run workers for all queues in sidekiq_queues.yml except the given ones') do
      @negate_queues = true
    end

    opt.on('-i', '--interval INT', 'The number of seconds to wait between worker checks') do |int|
      @interval = int.to_i
    end

    opt.on('-t', '--timeout INT', 'Graceful timeout for all running processes') do |timeout|
      @soft_timeout_seconds = timeout.to_i
    end

    opt.on('-d', '--dryrun', 'Print commands that would be run without this flag, and quit') do
      @dryrun = true
    end

    opt.on('--list-queues', 'List matching queues, and quit') do
      @list_queues = true
    end
  end
end
|
|
|
|
end
|
|
|
|
end
|
|
|
|
end
|