# debian-mirror-gitlab/config/initializers/7_prometheus_metrics.rb
require 'prometheus/client'
# Keep separate directories for separate processes.
#
# Returns a Pathname under tmp/ for development/test, or nil in any other
# environment (production is expected to set the `prometheus_multiproc_dir`
# environment variable explicitly).
def prometheus_default_multiproc_dir
  return unless Rails.env.development? || Rails.env.test?

  # Pick a per-server subdirectory so Sidekiq, Unicorn and Puma processes
  # never share (and clobber) each other's metric files.
  subdirectory =
    if Sidekiq.server?
      'sidekiq'
    elsif defined?(Unicorn::Worker)
      'unicorn'
    elsif defined?(::Puma)
      'puma'
    end

  Rails.root.join(['tmp/prometheus_multiproc_dir', subdirectory].compact.join('/'))
end
# Global configuration for the prometheus-client gem.
Prometheus::Client.configure do |config|
  config.logger = Rails.logger # rubocop:disable Gitlab/RailsLogger

  # Pre-allocate small (4 KiB) mmap-ed files per metric; they grow on demand.
  config.initial_mmap_file_size = 4 * 1024

  # Honour an explicitly configured directory; otherwise fall back to the
  # per-process default defined above (development/test only).
  config.multiprocess_files_dir = ENV['prometheus_multiproc_dir'] || prometheus_default_multiproc_dir

  # Label metric files with a stable worker id rather than a raw PID, so
  # files survive worker restarts without unbounded growth.
  config.pid_provider = Prometheus::PidProvider.method(:worker_id)
end
# Install the request-metrics middleware near the top of the Rack stack so it
# observes (nearly) every request.
Gitlab::Application.configure do |config|
  # 0 should be Sentry to catch errors in this middleware
  config.middleware.insert(1, Gitlab::Metrics::RequestsRackMiddleware)
end
# Reset and (re)start Prometheus metrics collection when a Sidekiq server
# process boots.
Sidekiq.configure_server do |config|
  config.on(:startup) do
    # webserver metrics are cleaned up in config.ru: `warmup` block
    Prometheus::CleanupMultiprocDirService.new.execute

    # In production, sidekiq is run in a multi-process setup where processes might interfere
    # with each other cleaning up and reinitializing prometheus database files, which is why
    # we're re-doing the work every time here.
    # A cleaner solution would be to run the cleanup pre-fork, and the initialization once
    # after all workers have forked, but I don't know how at this point.
    ::Prometheus::Client.reinitialize_on_pid_change(force: true)

    # Expose Sidekiq metrics over HTTP for scraping.
    Gitlab::Metrics::Exporter::SidekiqExporter.instance.start
  end
end
# Wire up samplers and client re-initialization for the web server processes.
# Skipped in test, and when Prometheus metrics are disabled.
if !Rails.env.test? && Gitlab::Metrics.prometheus_metrics_enabled?
  Gitlab::Cluster::LifecycleEvents.on_worker_start do
    # Guard with defined? so older prometheus-client versions without this
    # API don't raise at worker boot.
    Prometheus::Client.reinitialize_on_pid_change if defined?(::Prometheus::Client.reinitialize_on_pid_change)

    # Sample Ruby VM statistics (GC, memory, ...) on a fixed interval.
    Gitlab::Metrics::Samplers::RubySampler.initialize_instance(Settings.monitoring.ruby_sampler_interval).start
  end

  Gitlab::Cluster::LifecycleEvents.on_master_start do
    ::Prometheus::Client.reinitialize_on_pid_change(force: true)

    # Start the sampler matching whichever application server is loaded.
    if defined?(::Unicorn)
      Gitlab::Metrics::Samplers::UnicornSampler.instance(Settings.monitoring.unicorn_sampler_interval).start
    elsif defined?(::Puma)
      Gitlab::Metrics::Samplers::PumaSampler.instance(Settings.monitoring.puma_sampler_interval).start
    end

    # Register the request-duration histogram once, in the master, before forking.
    Gitlab::Metrics::RequestsRackMiddleware.initialize_http_request_duration_seconds
  end
end
# Manage the standalone web metrics exporter across the master/worker
# lifecycle of Unicorn or Puma.
if defined?(::Unicorn) || defined?(::Puma)
  # The exporter runs only in the master process.
  Gitlab::Cluster::LifecycleEvents.on_master_start do
    Gitlab::Metrics::Exporter::WebExporter.instance.start
  end

  # DEPRECATED: TO BE REMOVED
  # This is needed to implement blackout period of `web_exporter`
  # https://gitlab.com/gitlab-org/gitlab/issues/35343#note_238479057
  Gitlab::Cluster::LifecycleEvents.on_before_blackout_period do
    Gitlab::Metrics::Exporter::WebExporter.instance.mark_as_not_running!
  end

  Gitlab::Cluster::LifecycleEvents.on_before_graceful_shutdown do
    # We need to ensure that before we re-exec or shutdown server
    # we do stop the exporter
    Gitlab::Metrics::Exporter::WebExporter.instance.stop
  end

  Gitlab::Cluster::LifecycleEvents.on_before_master_restart do
    # We need to ensure that before we re-exec server
    # we do stop the exporter
    #
    # We do it again, for being extra safe,
    # but it should not be needed
    Gitlab::Metrics::Exporter::WebExporter.instance.stop
  end

  Gitlab::Cluster::LifecycleEvents.on_worker_start do
    # The `#close_on_exec=` takes effect only on `execve`
    # but this does not happen for Ruby fork
    #
    # This does stop server, as it is running on master.
    Gitlab::Metrics::Exporter::WebExporter.instance.stop
  end
end