# debian-mirror-gitlab/config/object_store_settings.rb
# Set default values for object_store settings
class ObjectStoreSettings
  # Object types that support object storage configuration.
  SUPPORTED_TYPES = %w(artifacts external_diffs lfs uploads packages dependency_proxy terraform_state pages).freeze

  # Keys a per-type `objects:` entry may override on top of the common config.
  ALLOWED_OBJECT_STORE_OVERRIDES = %w(bucket enabled proxy_download).freeze

  # pages may be enabled but use legacy disk storage
  # we don't need to raise an error in that case
  ALLOWED_INCOMPLETE_TYPES = %w(pages).freeze

  attr_accessor :settings

  # Legacy parser: fills in defaults for a single, per-type `object_store`
  # section (the pre-consolidated configuration format).
  #
  # object_store - a Settingslogic section (or nil, in which case an empty
  #                one is created).
  #
  # Returns the same section with all expected keys populated.
  def self.legacy_parse(object_store)
    object_store ||= Settingslogic.new({})
    object_store['enabled'] = false if object_store['enabled'].nil?
    object_store['remote_directory'] ||= nil
    object_store['direct_upload'] = false if object_store['direct_upload'].nil?
    object_store['background_upload'] = true if object_store['background_upload'].nil?
    object_store['proxy_download'] = false if object_store['proxy_download'].nil?
    object_store['storage_options'] ||= {}

    # Convert upload connection settings to use string keys, to make Fog happy
    object_store['connection']&.deep_stringify_keys!
    object_store
  end

  # settings - the application settings object (Settingslogic) whose
  #            per-type sections will be mutated in place by #parse!.
  def initialize(settings)
    @settings = settings
  end

  # This method converts the common object storage settings to
  # the legacy, internal representation.
  #
  # For example, with the following YAML:
  #
  # object_store:
  #   enabled: true
  #   connection:
  #     provider: AWS
  #     aws_access_key_id: minio
  #     aws_secret_access_key: gdk-minio
  #     region: gdk
  #     endpoint: 'http://127.0.0.1:9000'
  #     path_style: true
  #   storage_options:
  #     server_side_encryption: AES256
  #   proxy_download: true
  #   objects:
  #     artifacts:
  #       bucket: artifacts
  #       proxy_download: false
  #     lfs:
  #       bucket: lfs-objects
  #
  # This method then will essentially call:
  #
  # Settings.artifacts['object_store'] = {
  #   "enabled" => true,
  #   "connection" => {
  #     "provider" => "AWS",
  #     "aws_access_key_id" => "minio",
  #     "aws_secret_access_key" => "gdk-minio",
  #     "region" => "gdk",
  #     "endpoint" => "http://127.0.0.1:9000",
  #     "path_style" => true
  #   },
  #   "storage_options" => {
  #     "server_side_encryption" => "AES256"
  #   },
  #   "direct_upload" => true,
  #   "background_upload" => false,
  #   "proxy_download" => false,
  #   "remote_directory" => "artifacts"
  # }
  #
  # Settings.lfs['object_store'] = {
  #   "enabled" => true,
  #   "connection" => {
  #     "provider" => "AWS",
  #     "aws_access_key_id" => "minio",
  #     "aws_secret_access_key" => "gdk-minio",
  #     "region" => "gdk",
  #     "endpoint" => "http://127.0.0.1:9000",
  #     "path_style" => true
  #   },
  #   "storage_options" => {
  #     "server_side_encryption" => "AES256"
  #   },
  #   "direct_upload" => true,
  #   "background_upload" => false,
  #   "proxy_download" => true,
  #   "remote_directory" => "lfs-objects"
  # }
  #
  # Note that with the common config:
  # 1. Only one object store credentials can now be used. This is
  #    necessary to limit configuration overhead when an object storage
  #    client (e.g. AWS S3) is used inside GitLab Workhorse.
  # 2. However, a bucket has to be specified for each object
  #    type. Reusing buckets is not really supported, but we don't
  #    enforce that yet.
  # 3. direct_upload and background_upload cannot be configured anymore.
  def parse!
    return unless use_consolidated_settings?

    main_config = settings['object_store']
    common_config = main_config.slice('enabled', 'connection', 'proxy_download', 'storage_options')
    # Convert connection settings to use string keys, to make Fog happy
    common_config['connection']&.deep_stringify_keys!
    # These are no longer configurable if common config is used
    common_config['direct_upload'] = true
    common_config['background_upload'] = false
    common_config['storage_options'] ||= {}

    SUPPORTED_TYPES.each do |store_type|
      overrides = main_config.dig('objects', store_type) || {}
      target_config = common_config.merge(overrides.slice(*ALLOWED_OBJECT_STORE_OVERRIDES))
      section = settings.try(store_type)

      next unless section

      # A type enabled for object storage must name a bucket; for
      # ALLOWED_INCOMPLETE_TYPES this only warns (legacy disk storage),
      # otherwise it raises.
      if section['enabled'] && target_config['bucket'].blank?
        missing_bucket_for(store_type)
        next
      end

      # Map bucket (external name) -> remote_directory (internal representation)
      target_config['remote_directory'] = target_config.delete('bucket')
      target_config['consolidated_settings'] = true
      section['object_store'] = target_config
    end
  end

  private

  # We only can use the common object storage settings if:
  # 1. The common settings are defined
  # 2. The legacy settings are not defined
  def use_consolidated_settings?
    return false unless settings.dig('object_store', 'enabled')
    return false unless settings.dig('object_store', 'connection').present?

    SUPPORTED_TYPES.each do |store|
      # to_h is needed because something strange happens to
      # Settingslogic#dig when stub_storage_settings is run in tests:
      #
      #   (byebug) section.dig
      #   *** ArgumentError Exception: wrong number of arguments (given 0, expected 1+)
      #   (byebug) section.dig('object_store')
      #   *** ArgumentError Exception: wrong number of arguments (given 1, expected 0)
      section = settings.try(store)&.to_h

      next unless section

      return false if section.dig('object_store', 'enabled')
      # Omnibus defaults to an empty hash
      return false if section.dig('object_store', 'connection').present?
    end

    true
  end

  # Warns (for ALLOWED_INCOMPLETE_TYPES) or raises a RuntimeError when an
  # enabled store type has no bucket configured.
  def missing_bucket_for(store_type)
    message = "Object storage for #{store_type} must have a bucket specified"

    if ALLOWED_INCOMPLETE_TYPES.include?(store_type)
      warn "[WARNING] #{message}"
    else
      raise message
    end
  end
end