2017-08-17 22:00:37 +05:30
|
|
|
module Banzai
  # Extract references to issuables from multiple documents

  # This populates RequestStore cache used in Banzai::ReferenceParser::IssueParser
  # and Banzai::ReferenceParser::MergeRequestParser

  # Populating the cache should happen before processing documents one-by-one
  # so we can avoid N+1 queries problem
  class IssuableExtractor
    # XPath matching rendered GFM reference links (anchors carrying the "gfm"
    # class) whose data-reference-type marks them as issue or MR references.
    QUERY = %q(
      descendant-or-self::a[contains(concat(" ", @class, " "), " gfm ")]
      [@data-reference-type="issue" or @data-reference-type="merge_request"]
    ).freeze

    attr_reader :context

    # context - An instance of Banzai::RenderContext.
    def initialize(context)
      @context = context
    end

    # Returns Hash in the form { node => issuable_instance }
    #
    # documents - A collection of parsed documents responding to #xpath
    #             (e.g. Nokogiri documents of rendered Markdown).
    def extract(documents)
      # Collect every issue/MR reference node across all documents first,
      # so each parser can batch-load records in a single pass.
      nodes = documents.flat_map do |document|
        document.xpath(QUERY)
      end

      issue_parser = Banzai::ReferenceParser::IssueParser.new(context)
      merge_request_parser =
        Banzai::ReferenceParser::MergeRequestParser.new(context)

      issuables_for_nodes = issue_parser.records_for_nodes(nodes).merge(
        merge_request_parser.records_for_nodes(nodes)
      )

      # The project for the issue/MR might be pending for deletion!
      # Filter them out because we don't care about them.
      issuables_for_nodes.select { |_node, issuable| issuable.project }
    end
  end
end
|