debian-mirror-gitlab/app/assets/javascripts/pipelines/components/parsing_utils.js

import { memoize } from 'lodash';
import { createNodeDict } from '../utils';
import { EXPLICIT_NEEDS_PROPERTY, NEEDS_PROPERTY } from '../constants';
import { createSankey } from './dag/drawing_utils';

/*
  A performant alternative to lodash's isEqual. Because findIndex always finds
  the first instance of a match, if the index found for an item is not the
  item's own index, we know it is in fact a duplicate.
*/
const deduplicate = (item, itemIndex, arr) => {
  const foundIdx = arr.findIndex((test) => {
    return test.source === item.source && test.target === item.target;
  });

  return foundIdx === itemIndex;
};
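
/*
  Illustrative sketch (sample data invented): `deduplicate` is written as an
  Array#filter callback, so repeated { source, target } pairs collapse to
  their first occurrence.

    [
      { source: 'build', target: 'test' },
      { source: 'build', target: 'test' },
      { source: 'test', target: 'deploy' },
    ].filter(deduplicate);
    // => [{ source: 'build', target: 'test' }, { source: 'test', target: 'deploy' }]
*/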

export const makeLinksFromNodes = (nodes, nodeDict, { needsKey = NEEDS_PROPERTY } = {}) => {
  const constantLinkValue = 10; // all links are the same weight
  return nodes
    .map(({ jobs, name: groupName }) =>
      jobs.map((job) => {
        const needs = job[needsKey] || [];

        return needs.reduce((acc, needed) => {
          // It's possible that a job needs an optional job which was not
          // created in this pipeline. In that scenario, the needed job
          // doesn't exist, so we don't want to create a link for it.
          if (nodeDict[needed]?.name) {
            acc.push({
              source: nodeDict[needed].name,
              target: groupName,
              value: constantLinkValue,
            });
          }

          return acc;
        }, []);
      }),
    )
    .flat(2);
};
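
/*
  Hypothetical usage sketch: the group/job shapes and the explicit 'needs' key
  are simplified stand-ins for what the pipeline query and createNodeDict
  provide (needsKey normally defaults to NEEDS_PROPERTY).

    const nodes = [
      { name: 'build', jobs: [{ name: 'build', needs: [] }] },
      { name: 'test', jobs: [{ name: 'test', needs: ['build'] }] },
    ];
    const nodeDict = {
      build: { name: 'build', needs: [] },
      test: { name: 'test', needs: ['build'] },
    };
    makeLinksFromNodes(nodes, nodeDict, { needsKey: 'needs' });
    // => [{ source: 'build', target: 'test', value: 10 }]
*/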

export const getAllAncestors = (nodes, nodeDict) => {
  const needs = nodes
    .map((node) => {
      return nodeDict[node]?.needs || '';
    })
    .flat()
    .filter(Boolean)
    .filter(deduplicate);

  if (needs.length) {
    return [...needs, ...getAllAncestors(needs, nodeDict)];
  }

  return [];
};
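
/*
  Illustrative sketch with a hand-built dictionary: starting from a list of
  job names, the helper walks the `needs` chains recursively and returns the
  ancestor names it encounters.

    const nodeDict = {
      build: { name: 'build', needs: [] },
      test: { name: 'test', needs: ['build'] },
      deploy: { name: 'deploy', needs: ['test'] },
    };
    getAllAncestors(['deploy'], nodeDict);
    // => ['test', 'build']
*/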

export const filterByAncestors = (links, nodeDict) =>
  links.filter(({ target, source }) => {
    /*
      For every link, look at its target.
      For every target, get the target node's needs,
      then drop the current link's source from that list.
      Recursively collect all remaining ancestors.
      If the current link's source is already among those ancestors,
      the link is redundant and we drop it.
    */
    const targetNode = target;
    const targetNodeNeeds = nodeDict[targetNode].needs;
    const targetNodeNeedsMinusSource = targetNodeNeeds.filter((need) => need !== source);

    const allAncestors = getAllAncestors(targetNodeNeedsMinusSource, nodeDict);
    return !allAncestors.includes(source);
  });
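
/*
  Illustrative sketch (hand-built data): when `deploy` needs both `build` and
  `test`, and `test` already needs `build`, the direct build → deploy link is
  redundant and gets dropped.

    const nodeDict = {
      build: { name: 'build', needs: [] },
      test: { name: 'test', needs: ['build'] },
      deploy: { name: 'deploy', needs: ['build', 'test'] },
    };
    filterByAncestors(
      [
        { source: 'build', target: 'test' },
        { source: 'build', target: 'deploy' },
        { source: 'test', target: 'deploy' },
      ],
      nodeDict,
    );
    // => [{ source: 'build', target: 'test' }, { source: 'test', target: 'deploy' }]
*/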

export const parseData = (nodes, { needsKey = NEEDS_PROPERTY } = {}) => {
  const nodeDict = createNodeDict(nodes, { needsKey });
  const allLinks = makeLinksFromNodes(nodes, nodeDict, { needsKey });
  const filteredLinks = allLinks.filter(deduplicate);
  const links = filterByAncestors(filteredLinks, nodeDict);

  return { nodes, links };
};
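
/*
  Rough shape sketch: parseData ties the helpers above together and returns
  the original nodes plus the deduplicated, ancestry-filtered links that the
  Sankey layout consumes. `arrayOfJobs` stands for the flattened list of job
  groups, as built in listByLayers below.

    const { nodes, links } = parseData(arrayOfJobs);
    // nodes: the input group nodes, unchanged
    // links: [{ source: 'build', target: 'test', value: 10 }, ...]
*/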

/*
  The number of nodes in the most populous generation drives the height of the graph.
*/
export const getMaxNodes = (nodes) => {
  const counts = nodes.reduce((acc, { layer }) => {
    if (!acc[layer]) {
      acc[layer] = 0;
    }

    acc[layer] += 1;

    return acc;
  }, []);

  return Math.max(...counts);
};
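
/*
  Quick sketch: nodes carry a numeric `layer` after the Sankey pass, and the
  widest layer wins.

    getMaxNodes([{ layer: 0 }, { layer: 0 }, { layer: 1 }]);
    // => 2
*/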

/*
  Because we cannot know if a node is part of a relationship until after we
  generate the links with createSankey, this function is used after the first call
  to find nodes that have no relations.
*/
export const removeOrphanNodes = (sankeyfiedNodes) => {
  return sankeyfiedNodes.filter((node) => node.sourceLinks.length || node.targetLinks.length);
};
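
/*
  Quick sketch (sankey-fied nodes invented): a node survives if it has at
  least one outgoing or incoming link.

    removeOrphanNodes([
      { name: 'build', sourceLinks: [{}], targetLinks: [] },
      { name: 'lonely-job', sourceLinks: [], targetLinks: [] },
    ]);
    // => [{ name: 'build', sourceLinks: [{}], targetLinks: [] }]
*/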

/*
  This utility accepts unwrapped pipeline data in the format returned from
  our standard pipeline GraphQL query and returns a list of names by layer
  for the layer view. It can be combined with the stagesLookup on the pipeline
  to generate columns by layer.
*/
export const listByLayers = ({ stages }) => {
  const arrayOfJobs = stages.flatMap(({ groups }) => groups);
  const parsedData = parseData(arrayOfJobs);
  const explicitParsedData = parseData(arrayOfJobs, { needsKey: EXPLICIT_NEEDS_PROPERTY });
  const dataWithLayers = createSankey()(explicitParsedData);

  const pipelineLayers = dataWithLayers.nodes.reduce((acc, { layer, name }) => {
    /* group names by layer */
    if (!acc[layer]) {
      acc[layer] = [];
    }

    acc[layer].push(name);

    return acc;
  }, []);

  return {
    linksData: parsedData.links,
    numGroups: arrayOfJobs.length,
    pipelineLayers,
  };
};
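
/*
  Rough shape sketch (group names invented; the real `stages` come from the
  pipeline GraphQL query and the layering itself is computed by createSankey):

    const { linksData, numGroups, pipelineLayers } = listByLayers({ stages });
    // linksData:      deduplicated links between group names, as in parseData
    // numGroups:      total number of job groups across all stages
    // pipelineLayers: e.g. [['build-job'], ['test-job', 'lint-job'], ['deploy-job']]
*/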

export const generateColumnsFromLayersListBare = ({ stages, stagesLookup }, pipelineLayers) => {
  return pipelineLayers.map((layers, idx) => {
    /*
      Look up the groups in each layer,
      then add each set of layer groups to a stage-like object.
    */
    const groups = layers.map((id) => {
      const { stageIdx, groupIdx } = stagesLookup[id];
      return stages[stageIdx]?.groups?.[groupIdx];
    });

    return {
      name: '',
      id: `layer-${idx}`,
      status: { action: null },
      groups: groups.filter(Boolean),
    };
  });
};

export const generateColumnsFromLayersListMemoized = memoize(generateColumnsFromLayersListBare);
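
/*
  Hypothetical wiring sketch (identifiers invented): `pipelineLayers` is the
  array produced by listByLayers, and `stagesLookup` maps a group name to its
  { stageIdx, groupIdx } position in `stages`, so each layer can be rendered
  as a stage-like column.

    const pipeline = { stages, stagesLookup };
    const columns = generateColumnsFromLayersListMemoized(pipeline, pipelineLayers);
    // => [{ name: '', id: 'layer-0', status: { action: null }, groups: [...] }, ...]
*/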