2021-09-30 23:02:18 +05:30
|
|
|
import { memoize } from 'lodash';
|
2021-04-29 21:17:54 +05:30
|
|
|
import { createSankey } from './dag/drawing_utils';
|
2020-06-23 00:09:42 +05:30
|
|
|
|
|
|
|
/*
  The following functions are the main engine in transforming the data as
  received from the endpoint into the format the d3 graph expects.

  Input is of the form:
  [nodes]
    nodes: [{category, name, jobs, size}]
      category is the stage name
      name is a group name; in the case that the group has one job, it is
        also the job name
      size is the number of parallel jobs
      jobs: [{ name, needs }]
        job name is either the same as the group name or group x/y
        needs: [job-names]
        needs is an array of job-name strings

  Output is of the form:
  { nodes: [node], links: [link] }
    node: { name, category }, + unused info passed through
    link: { source, target, value }, with source & target being node names
      and value being a constant

  We create nodes in the GraphQL update function, and then here we create the
  node dictionary, then create links, and then dedupe the links, so that in
  the case where job 4 depends on job 1 and job 2, and job 2 depends on job 1,
  we show only a single link from job 1 to job 2 then another from job 2 to
  job 4.

  CREATE LINKS
  nodes.name -> target
  nodes.name.needs.each -> source (source is the name of the group, not the parallel job)
  10 -> value (constant)
*/
|
|
|
|
|
2021-03-08 18:12:59 +05:30
|
|
|
/*
  Builds a lookup of name -> node, where each node carries the flattened
  `needs` of all of its jobs.

  For parallel groups (size > 1), every individual job name (e.g. "test 1/2")
  is also keyed to the same node, so that links expressed against a parallel
  job name resolve to the group's node.
*/
export const createNodeDict = (nodes) => {
  return nodes.reduce((acc, node) => {
    const newNode = {
      ...node,
      // flatMap collects the needs of every job in the group; jobs without
      // a needs array contribute nothing.
      needs: node.jobs.flatMap((job) => job.needs || []),
    };

    if (node.size > 1) {
      // Key each parallel job's own name to the shared group node.
      node.jobs.forEach((job) => {
        acc[job.name] = newNode;
      });
    }

    acc[node.name] = newNode;
    return acc;
  }, {});
};
|
|
|
|
|
|
|
|
/*
  Builds the full list of links between nodes.

  Every `needs` entry of every job becomes a link from the needed group's
  node to the current group, all with the same constant weight. A need that
  does not resolve to an entry in nodeDict (e.g. an optional job that was
  never created) produces no link.
*/
export const makeLinksFromNodes = (nodes, nodeDict) => {
  const constantLinkValue = 10; // all links are the same weight
  return nodes.flatMap(({ jobs, name: groupName }) =>
    jobs.flatMap(({ needs = [] }) =>
      needs
        // It's possible that we have an optional job which is being needed
        // by another job. In that scenario the needed job doesn't exist, so
        // we don't want to create a link for it.
        .filter((neededJob) => nodeDict[neededJob]?.name)
        .map((neededJob) => ({
          source: nodeDict[neededJob].name,
          target: groupName,
          value: constantLinkValue,
        })),
    ),
  );
};
|
|
|
|
|
|
|
|
/*
  Recursively collects the names of every ancestor (transitive need) of the
  given node names. Duplicates may appear when nodes share ancestors; callers
  only test membership (`includes`), so duplicates are harmless.

  The `visited` set (new, defaulted parameter — backward compatible) guards
  against cyclic `needs` definitions, which previously recursed without
  bound and overflowed the stack; already-expanded names are not followed
  again.
*/
export const getAllAncestors = (nodes, nodeDict, visited = new Set()) => {
  const needs = nodes
    // A name may be missing from the dictionary or have no needs; either
    // way it contributes no ancestors.
    .flatMap((node) => nodeDict[node]?.needs || [])
    // Drop falsy entries and anything we have already expanded (cycle guard).
    .filter((need) => Boolean(need) && !visited.has(need));

  if (needs.length) {
    needs.forEach((need) => visited.add(need));
    return [...needs, ...getAllAncestors(needs, nodeDict, visited)];
  }

  return [];
};
|
|
|
|
|
|
|
|
/*
  Drops redundant links.

  For every link, look at its target node's needs minus the current link's
  source, then expand those needs recursively into the full ancestor list.
  If the source already appears among those ancestors, an indirect path from
  source to target exists, so the direct link is dropped.
*/
export const filterByAncestors = (links, nodeDict) =>
  links.filter(({ target, source }) => {
    const needsBesidesSource = nodeDict[target].needs.filter((need) => need !== source);
    const ancestors = getAllAncestors(needsBesidesSource, nodeDict);
    return !ancestors.includes(source);
  });
|
|
|
|
|
2021-09-30 23:02:18 +05:30
|
|
|
/*
  A performant alternative to lodash's isEqual for this specific shape.
  Because findIndex always returns the first match, a link whose own index
  differs from the first matching index must be a duplicate.
  Intended for use as an Array#filter predicate.
*/
const deduplicate = (link, linkIndex, links) => {
  const firstMatchIndex = links.findIndex(
    (candidate) => candidate.source === link.source && candidate.target === link.target,
  );

  return firstMatchIndex === linkIndex;
};
|
|
|
|
|
2021-03-08 18:12:59 +05:30
|
|
|
/*
  Main entry point for transforming endpoint data into the d3-friendly
  { nodes, links } shape: build the node dictionary, derive links from needs,
  remove redundant links via ancestry, then drop duplicate source/target
  pairs.
*/
export const parseData = (nodes) => {
  const nodeDict = createNodeDict(nodes);
  const allLinks = makeLinksFromNodes(nodes, nodeDict);
  const uniqueLinks = filterByAncestors(allLinks, nodeDict).filter(deduplicate);

  return { nodes, links: uniqueLinks };
};
|
|
|
|
|
|
|
|
/*
  The number of nodes in the most populous generation drives the height of
  the graph. Returns the size of the largest layer, or 0 for an empty list.
*/
export const getMaxNodes = (nodes) => {
  // Tally node counts per layer. A plain object (rather than an array
  // indexed by layer) avoids holes when layer numbers are non-contiguous;
  // spreading a sparse array into Math.max yields NaN.
  const countsByLayer = nodes.reduce((acc, { layer }) => {
    acc[layer] = (acc[layer] ?? 0) + 1;
    return acc;
  }, {});

  // The 0 floor keeps the result finite for an empty node list
  // (Math.max() with no arguments returns -Infinity).
  return Math.max(0, ...Object.values(countsByLayer));
};
|
|
|
|
|
|
|
|
/*
  Because we cannot know if a node is part of a relationship until after we
  generate the links with createSankey, this function is used after the first
  call to find nodes that have no relations.
*/
export const removeOrphanNodes = (sankeyfiedNodes) => {
  return sankeyfiedNodes.filter(({ sourceLinks, targetLinks }) => {
    // Keep a node only when it participates in at least one link,
    // in either direction.
    const hasRelations = sourceLinks.length > 0 || targetLinks.length > 0;
    return hasRelations;
  });
};
|
2021-04-29 21:17:54 +05:30
|
|
|
|
|
|
|
/*
  This utility accepts unwrapped pipeline data in the format returned from
  our standard pipeline GraphQL query and returns a list of names by layer
  for the layer view. It can be combined with the stageLookup on the pipeline
  to generate columns by layer.
*/
export const listByLayers = ({ stages }) => {
  const groupList = stages.flatMap(({ groups }) => groups);
  const parsed = parseData(groupList);
  const sankeyfied = createSankey()(parsed);

  /* Bucket group names by the layer index createSankey assigned to each node. */
  const pipelineLayers = [];
  sankeyfied.nodes.forEach(({ layer, name }) => {
    if (!pipelineLayers[layer]) {
      pipelineLayers[layer] = [];
    }
    pipelineLayers[layer].push(name);
  });

  return {
    linksData: parsed.links,
    numGroups: groupList.length,
    pipelineLayers,
  };
};
|
2021-06-08 01:23:25 +05:30
|
|
|
|
|
|
|
/*
  Turns a list of group names per layer into stage-like column objects for
  the layer view: each layer's ids are resolved back to their group objects
  via stagesLookup, and unresolvable ids are dropped.
*/
export const generateColumnsFromLayersListBare = ({ stages, stagesLookup }, pipelineLayers) => {
  // Resolve a group id to its group object using the stage/group indices
  // recorded in stagesLookup; out-of-range indices resolve to undefined.
  const lookUpGroup = (id) => {
    const { stageIdx, groupIdx } = stagesLookup[id];
    return stages[stageIdx]?.groups?.[groupIdx];
  };

  return pipelineLayers.map((layerGroupIds, layerIdx) => ({
    name: '',
    id: `layer-${layerIdx}`,
    status: { action: null },
    groups: layerGroupIds.map(lookUpGroup).filter(Boolean),
  }));
};
|
|
|
|
|
|
|
|
// Memoized wrapper so repeated renders reuse previously computed columns.
// NOTE(review): lodash `memoize` keys its cache on the FIRST argument only,
// so calls that vary only `pipelineLayers` would return stale results —
// confirm callers always pass a fresh first argument when layers change.
export const generateColumnsFromLayersListMemoized = memoize(generateColumnsFromLayersListBare);
|