debian-mirror-gitlab/app/assets/javascripts/pipelines/utils.js

Ignoring revisions in .git-blame-ignore-revs. Click here to bypass and see the normal blame view.

177 lines
5.4 KiB
JavaScript
Raw Normal View History

2021-04-29 21:17:54 +05:30
import * as Sentry from '@sentry/browser';
import { pickBy } from 'lodash';
import { getParameterValues } from '~/lib/utils/url_utility';
import {
  NEEDS_PROPERTY,
  SUPPORTED_FILTER_PARAMETERS,
  TAB_QUERY_PARAM,
  validPipelineTabNames,
} from './constants';
/*
  The following functions are the main engine in transforming the data as
  received from the endpoint into the format the d3 graph expects.

  Input is of the form:
    [nodes]
    nodes: [{ category, name, jobs, size }]
      category is the stage name
      name is a group name; in the case that the group has one job, it is
        also the job name
      size is the number of parallel jobs
      jobs: [{ name, needs }]
        job name is either the same as the group name or "group x/y"
        needs: [job-names]
        needs is an array of job-name strings

  Output is of the form:
    { nodes: [node], links: [link] }
      node: { name, category }, + unused info passed through
      link: { source, target, value }, with source & target being node names
        and value being a constant

  We create nodes in the GraphQL update function, and then here we create the
  node dictionary, then create links, and then dedupe the links, so that in
  the case where job 4 depends on job 1 and job 2, and job 2 depends on job 1,
  we show only a single link from job 1 to job 2, then another from job 2 to
  job 4.

  CREATE LINKS
    nodes.name -> target
    nodes.name.needs.each -> source (source is the name of the group, not the parallel job)
    10 -> value (constant)
*/
/**
 * Builds a dictionary mapping every group name — and, for parallel groups,
 * every individual job name — to its group node, with the group's
 * aggregated `needs` attached.
 *
 * @param {Array} nodes - group nodes of shape { category, name, jobs, size }
 * @param {Object} [options]
 * @param {String} [options.needsKey=NEEDS_PROPERTY] - property on each job holding its needs array
 * @returns {Object} hash of node name -> node (with flattened `needs`)
 */
export const createNodeDict = (nodes, { needsKey = NEEDS_PROPERTY } = {}) => {
  return nodes.reduce((acc, node) => {
    const newNode = {
      ...node,
      // flatMap is the idiomatic single-pass equivalent of .map(...).flat()
      needs: node.jobs.flatMap((job) => job[needsKey] || []),
    };

    // For parallel groups (size > 1), each "job x/y" name must also resolve
    // to the group node so `needs` entries referencing them can be looked up.
    if (node.size > 1) {
      node.jobs.forEach((job) => {
        acc[job.name] = newNode;
      });
    }

    acc[node.name] = newNode;
    return acc;
  }, {});
};
/**
 * Filters a params object down to the supported filter parameters,
 * dropping any entries whose value is falsy.
 *
 * @param {Object} params - raw query/filter parameters
 * @returns {Object} params restricted to supported, truthy entries
 */
export const validateParams = (params) => {
  const isSupportedAndPresent = (val, key) =>
    Boolean(val) && SUPPORTED_FILTER_PARAMETERS.includes(key);
  return pickBy(params, isSupportedAndPresent);
};
/**
 * Takes the stages array and transforms it into a hash where each key
 * is a job name and the value is the associated job data.
 *
 * @param {Array} stages
 * @returns {Object} - Hash of jobs
 */
export const createJobsHash = (stages = []) => {
  // Collect every group from every stage into a single flat list,
  // then index them by name (and parallel-job name) via createNodeDict.
  const groups = stages.flatMap((stage) => stage.groups);
  return createNodeDict(groups);
};
/**
 * Takes the jobs hash generated by the `createJobsHash` function and
 * returns an easier structure to work with for needs relationships,
 * where the key is the job name and the value is an array of all the
 * needs this job has recursively (includes the needs of the needs).
 *
 * @param {Object} jobs
 * @returns {Object} - Hash of jobs and array of needs
 */
export const generateJobNeedsDict = (jobs = {}) => {
  const result = {};

  // Recursively gathers the full (possibly duplicated) needs chain
  // for a single job, using `result` as a memoization cache for jobs
  // that have already been fully resolved.
  const collectNeeds = (jobName) => {
    const entry = jobs[jobName];
    if (!entry?.needs) {
      return [];
    }

    const gathered = [];
    for (const neededJob of entry.needs) {
      // A `needs` may refer to an optional job that is not defined,
      // in which case we don't add that entry.
      if (jobs[neededJob]) {
        // Prefer the memoized, already-deduped list over recursing again.
        const transitiveNeeds = result[neededJob] ?? collectNeeds(neededJob);

        gathered.push(neededJob);

        // In case it's a parallel job (size > 1), the name of the group
        // and the job will be different. This means we also need to add
        // the group name to the list of `needs` so it can be referenced.
        const group = jobs[neededJob];
        if (group.size > 1) {
          gathered.push(group.name);
        }

        gathered.push(...transitiveNeeds);
      }
    }
    return gathered;
  };

  for (const jobName of Object.keys(jobs)) {
    // Deduplicate: two jobs needed by another may both depend on the
    // same upstream jobs, so the raw chain can contain repeats.
    result[jobName] = Array.from(new Set(collectNeeds(jobName)));
  }

  return result;
};
/**
 * Reports a pipeline failure to Sentry, tagged with the originating component.
 *
 * @param {String} component - name of the component where the failure occurred
 * @param {*} failureType - value passed to Sentry.captureException
 *   NOTE(review): captureException conventionally expects an Error; callers
 *   appear to pass failure-type values directly — confirm against call sites.
 */
export const reportToSentry = (component, failureType) => {
  Sentry.withScope((sentryScope) => {
    sentryScope.setTag('component', component);
    Sentry.captureException(failureType);
  });
};
/**
 * Reports a message to Sentry, tagged with the originating component and
 * carrying a snapshot of the component's data as extra context.
 *
 * @param {String} component - name of the reporting component
 * @param {String} message - message to capture
 * @param {Object} context - arbitrary data attached under the "Vue data" context
 */
export const reportMessageToSentry = (component, message, context) => {
  Sentry.withScope((sentryScope) => {
    // eslint-disable-next-line @gitlab/require-i18n-strings
    sentryScope.setContext('Vue data', context);
    sentryScope.setTag('component', component);
    Sentry.captureMessage(message);
  });
};
/**
 * Reads the tab query parameter from the given URL and returns it when it
 * names a valid pipeline tab; returns null otherwise.
 *
 * @param {String} url - URL to read the tab parameter from
 * @returns {String|null} a valid pipeline tab name, or null
 */
export const getPipelineDefaultTab = (url) => {
  const [candidate] = getParameterValues(TAB_QUERY_PARAM, url);
  if (!candidate || !validPipelineTabNames.includes(candidate)) {
    return null;
  }
  return candidate;
};
/**
 * Returns the job node with the highest value for `sortField`
 * (e.g. the slowest job when sorting by duration).
 *
 * @param {Object} jobs - connection-style object with a `nodes` array
 * @param {String} sortField - numeric field to compare nodes by
 * @returns {Object|undefined} the max node, or undefined when there are no nodes
 */
export const calculateJobStats = (jobs, sortField) => {
  const { nodes } = jobs;
  // Preserve the original behavior for an empty list (sorted[0] was undefined).
  if (nodes.length === 0) {
    return undefined;
  }
  // Single O(n) pass instead of copying and fully sorting the array; on ties
  // the earlier node wins, matching the previous stable descending sort.
  return nodes.reduce((best, node) => (node[sortField] > best[sortField] ? node : best));
};
/**
 * Returns the five slowest job nodes, ordered from slowest to fastest.
 * The input `jobs.nodes` array is copied before sorting and is not mutated.
 *
 * @param {Object} jobs - connection-style object with a `nodes` array
 * @returns {Array} up to five nodes sorted by descending duration
 */
export const calculateSlowestFiveJobs = (jobs) => {
  const SLOWEST_JOBS_LIMIT = 5;
  const byDurationDesc = (first, second) => second.duration - first.duration;
  return [...jobs.nodes].sort(byDurationDesc).slice(0, SLOWEST_JOBS_LIMIT);
};