2020-06-23 00:09:42 +05:30
|
|
|
import { pickBy } from 'lodash';
|
2021-03-08 18:12:59 +05:30
|
|
|
import { createNodeDict } from './components/parsing_utils';
|
2021-03-11 19:13:27 +05:30
|
|
|
import { SUPPORTED_FILTER_PARAMETERS } from './constants';
|
2020-06-23 00:09:42 +05:30
|
|
|
|
2021-03-08 18:12:59 +05:30
|
|
|
/**
 * Keeps only the supported filter parameters that carry a truthy value.
 *
 * @param {Object} params - Raw filter parameters
 * @returns {Object} - Subset of `params` limited to supported, non-empty filters
 */
export const validateParams = (params) =>
  pickBy(params, (value, key) => Boolean(value) && SUPPORTED_FILTER_PARAMETERS.includes(key));
|
2020-11-24 15:15:51 +05:30
|
|
|
|
|
|
|
/**
 * This function takes the stages array and transforms it
 * into a hash where each key is a job name and the job data
 * is associated to that key.
 *
 * @param {Array} stages
 * @returns {Object} - Hash of jobs
 */
export const createJobsHash = (stages = []) => {
  const allJobs = stages.flatMap((stage) => stage.groups);
  return createNodeDict(allJobs);
};
|
|
|
|
|
2021-02-22 17:27:13 +05:30
|
|
|
/**
 * This function takes the jobs hash generated by the
 * `createJobsHash` function and returns an easier
 * structure to work with for needs relationships,
 * where the key is the job name and the value is an
 * array of all the needs this job has recursively
 * (includes the needs of the needs).
 *
 * @param {Object} jobs - Hash of jobs keyed by job name
 * @returns {Object} - Hash of job names to their flattened array of needs
 */
export const generateJobNeedsDict = (jobs = {}) => {
  const arrOfJobNames = Object.keys(jobs);

  return arrOfJobNames.reduce((acc, value) => {
    const recursiveNeeds = (jobName) => {
      if (!jobs[jobName]?.needs) {
        return [];
      }

      return jobs[jobName].needs
        .map((job) => {
          // If we already have the needs of a job in the accumulator,
          // then we use the memoized data instead of the recursive call
          // to save some performance.
          const newNeeds = acc[job] ?? recursiveNeeds(job);

          // In case it's a parallel job (size > 1), the name of the group
          // and the job will be different. This means we also need to add
          // the group name to the list of `needs` to ensure we can
          // properly reference it.
          // Guard with `?.` because a `need` may point at a job name that
          // is absent from the hash (e.g. a filtered-out job); accessing
          // `.size` on `undefined` previously threw a TypeError.
          const group = jobs[job];
          if (group?.size > 1) {
            return [job, group.name, newNeeds];
          }

          return [job, newNeeds];
        })
        .flat(Infinity);
    };

    // To ensure we don't have duplicate job relationships when 2 jobs
    // needed by another both depend on the same jobs, we remove any
    // duplicates from the array.
    const uniqueValues = Array.from(new Set(recursiveNeeds(value)));

    return { ...acc, [value]: uniqueValues };
  }, {});
};
|