Merge pull request #796 from CMSgov/QPPSE-2132
QPPSE-2132: 2nd Dry Run for PY23 Performance Benchmarks in Prod - 5/6
VenkataChittariIcf authored Jun 5, 2024
2 parents c7f213b + 351cd0f commit 752366b
Showing 9 changed files with 16,186 additions and 3,593 deletions.
3,388 changes: 2,802 additions & 586 deletions benchmarks/2023.json

Large diffs are not rendered by default.

3,412 changes: 1,032 additions & 2,380 deletions benchmarks/2023/benchmark-exclusion-reasons.json

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion benchmarks/2023/benchmarks-schema.yaml
@@ -77,5 +77,5 @@ definitions:
enum: [claims, registry, cmsWebInterface, administrativeClaims, electronicHealthRecord, certifiedSurveyVendor]
averagePerformanceRate:
description: The Average Performance Rate for the Measure and CollectionType.
- type: [number, 'null']
+ type: [number, string, 'null']
required: [measureId, performanceYear, benchmarkYear, submissionMethod, percentiles]
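
The schema line above widens averagePerformanceRate from number-or-null to number, string, or null. A minimal TypeScript sketch of the widened union (illustrative only; the 'N/A' value is a made-up example, not taken from the repo):

// Illustrative only: the set of value types the schema now permits for averagePerformanceRate.
type AveragePerformanceRate = number | string | null;

const previouslyValid: AveragePerformanceRate[] = [82.5, null];
const nowAlsoValid: AveragePerformanceRate[] = [82.5, null, 'N/A']; // string entries validate only after this change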
2 changes: 1 addition & 1 deletion scripts/benchmarks/csv-json-converter.ts
@@ -60,7 +60,7 @@ export function convertCsvToJson(csvPath: string, performanceYear: number, jsonF

//populate some default values if they are not found in the csv.
benchmark.isToppedOutByProgram = benchmark.isToppedOutByProgram || false;
- benchmark.averagePerformanceRate = benchmark.averagePerformanceRate
+ benchmark.averagePerformanceRate = (benchmark.averagePerformanceRate != null)
? +benchmark.averagePerformanceRate
: null;
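
The one-line change above replaces a truthiness check with an explicit null check, so a legitimate rate of 0 is no longer coerced to null. A before/after sketch (illustrative only; the function names are not from the repo):

// Old behavior: any falsy rate, including a genuine 0, became null.
function coerceOld(rate: number | null | undefined): number | null {
  return rate ? +rate : null;
}

// New behavior: only null/undefined become null, so a 0 rate survives.
function coerceNew(rate: number | null | undefined): number | null {
  return rate != null ? +rate : null;
}

console.log(coerceOld(0));         // null  (zero rate lost)
console.log(coerceNew(0));         // 0     (zero rate preserved)
console.log(coerceOld(undefined)); // null
console.log(coerceNew(undefined)); // null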

92 changes: 79 additions & 13 deletions scripts/benchmarks/merge-benchmark-files.ts
@@ -4,34 +4,100 @@ import appRoot from 'app-root-path';

import { Benchmark } from './benchmarks.types';
import { writeToFile } from './util';
import _ from 'lodash';
import { BENCHMARKS_ORDER } from '../constants';

// command to use this file:
// node ./dist/benchmarks/merge-benchmark-files.js ./util/2023/benchmarks/json/ > ./benchmarks/2023.json
export function mergeBenchmarkFiles(benchmarksPath: string, performanceYear: number) {
let combinedBenchmarks: Benchmark[] = [];
const mergedBenchmarks = new Map();
const mergeConflicts: any[] = [];

const fileNames = fs.readdirSync(path.join(appRoot + '', benchmarksPath));

const benchmarkLayerFiles = fileNames
.sort((left, right) => {
if (left.indexOf('performance-benchmarks.json') > -1) {
return 1;
} else if (right.indexOf('performance-benchmarks.json') > -1) {
return -1;
} else {
return 0;
}
});
// Run through all the files in the directory, pull their data into JSON arrays,
// then add their benchmarks to the final array.
fileNames.forEach(fileName => {
benchmarkLayerFiles.forEach(fileName => {
const jsonFile = JSON.parse(
fs.readFileSync(path.join(appRoot + '', `${benchmarksPath}${fileName}`), 'utf8')
);
//remove the deciles column (which is still sometimes included in 3rd party files)
const isPerformanceBenchmark = fileName.indexOf('performance-benchmarks.json') > -1;

jsonFile.forEach((benchmark: Benchmark) => {
benchmark = Object.assign({}, BENCHMARKS_ORDER, benchmark);
if (isPerformanceBenchmark) {
benchmark = processPerformanceBenchmark(benchmark);
}
const benchmarkKey = getBenchmarkKey(!isPerformanceBenchmark ? benchmark : { ...benchmark, benchmarkYear: benchmark.performanceYear - 2 });

if (mergedBenchmarks.has(benchmarkKey) && !_.isEqual(mergedBenchmarks.get(benchmarkKey), benchmark)) {
if (!isPerformanceBenchmark) {
mergeConflicts.push({
existing: mergedBenchmarks.get(benchmarkKey),
conflicting: benchmark,
conflictingFile: fileName
});
}
} else {
mergedBenchmarks.set(benchmarkKey, benchmark);
}
//remove the deciles column (which is still sometimes included in 3rd party files)
delete benchmark.deciles;
});
combinedBenchmarks.push(...jsonFile);
});

// sort by measureId, then by submissionMethod.
combinedBenchmarks.sort((a, b) =>
a.measureId.localeCompare(b.measureId) ||
a.submissionMethod.localeCompare(b.submissionMethod)
);

writeToFile(combinedBenchmarks, `benchmarks/${performanceYear}.json`);

if (mergeConflicts.length > 0) {
throw new Error('Merge Conflicts: \n' + JSON.stringify(mergeConflicts, null, 2));
} else {
const orderedBenchmarks = _.sortBy([...mergedBenchmarks.values()], ['measureId', 'submissionMethod']);
writeToFile(orderedBenchmarks, `benchmarks/${performanceYear}.json`);
};
};

function processPerformanceBenchmark(benchmark) {
// Determines how many decimal places Performance Benchmarks should be rounded to, per circumstance and business rules. Should mostly be 2.
const acMeasures2021 = ['479', '480'];
let decimalPlaces: number;
if (acMeasures2021.includes(benchmark.measureId) && benchmark.performanceYear >= 2021) {
decimalPlaces = 4;
} else {
decimalPlaces = 2;
}

Object.entries(benchmark.percentiles).forEach(([key, value]) => {
if (typeof value === 'number') {
benchmark.percentiles[key] = +value.toFixed(decimalPlaces)
}
})

return {
...benchmark,
isToppedOut: false,
isToppedOutByProgram: false,
averagePerformanceRate: (benchmark.averagePerformanceRate != null) ? benchmark.averagePerformanceRate : null
};
};

function getBenchmarkKey(benchmark) {
let benchmarkKey = '';
[ 'measureId', 'benchmarkYear', 'performanceYear', 'submissionMethod' ].forEach((keyName) => {
if (keyName in benchmark) {
benchmarkKey = `${benchmarkKey}${benchmark[keyName]}|`;
} else {
throw new Error('Key is missing: ' + keyName);
}
});

return benchmarkKey;
};

/* c8 ignore next */
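
For readers skimming the new merge logic above: benchmark layer files are processed with performance-benchmarks.json sorted last, duplicate keys coming from non-performance files are collected as merge conflicts (which abort the run with an error), and each benchmark is deduplicated by the key built in getBenchmarkKey. A small illustrative call showing the key format that function produces (the field values here are made up):

// Hypothetical values; only the key format follows from getBenchmarkKey above.
const key = getBenchmarkKey({
  measureId: '001',
  benchmarkYear: 2021,
  performanceYear: 2023,
  submissionMethod: 'registry',
});
console.log(key); // "001|2021|2023|registry|"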

0 comments on commit 752366b
