
Commit e175136
Merge branch 'e2e/do-not-fail-exceution-if-one-test-failed' into vit-51248copy3
mountiny committed Oct 28, 2024
2 parents 652d2ff + d254f8f commit e175136
Showing 6 changed files with 162 additions and 82 deletions.
30 changes: 30 additions & 0 deletions .github/workflows/e2ePerformanceTests.yml
@@ -229,6 +229,36 @@ jobs:
env:
GITHUB_TOKEN: ${{ github.token }}

- name: Check if test has skipped tests
id: checkIfSkippedTestsDetected
run: |
if grep -q '⚠️' "./Host_Machine_Files/\$WORKING_DIRECTORY/output.md"; then
# Set an output on the GH action indicating that the tests were skipped:
echo "skippedTestsDetected=true" >> "$GITHUB_OUTPUT"
else
echo "skippedTestsDetected=false" >> "$GITHUB_OUTPUT"
echo '✅ no skipped tests detected'
fi
env:
GITHUB_TOKEN: ${{ github.token }}

- name: 'Announce skipped tests in Slack'
if: ${{ steps.checkIfSkippedTestsDetected.outputs.skippedTestsDetected == 'true' }}
uses: 8398a7/action-slack@v3
with:
status: custom
custom_payload: |
{
channel: '#e2e-announce',
attachments: [{
color: 'danger',
text: `⚠️ ${process.env.AS_REPO} Some E2E tests were skipped in the <https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}|${{ github.workflow }}> workflow ⚠️`,
}]
}
env:
GITHUB_TOKEN: ${{ github.token }}
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK }}

- name: 'Announce regression in Slack'
if: ${{ steps.checkIfRegressionDetected.outputs.performanceRegressionDetected == 'true' }}
uses: 8398a7/action-slack@v3
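For reference, the `checkIfSkippedTestsDetected` step above keys off the '⚠️' marker that the updated markdown report (see tests/e2e/compare/output/markdown.ts below) appends when tests were skipped. A minimal TypeScript sketch of the same check, assuming a local report path (the helper name and path are illustrative, not part of the commit):

```ts
import {readFileSync} from 'fs';

// Illustrative helper mirroring the workflow's grep: the report contains the
// '⚠️' marker only when buildMarkdown received a non-empty skippedTests list.
function hasSkippedTests(reportPath: string): boolean {
    const report = readFileSync(reportPath, 'utf8');
    return report.includes('⚠️');
}

console.log(`skippedTestsDetected=${hasSkippedTests('./output.md')}`);
```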
13 changes: 10 additions & 3 deletions tests/e2e/compare/compare.ts
@@ -91,16 +91,23 @@ function compareResults(baselineEntries: Metric | string, compareEntries: Metric
};
}

export default (main: Metric | string, delta: Metric | string, outputFile: string, outputFormat = 'all', metricForTest = {}) => {
type Options = {
outputFile: string;
outputFormat: 'console' | 'markdown' | 'all';
metricForTest: Record<string, Unit>;
skippedTests: string[];
};

export default (main: Metric | string, delta: Metric | string, {outputFile, outputFormat = 'all', metricForTest = {}, skippedTests}: Options) => {
// IMPORTANT NOTE: make sure you are passing the main/baseline results first, then the delta/compare results:
const outputData = compareResults(main, delta, metricForTest);

if (outputFormat === 'console' || outputFormat === 'all') {
printToConsole(outputData);
printToConsole(outputData, skippedTests);
}

if (outputFormat === 'markdown' || outputFormat === 'all') {
return writeToMarkdown(outputFile, outputData);
return writeToMarkdown(outputFile, outputData, skippedTests);
}
};
export {compareResults};
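Call sites migrate from five positional arguments to three, with the output options gathered into a single object. A sketch of the new shape, assuming string-serialized results and an illustrative output path:

```ts
import compare from './compare';

// Sketch of a migrated call site; the wrapper and its inputs are illustrative.
async function writeReport(mainResults: string, deltaResults: string, skippedTests: string[]) {
    await compare(mainResults, deltaResults, {
        outputFile: './output.md',
        outputFormat: 'all', // 'console' | 'markdown' | 'all'
        metricForTest: {},
        skippedTests, // names of tests whose results were pruned
    });
}
```

Grouping the trailing parameters into an options object keeps the two required inputs positional while letting new fields such as `skippedTests` be added without touching every caller.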
6 changes: 5 additions & 1 deletion tests/e2e/compare/output/console.ts
@@ -26,7 +26,7 @@ const printRegularLine = (entry: Entry) => {
/**
* Prints the result simply to console.
*/
export default (data: Data) => {
export default (data: Data, skippedTests: string[]) => {
// No need to log errors or warnings as these were already logged on the fly
console.debug('');
console.debug('❇️ Performance comparison results:');
@@ -38,6 +38,10 @@ export default (data: Data) => {
data.meaningless.forEach(printRegularLine);

console.debug('');

if (skippedTests.length > 0) {
console.debug(`⚠️ Some tests did not pass successfully, so some results are omitted from the final report: ${skippedTests.join(', ')}`);
}
};

export type {Data, Entry};
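A quick usage sketch of the new branch, assuming the minimal `Data` shape implied by the surrounding diff (the cast and the test name are illustrative):

```ts
import printToConsole, {type Data} from './console';

// Minimal empty comparison; field names are assumed from the surrounding diff.
const data = {significance: [], meaningless: [], errors: [], warnings: []} as unknown as Data;

printToConsole(data, ['exampleTest']);
// Final line printed:
// ⚠️ Some tests did not pass successfully, so some results are omitted from the final report: exampleTest
```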
11 changes: 8 additions & 3 deletions tests/e2e/compare/output/markdown.ts
@@ -67,7 +67,7 @@ const buildSummaryTable = (entries: Entry[], collapse = false) => {
return collapse ? collapsibleSection('Show entries', content) : content;
};

const buildMarkdown = (data: Data) => {
const buildMarkdown = (data: Data, skippedTests: string[]) => {
let result = '## Performance Comparison Report 📊';

if (data.errors?.length) {
@@ -92,6 +92,10 @@ const buildMarkdown = (data: Data) => {
result += `\n${buildDetailsTable(data.meaningless)}`;
result += '\n';

if (skippedTests.length > 0) {
result += `⚠️ Some tests did not pass successfully, so some results are omitted from the final report: ${skippedTests.join(', ')}`;
}

return result;
};

@@ -109,8 +113,9 @@ const writeToFile = (filePath: string, content: string) =>
throw error;
});

const writeToMarkdown = (filePath: string, data: Data) => {
const markdown = buildMarkdown(data);
const writeToMarkdown = (filePath: string, data: Data, skippedTests: string[]) => {
const markdown = buildMarkdown(data, skippedTests);
Logger.info('Markdown was built successfully, writing to file...', markdown);
return writeToFile(filePath, markdown).catch((error) => {
console.error(error);
throw error;
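The appended warning line is exactly what the new `checkIfSkippedTestsDetected` workflow step greps for, so the markdown report doubles as the signal channel between the test runner and CI. A sketch, assuming `buildMarkdown` is exported as the unit test below implies:

```ts
import {buildMarkdown} from './markdown';

// Minimal empty comparison data; the cast marks the assumed shape.
const data = {significance: [], meaningless: []} as unknown as Parameters<typeof buildMarkdown>[0];

const report = buildMarkdown(data, ['exampleTest']); // 'exampleTest' is illustrative
// report now ends with:
// ⚠️ Some tests did not pass successfully, so some results are omitted from the final report: exampleTest
```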
182 changes: 108 additions & 74 deletions tests/e2e/testRunner.ts
@@ -123,6 +123,20 @@ const runTests = async (): Promise<void> => {
}
};

const skippedTests: string[] = [];
const clearTestResults = (test: TestConfig) => {
skippedTests.push(test.name);

Object.keys(results).forEach((branch: string) => {
Object.keys(results[branch]).forEach((metric: string) => {
if (!metric.startsWith(test.name)) {
return;
}
delete results[branch][metric];
});
});
};

// Collect results while tests are being executed
server.addTestResultListener((testResult) => {
const {isCritical = true} = testResult;
@@ -151,7 +165,7 @@ const runTests = async (): Promise<void> => {
await launchApp('android', appPackage, config.ACTIVITY_PATH, launchArgs);

const {promise, resetTimeout} = withFailTimeout(
new Promise<void>((resolve) => {
new Promise<void>((resolve, reject) => {
const removeListener = server.addTestDoneListener(() => {
Logger.success(iterationText);

@@ -201,9 +215,14 @@ const runTests = async (): Promise<void> => {
removeListener();
// something went wrong, let's wait a little bit and try again
await sleep(5000);
// simply restart the test
await runTestIteration(appPackage, iterationText, branch, launchArgs);
resolve();
try {
// simply restart the test
await runTestIteration(appPackage, iterationText, branch, launchArgs);
resolve();
} catch (e) {
// okay, give up and rethrow the exception
reject(e);
}
},
});
}),
@@ -244,88 +263,103 @@ const runTests = async (): Promise<void> => {
server.setTestConfig(test as TestConfig);
server.setReadyToAcceptTestResults(false);

const warmupText = `Warmup for test '${test?.name}' [${testIndex + 1}/${tests.length}]`;

// For each warmup we allow the warmup to fail three times before we stop the warmup run:
const errorCountWarmupRef = {
errorCount: 0,
allowedExceptions: 3,
};

// by default we do 2 warmups:
// - first warmup to go through the login flow
// - second warmup to run the actual flow and cache network requests
const iterations = 2;
for (let i = 0; i < iterations; i++) {
try {
// Warmup the main app:
await runTestIteration(config.MAIN_APP_PACKAGE, `[MAIN] ${warmupText}. Iteration ${i + 1}/${iterations}`, config.BRANCH_MAIN);

// Warmup the delta app:
await runTestIteration(config.DELTA_APP_PACKAGE, `[DELTA] ${warmupText}. Iteration ${i + 1}/${iterations}`, config.BRANCH_DELTA);
} catch (e) {
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
Logger.error(`Warmup failed with error: ${e}`);

errorCountWarmupRef.errorCount++;
i--; // repeat warmup again

if (errorCountWarmupRef.errorCount === errorCountWarmupRef.allowedExceptions) {
Logger.error("There was an error running the warmup and we've reached the maximum number of allowed exceptions. Stopping the test run.");
throw e;
try {
const warmupText = `Warmup for test '${test?.name}' [${testIndex + 1}/${tests.length}]`;

// For each warmup we allow the warmup to fail three times before we stop the warmup run:
const errorCountWarmupRef = {
errorCount: 0,
allowedExceptions: 3,
};

// by default we do 2 warmups:
// - first warmup to go through the login flow
// - second warmup to run the actual flow and cache network requests
const iterations = 2;
for (let i = 0; i < iterations; i++) {
try {
// Warmup the main app:
await runTestIteration(config.MAIN_APP_PACKAGE, `[MAIN] ${warmupText}. Iteration ${i + 1}/${iterations}`, config.BRANCH_MAIN);

// Warmup the delta app:
await runTestIteration(config.DELTA_APP_PACKAGE, `[DELTA] ${warmupText}. Iteration ${i + 1}/${iterations}`, config.BRANCH_DELTA);
} catch (e) {
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
Logger.error(`Warmup failed with error: ${e}`);

MeasureUtils.stop('error-warmup');
server.clearAllTestDoneListeners();

errorCountWarmupRef.errorCount++;
i--; // repeat warmup again

if (errorCountWarmupRef.errorCount === errorCountWarmupRef.allowedExceptions) {
Logger.error("There was an error running the warmup and we've reached the maximum number of allowed exceptions. Stopping the test run.");
throw e;
}
}
}
}

server.setReadyToAcceptTestResults(true);

// For each test case we allow the test to fail three times before we stop the test run:
const errorCountRef = {
errorCount: 0,
allowedExceptions: 3,
};

// We run each test multiple times to average out the results
for (let testIteration = 0; testIteration < config.RUNS; testIteration++) {
const onError = (e: Error) => {
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
Logger.error(`Unexpected error during test execution: ${e}. `);
MeasureUtils.stop('error');
server.clearAllTestDoneListeners();
errorCountRef.errorCount += 1;
if (testIteration === 0 || errorCountRef.errorCount === errorCountRef.allowedExceptions) {
Logger.error("There was an error running the test and we've reached the maximum number of allowed exceptions. Stopping the test run.");
// If the error happened on the first test run, the test is broken
// and we should not continue running it. Or if we have reached the
// maximum number of allowed exceptions, we should stop the test run.
throw e;
}
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
Logger.warn(`There was an error running the test. Continuing the test run. Error: ${e}`);
};
server.setReadyToAcceptTestResults(true);

const launchArgs = {
mockNetwork: true,
// For each test case we allow the test to fail three times before we stop the test run:
const errorCountRef = {
errorCount: 0,
allowedExceptions: 3,
};

const iterationText = `Test '${test?.name}' [${testIndex + 1}/${tests.length}], iteration [${testIteration + 1}/${config.RUNS}]`;
const mainIterationText = `[MAIN] ${iterationText}`;
const deltaIterationText = `[DELTA] ${iterationText}`;
try {
// Run the test on the main app:
await runTestIteration(config.MAIN_APP_PACKAGE, mainIterationText, config.BRANCH_MAIN, launchArgs);

// Run the test on the delta app:
await runTestIteration(config.DELTA_APP_PACKAGE, deltaIterationText, config.BRANCH_DELTA, launchArgs);
} catch (e) {
onError(e as Error);
// We run each test multiple times to average out the results
for (let testIteration = 0; testIteration < config.RUNS; testIteration++) {
const onError = (e: Error) => {
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
Logger.error(`Unexpected error during test execution: ${e}. `);
MeasureUtils.stop('error');
server.clearAllTestDoneListeners();
errorCountRef.errorCount += 1;
if (testIteration === 0 || errorCountRef.errorCount === errorCountRef.allowedExceptions) {
Logger.error("There was an error running the test and we've reached the maximum number of allowed exceptions. Stopping the test run.");
// If the error happened on the first test run, the test is broken
// and we should not continue running it. Or if we have reached the
// maximum number of allowed exceptions, we should stop the test run.
throw e;
}
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
Logger.warn(`There was an error running the test. Continuing the test run. Error: ${e}`);
};

const launchArgs = {
mockNetwork: true,
};

const iterationText = `Test '${test?.name}' [${testIndex + 1}/${tests.length}], iteration [${testIteration + 1}/${config.RUNS}]`;
const mainIterationText = `[MAIN] ${iterationText}`;
const deltaIterationText = `[DELTA] ${iterationText}`;
try {
// Run the test on the main app:
await runTestIteration(config.MAIN_APP_PACKAGE, mainIterationText, config.BRANCH_MAIN, launchArgs);

// Run the test on the delta app:
await runTestIteration(config.DELTA_APP_PACKAGE, deltaIterationText, config.BRANCH_DELTA, launchArgs);
} catch (e) {
onError(e as Error);
}
}
} catch (exception) {
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
Logger.warn(`Test ${test?.name} cannot be finished due to an error: ${exception}`);
clearTestResults(test as TestConfig);
}
}

// Calculate statistics and write them to our work file
Logger.info('Calculating statistics and writing results');
compare(results.main, results.delta, `${config.OUTPUT_DIR}/output.md`, 'all', metricForTest);
await compare(results.main, results.delta, {
outputFile: `${config.OUTPUT_DIR}/output.md`,
outputFormat: 'all',
metricForTest,
skippedTests,
});
Logger.info('Finished calculating statistics and writing results, stopping the test server');

await server.stop();
};
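The core of the change is `clearTestResults`: when a test gives up after its allowed exceptions, every metric whose key starts with that test's name is dropped from both branches so partial measurements cannot skew the comparison, and the name is recorded for the report. A standalone sketch of that pruning (data shapes and test names are illustrative):

```ts
// Results keyed by branch, then by metric name prefixed with the test name.
type Results = Record<string, Record<string, number[]>>;

const results: Results = {
    main: {'Login TTI': [123], 'Login render': [45], 'Search TTI': [67]},
    delta: {'Login TTI': [120], 'Login render': [44], 'Search TTI': [70]},
};

function clearTestResults(resultsByBranch: Results, testName: string) {
    Object.keys(resultsByBranch).forEach((branch) => {
        Object.keys(resultsByBranch[branch]).forEach((metric) => {
            // Keep metrics belonging to other tests.
            if (!metric.startsWith(testName)) {
                return;
            }
            delete resultsByBranch[branch][metric];
        });
    });
}

clearTestResults(results, 'Login');
// results now holds only the 'Search TTI' metrics on both branches.
```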
2 changes: 1 addition & 1 deletion tests/unit/E2EMarkdownTest.ts
@@ -13,6 +13,6 @@ const results = {
describe('markdown formatter', () => {
it('should format significant changes properly', () => {
const data = compareResults(results.main, results.delta, {commentLinking: 'ms'});
expect(buildMarkdown(data)).toMatchSnapshot();
expect(buildMarkdown(data, [])).toMatchSnapshot();
});
});
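A natural follow-up (not part of this commit) would pin down the new warning text as well; a sketch of such a test inside the same describe block:

```ts
it('should append the skipped-tests warning when tests were skipped', () => {
    const data = compareResults(results.main, results.delta, {commentLinking: 'ms'});
    expect(buildMarkdown(data, ['exampleTest'])).toContain('⚠️ Some tests did not pass successfully');
});
```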
