diff --git a/.github/workflows/e2ePerformanceTests.yml b/.github/workflows/e2ePerformanceTests.yml
index c92ab83d1178..ea84be58e145 100644
--- a/.github/workflows/e2ePerformanceTests.yml
+++ b/.github/workflows/e2ePerformanceTests.yml
@@ -229,6 +229,36 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ github.token }}

+      - name: Check if any tests were skipped
+        id: checkIfSkippedTestsDetected
+        run: |
+          if grep -q '⚠️' "./Host_Machine_Files/\$WORKING_DIRECTORY/output.md"; then
+            # Create an output on the GH action indicating that tests were skipped:
+            echo "skippedTestsDetected=true" >> "$GITHUB_OUTPUT"
+          else
+            echo "skippedTestsDetected=false" >> "$GITHUB_OUTPUT"
+            echo '✅ no skipped tests detected'
+          fi
+        env:
+          GITHUB_TOKEN: ${{ github.token }}
+
+      - name: 'Announce skipped tests in Slack'
+        if: ${{ steps.checkIfSkippedTestsDetected.outputs.skippedTestsDetected == 'true' }}
+        uses: 8398a7/action-slack@v3
+        with:
+          status: custom
+          custom_payload: |
+            {
+              channel: '#e2e-announce',
+              attachments: [{
+                color: 'danger',
+                text: `⚠️ ${process.env.AS_REPO} Some E2E tests were skipped in the workflow ⚠️`,
+              }]
+            }
+        env:
+          GITHUB_TOKEN: ${{ github.token }}
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK }}
+
       - name: 'Announce regression in Slack'
         if: ${{ steps.checkIfRegressionDetected.outputs.performanceRegressionDetected == 'true' }}
         uses: 8398a7/action-slack@v3
diff --git a/tests/e2e/compare/compare.ts b/tests/e2e/compare/compare.ts
index 40a728545668..ad38c249bff3 100644
--- a/tests/e2e/compare/compare.ts
+++ b/tests/e2e/compare/compare.ts
@@ -91,16 +91,23 @@ function compareResults(baselineEntries: Metric | string, compareEntries: Metric
     };
 }

-export default (main: Metric | string, delta: Metric | string, outputFile: string, outputFormat = 'all', metricForTest = {}) => {
+type Options = {
+    outputFile: string;
+    outputFormat: 'console' | 'markdown' | 'all';
+    metricForTest: Record<string, string>;
+    skippedTests: string[];
+};
+
+export default (main: Metric | string, delta: Metric | string, {outputFile, outputFormat = 'all', metricForTest = {}, skippedTests}: Options) => {
     // IMPORTANT NOTE: make sure you are passing the main/baseline results first, then the delta/compare results:
     const outputData = compareResults(main, delta, metricForTest);

     if (outputFormat === 'console' || outputFormat === 'all') {
-        printToConsole(outputData);
+        printToConsole(outputData, skippedTests);
     }
     if (outputFormat === 'markdown' || outputFormat === 'all') {
-        return writeToMarkdown(outputFile, outputData);
+        return writeToMarkdown(outputFile, outputData, skippedTests);
     }
 };

 export {compareResults};
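One detail worth flagging in the new compare.ts signature: the destructuring defaults (outputFormat = 'all', metricForTest = {}) only take effect when the property is undefined, and Options declares both fields as required, so every caller still has to spell them out. If the intent is for callers to be able to omit them, the type would need to mark them optional, roughly like this (a sketch, not part of the PR):

    type Options = {
        outputFile: string;
        outputFormat?: 'console' | 'markdown' | 'all';
        metricForTest?: Record<string, string>;
        skippedTests: string[];
    };

With that, the call site in testRunner.ts (the last file in this diff) could drop outputFormat: 'all' and rely on the default.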
diff --git a/tests/e2e/compare/output/console.ts b/tests/e2e/compare/output/console.ts
index 41ae5a4e0ccf..2e303ffa1538 100644
--- a/tests/e2e/compare/output/console.ts
+++ b/tests/e2e/compare/output/console.ts
@@ -26,7 +26,7 @@ const printRegularLine = (entry: Entry) => {
 /**
  * Prints the result simply to console.
  */
-export default (data: Data) => {
+export default (data: Data, skippedTests: string[]) => {
     // No need to log errors or warnings as these were be logged on the fly
     console.debug('');
     console.debug('❇️ Performance comparison results:');
@@ -38,6 +38,10 @@ export default (data: Data) => {
     data.meaningless.forEach(printRegularLine);

     console.debug('');
+
+    if (skippedTests.length > 0) {
+        console.debug(`⚠️ Some tests did not pass successfully, so some results are omitted from the final report: ${skippedTests.join(', ')}`);
+    }
 };

 export type {Data, Entry};
diff --git a/tests/e2e/compare/output/markdown.ts b/tests/e2e/compare/output/markdown.ts
index 32af6c5e22ad..bd32d2d99ab2 100644
--- a/tests/e2e/compare/output/markdown.ts
+++ b/tests/e2e/compare/output/markdown.ts
@@ -67,7 +67,7 @@ const buildSummaryTable = (entries: Entry[], collapse = false) => {
     return collapse ? collapsibleSection('Show entries', content) : content;
 };

-const buildMarkdown = (data: Data) => {
+const buildMarkdown = (data: Data, skippedTests: string[]) => {
     let result = '## Performance Comparison Report 📊';

     if (data.errors?.length) {
@@ -92,6 +92,10 @@ const buildMarkdown = (data: Data) => {
     result += `\n${buildDetailsTable(data.meaningless)}`;
     result += '\n';

+    if (skippedTests.length > 0) {
+        result += `⚠️ Some tests did not pass successfully, so some results are omitted from the final report: ${skippedTests.join(', ')}`;
+    }
+
     return result;
 };

@@ -109,8 +113,9 @@ const writeToFile = (filePath: string, content: string) =>
         throw error;
     });

-const writeToMarkdown = (filePath: string, data: Data) => {
-    const markdown = buildMarkdown(data);
+const writeToMarkdown = (filePath: string, data: Data, skippedTests: string[]) => {
+    const markdown = buildMarkdown(data, skippedTests);
+    Logger.info('Markdown was built successfully, writing to file...', markdown);
     return writeToFile(filePath, markdown).catch((error) => {
         console.error(error);
         throw error;
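Both output writers now take skippedTests and append the same warning when it is non-empty. A self-contained sketch of what that note ends up looking like (the test names below are invented):

    // Mirrors the string built in console.ts and markdown.ts above.
    const skippedTests = ['Open chat TTI', 'Report typing TTI'];

    if (skippedTests.length > 0) {
        const note = `⚠️ Some tests did not pass successfully, so some results are omitted from the final report: ${skippedTests.join(', ')}`;
        console.debug(note);
        // ⚠️ Some tests did not pass successfully, so some results are omitted from the final report: Open chat TTI, Report typing TTI
    }

Note that the new workflow step greps output.md for the ⚠️ character, so any future rewording of this message should keep that prefix in place.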
diff --git a/tests/e2e/testRunner.ts b/tests/e2e/testRunner.ts
index 58fb6b9cdae1..5485385ad8c9 100644
--- a/tests/e2e/testRunner.ts
+++ b/tests/e2e/testRunner.ts
@@ -123,6 +123,20 @@ const runTests = async (): Promise<void> => {
         }
     };

+    const skippedTests: string[] = [];
+    const clearTestResults = (test: TestConfig) => {
+        skippedTests.push(test.name);
+
+        Object.keys(results).forEach((branch: string) => {
+            Object.keys(results[branch]).forEach((metric: string) => {
+                if (!metric.startsWith(test.name)) {
+                    return;
+                }
+                delete results[branch][metric];
+            });
+        });
+    };
+
     // Collect results while tests are being executed
     server.addTestResultListener((testResult) => {
         const {isCritical = true} = testResult;
@@ -151,7 +165,7 @@ const runTests = async (): Promise<void> => {
         await launchApp('android', appPackage, config.ACTIVITY_PATH, launchArgs);

         const {promise, resetTimeout} = withFailTimeout(
-            new Promise<void>((resolve) => {
+            new Promise<void>((resolve, reject) => {
                 const removeListener = server.addTestDoneListener(() => {
                     Logger.success(iterationText);

@@ -201,9 +215,14 @@
                         removeListener();
                         // something went wrong, let's wait a little bit and try again
                         await sleep(5000);
-                        // simply restart the test
-                        await runTestIteration(appPackage, iterationText, branch, launchArgs);
-                        resolve();
+                        try {
+                            // simply restart the test
+                            await runTestIteration(appPackage, iterationText, branch, launchArgs);
+                            resolve();
+                        } catch (e) {
+                            // okay, give up and propagate the exception further
+                            reject(e);
+                        }
                     },
                 });
             }),
Stopping the test run."); - // If the error happened on the first test run, the test is broken - // and we should not continue running it. Or if we have reached the - // maximum number of allowed exceptions, we should stop the test run. - throw e; - } - // eslint-disable-next-line @typescript-eslint/restrict-template-expressions - Logger.warn(`There was an error running the test. Continuing the test run. Error: ${e}`); - }; + server.setReadyToAcceptTestResults(true); - const launchArgs = { - mockNetwork: true, + // For each test case we allow the test to fail three times before we stop the test run: + const errorCountRef = { + errorCount: 0, + allowedExceptions: 3, }; - const iterationText = `Test '${test?.name}' [${testIndex + 1}/${tests.length}], iteration [${testIteration + 1}/${config.RUNS}]`; - const mainIterationText = `[MAIN] ${iterationText}`; - const deltaIterationText = `[DELTA] ${iterationText}`; - try { - // Run the test on the main app: - await runTestIteration(config.MAIN_APP_PACKAGE, mainIterationText, config.BRANCH_MAIN, launchArgs); - - // Run the test on the delta app: - await runTestIteration(config.DELTA_APP_PACKAGE, deltaIterationText, config.BRANCH_DELTA, launchArgs); - } catch (e) { - onError(e as Error); + // We run each test multiple time to average out the results + for (let testIteration = 0; testIteration < config.RUNS; testIteration++) { + const onError = (e: Error) => { + // eslint-disable-next-line @typescript-eslint/restrict-template-expressions + Logger.error(`Unexpected error during test execution: ${e}. `); + MeasureUtils.stop('error'); + server.clearAllTestDoneListeners(); + errorCountRef.errorCount += 1; + if (testIteration === 0 || errorCountRef.errorCount === errorCountRef.allowedExceptions) { + Logger.error("There was an error running the test and we've reached the maximum number of allowed exceptions. Stopping the test run."); + // If the error happened on the first test run, the test is broken + // and we should not continue running it. Or if we have reached the + // maximum number of allowed exceptions, we should stop the test run. + throw e; + } + // eslint-disable-next-line @typescript-eslint/restrict-template-expressions + Logger.warn(`There was an error running the test. Continuing the test run. 
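The core addition in testRunner.ts is clearTestResults: when a test cannot be completed, its name is recorded in skippedTests and every metric whose key starts with that name is dropped for both branches, so partial data never reaches compare(). A standalone sketch of that pruning (the results shape and the names here are assumptions for illustration, not the suite's real types):

    type Results = Record<string, Record<string, number[]>>;

    const results: Results = {
        main: {'Open chat TTI': [1200], 'Open chat FPS': [59], 'Report typing TTI': [90]},
        delta: {'Open chat TTI': [1180], 'Report typing TTI': [88]},
    };
    const skippedTests: string[] = [];

    const clearTestResults = (testName: string) => {
        skippedTests.push(testName);

        Object.keys(results).forEach((branch) => {
            Object.keys(results[branch]).forEach((metric) => {
                if (!metric.startsWith(testName)) {
                    return;
                }
                delete results[branch][metric];
            });
        });
    };

    clearTestResults('Open chat');
    // results now only holds the 'Report typing TTI' entries for both branches,
    // and skippedTests === ['Open chat'] is what feeds the ⚠️ note in the report.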
diff --git a/tests/unit/E2EMarkdownTest.ts b/tests/unit/E2EMarkdownTest.ts
index 74c5659c9487..766ec708f31b 100644
--- a/tests/unit/E2EMarkdownTest.ts
+++ b/tests/unit/E2EMarkdownTest.ts
@@ -13,6 +13,6 @@ const results = {
 describe('markdown formatter', () => {
     it('should format significant changes properly', () => {
         const data = compareResults(results.main, results.delta, {commentLinking: 'ms'});
-        expect(buildMarkdown(data)).toMatchSnapshot();
+        expect(buildMarkdown(data, [])).toMatchSnapshot();
     });
 });
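The updated snapshot test only exercises the empty case. If coverage for the new footer is wanted, a companion case inside the same describe block could look roughly like this (a sketch that reuses the existing fixture and assumes buildMarkdown stays exported as it is imported here; the skipped-test name is arbitrary):

    it('should list skipped tests in the report footer', () => {
        const data = compareResults(results.main, results.delta, {commentLinking: 'ms'});
        const markdown = buildMarkdown(data, ['commentLinking']);

        expect(markdown).toContain('⚠️ Some tests did not pass successfully');
        expect(markdown).toContain('commentLinking');
    });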