From accf52fa213d4170a782d2e4f98a9efb7c3e14aa Mon Sep 17 00:00:00 2001
From: Laszlo Voros
Date: Mon, 5 Aug 2024 11:43:59 +0200
Subject: [PATCH] Improve CI Test with jit-no-reg-alloc

Signed-off-by: Laszlo Voros
---
 .github/workflows/actions.yml     | 35 +++++++++++++++++++++++++++++--
 test/wasmBenchmarker/benchmark.py | 28 ++++++++++++-------------
 tools/run-tests.py                | 30 +++++++++++++++++++++-----
 3 files changed, 72 insertions(+), 21 deletions(-)

diff --git a/.github/workflows/actions.yml b/.github/workflows/actions.yml
index a883ed6f5..48f2685e2 100644
--- a/.github/workflows/actions.yml
+++ b/.github/workflows/actions.yml
@@ -89,8 +89,10 @@ jobs:
           rm $GITHUB_WORKSPACE/test/wasm-spec/core/call_indirect.wast
           $RUNNER --engine="$GITHUB_WORKSPACE/out/clang/x86/walrus"
           $RUNNER --jit --engine="$GITHUB_WORKSPACE/out/clang/x64/walrus"
-          $RUNNER --jit --engine="$GITHUB_WORKSPACE/out/pure/x86/walrus" basic-tests wasm-test-core jit
+          $RUNNER --no-reg-alloc --engine="$GITHUB_WORKSPACE/out/clang/x64/walrus"
           $RUNNER --engine="$GITHUB_WORKSPACE/out/pure/x64/walrus" basic-tests wasm-test-core jit
+          $RUNNER --jit --engine="$GITHUB_WORKSPACE/out/pure/x86/walrus" basic-tests wasm-test-core jit
+          $RUNNER --no-reg-alloc --engine="$GITHUB_WORKSPACE/out/pure/x86/walrus" basic-tests wasm-test-core jit
 
   build-test-on-x86:
     runs-on: ubuntu-latest
@@ -112,6 +114,7 @@
         run: |
           $RUNNER --engine="$GITHUB_WORKSPACE/out/linux/x86/walrus"
           $RUNNER --jit --engine="$GITHUB_WORKSPACE/out/linux/x86/walrus"
+          $RUNNER --no-reg-alloc --engine="$GITHUB_WORKSPACE/out/linux/x86/walrus"
 
   build-test-on-x64:
     runs-on: ubuntu-latest
@@ -133,6 +136,7 @@
         run: |
           $RUNNER --engine="$GITHUB_WORKSPACE/out/linux/x64/walrus"
           $RUNNER --jit --engine="$GITHUB_WORKSPACE/out/linux/x64/walrus"
+          $RUNNER --no-reg-alloc --engine="$GITHUB_WORKSPACE/out/linux/x64/walrus"
 
   build-on-x64-with-perf:
     runs-on: ubuntu-latest
@@ -180,8 +184,10 @@
           rm ./test/wasm-spec/core/call_indirect.wast
           python3 ./tools/run-tests.py --engine="./out/debug/walrus"
           python3 ./tools/run-tests.py --jit --engine="./out/debug/walrus"
+          python3 ./tools/run-tests.py --no-reg-alloc --engine="./out/debug/walrus"
           python3 ./tools/run-tests.py --engine="./out/pure/walrus" basic-tests wasm-test-core jit
           python3 ./tools/run-tests.py --jit --engine="./out/pure/walrus" basic-tests wasm-test-core jit
+          python3 ./tools/run-tests.py --no-reg-alloc --engine="./out/pure/walrus" basic-tests wasm-test-core jit
 
   build-test-on-aarch64:
     runs-on: ubuntu-latest
@@ -209,8 +215,10 @@
           ninja -Cout/pure
           python3 ./tools/run-tests.py --engine="./out/release/walrus"
           python3 ./tools/run-tests.py --jit --engine="./out/release/walrus"
+          python3 ./tools/run-tests.py --no-reg-alloc --engine="./out/release/walrus"
           python3 ./tools/run-tests.py --engine="./out/pure/walrus" basic-tests wasm-test-core jit
           python3 ./tools/run-tests.py --jit --engine="./out/pure/walrus" basic-tests wasm-test-core jit
+          python3 ./tools/run-tests.py --no-reg-alloc --engine="./out/pure/walrus" basic-tests wasm-test-core jit
 
   test-on-windows-x86-x64:
     runs-on: windows-2022
@@ -272,7 +280,7 @@
         run: |
           $RUNNER --engine="$GITHUB_WORKSPACE/out/extended/walrus" wasm-test-extended
 
-  build-test-performance:
+  build-test-performance-x64:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
@@ -295,6 +303,29 @@
         run: |
           test/wasmBenchmarker/benchmark.py --engines $GITHUB_WORKSPACE/out/linux/x64/walrus --iterations 2 --verbose --summary --results i j2i n2i j n2j
 
+  build-test-performance-x86:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: true
+      - name: Install Packages
+        run: |
+          sudo apt update
+          sudo apt install -y ninja-build gcc-multilib g++-multilib
+          sudo pip install pandas
+          sudo pip install py-markdown-table
+          sudo pip install tqdm
+      - name: Build x86
+        env:
+          BUILD_OPTIONS: -DWALRUS_ARCH=x86 -DWALRUS_HOST=linux -DWALRUS_MODE=release -DWALRUS_OUTPUT=shell -GNinja
+        run: |
+          cmake -H. -Bout/linux/x86 $BUILD_OPTIONS
+          ninja -Cout/linux/x86
+      - name: Run Tests
+        run: |
+          test/wasmBenchmarker/benchmark.py --engines $GITHUB_WORKSPACE/out/linux/x86/walrus --iterations 2 --verbose --summary --results i j2i n2i j n2j
+
   built-test-wasm-c-api:
     runs-on: ubuntu-latest
     steps:
diff --git a/test/wasmBenchmarker/benchmark.py b/test/wasmBenchmarker/benchmark.py
index a2934fa52..10eeb143c 100755
--- a/test/wasmBenchmarker/benchmark.py
+++ b/test/wasmBenchmarker/benchmark.py
@@ -115,17 +115,17 @@ def parse_args():
     return args
 
 engine_path = 0
-def engine_display_name(engine): 
+def engine_display_name(engine):
     if not engine in engine_map:
         if engine_path == -2:
             engine_map[engine] = str(len(engine_map))
-        
+
         if engine_path == -1:
             engine_map[engine] = ""
-        
+
         if engine_path == 0:
             engine_map[engine] = engine
-        
+
         if engine_path > 0:
             engine_map[engine] = "/".join(engine.split("/")[0-engine_path:])
 
@@ -143,7 +143,7 @@ def check_programs(engines, verbose):
 
 
 def get_emcc(verbose, system_emcc=True):
-    emcc_path = None 
+    emcc_path = None
     if system_emcc and os.system("emcc --version >/dev/null") == 0:
         if (verbose):
             print("Emscripten already installed on system")
@@ -319,7 +319,7 @@ def run_tests(path, test_names, engines, number_of_runs, mem, no_time, jit, jit_
             if sum(time_results[engine["name"]]) < 0:
                 record[engine["name"]] = -1
                 continue
-            try: 
+            try:
                 value = (sum(time_results[engine["name"]]) / len(time_results[engine["name"]])) / 1e9
             except ZeroDivisionError:
                 value = -1
@@ -332,7 +332,7 @@ def run_tests(path, test_names, engines, number_of_runs, mem, no_time, jit, jit_
             if sum(mem_results[engine["name"]]) < 0:
                 record[engine["name"]] = -1
                 continue
-            try: 
+            try:
                 value = (sum(mem_results[engine["name"]]) / len(mem_results[engine["name"]]))
             except ZeroDivisionError:
                 value = -1
@@ -345,7 +345,7 @@ def generate_report(data, summary, file_name=None):
     if summary:
         df = pd.DataFrame.from_records(data)
 
-        for col in df.columns: 
+        for col in df.columns:
             if col == "test":
                 continue
 
@@ -353,7 +353,7 @@
             df[col] = df[col].str.split(' ').str[-1].str[1:-2]
             df[col] = df[col].astype(float)
 
-        
+
         df = df.describe().loc[["mean"]].to_dict('records')
         df[0]["test"] = "MEAN"
         separator = [{}]
@@ -368,7 +368,7 @@ def generate_report(data, summary, file_name=None):
             print("\n\n# Engines")
             for engine, serial in engine_map.items():
                 print(f"{serial}: {engine}")
-        
+
         return
     with open(file_name, "w") as file:
         if file_name.endswith(".csv"):
@@ -388,7 +388,7 @@
                 line += "\n"
                 file.write(line)
 
-        
+
         if engine_path == -2:
             file.write("\n\n# Engines\n")
             for engine, serial in engine_map.items():
@@ -415,7 +415,7 @@ def compare(data, engines, jit_to_interpreter, jit_no_reg_alloc_to_interpreter,
             jit_no_reg_alloc = data[i][f"{engine_display_name(engine)} JIT_NO_REG_ALLOC"]
             interpreter = data[i][f"{engine_display_name(engine)} INTERPRETER"]
             data[i][f"{engine_display_name(engine)} INTERPRETER/JIT_NO_REG_ALLOC"] = f"{jit_no_reg_alloc} ({'{:.2f}'.format(-1 if float(interpreter) < 0 or float(jit_no_reg_alloc) < 0 else float(interpreter) / float(jit_no_reg_alloc))}x)"
-        
+
         if jit_no_reg_alloc_to_jit:
             jit_no_reg_alloc = data[i][f"{engine_display_name(engine)} JIT_NO_REG_ALLOC"]
             jit = data[i][f"{engine_display_name(engine)} JIT"]
@@ -430,7 +430,7 @@ def compare(data, engines, jit_to_interpreter, jit_no_reg_alloc_to_interpreter,
             interpreter = data[i][f"{engine_display_name(engine)} INTERPRETER"]
             jit_no_reg_alloc = data[i][f"{engine_display_name(engine)} JIT_NO_REG_ALLOC"]
             data[i][f"{engine_display_name(engine)} JIT_NO_REG_ALLOC/INTERPRETER"] = f"{interpreter} ({'{:.2f}'.format(-1 if float(jit_no_reg_alloc) < 0 or float(interpreter) < 0 else float(jit_no_reg_alloc) / float(interpreter))}x)"
-        
+
         if jit_to_jit_no_reg_alloc:
             jit = data[i][f"{engine_display_name(engine)} JIT"]
             jit_no_reg_alloc = data[i][f"{engine_display_name(engine)} JIT_NO_REG_ALLOC"]
@@ -499,6 +499,6 @@ def main():
     if len(errorList) > 0:
         print(errorList)
         exit(1)
-    
+
 if __name__ == "__main__":
     main()
diff --git a/tools/run-tests.py b/tools/run-tests.py
index 00767f94f..de5227a38 100755
--- a/tools/run-tests.py
+++ b/tools/run-tests.py
@@ -47,6 +47,7 @@
 DEFAULT_RUNNERS = []
 JIT_EXCLUDE_FILES = []
 jit = False
+jit_no_reg_alloc = False
 
 
 class runner(object):
@@ -64,12 +65,18 @@ def __call__(self, fn):
 def _run_wast_tests(engine, files, is_fail):
     fails = 0
     for file in files:
-        if jit:
-            filename = os.path.basename(file) 
+        if jit or jit_no_reg_alloc:
+            filename = os.path.basename(file)
             if filename in JIT_EXCLUDE_FILES:
                 continue
 
-        proc = Popen([engine, "--mapdirs", "./test/wasi", "/var", file], stdout=PIPE) if not jit else Popen([engine, "--mapdirs", "./test/wasi", "/var", "--jit", file], stdout=PIPE)
+        openParams = [engine, "--mapdirs", "./test/wasi", "/var", file]
+        if jit:
+            openParams = [engine, "--mapdirs", "./test/wasi", "/var", "--jit", file]
+        elif jit_no_reg_alloc:
+            openParams = [engine, "--mapdirs", "./test/wasi", "/var", "--jit", "--jit-no-reg-alloc", file]
+
+        proc = Popen(openParams, stdout=PIPE)
         out, _ = proc.communicate()
 
         if is_fail and proc.returncode or not is_fail and not proc.returncode:
@@ -178,10 +185,18 @@ def main():
     parser.add_argument('suite', metavar='SUITE', nargs='*', default=sorted(DEFAULT_RUNNERS),
                         help='test suite to run (%s; default: %s)' % (', '.join(sorted(RUNNERS.keys())), ' '.join(sorted(DEFAULT_RUNNERS))))
     parser.add_argument('--jit', action='store_true', help='test with JIT')
+    parser.add_argument('--no-reg-alloc', action='store_true', help='test with JIT without register allocation')
     args = parser.parse_args()
     global jit
     jit = args.jit
-    if jit:
+
+    global jit_no_reg_alloc
+    jit_no_reg_alloc = args.no_reg_alloc
+
+    if jit and jit_no_reg_alloc:
+        parser.error('jit and no-reg-alloc cannot be used together')
+
+    if jit or jit_no_reg_alloc:
         exclude_list_file = join(PROJECT_SOURCE_DIR, 'tools', 'jit_exclude_list.txt')
         with open(exclude_list_file) as f:
             global JIT_EXCLUDE_FILES
@@ -195,7 +210,12 @@ def main():
     success, fail = [], []
 
     for suite in args.suite:
-        print(COLOR_PURPLE + f'running test suite{ " with jit" if jit else ""}: ' + suite + COLOR_RESET)
+        text = ""
+        if jit:
+            text = " with jit"
+        elif jit_no_reg_alloc:
+            text = " with jit without register allocation"
+        print(COLOR_PURPLE + f'running test suite{text}: ' + suite + COLOR_RESET)
        try:
             RUNNERS[suite](args.engine)
             success += [suite]
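Usage note: --no-reg-alloc is a runner-level switch; as the run-tests.py hunk
above shows, it invokes the engine with "--jit --jit-no-reg-alloc", while
--jit alone passes only "--jit". Assuming a local release build at
./out/release/walrus, the three modes exercised by the CI jobs can be
reproduced with:

    python3 ./tools/run-tests.py --engine="./out/release/walrus"                # interpreter
    python3 ./tools/run-tests.py --jit --engine="./out/release/walrus"          # JIT
    python3 ./tools/run-tests.py --no-reg-alloc --engine="./out/release/walrus" # JIT, no register allocation

Both JIT modes load the same exclude list (tools/jit_exclude_list.txt), so a
wast file skipped under --jit is also skipped under --no-reg-alloc.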
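A note on the flag-validation design: the patch rejects combining --jit with
--no-reg-alloc through an explicit parser.error() call. A minimal sketch of
the same constraint expressed with argparse's built-in mutually exclusive
groups (an alternative, not what the patch ships):

    import argparse

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--jit', action='store_true', help='test with JIT')
    group.add_argument('--no-reg-alloc', action='store_true',
                       help='test with JIT without register allocation')

    # argparse itself now rejects "--jit --no-reg-alloc" with a usage error,
    # so no hand-written compatibility check is needed.
    args = parser.parse_args(['--no-reg-alloc'])
    assert args.no_reg_alloc and not args.jit

The explicit check does keep the exact error text under the script's control,
which may matter if other tooling parses the runner's output.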