Improve CI
Test with jit-no-reg-alloc

Signed-off-by: Laszlo Voros <vorosl@inf.u-szeged.hu>
vorosl committed Aug 5, 2024
1 parent 7800a31 commit accf52f
Showing 3 changed files with 72 additions and 21 deletions.
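In short: tools/run-tests.py gains a --no-reg-alloc option that launches the engine with --jit --jit-no-reg-alloc, and every CI test job gets a matching extra pass. A typical local invocation, mirroring the CI steps below (debug build path taken from the diff):

python3 ./tools/run-tests.py --no-reg-alloc --engine="./out/debug/walrus"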
35 changes: 33 additions & 2 deletions .github/workflows/actions.yml
@@ -89,8 +89,10 @@ jobs:
rm $GITHUB_WORKSPACE/test/wasm-spec/core/call_indirect.wast
$RUNNER --engine="$GITHUB_WORKSPACE/out/clang/x86/walrus"
$RUNNER --jit --engine="$GITHUB_WORKSPACE/out/clang/x64/walrus"
$RUNNER --jit --engine="$GITHUB_WORKSPACE/out/pure/x86/walrus" basic-tests wasm-test-core jit
$RUNNER --no-reg-alloc --engine="$GITHUB_WORKSPACE/out/clang/x64/walrus"
$RUNNER --engine="$GITHUB_WORKSPACE/out/pure/x64/walrus" basic-tests wasm-test-core jit
$RUNNER --jit --engine="$GITHUB_WORKSPACE/out/pure/x86/walrus" basic-tests wasm-test-core jit
$RUNNER --no-reg-alloc --engine="$GITHUB_WORKSPACE/out/pure/x86/walrus" basic-tests wasm-test-core jit
build-test-on-x86:
runs-on: ubuntu-latest
@@ -112,6 +114,7 @@ jobs:
run: |
$RUNNER --engine="$GITHUB_WORKSPACE/out/linux/x86/walrus"
$RUNNER --jit --engine="$GITHUB_WORKSPACE/out/linux/x86/walrus"
$RUNNER --no-reg-alloc --engine="$GITHUB_WORKSPACE/out/linux/x86/walrus"
build-test-on-x64:
runs-on: ubuntu-latest
@@ -133,6 +136,7 @@ jobs:
run: |
$RUNNER --engine="$GITHUB_WORKSPACE/out/linux/x64/walrus"
$RUNNER --jit --engine="$GITHUB_WORKSPACE/out/linux/x64/walrus"
$RUNNER --no-reg-alloc --engine="$GITHUB_WORKSPACE/out/linux/x64/walrus"
build-on-x64-with-perf:
runs-on: ubuntu-latest
@@ -180,8 +184,10 @@ jobs:
rm ./test/wasm-spec/core/call_indirect.wast
python3 ./tools/run-tests.py --engine="./out/debug/walrus"
python3 ./tools/run-tests.py --jit --engine="./out/debug/walrus"
python3 ./tools/run-tests.py --no-reg-alloc --engine="./out/debug/walrus"
python3 ./tools/run-tests.py --engine="./out/pure/walrus" basic-tests wasm-test-core jit
python3 ./tools/run-tests.py --jit --engine="./out/pure/walrus" basic-tests wasm-test-core jit
python3 ./tools/run-tests.py --no-reg-alloc --engine="./out/pure/walrus" basic-tests wasm-test-core jit
build-test-on-aarch64:
runs-on: ubuntu-latest
@@ -209,8 +215,10 @@ jobs:
ninja -Cout/pure
python3 ./tools/run-tests.py --engine="./out/release/walrus"
python3 ./tools/run-tests.py --jit --engine="./out/release/walrus"
python3 ./tools/run-tests.py --no-reg-alloc --engine="./out/release/walrus"
python3 ./tools/run-tests.py --engine="./out/pure/walrus" basic-tests wasm-test-core jit
python3 ./tools/run-tests.py --jit --engine="./out/pure/walrus" basic-tests wasm-test-core jit
python3 ./tools/run-tests.py --no-reg-alloc --engine="./out/pure/walrus" basic-tests wasm-test-core jit
test-on-windows-x86-x64:
runs-on: windows-2022
@@ -272,7 +280,7 @@ jobs:
run: |
$RUNNER --engine="$GITHUB_WORKSPACE/out/extended/walrus" wasm-test-extended
build-test-performance:
build-test-performance-x64:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@@ -295,6 +303,29 @@ jobs:
run: |
test/wasmBenchmarker/benchmark.py --engines $GITHUB_WORKSPACE/out/linux/x64/walrus --iterations 2 --verbose --summary --results i j2i n2i j n2j
build-test-performance-x86:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install Packages
run: |
sudo apt update
sudo apt install -y ninja-build gcc-multilib g++-multilib
sudo pip install pandas
sudo pip install py-markdown-table
sudo pip install tqdm
- name: Build x86
env:
BUILD_OPTIONS: -DWALRUS_ARCH=x86 -DWALRUS_HOST=linux -DWALRUS_MODE=release -DWALRUS_OUTPUT=shell -GNinja
run: |
cmake -H. -Bout/linux/x86 $BUILD_OPTIONS
ninja -Cout/linux/x86
- name: Run Tests
run: |
test/wasmBenchmarker/benchmark.py --engines $GITHUB_WORKSPACE/out/linux/x86/walrus --iterations 2 --verbose --summary --results i j2i n2i j n2j
built-test-wasm-c-api:
runs-on: ubuntu-latest
steps:
28 changes: 14 additions & 14 deletions test/wasmBenchmarker/benchmark.py
@@ -115,17 +115,17 @@ def parse_args():
return args

engine_path = 0
def engine_display_name(engine):
def engine_display_name(engine):
if not engine in engine_map:
if engine_path == -2:
engine_map[engine] = str(len(engine_map))

if engine_path == -1:
engine_map[engine] = ""

if engine_path == 0:
engine_map[engine] = engine

if engine_path > 0:
engine_map[engine] = "/".join(engine.split("/")[0-engine_path:])

@@ -143,7 +143,7 @@ def check_programs(engines, verbose):


def get_emcc(verbose, system_emcc=True):
emcc_path = None
emcc_path = None

if system_emcc and os.system("emcc --version >/dev/null") == 0:
if (verbose): print("Emscripten already installed on system")
@@ -319,7 +319,7 @@ def run_tests(path, test_names, engines, number_of_runs, mem, no_time, jit, jit_
if sum(time_results[engine["name"]]) < 0:
record[engine["name"]] = -1
continue
try:
try:
value = (sum(time_results[engine["name"]]) / len(time_results[engine["name"]])) / 1e9
except ZeroDivisionError:
value = -1
@@ -332,7 +332,7 @@ def run_tests(path, test_names, engines, number_of_runs, mem, no_time, jit, jit_
if sum(mem_results[engine["name"]]) < 0:
record[engine["name"]] = -1
continue
try:
try:
value = (sum(mem_results[engine["name"]]) / len(mem_results[engine["name"]]))
except ZeroDivisionError:
value = -1
@@ -345,15 +345,15 @@ def run_tests(path, test_names, engines, number_of_runs, mem, no_time, jit, jit_
def generate_report(data, summary, file_name=None):
if summary:
df = pd.DataFrame.from_records(data)
for col in df.columns:
for col in df.columns:
if col == "test":
continue

if "/" in col.split(' ')[-1]:
df[col] = df[col].str.split(' ').str[-1].str[1:-2]

df[col] = df[col].astype(float)

df = df.describe().loc[["mean"]].to_dict('records')
df[0]["test"] = "MEAN"
separator = [{}]
@@ -368,7 +368,7 @@ def generate_report(data, summary, file_name=None):
print("\n\n# Engines")
for engine, serial in engine_map.items():
print(f"{serial}: {engine}")

return
with open(file_name, "w") as file:
if file_name.endswith(".csv"):
@@ -388,7 +388,7 @@ def generate_report(data, summary, file_name=None):

line += "\n"
file.write(line)

if engine_path == -2:
file.write("\n\n# Engines\n")
for engine, serial in engine_map.items():
@@ -415,7 +415,7 @@ def compare(data, engines, jit_to_interpreter, jit_no_reg_alloc_to_interpreter,
jit_no_reg_alloc = data[i][f"{engine_display_name(engine)} JIT_NO_REG_ALLOC"]
interpreter = data[i][f"{engine_display_name(engine)} INTERPRETER"]
data[i][f"{engine_display_name(engine)} INTERPRETER/JIT_NO_REG_ALLOC"] = f"{jit_no_reg_alloc} ({'{:.2f}'.format(-1 if float(interpreter) < 0 or float(jit_no_reg_alloc) < 0 else float(interpreter) / float(jit_no_reg_alloc))}x)"

if jit_no_reg_alloc_to_jit:
jit_no_reg_alloc = data[i][f"{engine_display_name(engine)} JIT_NO_REG_ALLOC"]
jit = data[i][f"{engine_display_name(engine)} JIT"]
@@ -430,7 +430,7 @@ def compare(data, engines, jit_to_interpreter, jit_no_reg_alloc_to_interpreter,
interpreter = data[i][f"{engine_display_name(engine)} INTERPRETER"]
jit_no_reg_alloc = data[i][f"{engine_display_name(engine)} JIT_NO_REG_ALLOC"]
data[i][f"{engine_display_name(engine)} JIT_NO_REG_ALLOC/INTERPRETER"] = f"{interpreter} ({'{:.2f}'.format(-1 if float(jit_no_reg_alloc) < 0 or float(interpreter) < 0 else float(jit_no_reg_alloc) / float(interpreter))}x)"

if jit_to_jit_no_reg_alloc:
jit = data[i][f"{engine_display_name(engine)} JIT"]
jit_no_reg_alloc = data[i][f"{engine_display_name(engine)} JIT_NO_REG_ALLOC"]
@@ -499,6 +499,6 @@ def main():
if len(errorList) > 0:
print(errorList)
exit(1)

if __name__ == "__main__":
main()
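A concrete reading of the comparison cells built above: with an interpreter time of 2.0 s and a jit-no-reg-alloc time of 1.0 s on the same test, the INTERPRETER/JIT_NO_REG_ALLOC column renders as "1.0 (2.00x)", i.e. the raw jit-no-reg-alloc value followed by how many times faster it ran than the interpreter.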
30 changes: 25 additions & 5 deletions tools/run-tests.py
@@ -47,6 +47,7 @@
DEFAULT_RUNNERS = []
JIT_EXCLUDE_FILES = []
jit = False
jit_no_reg_alloc = False


class runner(object):
@@ -64,12 +65,18 @@ def __call__(self, fn):
def _run_wast_tests(engine, files, is_fail):
fails = 0
for file in files:
if jit:
filename = os.path.basename(file)
if jit or jit_no_reg_alloc:
filename = os.path.basename(file)
if filename in JIT_EXCLUDE_FILES:
continue

proc = Popen([engine, "--mapdirs", "./test/wasi", "/var", file], stdout=PIPE) if not jit else Popen([engine, "--mapdirs", "./test/wasi", "/var", "--jit", file], stdout=PIPE)
openParams = [engine, "--mapdirs", "./test/wasi", "/var", file]
if jit:
openParams = [engine, "--mapdirs", "./test/wasi", "/var", "--jit", file]
elif jit_no_reg_alloc:
openParams = [engine, "--mapdirs", "./test/wasi", "/var", "--jit", "--jit-no-reg-alloc", file]

proc = Popen(openParams, stdout=PIPE)
out, _ = proc.communicate()

if is_fail and proc.returncode or not is_fail and not proc.returncode:
@@ -178,10 +185,18 @@ def main():
parser.add_argument('suite', metavar='SUITE', nargs='*', default=sorted(DEFAULT_RUNNERS),
help='test suite to run (%s; default: %s)' % (', '.join(sorted(RUNNERS.keys())), ' '.join(sorted(DEFAULT_RUNNERS))))
parser.add_argument('--jit', action='store_true', help='test with JIT')
parser.add_argument('--no-reg-alloc', action='store_true', help='test with JIT without register allocation')
args = parser.parse_args()
global jit
jit = args.jit
if jit:

global jit_no_reg_alloc
jit_no_reg_alloc = args.no_reg_alloc

if jit and jit_no_reg_alloc:
parser.error('jit and no-reg-alloc cannot be used together')

if jit or jit_no_reg_alloc:
exclude_list_file = join(PROJECT_SOURCE_DIR, 'tools', 'jit_exclude_list.txt')
with open(exclude_list_file) as f:
global JIT_EXCLUDE_FILES
@@ -195,7 +210,12 @@ def main():
success, fail = [], []

for suite in args.suite:
print(COLOR_PURPLE + f'running test suite{ " with jit" if jit else ""}: ' + suite + COLOR_RESET)
text = ""
if jit:
text = " with jit"
elif jit_no_reg_alloc:
text = " with jit without register allocation"
print(COLOR_PURPLE + f'running test suite{text}: ' + suite + COLOR_RESET)
try:
RUNNERS[suite](args.engine)
success += [suite]
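Condensed, the runner change reduces to the command mapping below (a sketch of the logic in the diff above; the surrounding script is omitted, flag names as in run-tests.py):

# Sketch: per-test engine command built by tools/run-tests.py.
def engine_command(engine, file, jit, jit_no_reg_alloc):
    params = [engine, "--mapdirs", "./test/wasi", "/var"]
    if jit:
        params.append("--jit")  # JIT with register allocation
    elif jit_no_reg_alloc:
        params += ["--jit", "--jit-no-reg-alloc"]  # JIT without register allocation
    params.append(file)
    return params  # handed to subprocess.Popen(params, stdout=PIPE)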
