diff --git a/experiments/regression_exp/commands b/experiments/regression_exp/commands
index fa53d52..61fc1e0 100644
--- a/experiments/regression_exp/commands
+++ b/experiments/regression_exp/commands
@@ -3,4 +3,11 @@ python experiments/regression_exp/sweep_regression_exp.py --data_source pendulum
 python experiments/regression_exp/sweep_regression_exp.py --data_source pendulum --exp_name may15 --model BNN_FSVGD_SimPrior_gp --num_hparam_samples 30 --yes &&
 python experiments/regression_exp/sweep_regression_exp.py --data_source pendulum --exp_name may15 --model BNN_FSVGD_SimPrior_ssge --num_hparam_samples 30 --yes &&
 python experiments/regression_exp/sweep_regression_exp.py --data_source pendulum --exp_name may15 --model BNN_FSVGD_SimPrior_kde --num_hparam_samples 20 --yes &&
-python experiments/regression_exp/sweep_regression_exp.py --data_source pendulum --exp_name may15 --model BNN_FSVGD_SimPrior_nu-method --num_hparam_samples 30 --gpu --yes --num_cpus 4
\ No newline at end of file
+python experiments/regression_exp/sweep_regression_exp.py --data_source pendulum --exp_name may15 --model BNN_FSVGD_SimPrior_nu-method --num_hparam_samples 30 --gpu --yes --num_cpus 4
+
+
+python experiments/regression_exp/sweep_regression_exp.py --data_source racecar --exp_name jan09 --model BNN_SVGD --num_hparam_samples 10 --pred_diff 1 --yes &&
+python experiments/regression_exp/sweep_regression_exp.py --data_source racecar --exp_name jan09 --model BNN_FSVGD --num_hparam_samples 20 --pred_diff 1 --yes &&
+python experiments/regression_exp/sweep_regression_exp.py --data_source racecar --exp_name jan08 --model BNN_FSVGD_SimPrior_gp --num_hparam_samples 20 --yes
+
+python experiments/regression_exp/sweep_regression_exp.py --data_source racecar --exp_name jan09 --model BNN_FSVGD_SimPrior_kde --pred_diff 1 --num_hparam_samples 20 --gpu --yes --num_cpus 4
\ No newline at end of file
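Note: the racecar commands above pass `--pred_diff 1`. The actual pred-diff handling in `run_regression_exp.py` (the "# handle pred diff mode" block) is not part of this diff; the snippet below is only a generic, hypothetical illustration of what such a flag usually means (regressing the state change instead of the next state), with assumed array layouts.

```python
# Hypothetical illustration only -- not the repo's actual pred_diff code.
import numpy as np

def to_pred_diff_targets(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    """Turn next-state targets into state-difference targets: y - s_t."""
    state_dim = y.shape[-1]
    # assumes x = [state, action] with the state in the first state_dim columns
    return y - x[..., :state_dim]

def from_pred_diff_predictions(x: np.ndarray, delta_pred: np.ndarray) -> np.ndarray:
    """Invert the transform at prediction time: s_{t+1} = s_t + predicted delta."""
    return x[..., :delta_pred.shape[-1]] + delta_pred
```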
diff --git a/experiments/regression_exp/commands_num_data b/experiments/regression_exp/commands_num_data
new file mode 100644
index 0000000..79974e5
--- /dev/null
+++ b/experiments/regression_exp/commands_num_data
@@ -0,0 +1,34 @@
+# pendulum (fixed likelihood)
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data --data_source pendulum --learn_likelihood_std 0 --model BNN_FSVGD_SimPrior_gp --gpu --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data --data_source pendulum --learn_likelihood_std 0 --model BNN_FSVGD_SimPrior_nu-method --gpu --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data --data_source pendulum --learn_likelihood_std 0 --model BNN_FSVGD_SimPrior_kde --gpu --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data --data_source pendulum --learn_likelihood_std 0 --model BNN_FSVGD --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data --data_source pendulum --learn_likelihood_std 0 --model BNN_SVGD --yes --num_cpus 4 &&
+python experiments/meta_learning_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data --data_source pendulum --learn_likelihood_std 0 --model PACOH --yes --num_cpus 4
+python experiments/meta_learning_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data --data_source pendulum --learn_likelihood_std 0 --model NP --yes --num_cpus 4 --gpu --long
+
+# pendulum (learned likelihood)
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data_std --data_source pendulum --learn_likelihood_std 1 --model BNN_FSVGD_SimPrior_gp --gpu --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data_std --data_source pendulum --learn_likelihood_std 1 --model BNN_FSVGD_SimPrior_nu-method --gpu --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data_std --data_source pendulum --learn_likelihood_std 1 --model BNN_FSVGD_SimPrior_kde --gpu --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data_std --data_source pendulum --learn_likelihood_std 1 --model BNN_FSVGD --gpu --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data_std --data_source pendulum --learn_likelihood_std 1 --model BNN_SVGD --gpu --yes --num_cpus 4
+
+
+
+# racecar (fixed likelihood)
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data --data_source racecar --learn_likelihood_std 0 --model BNN_FSVGD_SimPrior_gp --gpu --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data --data_source racecar --learn_likelihood_std 0 --model BNN_FSVGD_SimPrior_nu-method --gpu --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data --data_source racecar --learn_likelihood_std 0 --model BNN_FSVGD_SimPrior_kde --gpu --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data --data_source racecar --learn_likelihood_std 0 --model BNN_FSVGD --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data --data_source racecar --learn_likelihood_std 0 --model BNN_SVGD --yes --num_cpus 4 &&
+python experiments/meta_learning_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data --data_source racecar --learn_likelihood_std 0 --model PACOH --yes --num_cpus 4
+python experiments/meta_learning_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data --data_source racecar --learn_likelihood_std 0 --model NP --yes --num_cpus 4 --gpu --long
+
+
+# racecar (learned likelihood)
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data_std --data_source racecar --learn_likelihood_std 1 --model BNN_FSVGD_SimPrior_gp --gpu --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data_std --data_source racecar --learn_likelihood_std 1 --model BNN_FSVGD_SimPrior_nu-method --gpu --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data_std --data_source racecar --learn_likelihood_std 1 --model BNN_FSVGD_SimPrior_kde --gpu --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data_std --data_source racecar --learn_likelihood_std 1 --model BNN_FSVGD --gpu --yes --num_cpus 4 &&
+python experiments/regression_exp/sweep_regression_exp_num_data.py --exp_name jan10_num_data_std --data_source racecar --learn_likelihood_std 1 --model BNN_SVGD --gpu --yes --num_cpus 4
\ No newline at end of file
diff --git a/experiments/regression_exp/plots_num_data.py b/experiments/regression_exp/plots_num_data.py
new file mode 100644
index 0000000..ed6ffc9
--- /dev/null
+++ b/experiments/regression_exp/plots_num_data.py
@@ -0,0 +1,125 @@
+import pandas as pd
+import numpy as np
+import argparse
+
+from typing import Tuple
+from matplotlib import pyplot as plt
+from experiments.util import collect_exp_results, ucb, lcb, median, count
+import math
+
+
+def different_method_plot(df_agg: pd.DataFrame, metric: str = 'nll', display: bool = True,
+                          filter_std_higher_than: float = math.inf) \
+        -> Tuple[pd.DataFrame, plt.Figure]:
+    models = set(df_agg['model'])
+
+    best_rows = []
+    for model in models:
+        df_agg_model = df_agg.loc[df_agg['model'] == model]
+        df_agg_model = df_agg_model[df_agg_model[(metric, 'std')] < filter_std_higher_than]
+        df_agg_model.sort_values(by=(metric, 'mean'), ascending=True, inplace=True)
+        best_rows.append(df_agg_model.iloc[0])
+
+    best_rows_df = pd.DataFrame(best_rows)
+
+    fig, ax = plt.subplots()
+    x_pos = 2 * np.arange(len(models))
+    ax.bar(x_pos, best_rows_df[(metric, 'mean')], color='#ADD8E6')
+    ax.errorbar(x_pos, best_rows_df[(metric, 'mean')], yerr=best_rows_df[(metric, 'std')],
+                linestyle='', capsize=4., color='black')
+    ax.set_xticks(x_pos)
+    ax.set_xticklabels(best_rows_df['model'], rotation=-20)
+    ax.set_ylabel(metric)
+    fig.tight_layout()
+
+    if display:
+        plt.show()
+        print(best_rows_df[[('model', ''), ('nll', 'mean'), ('nll', 'std'),
+                            ('rmse', 'mean'), ('rmse', 'std'), ('dirname', '')]].to_string())
+
+    return best_rows_df, fig
+
+
+def main(args, drop_nan=False):
+    df_full, param_names = collect_exp_results(exp_name=args.exp_name)
+    df_full = df_full[df_full['data_source'] == args.data_source]
+    #df_full = df_full[df_full['model'] == args.data_source]
+
+    df_full = df_full[df_full['num_samples_train'] >= 10]
+
+    for col in ['bandwidth_kde', 'bandwidth_ssge', 'bandwidth_score_estim']:
+        if col in df_full.columns:
+            df_full[col] = df_full[col].fillna(value='auto')
+
+    if drop_nan:
+        result_is_nan = df_full['nll'].isna()
+        df_full_dropped = df_full[result_is_nan]
+        print(f'Dropped results due to NaNs: {df_full_dropped.shape[0]}')
+        df_full = df_full[~result_is_nan]
+    else:
+        # replace nan with bad values
+        for col, noise_std in [('nll', 1000.), ('rmse', 5.)]:
+            series = df_full[col]
+            max_value = series.max()
+            is_nan = series.isna()
+            noise = np.random.normal(0, noise_std, size=is_nan.sum())  # Adjust mean and stddev as needed
+            series[is_nan] = max_value + noise
+            df_full[col] = series
+
+
+    # group over everything except seeds and aggregate over the seeds
+    groupby_names = list(set(param_names) - {'model_seed', 'data_seed'})
+
+    # remove the likelihood_std column since it's a constant list which is not hashable
+    groupby_names.remove('likelihood_std')
+    # groupby_names.remove('added_gp_outputscale')
+    #df_full['added_gp_outputscale'] = df_full['added_gp_outputscale'].apply(lambda x: x[0])
+
+    # replace all the nans in hyperparameter columns with 'N/A'
+    for column in groupby_names:
+        df_full[column] = df_full[column].fillna('N/A')
+
+    # first take mean over the data seeds
+    df_mean = df_full.groupby(by=groupby_names + ['model_seed'], axis=0).mean()
+    df_mean.reset_index(drop=False, inplace=True)
+
+    # then compute the stats over the model seeds
+    df_agg = df_mean.groupby(by=groupby_names, axis=0).aggregate(['mean', 'std', ucb, lcb, median, count], axis=0)
+    df_agg.reset_index(drop=False, inplace=True)
+    # df_agg.sort_values(by=[('nll', 'mean')], ascending=True, inplace=True)
+
+    # filter all the rows where the count is less than 3
+    df_agg = df_agg[df_agg['rmse']['count'] >= 3]
+    available_models = set(df_agg['model'])
+    for metric in ['nll', 'rmse']:
+        fig, ax = plt.subplots()
+        for model in available_models:
+            df_model = df_agg[df_agg['model'] == model].sort_values(by=[('num_samples_train', '')], ascending=True)
+            if args.quantile_cis:
+                ax.plot(df_model[('num_samples_train', '')], df_model[(metric, 'median')], label=model)
+                lower_ci = df_model[(metric, 'lcb')]
+                upper_ci = df_model[(metric, 'ucb')]
+            else:
+                ax.plot(df_model[('num_samples_train', '')], df_model[(metric, 'mean')], label=model)
+                CI_width = 2 / np.sqrt(df_model[(metric, 'count')])  # ~95% CI of the mean: +/- 2 * std / sqrt(count)
+                lower_ci = df_model[(metric, 'mean')] - CI_width * df_model[(metric, 'std')]
+                upper_ci = df_model[(metric, 'mean')] + CI_width * df_model[(metric, 'std')]
+            ax.fill_between(df_model[('num_samples_train', '')], lower_ci, upper_ci, alpha=0.3)
+        ax.set_title(f'{args.data_source} - {metric}')
+        #ax.set_xscale('log')
+        #ax.set_ylim((-3.8, -2.))
+
+        ax.legend()
+        fig.show()
+    print('Models:', set(df_agg['model']))
+
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Inspect results of a regression experiment.')
+    parser.add_argument('--exp_name', type=str, default='jan10_num_data')
+    parser.add_argument('--quantile_cis', type=int, default=1)
+    parser.add_argument('--data_source', type=str, default='pendulum')
+    args = parser.parse_args()
+    main(args)
\ No newline at end of file
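Note on the confidence bands in plots_num_data.py: with `--quantile_cis 1` the script plots the median with the `lcb`/`ucb` aggregates from `experiments.util` (their exact quantile levels are not shown in this diff); otherwise it plots the mean with a band of roughly two standard errors (2 * std / sqrt(count)). The following self-contained toy sketch, with illustrative quantile levels, shows both variants on a small aggregated frame; the column names are not the MultiIndex columns the script actually produces.

```python
# Standalone sketch (not part of the repo): quantile bands vs. mean +/- 2*SE bands.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
# toy per-seed results: 5 seeds x 4 training-set sizes
df = pd.DataFrame({
    'num_samples_train': np.repeat([10, 20, 40, 80], 5),
    'nll': rng.normal(1.0, 0.2, size=20),
})

agg = df.groupby('num_samples_train')['nll'].agg(
    mean='mean', std='std', count='count',
    lcb=lambda s: s.quantile(0.2), median='median', ucb=lambda s: s.quantile(0.8))

# quantile bands: plot the median with [lcb, ucb]
quantile_band = agg[['lcb', 'median', 'ucb']]

# mean-based bands: ~95% CI of the mean via the standard error
half_width = 2 * agg['std'] / np.sqrt(agg['count'])
mean_band = pd.DataFrame({'lower': agg['mean'] - half_width,
                          'mean': agg['mean'],
                          'upper': agg['mean'] + half_width})
print(quantile_band, mean_band, sep='\n\n')
```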
diff --git a/experiments/regression_exp/run_regression_exp.py b/experiments/regression_exp/run_regression_exp.py
index f40dc36..04c3379 100644
--- a/experiments/regression_exp/run_regression_exp.py
+++ b/experiments/regression_exp/run_regression_exp.py
@@ -74,11 +74,11 @@ def regression_experiment(
     # provide data and sim
     x_train, y_train, x_test, y_test, sim = provide_data_and_sim(
         data_source=data_source,
-        data_spec={'num_samples_train': 1000},
+        data_spec={'num_samples_train': 10000},
         data_seed=data_seed)
 
     # only take num_samples_train datapoints
-    assert num_samples_train <= 1000
+    assert num_samples_train <= 10000
     x_train, y_train = x_train[:num_samples_train], y_train[:num_samples_train]
 
     # handle pred diff mode
@@ -241,12 +241,12 @@ def main(args):
 
     # data parameters
     parser.add_argument('--data_source', type=str, default='racecar')
-    parser.add_argument('--num_samples_train', type=int, default=10)
+    parser.add_argument('--num_samples_train', type=int, default=100)
     parser.add_argument('--data_seed', type=int, default=77698)
     parser.add_argument('--pred_diff', type=int, default=1)
 
     # standard BNN parameters
-    parser.add_argument('--model', type=str, default='BNN_FSVGD_SimPrior_gp')
+    parser.add_argument('--model', type=str, default='BNN_SVGD')
     parser.add_argument('--model_seed', type=int, default=892616)
     parser.add_argument('--likelihood_std', type=float, default=None)
     parser.add_argument('--learn_likelihood_std', type=int, default=0)
@@ -273,7 +273,7 @@ def main(args):
     # FSVGD_SimPrior parameters
     parser.add_argument('--bandwidth_score_estim', type=float, default=10.)
     parser.add_argument('--ssge_kernel_type', type=str, default='IMQ')
-    parser.add_argument('--num_f_samples', type=int, default=64)
+    parser.add_argument('--num_f_samples', type=int, default=1024)
    parser.add_argument('--switch_score_estimator_frac', type=float, default=0.6667)
 
     # FSVGD_SimPrior parameters
diff --git a/experiments/regression_exp/sweep_regression_exp.py b/experiments/regression_exp/sweep_regression_exp.py
index 3ebf243..5a62237 100644
--- a/experiments/regression_exp/sweep_regression_exp.py
+++ b/experiments/regression_exp/sweep_regression_exp.py
@@ -17,13 +17,13 @@
         'bandwidth_svgd': {'distribution': 'log_uniform_10', 'min': -1.0, 'max': 0.0},
         'bandwidth_gp_prior': {'distribution': 'log_uniform', 'min': -2., 'max': 0.},
         'num_train_steps': {'values': [5000, 10000, 20000]},
-        'num_measurement_points': {'values': [32, 64, 128]},
+        'num_measurement_points': {'values': [32]},
     },
     'BNN_FSVGD_SimPrior_gp': {
         'bandwidth_svgd': {'distribution': 'log_uniform_10', 'min': -1.0, 'max': 0.0},
         'num_train_steps': {'values': [40000]},
-        'num_measurement_points': {'values': [16, 32]},
-        'num_f_samples': {'values': [512, 1024]},
+        'num_measurement_points': {'values': [32]},
+        'num_f_samples': {'values': [1024]},
     },
     'BNN_FSVGD_SimPrior_ssge': {
         'bandwidth_svgd': {'distribution': 'log_uniform_10', 'min': -1.0, 'max': 1.0},
@@ -38,7 +38,13 @@
         'num_measurement_points': {'values': [32]},
         'num_f_samples': {'values': [512]},
         #'bandwidth_score_estim': {'distribution': 'log_uniform_10', 'min': -0.5, 'max': 0.5},
-        'bandwidth_score_estim': {'distribution': 'uniform', 'min': 0.8, 'max': 2.0},
+        'bandwidth_score_estim': {'distribution': 'uniform', 'min': 1.0, 'max': 2.0},
+    },
+    'BNN_FSVGD_SimPrior_kde': {
+        'bandwidth_svgd': {'distribution': 'log_uniform_10', 'min': -1.0, 'max': 1.0},
+        'num_train_steps': {'values': [60000, 80000, 100000]},
+        'num_measurement_points': {'values': [16, 32]},
+        'num_f_samples': {'values': [1024, 2056]},
     },
     'BNN_FSVGD_SimPrior_gp+nu-method': {
         'bandwidth_svgd': {'distribution': 'log_uniform_10', 'min': -1.0, 'max': 0.0},
@@ -48,17 +54,6 @@
         'num_measurement_points': {'values': [32]},
         'num_f_samples': {'values': [512]},
         'switch_score_estimator_frac': {'values': [0.6667]},
         'bandwidth_score_estim': {'distribution': 'log_uniform_10', 'min': 0.0, 'max': 0.5},
     },
-    'BNN_FSVGD_SimPrior_kde': {
-        'bandwidth_svgd': {'distribution': 'log_uniform', 'min': -2., 'max': 2.},
-        'num_train_steps': {'values': [40000]},
-        'num_measurement_points': {'values': [16, 32]},
-        'num_f_samples': {'values': [512, 1024, 2056]},
-    },
-    'BNN_MMD_SimPrior': {
-        'num_train_steps': {'values': [20000, 40000]},
-        'num_measurement_points': {'values': [8, 16, 32, 64]},
-        'num_f_samples': {'values': [64, 128, 256, 512]},
-    },
     'BNN_SVGD_DistillPrior': {
         'bandwidth_svgd': {'distribution': 'log_uniform', 'min': -2., 'max': 2.},
         'num_train_steps': {'values': [20000, 40000]},
@@ -80,7 +75,7 @@ def main(args):
         'num_samples_train': DATASET_CONFIGS[args.data_source]['num_samples_train'],
         'model': {'value': args.model},
         'learn_likelihood_std': {'value': args.learn_likelihood_std},
-        #'likelihood_std': {'value': None},
+        'pred_diff': {'value': args.pred_diff},
         'num_particles': {'value': 20},
         'data_batch_size': {'value': 8},
     }
@@ -118,7 +113,7 @@
     # sweep args
     parser.add_argument('--num_hparam_samples', type=int, default=20)
     parser.add_argument('--num_model_seeds', type=int, default=3, help='number of model seeds per hparam')
-    parser.add_argument('--num_data_seeds', type=int, default=4, help='number of model seeds per hparam')
+    parser.add_argument('--num_data_seeds', type=int, default=3, help='number of data seeds per hparam')
     parser.add_argument('--num_cpus', type=int, default=1, help='number of cpus to use')
     parser.add_argument('--run_mode', type=str, default='euler')
 
@@ -132,8 +127,9 @@
     parser.add_argument('--data_source', type=str, default='racecar')
 
     # # standard BNN parameters
-    parser.add_argument('--model', type=str, default='BNN_SVGD')
+    parser.add_argument('--model', type=str, default='BNN_FSVGD_SimPrior_kde')
     parser.add_argument('--learn_likelihood_std', type=int, default=0)
+    parser.add_argument('--pred_diff', type=int, default=1)
 
     args = parser.parse_args()
     main(args)
diff --git a/experiments/regression_exp/sweep_regression_exp_num_data.py b/experiments/regression_exp/sweep_regression_exp_num_data.py
new file mode 100644
index 0000000..67a95f3
--- /dev/null
+++ b/experiments/regression_exp/sweep_regression_exp_num_data.py
@@ -0,0 +1,136 @@
+from experiments.util import (generate_run_commands, generate_base_command, RESULT_DIR, sample_param_flags, hash_dict)
+from experiments.data_provider import DATASET_CONFIGS
+
+import experiments.regression_exp.run_regression_exp
+import numpy as np
+import datetime
+import itertools
+import argparse
+import os
+
+MODEL_SPECIFIC_CONFIG = {
+    'BNN_SVGD': {
+        'bandwidth_svgd': {'values': [5.]},
+        'num_train_steps': {'values': [20000]},
+    },
+    'BNN_FSVGD': {
+        'bandwidth_svgd': {'values': [0.2]},
+        'bandwidth_gp_prior': {'values': [0.4]},
+        'num_train_steps': {'values': [20000]},
+        'num_measurement_points': {'values': [32]},
+    },
+    'BNN_FSVGD_SimPrior_gp': {
+        'bandwidth_svgd': {'values': [0.2]},
+        'num_train_steps': {'values': [40000]},
+        'num_measurement_points': {'values': [32]},
+        'num_f_samples': {'values': [1024]},
+    },
+    'BNN_FSVGD_SimPrior_ssge': {
+        'bandwidth_svgd': {'distribution': 'log_uniform_10', 'min': -1.0, 'max': 1.0},
+        'num_train_steps': {'values': [10000, 20000, 40000]},
+        'num_measurement_points': {'values': [8, 16, 32]},
+        'num_f_samples': {'values': [512]},
+        'bandwidth_score_estim': {'distribution': 'log_uniform_10', 'min': -0.5, 'max': 1.},
+    },
+    'BNN_FSVGD_SimPrior_nu-method': {
+        'bandwidth_svgd': {'values': [0.2]},
+        'num_train_steps': {'values': [60000]},
+        'num_measurement_points': {'values': [32]},
+        'num_f_samples': {'values': [512]},
+        'bandwidth_score_estim': {'values': [1.2]},
+    },
+    'BNN_FSVGD_SimPrior_kde': {
+        'bandwidth_svgd': {'values': [0.2]},
+        'num_train_steps': {'values': [80000]},
+        'num_measurement_points': {'values': [32]},
+        'num_f_samples': {'values': [2056]},
+    },
+    'BNN_SVGD_DistillPrior': {
+        'bandwidth_svgd': {'distribution': 'log_uniform', 'min': -2., 'max': 2.},
+        'num_train_steps': {'values': [20000, 40000]},
+        'num_measurement_points': {'values': [8, 16, 32]},
+        'num_f_samples': {'values': [64, 128, 256]},
+        'num_distill_steps': {'values': [30000, 60000]},
+    },
+}
+
+
+def main(args):
+    # setup random seeds
+    rds = np.random.RandomState(args.seed)
+    model_seeds = list(rds.randint(0, 10**6, size=(100,)))
+    data_seeds = list(rds.randint(0, 10**6, size=(100,)))
+
+    sweep_config = {
+        'data_source': {'value': args.data_source},
+        'model': {'value': args.model},
+        'learn_likelihood_std': {'value': args.learn_likelihood_std},
+        'pred_diff': {'value': args.pred_diff},
+        'num_particles': {'value': 20},
+        'data_batch_size': {'value': 8},
+    }
+    # update with model specific sweep ranges
+    assert args.model in MODEL_SPECIFIC_CONFIG
+    sweep_config.update(MODEL_SPECIFIC_CONFIG[args.model])
+
+    # determine name of experiment
+    exp_base_path = os.path.join(RESULT_DIR, args.exp_name)
+    exp_path = os.path.join(exp_base_path, f'{args.data_source}_{args.model}')
+
+    if args.data_source == 'racecar':
+        N_SAMPLES_LIST = [50, 100, 200, 400, 800, 1600, 3200]
+    elif args.data_source == 'pendulum':
+        N_SAMPLES_LIST = [10, 20, 40, 80, 160, 320, 640]
+    else:
+        raise NotImplementedError(f'Unknown data source {args.data_source}.')
+
+    command_list = []
+    output_file_list = []
+    for _ in range(args.num_hparam_samples):
+        flags = sample_param_flags(sweep_config)
+        exp_hash = hash_dict(flags)
+
+        for num_samples_train in N_SAMPLES_LIST:
+
+            exp_result_folder = os.path.join(exp_path, f'{exp_hash}_{num_samples_train}')
+            flags['exp_result_folder'] = exp_result_folder
+
+            for model_seed, data_seed in itertools.product(model_seeds[:args.num_model_seeds],
+                                                           data_seeds[:args.num_data_seeds]):
+                cmd = generate_base_command(experiments.regression_exp.run_regression_exp,
+                                            flags=dict(**flags, **{'model_seed': model_seed, 'data_seed': data_seed,
+                                                                   'num_samples_train': num_samples_train}))
+                command_list.append(cmd)
+                output_file_list.append(os.path.join(exp_result_folder, f'{model_seed}_{data_seed}.out'))
+
+    generate_run_commands(command_list, output_file_list, num_cpus=args.num_cpus,
+                          num_gpus=1 if args.gpu else 0, mode=args.run_mode, prompt=not args.yes)
+
+
+if __name__ == '__main__':
+    current_date = datetime.datetime.now().strftime("%b%d").lower()
+    parser = argparse.ArgumentParser(description='Meta-BO run')
+
+    # sweep args
+    parser.add_argument('--num_hparam_samples', type=int, default=1)
+    parser.add_argument('--num_model_seeds', type=int, default=5, help='number of model seeds per hparam')
+    parser.add_argument('--num_data_seeds', type=int, default=5, help='number of data seeds per hparam')
+    parser.add_argument('--num_cpus', type=int, default=1, help='number of cpus to use')
+    parser.add_argument('--run_mode', type=str, default='euler')
+
+    # general args
+    parser.add_argument('--exp_name', type=str, default=f'test_{current_date}')
+    parser.add_argument('--seed', type=int, default=94563)
+    parser.add_argument('--gpu', default=False, action='store_true')
+    parser.add_argument('--yes', default=False, action='store_true')
+
+    # data parameters
+    parser.add_argument('--data_source', type=str, default='racecar')
+
+    # # standard BNN parameters
+    parser.add_argument('--model', type=str, default='BNN_SVGD')
+    parser.add_argument('--learn_likelihood_std', type=int, default=0)
+    parser.add_argument('--pred_diff', type=int, default=1)
+
+    args = parser.parse_args()
+    main(args)
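Note on the sweep-config schema used above and in sweep_regression_exp.py: entries are either fixed ({'value': ...}), a discrete set ({'values': [...]}), or a continuous range ({'distribution': ..., 'min': ..., 'max': ...}). The real `sample_param_flags` lives in `experiments.util` and is not part of this diff; the sketch below shows one plausible way such a sampler could interpret these entries. The distribution semantics (in particular that 'log_uniform_10' takes base-10 log bounds and 'log_uniform' natural-log bounds) are assumptions for illustration only.

```python
# Illustrative sketch only -- not the repo's actual sample_param_flags implementation.
import numpy as np

def sample_param_flags_sketch(sweep_config: dict, rng: np.random.RandomState) -> dict:
    flags = {}
    for name, spec in sweep_config.items():
        if 'value' in spec:                                    # fixed value
            flags[name] = spec['value']
        elif 'values' in spec:                                 # uniform choice from a discrete set
            flags[name] = spec['values'][rng.randint(len(spec['values']))]
        elif spec.get('distribution') == 'uniform':
            flags[name] = rng.uniform(spec['min'], spec['max'])
        elif spec.get('distribution') == 'log_uniform_10':     # bounds given as log10 (assumed)
            flags[name] = 10 ** rng.uniform(spec['min'], spec['max'])
        elif spec.get('distribution') == 'log_uniform':        # bounds given as natural log (assumed)
            flags[name] = float(np.exp(rng.uniform(spec['min'], spec['max'])))
        else:
            raise ValueError(f'Unsupported sweep spec for {name}: {spec}')
    return flags

# example usage with the BNN_SVGD entry from this file
example_flags = sample_param_flags_sketch(
    {'bandwidth_svgd': {'values': [5.]},
     'num_train_steps': {'values': [20000]},
     'data_batch_size': {'value': 8}},
    np.random.RandomState(0))
print(example_flags)
```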