From 2cf20b733a8b1a60c0c845901d6c4dd485f3ada2 Mon Sep 17 00:00:00 2001
From: skirui-source
Date: Mon, 7 Aug 2023 13:17:33 -0700
Subject: [PATCH 1/5] update metadata tags

---
 .../xgboost-rf-gpu-cpu-benchmark/notebook.ipynb | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/source/examples/xgboost-rf-gpu-cpu-benchmark/notebook.ipynb b/source/examples/xgboost-rf-gpu-cpu-benchmark/notebook.ipynb
index 0823ac3d..7bfd6f5d 100644
--- a/source/examples/xgboost-rf-gpu-cpu-benchmark/notebook.ipynb
+++ b/source/examples/xgboost-rf-gpu-cpu-benchmark/notebook.ipynb
@@ -3,7 +3,16 @@
   {
    "cell_type": "markdown",
    "id": "a51c95d1-b447-4f1b-9571-cf597ca93ef4",
-   "metadata": {},
+   "metadata": {
+    "tags": [
+     "cloud/aws/ec2",
+     "data-storage/s3",
+     "workflow/randomforest",
+     "workflow/hpo",
+     "workflow/xgboost",
+     "library/dask"
+    ]
+   },
    "source": [
     "# HPO Benchmarking with RAPIDS and Dask\n",
     "\n",

From 0371810619622408f413da87fa385c2c1a8bac03 Mon Sep 17 00:00:00 2001
From: skirui-source
Date: Mon, 7 Aug 2023 14:18:30 -0700
Subject: [PATCH 2/5] update with latest benchmarking results

---
 source/examples/xgboost-rf-gpu-cpu-benchmark/notebook.ipynb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/source/examples/xgboost-rf-gpu-cpu-benchmark/notebook.ipynb b/source/examples/xgboost-rf-gpu-cpu-benchmark/notebook.ipynb
index 7bfd6f5d..a488a4e6 100644
--- a/source/examples/xgboost-rf-gpu-cpu-benchmark/notebook.ipynb
+++ b/source/examples/xgboost-rf-gpu-cpu-benchmark/notebook.ipynb
@@ -20,7 +20,7 @@
     "\n",
     "In the notebook demo below, we compare benchmarking results to show how GPU can accelerate HPO tuning jobs relative to CPU.\n",
     "\n",
-    "For instance, we find a 26x speedup in wall clock time (0.71 hrs vs. 18.9 hrs) when comparing between GPU and CPU EC2 instances on 100 XGBoost HPO trials using no parallel workers on 3 years of the Airline Dataset.\n"
+    "For instance, we find a 48x speedup in wall clock time (0.71 hrs vs. 34.6 hrs) for XGBoost and a 16x speedup (3.86 hrs vs. 63.2 hrs) for RandomForest when comparing GPU and CPU EC2 instances on 100 HPO trials over 3 years of the Airline Dataset.\n"
    ]
   },
   {

From be312d85abb6d260a03f462af185c78cee83a2dc Mon Sep 17 00:00:00 2001
From: skirui-source
Date: Mon, 7 Aug 2023 14:34:44 -0700
Subject: [PATCH 3/5] add related files: Dockerfile, HPO script

---
 .../Dockerfile.txt                           |   7 +
 .../xgboost-randomforest-gpu-hpo-dask/hpo.py | 205 ++++++++++++++++++
 2 files changed, 212 insertions(+)
 create mode 100644 source/examples/xgboost-randomforest-gpu-hpo-dask/Dockerfile.txt
 create mode 100644 source/examples/xgboost-randomforest-gpu-hpo-dask/hpo.py

diff --git a/source/examples/xgboost-randomforest-gpu-hpo-dask/Dockerfile.txt b/source/examples/xgboost-randomforest-gpu-hpo-dask/Dockerfile.txt
new file mode 100644
index 00000000..6932cd7f
--- /dev/null
+++ b/source/examples/xgboost-randomforest-gpu-hpo-dask/Dockerfile.txt
@@ -0,0 +1,7 @@
+# FROM nvcr.io/nvidia/rapidsai/rapidsai-core:23.06-cuda11.5-runtime-ubuntu20.04-py3.10
+
+# FROM rapidsai/rapidsai:23.06-cuda11.5-runtime-ubuntu20.04-py3.10
+
+FROM rapidsai/rapidsai:23.06-cuda11.8-runtime-ubuntu22.04-py3.10
+
+RUN mamba install -y -n rapids optuna
diff --git a/source/examples/xgboost-randomforest-gpu-hpo-dask/hpo.py b/source/examples/xgboost-randomforest-gpu-hpo-dask/hpo.py
new file mode 100644
index 00000000..7898974a
--- /dev/null
+++ b/source/examples/xgboost-randomforest-gpu-hpo-dask/hpo.py
@@ -0,0 +1,205 @@
+import argparse
+import glob
+import os
+import time
+
+import dask
+import optuna
+import xgboost as xgb
+from dask.distributed import Client, LocalCluster, wait
+from dask_cuda import LocalCUDACluster
+from dask_ml.model_selection import train_test_split
+from optuna.samplers import RandomSampler
+from sklearn.ensemble import RandomForestClassifier as RF_cpu
+from sklearn.metrics import accuracy_score as accuracy_score_cpu
+
+n_cv_folds = 5
+
+label_column = "ArrDel15"
+feature_columns = [
+    "Year",
+    "Quarter",
+    "Month",
+    "DayOfWeek",
+    "Flight_Number_Reporting_Airline",
+    "DOT_ID_Reporting_Airline",
+    "OriginCityMarketID",
+    "DestCityMarketID",
+    "DepTime",
+    "DepDelay",
+    "DepDel15",
+    "ArrDel15",
+    "AirTime",
+    "Distance",
+]
+
+
+def ingest_data(mode):
+    if mode == "gpu":
+        import dask_cudf
+
+        dataset = dask_cudf.read_parquet(
+            glob.glob("./data/*.parquet"),
+            columns=feature_columns,
+        )
+    else:
+        dataset = dask.dataframe.read_parquet(
+            glob.glob("./data/*.parquet"),
+            columns=feature_columns,
+        ).repartition(npartitions=1000)
+    return dataset
+
+
+def preprocess_data(dataset, *, client, i_fold, mode):
+    dataset = dataset.dropna()
+    train, test = train_test_split(dataset, random_state=i_fold, shuffle=True)
+    X_train, y_train = train.drop(label_column, axis=1), train[label_column]
+    X_test, y_test = test.drop(label_column, axis=1), test[label_column]
+    X_train, y_train = X_train.astype("float32"), y_train.astype("int32")
+    X_test, y_test = X_test.astype("float32"), y_test.astype("int32")
+
+    if mode == "gpu":
+        from cuml.dask.common.utils import persist_across_workers
+
+        X_train, y_train, X_test, y_test = persist_across_workers(
+            client, [X_train, y_train, X_test, y_test], workers=client.has_what().keys()
+        )
+    else:
+        X_train, y_train = X_train.persist(), y_train.persist()
+        X_test, y_test = X_test.persist(), y_test.persist()
+
+    wait([X_train, y_train, X_test, y_test])
+
+    return X_train, y_train, X_test, y_test
+
+
+def train_xgboost(trial, *, dataset, client, mode):
+    params = {
+        "max_depth": trial.suggest_int("max_depth", 4, 8),
+        "learning_rate": trial.suggest_float("learning_rate", 0.001, 0.1, log=True),
+        "min_child_weight": trial.suggest_float(
+            "min_child_weight", 0.1, 10.0, log=True
+        ),
+        "reg_alpha": trial.suggest_float("reg_alpha", 0.0001, 100, log=True),
+        "reg_lambda": trial.suggest_float("reg_lambda", 0.0001, 100, log=True),
+        "verbosity": 0,
+        "objective": "binary:logistic",
+    }
+    num_boost_round = trial.suggest_int("num_boost_round", 100, 500, step=10)
+
+    cv_fold_scores = []
+    for i_fold in range(n_cv_folds):
+        X_train, y_train, X_test, y_test = preprocess_data(
+            dataset, client=client, i_fold=i_fold, mode=mode
+        )
+
+        if mode == "gpu":
+            from cuml.metrics import accuracy_score as accuracy_score_gpu
+
+            params["tree_method"] = "gpu_hist"
+            dtrain = xgb.dask.DaskDeviceQuantileDMatrix(client, X_train, y_train)
+            dtest = xgb.dask.DaskDeviceQuantileDMatrix(client, X_test)
+            accuracy_score_func = accuracy_score_gpu
+        else:
+            params["tree_method"] = "hist"
+            dtrain = xgb.dask.DaskDMatrix(client, X_train, y_train)
+            dtest = xgb.dask.DaskDMatrix(client, X_test)
+            accuracy_score_func = accuracy_score_cpu
+
+        xgboost_output = xgb.dask.train(
+            client, params, dtrain, num_boost_round=num_boost_round
+        )
+        trained_model = xgboost_output["booster"]
+
+        pred = xgb.dask.predict(client, trained_model, dtest) > 0.5
+        pred = pred.astype("int32").compute()
+        y_test = y_test.compute()
+        score = accuracy_score_func(y_test, pred)
+        cv_fold_scores.append(score)
+    final_score = sum(cv_fold_scores) / len(cv_fold_scores)
+    return final_score
+
+
+def train_randomforest(trial, *, dataset, client, mode):
+    params = {
+        "max_depth": trial.suggest_int("max_depth", 5, 15),
+        "max_features": trial.suggest_float("max_features", 0.1, 1.0),
+        "n_estimators": trial.suggest_int("n_estimators", 100, 500, step=10),
+        "criterion": trial.suggest_categorical("criterion", ["gini", "entropy"]),
+        "min_samples_split": trial.suggest_int("min_samples_split", 2, 1000, log=True),
+    }
+
+    cv_fold_scores = []
+    for i_fold in range(n_cv_folds):
+        X_train, y_train, X_test, y_test = preprocess_data(
+            dataset, client=client, i_fold=i_fold, mode=mode
+        )
+
+        if mode == "gpu":
+            from cuml.dask.ensemble import RandomForestClassifier as RF_gpu
+            from cuml.metrics import accuracy_score as accuracy_score_gpu
+
+            params["n_bins"] = 256
+            trained_model = RF_gpu(client=client, **params)
+            accuracy_score_func = accuracy_score_gpu
+        else:
+            params["n_jobs"] = -1
+            trained_model = RF_cpu(**params)
+            accuracy_score_func = accuracy_score_cpu
+
+        trained_model.fit(X_train, y_train)
+        pred = trained_model.predict(X_test)
+        if mode == "gpu":
+            pred = pred.compute()
+        y_test = y_test.compute()
+        score = accuracy_score_func(y_test, pred)
+        cv_fold_scores.append(score)
+    final_score = sum(cv_fold_scores) / len(cv_fold_scores)
+    return final_score
+
+
+def main(args):
+    tstart = time.perf_counter()
+
+    study = optuna.create_study(
+        sampler=RandomSampler(seed=args.seed), direction="maximize"
+    )
+
+    if args.mode == "gpu":
+        cluster = LocalCUDACluster()
+    else:
+        cluster = LocalCluster(n_workers=os.cpu_count())
+
+    with Client(cluster) as client:
+        dataset = ingest_data(mode=args.mode)
+        dataset = client.persist(dataset)  # keep a handle so cached partitions aren't released
+        if args.model_type == "XGBoost":
+            study.optimize(
+                lambda trial: train_xgboost(
+                    trial, dataset=dataset, client=client, mode=args.mode
+                ),
+                n_trials=100,
+                n_jobs=1,
+            )
+        else:
+            study.optimize(
+                lambda trial: train_randomforest(
+                    trial, dataset=dataset, client=client, mode=args.mode
+                ),
+                n_trials=100,
+                n_jobs=1,
+            )
+
+    tend = time.perf_counter()
+    print(f"Time elapsed: {tend - tstart} sec")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--model-type", type=str, required=True, choices=["XGBoost", "RandomForest"]
+    )
+    parser.add_argument("--mode", required=True, choices=["gpu", "cpu"])
+    parser.add_argument("--seed", required=False, type=int, default=1)
+    args = parser.parse_args()
+    main(args)

From 5bfa5f0ffa8d4e041a594edd23ac8f7a8f01f63a Mon Sep 17 00:00:00 2001
From: skirui-source
Date: Mon, 7 Aug 2023 14:41:26 -0700
Subject: [PATCH 4/5] undo files saved to wrong folder

---
 .../Dockerfile.txt                           |   7 -
 .../xgboost-randomforest-gpu-hpo-dask/hpo.py | 205 ------------------
 2 files changed, 212 deletions(-)
 delete mode 100644 source/examples/xgboost-randomforest-gpu-hpo-dask/Dockerfile.txt
 delete mode 100644 source/examples/xgboost-randomforest-gpu-hpo-dask/hpo.py

diff --git a/source/examples/xgboost-randomforest-gpu-hpo-dask/Dockerfile.txt b/source/examples/xgboost-randomforest-gpu-hpo-dask/Dockerfile.txt
deleted file mode 100644
index 6932cd7f..00000000
--- a/source/examples/xgboost-randomforest-gpu-hpo-dask/Dockerfile.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-# FROM nvcr.io/nvidia/rapidsai/rapidsai-core:23.06-cuda11.5-runtime-ubuntu20.04-py3.10
-
-# FROM rapidsai/rapidsai:23.06-cuda11.5-runtime-ubuntu20.04-py3.10
-
-FROM rapidsai/rapidsai:23.06-cuda11.8-runtime-ubuntu22.04-py3.10
-
-RUN mamba install -y -n rapids optuna
diff --git a/source/examples/xgboost-randomforest-gpu-hpo-dask/hpo.py b/source/examples/xgboost-randomforest-gpu-hpo-dask/hpo.py
deleted file mode 100644
index 7898974a..00000000
--- a/source/examples/xgboost-randomforest-gpu-hpo-dask/hpo.py
+++ /dev/null
@@ -1,205 +0,0 @@
-import argparse
-import glob
-import os
-import time
-
-import dask
-import optuna
-import xgboost as xgb
-from dask.distributed import Client, LocalCluster, wait
-from dask_cuda import LocalCUDACluster
-from dask_ml.model_selection import train_test_split
-from optuna.samplers import RandomSampler
-from sklearn.ensemble import RandomForestClassifier as RF_cpu
-from sklearn.metrics import accuracy_score as accuracy_score_cpu
-
-n_cv_folds = 5
-
-label_column = "ArrDel15"
-feature_columns = [
-    "Year",
-    "Quarter",
-    "Month",
-    "DayOfWeek",
-    "Flight_Number_Reporting_Airline",
-    "DOT_ID_Reporting_Airline",
-    "OriginCityMarketID",
-    "DestCityMarketID",
-    "DepTime",
-    "DepDelay",
-    "DepDel15",
-    "ArrDel15",
-    "AirTime",
-    "Distance",
-]
-
-
-def ingest_data(mode):
-    if mode == "gpu":
-        import dask_cudf
-
-        dataset = dask_cudf.read_parquet(
-            glob.glob("./data/*.parquet"),
-            columns=feature_columns,
-        )
-    else:
-        dataset = dask.dataframe.read_parquet(
-            glob.glob("./data/*.parquet"),
-            columns=feature_columns,
-        ).repartition(npartitions=1000)
-    return dataset
-
-
-def preprocess_data(dataset, *, client, i_fold, mode):
-    dataset = dataset.dropna()
-    train, test = train_test_split(dataset, random_state=i_fold, shuffle=True)
-    X_train, y_train = train.drop(label_column, axis=1), train[label_column]
-    X_test, y_test = test.drop(label_column, axis=1), test[label_column]
-    X_train, y_train = X_train.astype("float32"), y_train.astype("int32")
-    X_test, y_test = X_test.astype("float32"), y_test.astype("int32")
-
-    if mode == "gpu":
-        from cuml.dask.common.utils import persist_across_workers
-
-        X_train, y_train, X_test, y_test = persist_across_workers(
-            client, [X_train, y_train, X_test, y_test], workers=client.has_what().keys()
-        )
-    else:
-        X_train, y_train = X_train.persist(), y_train.persist()
-        X_test, y_test = X_test.persist(), y_test.persist()
-
-    wait([X_train, y_train, X_test, y_test])
-
-    return X_train, y_train, X_test, y_test
-
-
-def train_xgboost(trial, *, dataset, client, mode):
-    params = {
-        "max_depth": trial.suggest_int("max_depth", 4, 8),
-        "learning_rate": trial.suggest_float("learning_rate", 0.001, 0.1, log=True),
-        "min_child_weight": trial.suggest_float(
-            "min_child_weight", 0.1, 10.0, log=True
-        ),
-        "reg_alpha": trial.suggest_float("reg_alpha", 0.0001, 100, log=True),
-        "reg_lambda": trial.suggest_float("reg_lambda", 0.0001, 100, log=True),
-        "verbosity": 0,
-        "objective": "binary:logistic",
-    }
-    num_boost_round = trial.suggest_int("num_boost_round", 100, 500, step=10)
-
-    cv_fold_scores = []
-    for i_fold in range(n_cv_folds):
-        X_train, y_train, X_test, y_test = preprocess_data(
-            dataset, client=client, i_fold=i_fold, mode=mode
-        )
-
-        if mode == "gpu":
-            from cuml.metrics import accuracy_score as accuracy_score_gpu
-
-            params["tree_method"] = "gpu_hist"
-            dtrain = xgb.dask.DaskDeviceQuantileDMatrix(client, X_train, y_train)
-            dtest = xgb.dask.DaskDeviceQuantileDMatrix(client, X_test)
-            accuracy_score_func = accuracy_score_gpu
-        else:
-            params["tree_method"] = "hist"
-            dtrain = xgb.dask.DaskDMatrix(client, X_train, y_train)
-            dtest = xgb.dask.DaskDMatrix(client, X_test)
-            accuracy_score_func = accuracy_score_cpu
-
-        xgboost_output = xgb.dask.train(
-            client, params, dtrain, num_boost_round=num_boost_round
-        )
-        trained_model = xgboost_output["booster"]
-
-        pred = xgb.dask.predict(client, trained_model, dtest) > 0.5
-        pred = pred.astype("int32").compute()
-        y_test = y_test.compute()
-        score = accuracy_score_func(y_test, pred)
-        cv_fold_scores.append(score)
-    final_score = sum(cv_fold_scores) / len(cv_fold_scores)
-    return final_score
-
-
-def train_randomforest(trial, *, dataset, client, mode):
-    params = {
-        "max_depth": trial.suggest_int("max_depth", 5, 15),
-        "max_features": trial.suggest_float("max_features", 0.1, 1.0),
-        "n_estimators": trial.suggest_int("n_estimators", 100, 500, step=10),
-        "criterion": trial.suggest_categorical("criterion", ["gini", "entropy"]),
-        "min_samples_split": trial.suggest_int("min_samples_split", 2, 1000, log=True),
-    }
-
-    cv_fold_scores = []
-    for i_fold in range(n_cv_folds):
-        X_train, y_train, X_test, y_test = preprocess_data(
-            dataset, client=client, i_fold=i_fold, mode=mode
-        )
-
-        if mode == "gpu":
-            from cuml.dask.ensemble import RandomForestClassifier as RF_gpu
-            from cuml.metrics import accuracy_score as accuracy_score_gpu
-
-            params["n_bins"] = 256
-            trained_model = RF_gpu(client=client, **params)
-            accuracy_score_func = accuracy_score_gpu
-        else:
-            params["n_jobs"] = -1
-            trained_model = RF_cpu(**params)
-            accuracy_score_func = accuracy_score_cpu
-
-        trained_model.fit(X_train, y_train)
-        pred = trained_model.predict(X_test)
-        if mode == "gpu":
-            pred = pred.compute()
-        y_test = y_test.compute()
-        score = accuracy_score_func(y_test, pred)
-        cv_fold_scores.append(score)
-    final_score = sum(cv_fold_scores) / len(cv_fold_scores)
-    return final_score
-
-
-def main(args):
-    tstart = time.perf_counter()
-
-    study = optuna.create_study(
-        sampler=RandomSampler(seed=args.seed), direction="maximize"
-    )
-
-    if args.mode == "gpu":
-        cluster = LocalCUDACluster()
-    else:
-        cluster = LocalCluster(n_workers=os.cpu_count())
-
-    with Client(cluster) as client:
-        dataset = ingest_data(mode=args.mode)
-        dataset = client.persist(dataset)  # keep a handle so cached partitions aren't released
-        if args.model_type == "XGBoost":
-            study.optimize(
-                lambda trial: train_xgboost(
-                    trial, dataset=dataset, client=client, mode=args.mode
-                ),
-                n_trials=100,
-                n_jobs=1,
-            )
-        else:
-            study.optimize(
-                lambda trial: train_randomforest(
-                    trial, dataset=dataset, client=client, mode=args.mode
-                ),
-                n_trials=100,
-                n_jobs=1,
-            )
-
-    tend = time.perf_counter()
-    print(f"Time elapsed: {tend - tstart} sec")
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        "--model-type", type=str, required=True, choices=["XGBoost", "RandomForest"]
-    )
-    parser.add_argument("--mode", required=True, choices=["gpu", "cpu"])
-    parser.add_argument("--seed", required=False, type=int, default=1)
-    args = parser.parse_args()
-    main(args)

From 7171648bca6d5b5f2a68aca9b8101b6e519baa8f Mon Sep 17 00:00:00 2001
From: skirui-source
Date: Mon, 7 Aug 2023 14:46:24 -0700
Subject: [PATCH 5/5] add more tags

---
 .../examples/xgboost-rf-gpu-cpu-benchmark/notebook.ipynb | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/source/examples/xgboost-rf-gpu-cpu-benchmark/notebook.ipynb b/source/examples/xgboost-rf-gpu-cpu-benchmark/notebook.ipynb
index a488a4e6..944dadd3 100644
--- a/source/examples/xgboost-rf-gpu-cpu-benchmark/notebook.ipynb
+++ b/source/examples/xgboost-rf-gpu-cpu-benchmark/notebook.ipynb
@@ -10,7 +10,12 @@
      "workflow/randomforest",
      "workflow/hpo",
      "workflow/xgboost",
-     "library/dask"
+     "library/dask",
+     "library/dask-cuda",
+     "library/xgboost",
+     "library/optuna",
+     "library/sklearn",
+     "library/dask-ml"
     ]
    },
    "source": [
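
Usage sketch for the benchmark script shown in PATCH 3/5 (and reverted in PATCH 4/5 only because it was saved to the wrong folder). The --model-type, --mode, and --seed flags are the ones hpo.py defines, and the script reads the Airline Dataset parquet files from ./data/; the "rapids-hpo" image tag, the bind mount, and the working directory below are illustrative assumptions, not taken from the patches. Each run prints the elapsed wall-clock time behind the GPU-vs-CPU comparison quoted in PATCH 2/5.

    # Build the benchmark image from the Dockerfile above (tag name is an assumption)
    $ docker build -t rapids-hpo -f Dockerfile.txt .

    # 100 GPU HPO trials with XGBoost; mount the script and data into the container
    $ docker run --rm --gpus all -v "$PWD":/work -w /work rapids-hpo \
          python hpo.py --model-type XGBoost --mode gpu --seed 1

    # CPU RandomForest counterpart of the comparison (no GPUs required)
    $ python hpo.py --model-type RandomForest --mode cpu --seed 1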