diff --git a/source/examples/rapids-azureml-hpo/notebook.ipynb b/source/examples/rapids-azureml-hpo/notebook.ipynb
index 48e039a1..53e54e1f 100644
--- a/source/examples/rapids-azureml-hpo/notebook.ipynb
+++ b/source/examples/rapids-azureml-hpo/notebook.ipynb
@@ -316,8 +316,6 @@
     "    environment=\"test-rapids-mlflow:1\",\n",
     "    experiment_name=experiment_name,\n",
     "    code=os.getcwd(),\n",
-    "    command=\"python train_rapids.py --data_dir ${{inputs.data_dir}} --n_bins ${{inputs.n_bins}} --compute ${{inputs.compute}} --cv_folds ${{inputs.cv_folds}}\\\n",
-    "            --n_estimators ${{inputs.n_estimators}} --max_depth ${{inputs.max_depth}} --max_features ${{inputs.max_features}}\",\n",
     "    inputs={\n",
     "        \"data_dir\": Input(type=\"uri_file\", path=data_uri),\n",
     "        \"n_bins\": 32,\n",
@@ -327,6 +325,8 @@
     "        \"max_depth\": 6,\n",
     "        \"max_features\": 0.3,\n",
     "    },\n",
+    "    command=\"python train_rapids.py --data_dir ${{inputs.data_dir}} --n_bins ${{inputs.n_bins}} --compute ${{inputs.compute}} --cv_folds ${{inputs.cv_folds}}\\\n",
+    "            --n_estimators ${{inputs.n_estimators}} --max_depth ${{inputs.max_depth}} --max_features ${{inputs.max_features}}\",\n",
     "    compute=\"rapids-cluster\",\n",
     ")\n",
     "\n",
@@ -480,9 +480,9 @@
   "name": "rapids"
  },
  "kernelspec": {
-  "display_name": "rapids",
+  "display_name": "rapids-23.06",
   "language": "python",
-  "name": "rapids"
+  "name": "rapids-23.06"
  },
  "language_info": {
   "codemirror_mode": {
@@ -494,7 +494,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.10.11"
+  "version": "3.10.12"
  },
  "microsoft": {
   "ms_spell_check": {
diff --git a/source/examples/rapids-azureml-hpo/rapids_csp_azure.py b/source/examples/rapids-azureml-hpo/rapids_csp_azure.py
index e4f53026..32982edb 100644
--- a/source/examples/rapids-azureml-hpo/rapids_csp_azure.py
+++ b/source/examples/rapids-azureml-hpo/rapids_csp_azure.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
+#
 import json
 import logging
 import pprint
@@ -185,7 +185,7 @@ def load_data(
         elif "multi" in self.compute_type:
             self.log_to_file("\n\tReading using dask dataframe")
             dataset = dask.dataframe.read_parquet(
-                target_filename, columns=columns
+                target_filename, columns=col_labels
             )

         elif "GPU" in self.compute_type: