From fd70b3075d9cd5206084c294e5fa2ee8e7665db5 Mon Sep 17 00:00:00 2001
From: perib
Date: Mon, 30 Jan 2023 16:55:40 -0800
Subject: [PATCH] fix get_metric_score to get_metric_fn

---
 docs/benchmark.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/benchmark.md b/docs/benchmark.md
index 247ae963..a53d8534 100644
--- a/docs/benchmark.md
+++ b/docs/benchmark.md
@@ -150,7 +150,7 @@ First, the list of tasks is downloaded as already illustrated above. Next, a spe
 
     task = openml.tasks.get_task(task_id) # download the OpenML task
     run = openml.runs.run_model_on_task(clf, task) # run the classifier on the task
-    score = run.get_metric_score(sklearn.metrics.accuracy_score) # print accuracy score
+    score = run.get_metric_fn(sklearn.metrics.accuracy_score) # print accuracy score
     print('Data set: %s; Accuracy: %0.2f' % (task.get_dataset().name,score.mean()))
     run.publish() # publish the experiment on OpenML (optional, requires internet and an API key)
     print('URL for run: %s/run/%d' %(openml.config.server,run.run_id))
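
For context, a minimal self-contained sketch of the corrected call, assuming openml and scikit-learn are installed. The RandomForestClassifier and task ID 31 (an OpenML cross-validation task on 'credit-g') are illustrative stand-ins for the `clf` and `task_id` that docs/benchmark.md defines earlier; they are not part of this patch.

    import openml
    import sklearn.metrics
    from sklearn.ensemble import RandomForestClassifier

    clf = RandomForestClassifier(n_estimators=10)  # stand-in for the `clf` defined earlier in the docs
    task = openml.tasks.get_task(31)               # download an OpenML task (31: 10-fold CV on 'credit-g')
    run = openml.runs.run_model_on_task(clf, task, avoid_duplicate_runs=False)  # run locally
    scores = run.get_metric_fn(sklearn.metrics.accuracy_score)  # per-fold accuracy scores (NumPy array)
    print('Data set: %s; Accuracy: %0.2f' % (task.get_dataset().name, scores.mean()))
    # run.publish()  # optional: uploads the run to OpenML; requires internet and an API key

Despite its name, `get_metric_fn` evaluates the given metric on each fold of the run and returns the per-fold scores, which is why the snippet averages the result with `.mean()`; `get_metric_score` is not a method of the run object, which is what this patch fixes.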