diff --git a/bloptools/bayesian/acquisition/__init__.py b/bloptools/bayesian/acquisition/__init__.py index 5a1bb72..e71fb6f 100644 --- a/bloptools/bayesian/acquisition/__init__.py +++ b/bloptools/bayesian/acquisition/__init__.py @@ -119,11 +119,11 @@ def get_acquisition_function(agent, acq_func_identifier="qei", return_metadata=T acq_func_meta = {"name": acq_func_name, "args": {"beta": beta}} elif acq_func_name == "expected_mean": - acq_func, _ = get_acquisition_function(agent, acq_func_identifier="ucb", beta=0, return_metadata=False) + acq_func = get_acquisition_function(agent, acq_func_identifier="ucb", beta=0, return_metadata=False) acq_func_meta = {"name": acq_func_name, "args": {}} elif acq_func_name == "monte_carlo_expected_mean": - acq_func, _ = get_acquisition_function(agent, acq_func_identifier="qucb", beta=0, return_metadata=False) + acq_func = get_acquisition_function(agent, acq_func_identifier="qucb", beta=0, return_metadata=False) acq_func_meta = {"name": acq_func_name, "args": {}} return (acq_func, acq_func_meta) if return_metadata else acq_func diff --git a/bloptools/bayesian/agent.py b/bloptools/bayesian/agent.py index 0191694..e94cb6f 100644 --- a/bloptools/bayesian/agent.py +++ b/bloptools/bayesian/agent.py @@ -33,8 +33,6 @@ MAX_TEST_INPUTS = 2**11 -os.environ["KMP_DUPLICATE_LIB_OK"] = "True" - class Agent: def __init__( @@ -126,8 +124,8 @@ def tell(self, new_table=None, append=True, train=True, **kwargs): if not train_index.sum() >= 2: raise ValueError("There must be at least two valid data points per objective!") - train_inputs = torch.tensor(inputs[train_index]).double() - train_targets = torch.tensor(targets[train_index]).double().unsqueeze(-1) # .unsqueeze(0) + train_inputs = torch.tensor(inputs[train_index], dtype=torch.double) + train_targets = torch.tensor(targets[train_index], dtype=torch.double).unsqueeze(-1) # .unsqueeze(0) # for constructing the log normal noise prior # target_snr = 2e2 @@ -212,7 +210,7 @@ def ask(self, acq_func_identifier="qei", n=1, route=True, sequential=True, **acq candidates, _ = botorch.optim.optimize_acqf( acq_function=acq_func, - bounds=self._active_bounds_torch, + bounds=self.acquisition_function_bounds, q=n, sequential=sequential, num_restarts=NUM_RESTARTS, @@ -392,22 +390,6 @@ def _get_objective_targets(self, i): return targets - # @property - # def acquisition_dofs(self): - # """ - # Returns the acquisition DOFs, which are the DOFs to optimize over (that is, active and not read-only). - # """ - # return self.dofs.subset(active=True, read_only=False) - - # @property - # def acquisition_dof_limits(self): - # """ - # Returns the acquisition limits, which are the ranges optimize over (that is, active and not read-only). - # This has shape (n_acq_dofs, 2). - # """ - # acq_dofs = self.dofs.subset(active=True, read_only=False) - # return np.c_[acq_dofs.summary.lower_limit.values, acq_dofs.summary.upper_limit.values] - @property def n_objs(self): """ @@ -441,17 +423,19 @@ def target_names(self): def test_inputs_grid(self, max_inputs=MAX_TEST_INPUTS): """ - Returns a (n_side, ..., n_side, 1, n_active_dof) grid of test_inputs + Returns a (n_side, ..., n_side, 1, n_active_dof) grid of test_inputs. 
+ n_side is 1 if a dof is read-only """ - n_acq_dofs = len(self.dofs.subset(active=True, read_only=False)) - n_side = int(np.power(max_inputs, n_acq_dofs**-1)) + n_settable_acq_func_dofs = len(self.dofs.subset(active=True, read_only=False)) + n_side_settable = int(np.power(max_inputs, n_settable_acq_func_dofs**-1)) + n_sides = [1 if dof.read_only else n_side_settable for dof in self.dofs.subset(active=True)] return torch.cat( [ tensor.unsqueeze(-1) for tensor in torch.meshgrid( *[ - torch.linspace(dof.lower_limit, dof.upper_limit, n_side) if not dof.read_only else dof.readback - for dof in self.dofs.subset(active=True) + torch.linspace(lower_limit, upper_limit, n_side) + for (lower_limit, upper_limit), n_side in zip(self.dofs.subset(active=True).limits, n_sides) ], indexing="ij", ) @@ -463,10 +447,18 @@ def test_inputs(self, n=MAX_TEST_INPUTS): """ Returns a (n, 1, n_active_dof) grid of test_inputs """ - return utils.sobol_sampler(self._active_bounds_torch, n=n) + return utils.sobol_sampler(self.acquisition_function_bounds, n=n) @property - def _active_bounds_torch(self): + def acquisition_function_bounds(self): + """ + Returns a (2, n_active_dof) array of bounds for the acquisition function + """ + acq_func_lower_bounds = [dof.lower_limit if not dof.read_only else dof.readback for dof in self.dofs] + acq_func_upper_bounds = [dof.upper_limit if not dof.read_only else dof.readback for dof in self.dofs] + + return torch.tensor(np.vstack([acq_func_lower_bounds, acq_func_upper_bounds]), dtype=torch.double) + return torch.tensor( [dof.limits if not dof.read_only else tuple(2 * [dof.readback]) for dof in self.dofs.subset(active=True)] ).T @@ -644,48 +636,5 @@ def plot_validity(self, **kwargs): else: plotting._plot_valid_many_dofs(self, **kwargs) - # def plot_history(self, x_key="index", show_all_objs=False): - # x = getattr(self.table, x_key).values - - # num_obj_plots = 1 - # if show_all_objs: - # num_obj_plots = self.n_objs + 1 - - # self.n_objs + 1 if self.n_objs > 1 else 1 - - # hist_fig, hist_axes = plt.subplots( - # num_obj_plots, 1, figsize=(6, 4 * num_obj_plots), sharex=True, constrained_layout=True, dpi=200 - # ) - # hist_axes = np.atleast_1d(hist_axes) - - # unique_strategies, acq_func_index, acq_func_inverse = np.unique( - # self.table.acq_func, return_index=True, return_inverse=True - # ) - - # sample_colors = np.array(DEFAULT_COLOR_LIST)[acq_func_inverse] - - # if show_all_objs: - # for obj_index, obj in enumerate(self.objectives): - # y = self.table.loc[:, f"{obj.key}_fitness"].values - # hist_axes[obj_index].scatter(x, y, c=sample_colors) - # hist_axes[obj_index].plot(x, y, lw=5e-1, c="k") - # hist_axes[obj_index].set_ylabel(obj.key) - - # y = self.scalarized_objectives - - # cummax_y = np.array([np.nanmax(y[: i + 1]) for i in range(len(y))]) - - # hist_axes[-1].scatter(x, y, c=sample_colors) - # hist_axes[-1].plot(x, y, lw=5e-1, c="k") - - # hist_axes[-1].plot(x, cummax_y, lw=5e-1, c="k", ls=":") - - # hist_axes[-1].set_ylabel("total_fitness") - # hist_axes[-1].set_xlabel(x_key) - - # handles = [] - # for i_acq_func, acq_func in enumerate(unique_strategies): - # # i_acq_func = np.argsort(acq_func_index)[i_handle] - # handles.append(Patch(color=DEFAULT_COLOR_LIST[i_acq_func], label=acq_func)) - # legend = hist_axes[0].legend(handles=handles, fontsize=8) - # legend.set_title("acquisition function") + def plot_history(self, **kwargs): + plotting._plot_history(self, **kwargs) diff --git a/bloptools/bayesian/devices.py b/bloptools/bayesian/devices.py index af08bfe..d86b408 100644 --- 
a/bloptools/bayesian/devices.py +++ b/bloptools/bayesian/devices.py @@ -33,7 +33,7 @@ class DOF: def __init__( self, device: Signal = None, - limits: Tuple[numeric, numeric] = (-10.0, 10.0), + limits: Tuple[float, float] = (-10.0, 10.0), name: str = None, units: str = None, read_only: bool = None, @@ -53,11 +53,11 @@ def __init__( @property def lower_limit(self): - return self.limits[0] + return float(self.limits[0]) @property def upper_limit(self): - return self.limits[1] + return float(self.limits[1]) @property def readback(self): diff --git a/bloptools/bayesian/objective.py b/bloptools/bayesian/objective.py index 0636dcd..70a0bf9 100644 --- a/bloptools/bayesian/objective.py +++ b/bloptools/bayesian/objective.py @@ -6,7 +6,7 @@ numeric = Union[float, int] -DEFAULT_MINIMUM_SNR = 1e1 +DEFAULT_MINIMUM_SNR = 2e1 OBJ_FIELDS = ["name", "key", "limits", "weight", "minimize", "log"] @@ -59,7 +59,7 @@ def summary(self): return series def __repr__(self): - return self.params.__repr__() + return self.summary.__repr__() @property def has_model(self): @@ -88,6 +88,8 @@ def summary(self): for attr in ["minimize", "log"]: summary[attr] = summary[attr].astype(bool) + return summary + def __repr__(self): return self.summary.__repr__() diff --git a/bloptools/bayesian/plotting.py b/bloptools/bayesian/plotting.py index 57e1998..cd35bc0 100644 --- a/bloptools/bayesian/plotting.py +++ b/bloptools/bayesian/plotting.py @@ -1,6 +1,7 @@ import matplotlib as mpl -import matplotlib.pyplot as plt import numpy as np +from matplotlib import pyplot as plt +from matplotlib.patches import Patch from . import acquisition @@ -194,7 +195,7 @@ def _plot_acq_one_dof(agent, acq_funcs, lw=1e0, **kwargs): acq_func, acq_func_meta = acquisition.get_acquisition_function(agent, acq_func_identifier) test_acqf = acq_func(test_inputs).detach().numpy() - agent.acq_axes[iacq_func].plot(test_inputs.squeeze(), test_acqf, lw=lw, color=color) + agent.acq_axes[iacq_func].plot(test_inputs.squeeze(-2), test_acqf, lw=lw, color=color) agent.acq_axes[iacq_func].set_xlim(*x_dof.limits) agent.acq_axes[iacq_func].set_xlabel(x_dof.label) @@ -269,7 +270,7 @@ def _plot_valid_one_dof(agent, size=16, lw=1e0): constraint = agent.constraint(test_inputs)[..., 0] agent.valid_ax.scatter(x_values, agent.all_objectives_valid, s=size) - agent.valid_ax.plot(test_inputs.squeeze(), constraint, lw=lw) + agent.valid_ax.plot(test_inputs.squeeze(-2), constraint, lw=lw) agent.valid_ax.set_xlim(*x_dof.limits) @@ -316,45 +317,51 @@ def _plot_valid_many_dofs(agent, axes=[0, 1], shading="nearest", cmap=DEFAULT_CO ax.set_xlim(*x_dof.limits) ax.set_ylim(*y_dof.limits) - # data_ax = agent.valid_axes[0].scatter( - # *agent.acquisition_inputs.values.T[:2], - # c=agent.all_objectives_valid, - # s=size, - # vmin=0, - # vmax=1, - # cmap=cmap, - # ) - - # x = agent.test_inputs_grid().squeeze() if gridded else agent.test_inputs(n=MAX_TEST_INPUTS) - # *input_shape, input_dim = x.shape - # constraint = agent.classifier.probabilities(x.reshape(-1, 1, input_dim))[..., -1].reshape(input_shape) - - # if gridded: - # agent.valid_axes[1].pcolormesh( - # x[..., 0].detach().numpy(), - # x[..., 1].detach().numpy(), - # constraint.detach().numpy(), - # shading=shading, - # cmap=cmap, - # vmin=0, - # vmax=1, - # ) - - # # agent.acq_fig.colorbar(obj_ax, ax=agent.valid_axes[iacq_func], location="bottom", aspect=32, shrink=0.8) - - # else: - # # agent.valid_axes.set_title(acq_func_meta["name"]) - # agent.valid_axes[1].scatter( - # x.detach().numpy()[..., axes[0]], - # x.detach().numpy()[..., 
axes[1]], - # c=constraint.detach().numpy(), - # ) - - # agent.valid_fig.colorbar(data_ax, ax=agent.valid_axes[:2], location="bottom", aspect=32, shrink=0.8) - - # for ax in agent.valid_axes.ravel(): - # ax.set_xlim(*agent.dofs.subset(active=True, read_only=False)[axes[0]].limits) - # ax.set_ylim(*agent.dofs.subset(active=True, read_only=False)[axes[1]].limits) + +def _plot_history(agent, x_key="index", show_all_objs=False): + x = getattr(agent.table, x_key).values + + num_obj_plots = 1 + if show_all_objs: + num_obj_plots = agent.n_objs + 1 + + agent.n_objs + 1 if agent.n_objs > 1 else 1 + + hist_fig, hist_axes = plt.subplots( + num_obj_plots, 1, figsize=(6, 4 * num_obj_plots), sharex=True, constrained_layout=True, dpi=200 + ) + hist_axes = np.atleast_1d(hist_axes) + + unique_strategies, acq_func_index, acq_func_inverse = np.unique( + agent.table.acq_func, return_index=True, return_inverse=True + ) + + sample_colors = np.array(DEFAULT_COLOR_LIST)[acq_func_inverse] + + if show_all_objs: + for obj_index, obj in enumerate(agent.objectives): + y = agent.table.loc[:, f"{obj.key}_fitness"].values + hist_axes[obj_index].scatter(x, y, c=sample_colors) + hist_axes[obj_index].plot(x, y, lw=5e-1, c="k") + hist_axes[obj_index].set_ylabel(obj.key) + + y = agent.scalarized_objectives + + cummax_y = np.array([np.nanmax(y[: i + 1]) for i in range(len(y))]) + + hist_axes[-1].scatter(x, y, c=sample_colors) + hist_axes[-1].plot(x, y, lw=5e-1, c="k") + + hist_axes[-1].plot(x, cummax_y, lw=5e-1, c="k", ls=":") + + hist_axes[-1].set_ylabel("total_fitness") + hist_axes[-1].set_xlabel(x_key) + + handles = [] + for i_acq_func, acq_func in enumerate(unique_strategies): + handles.append(Patch(color=DEFAULT_COLOR_LIST[i_acq_func], label=acq_func)) + legend = hist_axes[0].legend(handles=handles, fontsize=8) + legend.set_title("acquisition function") def inspect_beam(agent, index, border=None): diff --git a/bloptools/tasks.py b/bloptools/tasks.py deleted file mode 100644 index aaa12ba..0000000 --- a/bloptools/tasks.py +++ /dev/null @@ -1,25 +0,0 @@ -class Task: - MIN_NOISE_LEVEL = 1e-6 - MAX_NOISE_LEVEL = 1e-2 - - def __init__(self, key, kind="max", min=None, name=None, transform=None, **kwargs): - self.key = key - self.min = min - self.max = max - self.kind = kind - self.name = name if name is not None else f"{key}_fitness" - self.transform = transform if transform is not None else lambda x: x - self.weight = 1.0 - - if kind.lower() in ["min", "minimum", "minimize"]: - self.sign = -1 - elif kind.lower() in ["max", "maximum", "maximize"]: - self.sign = +1 - else: - raise ValueError('"kind" must be either "min" or "max"') - - def fitness_func(self, x): - return self.sign * self.transform(x) - - def get_fitness(self, entry): - return self.fitness_func(getattr(entry, self.key)) diff --git a/bloptools/tests/test_passive_dofs.py b/bloptools/tests/test_passive_dofs.py index d4216dc..732caec 100644 --- a/bloptools/tests/test_passive_dofs.py +++ b/bloptools/tests/test_passive_dofs.py @@ -10,7 +10,7 @@ def test_passive_dofs(RE, db): DOF(name="x1", limits=(-5.0, 5.0)), DOF(name="x2", limits=(-5.0, 5.0)), DOF(BrownianMotion(name="brownian1"), read_only=True), - DOF(BrownianMotion(name="brownian1"), read_only=True), + DOF(BrownianMotion(name="brownian2"), read_only=True), ] objectives = [ @@ -28,6 +28,6 @@ def test_passive_dofs(RE, db): RE(agent.learn("qr", n=32)) - agent.plot_tasks() + agent.plot_objectives() agent.plot_acquisition() agent.plot_validity() diff --git a/bloptools/tests/test_plots.py b/bloptools/tests/test_plots.py 
index 1d0d864..bf81ac2 100644 --- a/bloptools/tests/test_plots.py +++ b/bloptools/tests/test_plots.py @@ -5,7 +5,7 @@ def test_plots(RE, agent): RE(agent.learn("qr", n=32)) - agent.plot_tasks() + agent.plot_objectives() agent.plot_acquisition() agent.plot_validity() agent.plot_history() diff --git a/docs/source/tutorials/himmelblau.ipynb b/docs/source/tutorials/himmelblau.ipynb index 1a93aa7..d42c32a 100644 --- a/docs/source/tutorials/himmelblau.ipynb +++ b/docs/source/tutorials/himmelblau.ipynb @@ -19,6 +19,18 @@ "Let's use ``bloptools`` to minimize Himmelblau's function, which has four global minima:" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "cf27fc9e-d11c-40f4-a200-98e7814f506b", + "metadata": {}, + "outputs": [], + "source": [ + "from bloptools.utils import prepare_re_env\n", + "\n", + "%run -i $prepare_re_env.__file__ --db-type=temp" + ] + }, { "cell_type": "code", "execution_count": null, @@ -33,12 +45,10 @@ "\n", "x1 = x2 = np.linspace(-6, 6, 1024)\n", "X1, X2 = np.meshgrid(x1, x2)\n", - "from bloptools.tasks import Task\n", "\n", - "task = Task(key=\"himmelblau\", kind=\"min\")\n", "F = test_functions.himmelblau(X1, X2)\n", "\n", - "plt.pcolormesh(x1, x2, F, norm=mpl.colors.LogNorm())\n", + "plt.pcolormesh(x1, x2, F, norm=mpl.colors.LogNorm(vmin=1e-1, vmax=1e3), cmap=\"magma_r\")\n", "plt.colorbar()\n", "plt.xlabel(\"x1\")\n", "plt.ylabel(\"x2\")" @@ -59,11 +69,13 @@ "metadata": {}, "outputs": [], "source": [ - "from bloptools import devices\n", + "from bloptools.bayesian import DOF, BrownianMotion\n", "\n", "dofs = [\n", - " {\"device\": devices.DOF(name=\"x1\"), \"limits\": (-6, 6), \"kind\": \"active\"},\n", - " {\"device\": devices.DOF(name=\"x2\"), \"limits\": (-6, 6), \"kind\": \"active\"},\n", + " DOF(name=\"x1\", limits=(-6, 6)),\n", + " DOF(name=\"x2\", limits=(-6, 6)),\n", + " DOF(BrownianMotion(name=\"brownian1\"), read_only=True),\n", + " DOF(BrownianMotion(name=\"brownian2\"), read_only=True),\n", "]" ] }, @@ -82,9 +94,9 @@ "metadata": {}, "outputs": [], "source": [ - "tasks = [\n", - " {\"key\": \"himmelblau\", \"kind\": \"minimize\"},\n", - "]" + "from bloptools.bayesian import Objective\n", + "\n", + "objectives = [Objective(key=\"himmelblau\", minimize=True)]" ] }, { @@ -130,54 +142,62 @@ }, "outputs": [], "source": [ - "from bloptools.utils import prepare_re_env\n", - "\n", - "%run -i $prepare_re_env.__file__ --db-type=temp\n", "from bloptools.bayesian import Agent\n", "\n", + "\n", "agent = Agent(\n", " dofs=dofs,\n", - " tasks=tasks,\n", + " objectives=objectives,\n", " digestion=digestion,\n", " db=db,\n", ")" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "e964d5a5-2a4a-4403-8c06-4ad17b00cecf", + "metadata": {}, + "outputs": [], + "source": [ + "agent.test_inputs_grid().shape" + ] + }, { "cell_type": "markdown", - "id": "b7a608d6", + "id": "27685849", "metadata": {}, "source": [ - "To decide which points to sample, the agent needs an acquisition function. The available acquisition function are here:" + "Without any data, we can't make any inferences about what the function looks like, and so we can't use any non-trivial acquisition functions. 
Let's start by quasi-randomly sampling the parameter space, and plotting our model of the function:" ] }, { "cell_type": "code", "execution_count": null, - "id": "fb06739b", + "id": "996da937", "metadata": {}, "outputs": [], "source": [ - "agent.acq_func_info" + "RE(agent.learn(\"quasi-random\", n=32))\n", + "agent.plot_objectives()" ] }, { "cell_type": "markdown", - "id": "27685849", + "id": "dc264346-10fb-4c88-9925-4bfcf0dd3b07", "metadata": {}, "source": [ - "Without any data, we can't make any inferences about what the function looks like, and so we can't use any non-trivial acquisition functions. Let's start by quasi-randomly sampling the parameter space, and plotting our model of the function:" + "To decide which points to sample, the agent needs an acquisition function. The available acquisition function are here:" ] }, { "cell_type": "code", "execution_count": null, - "id": "996da937", + "id": "fb06739b", "metadata": {}, "outputs": [], "source": [ - "RE(agent.learn(\"quasi-random\", n=32))\n", - "agent.plot_tasks()" + "agent.acq_func_info" ] }, { @@ -196,9 +216,43 @@ "metadata": {}, "outputs": [], "source": [ - "agent.plot_acquisition(acq_funcs=[\"qei\", \"qpi\", \"ucb\"])" + "agent.plot_acquisition(acq_funcs=[\"qei\", \"pi\", \"qucb\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9c0b42dc-2df3-4ba6-b02f-569dab48db80", + "metadata": {}, + "outputs": [], + "source": [ + "agent.dofs.limits" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "62b24d9b-740f-45a6-9617-47796c260273", + "metadata": {}, + "outputs": [], + "source": [ + "self = agent\n", + "import torch\n", + "\n", + "acq_func_lower_bounds = [dof.lower_limit if not dof.read_only else dof.readback for dof in self.dofs]\n", + "acq_func_upper_bounds = [dof.upper_limit if not dof.read_only else dof.readback for dof in self.dofs]\n", + "\n", + "torch.tensor(np.vstack([acq_func_lower_bounds, acq_func_upper_bounds]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "217158c0-aa65-409c-a7ab-63bb924723ad", + "metadata": {}, + "outputs": [], + "source": [] + }, { "attachments": {}, "cell_type": "markdown", @@ -208,6 +262,24 @@ "To decide where to go, the agent will find the inputs that maximize a given acquisition function:" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "16ec3c97-211b-49df-9e45-fcdd61ae98eb", + "metadata": {}, + "outputs": [], + "source": [ + "agent.acquisition_function_bounds" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a066da53-0cdc-429b-a588-ce22b4a599b5", + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "code", "execution_count": null, @@ -238,9 +310,13 @@ }, "outputs": [], "source": [ - "X, _ = agent.ask(\"qei\", n=8)\n", + "X, _ = agent.ask(\"qei\", n=8, route=True)\n", "agent.plot_acquisition(acq_funcs=[\"qei\"])\n", - "plt.plot(*X.T, lw=5e-1, c=\"r\", marker=\"x\")" + "plt.scatter(*X.T, marker=\"d\", facecolor=\"w\", edgecolor=\"k\")\n", + "plt.plot(\n", + " *X.T,\n", + " color=\"r\",\n", + ")" ] }, { @@ -276,9 +352,17 @@ "metadata": {}, "outputs": [], "source": [ - "agent.plot_tasks()\n", - "print(agent.best_inputs)" + "agent.plot_objectives()\n", + "# print(agent.best_inputs)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b6453b87-f864-40af-ba70-9a42960f54b9", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/docs/source/tutorials/hyperparameters.ipynb b/docs/source/tutorials/hyperparameters.ipynb index 
e4026e1..29bda11 100644 --- a/docs/source/tutorials/hyperparameters.ipynb +++ b/docs/source/tutorials/hyperparameters.ipynb @@ -93,7 +93,7 @@ "\n", "RE(agent.learn(acq_func=\"qr\", n=16))\n", "\n", - "agent.plot_tasks()" + "agent.plot_objectives()" ] }, { @@ -125,7 +125,7 @@ "outputs": [], "source": [ "RE(agent.learn(\"qei\", n=4, iterations=4))\n", - "agent.plot_tasks()" + "agent.plot_objectives()" ] } ], diff --git a/docs/source/tutorials/passive-dofs.ipynb b/docs/source/tutorials/passive-dofs.ipynb index 0d16f86..1de0900 100644 --- a/docs/source/tutorials/passive-dofs.ipynb +++ b/docs/source/tutorials/passive-dofs.ipynb @@ -25,6 +25,18 @@ "id": "e6bfcf73", "metadata": {}, "outputs": [], + "source": [ + "from bloptools.utils import prepare_re_env\n", + "\n", + "%run -i $prepare_re_env.__file__ --db-type=temp" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4cf5dbd1-e404-4504-b822-3956ca61ef74", + "metadata": {}, + "outputs": [], "source": [ "from bloptools.utils import prepare_re_env\n", "\n", @@ -38,30 +50,50 @@ " products = db[uid].table()\n", "\n", " for index, entry in products.iterrows():\n", - " products.loc[index, \"styblinksi-tang\"] = test_functions.styblinski_tang(entry.x - 1e-1 * entry.brownian)\n", + " products.loc[index, \"styblinksi-tang\"] = test_functions.styblinski_tang(entry.x1 - 1e-1 * entry.brownian)\n", "\n", " return products\n", "\n", "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c88d54d9-c600-41ad-9b3e-53af510d8760", + "metadata": {}, + "outputs": [], + "source": [ "dofs = [\n", - " {\"device\": devices.DOF(name=\"x\"), \"limits\": (-5, 5), \"kind\": \"active\"},\n", - " {\"device\": devices.BrownianMotion(name=\"brownian\"), \"limits\": (-2, 2), \"kind\": \"passive\"},\n", + " DOF(name=\"x1\", limits=(-5.0, 5.0)),\n", + " DOF(name=\"x2\", limits=(-5.0, 5.0)),\n", + " DOF(BrownianMotion(name=\"brownian1\"), read_only=True),\n", + " DOF(BrownianMotion(name=\"brownian2\"), read_only=True),\n", "]\n", "\n", - "tasks = [\n", - " {\"key\": \"styblinksi-tang\", \"kind\": \"minimize\"},\n", + "objectives = [\n", + " Objective(key=\"styblinksi-tang\", minimize=True),\n", "]\n", "\n", "agent = Agent(\n", " dofs=dofs,\n", - " tasks=tasks,\n", + " objectives=objectives,\n", " digestion=digestion,\n", " db=db,\n", - ")\n", - "\n", - "RE(agent.learn(\"qr\", n=32))\n", - "\n", - "agent.plot_tasks()" + " verbose=True,\n", + " tolerate_acquisition_errors=False,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ee813fab-1114-46ea-81f9-35c8292b2ba4", + "metadata": {}, + "outputs": [], + "source": [ + "agent.plot_objectives()" ] } ], @@ -81,7 +113,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.5" + "version": "3.11.4" }, "vscode": { "interpreter": { diff --git a/docs/wip/constrained-himmelblau copy.ipynb b/docs/wip/constrained-himmelblau copy.ipynb index b317aa1..c566dda 100644 --- a/docs/wip/constrained-himmelblau copy.ipynb +++ b/docs/wip/constrained-himmelblau copy.ipynb @@ -171,7 +171,7 @@ "outputs": [], "source": [ "RE(agent.learn(\"quasi-random\", n=64))\n", - "agent.plot_tasks()" + "agent.plot_objectives()" ] }, { @@ -297,7 +297,7 @@ }, "outputs": [], "source": [ - "agent.plot_tasks()\n", + "agent.plot_objectives()\n", "agent.plot_acquisition(strategy=[\"ei\", \"pi\", \"ucb\"])" ] }, @@ -318,7 +318,7 @@ "outputs": [], "source": [ "RE(agent.learn(\"ei\", n_iter=16))\n", - "agent.plot_tasks()" + "agent.plot_objectives()" ] } ], 
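The passive-DOF tutorials above lean on the new `acquisition_function_bounds` property added to `Agent` earlier in this diff: settable DOFs contribute their limits, while read-only DOFs are pinned to their current readback so the acquisition-function optimizer never tries to move them. A minimal sketch of that logic, using a hypothetical `FakeDOF` stand-in rather than the real `bloptools` classes:

```python
import numpy as np
import torch


class FakeDOF:
    """Hypothetical stand-in for bloptools.bayesian.DOF, for illustration only."""

    def __init__(self, lower_limit, upper_limit, read_only=False, readback=0.0):
        self.lower_limit = lower_limit
        self.upper_limit = upper_limit
        self.read_only = read_only
        self.readback = readback  # current value of a read-only (passive) DOF


def acquisition_function_bounds(dofs):
    # Settable DOFs contribute their limits; read-only DOFs are pinned to their
    # current readback, so the acquisition-function optimizer cannot move them.
    lower = [dof.lower_limit if not dof.read_only else dof.readback for dof in dofs]
    upper = [dof.upper_limit if not dof.read_only else dof.readback for dof in dofs]
    return torch.tensor(np.vstack([lower, upper]), dtype=torch.double)


dofs = [
    FakeDOF(-5.0, 5.0),                                # active, settable
    FakeDOF(-5.0, 5.0),                                # active, settable
    FakeDOF(-2.0, 2.0, read_only=True, readback=0.3),  # passive, like a BrownianMotion signal
]

print(acquisition_function_bounds(dofs))
# tensor([[-5.0000, -5.0000,  0.3000],
#         [ 5.0000,  5.0000,  0.3000]], dtype=torch.float64)
```

With degenerate bounds on the read-only axes, `botorch.optim.optimize_acqf` still receives a full-dimensional bounds tensor in `ask()`, but it effectively only optimizes over the settable DOFs.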
diff --git a/docs/wip/introduction.ipynb b/docs/wip/introduction.ipynb index 9ccc3b0..709af97 100644 --- a/docs/wip/introduction.ipynb +++ b/docs/wip/introduction.ipynb @@ -154,7 +154,7 @@ "source": [ "# what are the points?\n", "\n", - "agent.plot_tasks()" + "agent.plot_objectives()" ] }, { @@ -209,7 +209,7 @@ "outputs": [], "source": [ "RE(agent.learn(\"ei\", n_iter=4))\n", - "agent.plot_tasks()" + "agent.plot_objectives()" ] } ], diff --git a/docs/wip/latent-toroid-dimensions.ipynb b/docs/wip/latent-toroid-dimensions.ipynb index dd93bd1..14d8426 100644 --- a/docs/wip/latent-toroid-dimensions.ipynb +++ b/docs/wip/latent-toroid-dimensions.ipynb @@ -84,7 +84,7 @@ }, "outputs": [], "source": [ - "agent.plot_tasks()\n", + "agent.plot_objectives()\n", "agent.plot_validity()\n", "agent.plot_acquisition(strategy=[\"ei\", \"pi\", \"ucb\"])" ] diff --git a/docs/wip/multi-task-sirepo.ipynb b/docs/wip/multi-task-sirepo.ipynb index 906e457..85b4e1b 100644 --- a/docs/wip/multi-task-sirepo.ipynb +++ b/docs/wip/multi-task-sirepo.ipynb @@ -90,7 +90,7 @@ }, "outputs": [], "source": [ - "agent.plot_tasks()\n", + "agent.plot_objectives()\n", "agent.plot_acquisition(strategy=[\"ei\", \"pi\", \"ucb\"])" ] }, @@ -113,7 +113,7 @@ "outputs": [], "source": [ "RE(agent.learn(\"ei\", n_iter=2))\n", - "agent.plot_tasks()" + "agent.plot_objectives()" ] }, {
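Taken together, the documentation changes all follow the same migration away from the deleted `bloptools/tasks.py` module: dict-style DOFs and tasks become `DOF` and `Objective` objects, and `plot_tasks()` becomes `plot_objectives()`. A condensed before/after sketch of that migration (assuming `digestion` and `db` are set up as in the himmelblau tutorial; this is not a self-contained script):

```python
from bloptools.bayesian import Agent

# Before (old API, as in the removed notebook cells):
from bloptools import devices

dofs = [
    {"device": devices.DOF(name="x1"), "limits": (-6, 6), "kind": "active"},
    {"device": devices.DOF(name="x2"), "limits": (-6, 6), "kind": "active"},
]
tasks = [{"key": "himmelblau", "kind": "minimize"}]
agent = Agent(dofs=dofs, tasks=tasks, digestion=digestion, db=db)
agent.plot_tasks()

# After (new API, as in the added notebook cells):
from bloptools.bayesian import DOF, Objective

dofs = [DOF(name="x1", limits=(-6, 6)), DOF(name="x2", limits=(-6, 6))]
objectives = [Objective(key="himmelblau", minimize=True)]
agent = Agent(dofs=dofs, objectives=objectives, digestion=digestion, db=db)
agent.plot_objectives()
```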