diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 50cc42c..636daa0 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -33,19 +33,19 @@ jobs: with: fetch-depth: 0 - - name: Start MongoDB - uses: supercharge/mongodb-github-action@1.6.0 + # - name: Start MongoDB + # uses: supercharge/mongodb-github-action@1.6.0 - - name: Start Sirepo Docker container - uses: NSLS-II/start-sirepo-action@v2 - with: - docker-binary: docker + # - name: Start Sirepo Docker container + # uses: NSLS-II/start-sirepo-action@v2 + # with: + # docker-binary: docker - - name: Copy databroker config file - run: | - set -vxeuo pipefail - mkdir -v -p ~/.config/databroker/ - wget https://raw.githubusercontent.com/NSLS-II/sirepo-bluesky/main/examples/local.yml -O ~/.config/databroker/local.yml + # - name: Copy databroker config file + # run: | + # set -vxeuo pipefail + # mkdir -v -p ~/.config/databroker/ + # wget https://raw.githubusercontent.com/NSLS-II/sirepo-bluesky/main/examples/local.yml -O ~/.config/databroker/local.yml - name: Set up Python ${{ matrix.python-version }} with conda uses: conda-incubator/setup-miniconda@v2 @@ -57,19 +57,29 @@ jobs: mamba-version: "*" channels: conda-forge - - name: Install documentation-building requirements + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Install documentation-building requirements with apt/dpkg + run: | + set -vxeuo pipefail + wget --progress=dot:giga "https://github.com/jgm/pandoc/releases/download/3.1.6.1/pandoc-3.1.6.1-1-amd64.deb" -O /tmp/pandoc.deb + sudo dpkg -i /tmp/pandoc.deb + # conda install -c conda-forge -y pandoc + which pandoc + pandoc --version + + - name: Install documentation-building requirements with pip run: | # For reference: https://www.gnu.org/software/bash/manual/html_node/The-Set-Builtin.html. set -vxeo pipefail - conda env list - # mamba install -c conda-forge shadow3 srwpy pandoc - mamba install -c conda-forge pandoc pip install --upgrade pip wheel pip install -v . 
pip install -r requirements-dev.txt pip list - conda list - name: Build Docs run: make -C docs/ html diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index f953f1e..2304149 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -32,45 +32,47 @@ jobs: - name: Checkout the code uses: actions/checkout@v3 - - name: Start MongoDB - uses: supercharge/mongodb-github-action@1.6.0 + # - name: Start MongoDB + # uses: supercharge/mongodb-github-action@1.6.0 - - name: Start Sirepo Docker container - uses: NSLS-II/start-sirepo-action@v2 - with: - docker-binary: docker + # - name: Start Sirepo Docker container + # uses: NSLS-II/start-sirepo-action@v2 + # with: + # docker-binary: docker - - name: Copy databroker config file - run: | - set -vxeuo pipefail - mkdir -v -p ~/.config/databroker/ - wget https://raw.githubusercontent.com/NSLS-II/sirepo-bluesky/main/examples/local.yml -O ~/.config/databroker/local.yml + # - name: Copy databroker config file + # run: | + # set -vxeuo pipefail + # mkdir -v -p ~/.config/databroker/ + # wget https://raw.githubusercontent.com/NSLS-II/sirepo-bluesky/main/examples/local.yml -O ~/.config/databroker/local.yml + + # - name: Set up Python ${{ matrix.python-version }} with conda + # uses: conda-incubator/setup-miniconda@v2 + # with: + # activate-environment: ${{ env.REPOSITORY_NAME }}-py${{ matrix.python-version }} + # auto-update-conda: true + # miniconda-version: "latest" + # python-version: ${{ matrix.python-version }} + # mamba-version: "*" + # channels: conda-forge - - name: Set up Python ${{ matrix.python-version }} with conda - uses: conda-incubator/setup-miniconda@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 with: - activate-environment: ${{ env.REPOSITORY_NAME }}-py${{ matrix.python-version }} - auto-update-conda: true - miniconda-version: "latest" python-version: ${{ matrix.python-version }} - mamba-version: "*" - channels: conda-forge - name: Install the package and its dependencies run: | # For reference: https://www.gnu.org/software/bash/manual/html_node/The-Set-Builtin.html. set -vxeo pipefail - conda env list - # mamba install -c conda-forge shadow3 srwpy pip install --upgrade pip wheel pip install -v . pip install -r requirements-dev.txt pip list - conda list - name: Test with pytest run: | set -vxeuo pipefail - coverage run -m pytest -vv -s + coverage run -m pytest -vv -s -m test_func coverage report -m diff --git a/bloptools/bayesian/__init__.py b/bloptools/bayesian/__init__.py index d1d643a..b951ce0 100644 --- a/bloptools/bayesian/__init__.py +++ b/bloptools/bayesian/__init__.py @@ -1,5 +1,7 @@ import logging +import os import time as ttime +import uuid import warnings from collections import OrderedDict from collections.abc import Mapping @@ -14,77 +16,58 @@ import pandas as pd import scipy as sp import torch +from botorch.acquisition.objective import ScalarizedPosteriorTransform +from botorch.models.deterministic import GenericDeterministicModel +from botorch.models.model_list_gp_regression import ModelListGP from matplotlib import pyplot as plt from matplotlib.patches import Patch from .. import utils -from . import models -from .acquisition import default_acquisition_plan +from . 
import acquisition, models +from .acquisition import ACQ_FUNC_CONFIG, default_acquisition_plan from .digestion import default_digestion_function +os.environ["KMP_DUPLICATE_LIB_OK"] = "True" + warnings.filterwarnings("ignore", category=botorch.exceptions.warnings.InputDataWarning) mpl.rc("image", cmap="coolwarm") -DEFAULT_COLOR_LIST = ["dodgerblue", "tomato", "mediumseagreen", "goldenrod"] -DEFAULT_COLORMAP = "turbo" - MAX_TEST_INPUTS = 2**11 - TASK_CONFIG = {} -ACQ_FUNC_CONFIG = { - "quasi-random": { - "identifiers": ["qr", "quasi-random"], - "pretty_name": "Quasi-random", - "description": "Sobol-sampled quasi-random points.", - }, - "expected_mean": { - "identifiers": ["em", "expected_mean"], - "pretty_name": "Expected mean", - "description": "The expected value at each input.", - }, - "expected_improvement": { - "identifiers": ["ei", "expected_improvement"], - "pretty_name": "Expected improvement", - "description": r"The expected value of max(f(x) - \nu, 0), where \nu is the current maximum.", - }, - "probability_of_improvement": { - "identifiers": ["pi", "probability_of_improvement"], - "pretty_name": "Probability of improvement", - "description": "The probability that this input improves on the current maximum.", - }, - "upper_confidence_bound": { - "identifiers": ["ucb", "upper_confidence_bound"], - "default_args": {"z": 2}, - "pretty_name": "Upper confidence bound", - "description": r"The expected value, plus some multiple of the uncertainty (typically \mu + 2\sigma).", - }, -} - TASK_TRANSFORMS = {"log": lambda x: np.log(x)} +DEFAULT_COLOR_LIST = ["dodgerblue", "tomato", "mediumseagreen", "goldenrod"] +DEFAULT_COLORMAP = "turbo" +DEFAULT_SCATTER_SIZE = 16 + def _validate_and_prepare_dofs(dofs): - for dof in dofs: + for i_dof, dof in enumerate(dofs): if not isinstance(dof, Mapping): raise ValueError("Supplied dofs must be an iterable of mappings (e.g. a dict)!") if "device" not in dof.keys(): raise ValueError("Each DOF must have a device!") dof["device"].kind = "hinted" + dof["name"] = dof["device"].name if hasattr(dof["device"], "name") else f"x{i_dof+1}" if "limits" not in dof.keys(): dof["limits"] = (-np.inf, np.inf) dof["limits"] = tuple(np.array(dof["limits"], dtype=float)) + if "tags" not in dof.keys(): + dof["tags"] = [] + # dofs are passive by default dof["kind"] = dof.get("kind", "passive") if dof["kind"] not in ["active", "passive"]: raise ValueError('DOF kinds must be one of "active" or "passive"') + # active dofs are on by default, passive dofs are off by default dof["mode"] = dof.get("mode", "on" if dof["kind"] == "active" else "off") if dof["mode"] not in ["on", "off"]: raise ValueError('DOF modes must be one of "on" or "off"') @@ -106,6 +89,8 @@ def _validate_and_prepare_tasks(tasks): raise ValueError("Supplied tasks must be an iterable of mappings (e.g. 
a dict)!") if task["kind"] not in ["minimize", "maximize"]: raise ValueError('"mode" must be specified as either "minimize" or "maximize"') + if "name" not in task.keys(): + task["name"] = task["key"] if "weight" not in task.keys(): task["weight"] = 1 if "limits" not in task.keys(): @@ -175,20 +160,26 @@ def __init__( self.digestion = kwargs.get("digestion", default_digestion_function) self.dets = list(np.atleast_1d(kwargs.get("dets", []))) + self.trigger_delay = kwargs.get("trigger_delay", 0) + self.acq_func_config = kwargs.get("acq_func_config", ACQ_FUNC_CONFIG) + self.sample_center_on_init = kwargs.get("sample_center_on_init", False) + self.table = pd.DataFrame() self._initialized = False self._train_models = True self.a_priori_hypers = None - def _subset_inputs_sampler(self, kind=None, mode=None, n=MAX_TEST_INPUTS): + self.plots = {"tasks": {}} + + def reset(self): """ - Returns $n$ quasi-randomly sampled inputs in the bounded parameter space + Reset the agent. """ - transform = self._subset_input_transform(kind=kind, mode=mode) - return transform.untransform(utils.normalized_sobol_sampler(n, d=self._len_subset_dofs(kind=kind, mode=mode))) + self.table = pd.DataFrame() + self._initialized = False def initialize( self, @@ -210,6 +201,11 @@ def initialize( if hypers is not None: self.a_priori_hypers = self.load_hypers(hypers) + if self.sample_center_on_init: + new_table = yield from self.acquire(self._acq_func_bounds.mean(axis=0)) + new_table.loc[:, "acq_func"] = "sample_center_on_init" + self.tell(new_table=new_table, train=False) + if data is not None: if type(data) == str: self.tell(new_table=pd.read_hdf(data, key="table")) @@ -237,33 +233,27 @@ def tell(self, new_table=None, append=True, train=True, **kwargs): self.table = pd.concat([self.table, new_table]) if append else new_table self.table.index = np.arange(len(self.table)) - fitnesses = self.task_fitnesses # computes from self.table - - # update fitness estimates - self.table.loc[:, fitnesses.columns] = fitnesses.values - self.table.loc[:, "total_fitness"] = fitnesses.values.sum(axis=1) - - skew_dims = [tuple(np.arange(self._len_subset_dofs(mode="on")))] + skew_dims = self.latent_dim_tuples if self._initialized: cached_hypers = self.hypers - feasibility = ~fitnesses.isna().any(axis=1) + inputs = self.inputs.loc[:, self._subset_dof_names(mode="on")].values - if not feasibility.sum() >= 2: - raise ValueError("There must be at least two feasible data points per task!") + for i, task in enumerate(self.tasks): + self.table.loc[:, f"{task['key']}_fitness"] = targets = self._get_task_fitness(i) + train_index = ~np.isnan(targets) - inputs = self.inputs.loc[feasibility, self._subset_dof_names(mode="on")].values - train_inputs = torch.tensor(inputs).double() # .unsqueeze(0) + if not train_index.sum() >= 2: + raise ValueError("There must be at least two valid data points per task!") - for task in self.tasks: - targets = self.table.loc[feasibility, f'{task["key"]}_fitness'].values - train_targets = torch.tensor(targets).double().unsqueeze(-1) # .unsqueeze(0) + train_inputs = torch.tensor(inputs[train_index]).double() + train_targets = torch.tensor(targets[train_index]).double().unsqueeze(-1) # .unsqueeze(0) likelihood = gpytorch.likelihoods.GaussianLikelihood( noise_constraint=gpytorch.constraints.Interval( torch.tensor(1e-6).square(), - torch.tensor(1e-2).square(), + torch.tensor(1e0).square(), ), ).double() @@ -278,18 +268,12 @@ def tell(self, new_table=None, append=True, train=True, **kwargs): outcome_transform=outcome_transform, ).double() - #
this ensures that we have equal weight between task fitness and feasibility fitness - self.task_scalarization = botorch.acquisition.objective.ScalarizedPosteriorTransform( - weights=torch.tensor([*torch.ones(self.n_tasks), self.fitness_variance.sum().sqrt()]).double(), - offset=0, - ) - dirichlet_likelihood = gpytorch.likelihoods.DirichletClassificationLikelihood( - torch.tensor(feasibility).long(), learn_additional_noise=True + self.all_tasks_valid.long(), learn_additional_noise=True ).double() self.classifier = models.LatentDirichletClassifier( - train_inputs=torch.tensor(self.inputs.values).double(), + train_inputs=torch.tensor(inputs).double(), train_targets=dirichlet_likelihood.transformed_targets.transpose(-1, -2).double(), skew_dims=skew_dims, likelihood=dirichlet_likelihood, @@ -309,66 +293,60 @@ def tell(self, new_table=None, append=True, train=True, **kwargs): else: raise RuntimeError("Could not fit model on initialization!") - feasibility_fitness_model = botorch.models.deterministic.GenericDeterministicModel( - f=lambda X: -self.classifier.log_prob(X).square() - ) - - self.model_list = botorch.models.model.ModelList(*[task["model"] for task in self.tasks], feasibility_fitness_model) + self.constraint = GenericDeterministicModel(f=lambda x: self.classifier.probabilities(x)[..., -1].squeeze(-1)) @property - def task_fitnesses(self): - df = pd.DataFrame(index=self.table.index) - for task in self.tasks: - name = f'{task["key"]}_fitness' + def model(self): + """ + A model encompassing all the tasks. A single GP in the single-task case, or a model list. + """ + return ModelListGP(*[task["model"] for task in self.tasks]) if self.num_tasks > 1 else self.tasks[0]["model"] - df.loc[:, name] = task["weight"] * self.table.loc[:, task["key"]] + def _get_task_fitness(self, task_index): + """ + Returns the fitness for a task given the task index. 
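+ Out-of-limit values (and non-positive values under a "log" transform) are returned as NaN, and "minimize" tasks are negated so that greater fitness is always better.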
+ """ + task = self.tasks[task_index] - # check that task values are inside acceptable values - valid = (df.loc[:, name] > task["limits"][0]) & (df.loc[:, name] < task["limits"][1]) + targets = self.table.loc[:, task["key"]].values.copy() - # transform if needed - if "transform" in task.keys(): - if task["transform"] == "log": - valid &= df.loc[:, name] > 0 - df.loc[valid, name] = np.log(df.loc[valid, name]) - df.loc[~valid, name] = np.nan + # check that task values are inside acceptable values + valid = (targets > task["limits"][0]) & (targets < task["limits"][1]) + targets = np.where(valid, targets, np.nan) - if task["kind"] == "minimize": - df.loc[valid, name] *= -1 - return df + # transform if needed + if "transform" in task.keys(): + if task["transform"] == "log": + targets = np.where(targets > 0, np.log(targets), np.nan) - def _dof_kind_mask(self, kind=None): - return [dof["kind"] == kind if kind is not None else True for dof in self.dofs] + if task["kind"] == "minimize": + targets *= -1 - def _dof_mode_mask(self, mode=None): - return [dof["mode"] == mode if mode is not None else True for dof in self.dofs] - - def _dof_mask(self, kind=None, mode=None): - return [(k and m) for k, m in zip(self._dof_kind_mask(kind), self._dof_mode_mask(mode))] - - def _subset_dofs(self, kind=None, mode=None): - return [dof for dof, m in zip(self.dofs, self._dof_mask(kind, mode)) if m] + return targets - def _len_subset_dofs(self, kind=None, mode=None): - return len(self._subset_dofs(kind, mode)) - - def _subset_devices(self, kind=None, mode=None): - return [dof["device"] for dof in self._subset_dofs(kind, mode)] + @property + def fitnesses(self): + """ + Returns a (num_tasks x n_obs) array of fitnesses + """ + return torch.cat([torch.tensor(self._get_task_fitness(i))[..., None] for i in range(self.num_tasks)], dim=1) - def _read_subset_devices(self, kind=None, mode=None): - return [device.read()[device.name]["value"] for device in self._subset_devices(kind, mode)] + @property + def scalarized_fitness(self): + return (self.fitnesses * self.task_weights).sum(axis=-1) - def _subset_dof_names(self, kind=None, mode=None): - return [device.name for device in self._subset_devices(kind, mode)] + @property + def best_scalarized_fitness(self): + f = self.scalarized_fitness + return np.where(np.isnan(f), -np.inf, f).max() - def _subset_dof_limits(self, kind=None, mode=None): - dofs_subset = self._subset_dofs(kind, mode) - if len(dofs_subset) > 0: - return torch.tensor([dof["limits"] for dof in dofs_subset], dtype=torch.float64).T - return torch.empty((2, 0)) + @property + def all_tasks_valid(self): + return ~torch.isnan(self.scalarized_fitness) - def test_inputs(self, n=MAX_TEST_INPUTS): - return utils.sobol_sampler(self._acq_func_bounds, n=n) + @property + def target_names(self): + return [f'{task["key"]}_fitness' for task in self.tasks] @property def test_inputs_grid(self): @@ -397,7 +375,7 @@ def _acq_func_bounds(self): ).T @property - def n_tasks(self): + def num_tasks(self): return len(self.tasks) @property @@ -420,14 +398,79 @@ def task_weights(self): def task_signs(self): return torch.tensor([(1 if task["kind"] == "maximize" else -1) for task in self.tasks], dtype=torch.long) - def _subset_input_transform(self, kind=None, mode=None): - limits = self._subset_dof_limits(kind, mode) + def _dof_kind_mask(self, kind=None): + return [dof["kind"] == kind if kind is not None else True for dof in self.dofs] + + def _dof_mode_mask(self, mode=None): + return [dof["mode"] == mode if mode is not None else True for dof 
in self.dofs] + + def _dof_tags_mask(self, tags=[]): + return [np.isin(dof["tags"], tags).any() if tags else True for dof in self.dofs] + + def _dof_mask(self, kind=None, mode=None, tags=[]): + return [ + (k and m and t) + for k, m, t in zip(self._dof_kind_mask(kind), self._dof_mode_mask(mode), self._dof_tags_mask(tags)) + ] + + def activate_dofs(self, kind=None, mode=None, tags=[]): + for dof in self._subset_dofs(kind, mode, tags): + dof["mode"] = "on" + + def deactivate_dofs(self, kind=None, mode=None, tags=[]): + for dof in self._subset_dofs(kind, mode, tags): + dof["mode"] = "off" + + def _subset_dofs(self, kind=None, mode=None, tags=[]): + return [dof for dof, m in zip(self.dofs, self._dof_mask(kind, mode, tags)) if m] + + def _len_subset_dofs(self, kind=None, mode=None, tags=[]): + return len(self._subset_dofs(kind, mode, tags)) + + def _subset_devices(self, kind=None, mode=None, tags=[]): + return [dof["device"] for dof in self._subset_dofs(kind, mode, tags)] + + def _read_subset_devices(self, kind=None, mode=None, tags=[]): + return [device.read()[device.name]["value"] for device in self._subset_devices(kind, mode, tags)] + + def _subset_dof_names(self, kind=None, mode=None, tags=[]): + return [device.name for device in self._subset_devices(kind, mode, tags)] + + def _subset_dof_limits(self, kind=None, mode=None, tags=[]): + dofs_subset = self._subset_dofs(kind, mode, tags) + if len(dofs_subset) > 0: + return torch.tensor([dof["limits"] for dof in dofs_subset], dtype=torch.float64).T + return torch.empty((2, 0)) + + @property + def latent_dim_tuples(self): + """ + Returns a list of tuples, where each tuple represents a group of dimensions to find a latent representation of. + """ + + latent_dim_labels = [dof.get("latent_group", str(uuid.uuid4())) for dof in self._subset_dofs(mode="on")] + u, uinv = np.unique(latent_dim_labels, return_inverse=True) + + return [tuple(np.where(uinv == i)[0]) for i in range(len(u))] + + def test_inputs(self, n=MAX_TEST_INPUTS): + return utils.sobol_sampler(self._acq_func_bounds, n=n) + + def _subset_input_transform(self, kind=None, mode=None, tags=[]): + limits = self._subset_dof_limits(kind, mode, tags) offset = limits.min(dim=0).values coefficient = limits.max(dim=0).values - offset return botorch.models.transforms.input.AffineInputTransform( d=limits.shape[-1], coefficient=coefficient, offset=offset ) + def _subset_inputs_sampler(self, kind=None, mode=None, tags=[], n=MAX_TEST_INPUTS): + """ + Returns $n$ quasi-randomly sampled inputs in the bounded parameter space + """ + transform = self._subset_input_transform(kind, mode, tags) + return transform.untransform(utils.normalized_sobol_sampler(n, d=self._len_subset_dofs(kind, mode, tags))) def save_data(self, filepath="./self_data.h5"): """ Save the sampled inputs and targets of the agent to a file, which can be used @@ -506,51 +549,78 @@ def acq_func_info(self): print("\n\n".join(entries)) - def get_acquisition_function(self, acq_func_identifier="ei", return_metadata=False, acq_func_args={}, **kwargs): + def get_acquisition_function(self, acq_func_identifier="ei", return_metadata=False, **acq_func_kwargs): + """ + Generates an acquisition function from a supplied identifier.
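+ Identifiers are matched case-insensitively against the "identifiers" lists in ACQ_FUNC_CONFIG, so "ei" and "expected_improvement" name the same acquisition function.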
+ """ + if not self._initialized: raise RuntimeError( f'Can\'t construct acquisition function "{acq_func_identifier}" (the agent is not initialized!)' ) - if acq_func_identifier.lower() in ACQ_FUNC_CONFIG["expected_improvement"]["identifiers"]: - acq_func = botorch.acquisition.analytic.LogExpectedImprovement( - self.model_list, - best_f=self.scalarized_fitness.max(), - posterior_transform=self.task_scalarization, - **kwargs, + acq_func_name = None + for _acq_func_name in ACQ_FUNC_CONFIG.keys(): + if acq_func_identifier.lower() in ACQ_FUNC_CONFIG[_acq_func_name]["identifiers"]: + acq_func_name = _acq_func_name + + if acq_func_name is None: + raise ValueError(f'Unrecognized acquisition function "{acq_func_identifier}".') + + if ACQ_FUNC_CONFIG[acq_func_name]["multitask_only"] and (self.num_tasks == 1): + raise ValueError(f'Acquisition function "{acq_func_name}" is only for multi-task optimization problems!') + + if acq_func_name == "expected_improvement": + acq_func = acquisition.ConstrainedLogExpectedImprovement( + constraint=self.constraint, + model=self.model, + best_f=self.best_scalarized_fitness, + posterior_transform=ScalarizedPosteriorTransform(weights=self.task_weights, offset=0), + ) + acq_func_meta = {"name": acq_func_name, "args": {}} + + elif acq_func_name == "probability_of_improvement": + acq_func = acquisition.ConstrainedLogProbabilityOfImprovement( + constraint=self.constraint, + model=self.model, + best_f=self.best_scalarized_fitness, + posterior_transform=ScalarizedPosteriorTransform(weights=self.task_weights, offset=0), ) - acq_func_meta = {"name": "expected improvement", "args": {}} - - elif acq_func_identifier.lower() in ACQ_FUNC_CONFIG["probability_of_improvement"]["identifiers"]: - acq_func = botorch.acquisition.analytic.LogProbabilityOfImprovement( - self.model_list, - best_f=self.scalarized_fitness.max(), - posterior_transform=self.task_scalarization, - **kwargs, + acq_func_meta = {"name": acq_func_name, "args": {}} + + elif acq_func_name == "lower_bound_max_value_entropy": + acq_func = acquisition.qConstrainedLowerBoundMaxValueEntropy( + constraint=self.constraint, + model=self.model, + candidate_set=self.test_inputs(n=1024).squeeze(1), ) - acq_func_meta = {"name": "probability of improvement", "args": {}} - - elif acq_func_identifier.lower() in ACQ_FUNC_CONFIG["expected_mean"]["identifiers"]: - acq_func = botorch.acquisition.analytic.UpperConfidenceBound( - self.model_list, - beta=0, - posterior_transform=self.task_scalarization, - **kwargs, + acq_func_meta = {"name": acq_func_name, "args": {}} + + elif acq_func_name == "noisy_expected_hypervolume_improvement": + acq_func = acquisition.qConstrainedNoisyExpectedHypervolumeImprovement( + constraint=self.constraint, + model=self.model, + ref_point=self.train_targets.min(dim=0).values, + X_baseline=self.train_inputs, + prune_baseline=True, ) - acq_func_meta = {"name": "expected mean"} + acq_func_meta = {"name": acq_func_name, "args": {}} - elif acq_func_identifier.lower() in ACQ_FUNC_CONFIG["upper_confidence_bound"]["identifiers"]: - beta = ACQ_FUNC_CONFIG["upper_confidence_bound"]["default_args"]["z"] ** 2 - acq_func = botorch.acquisition.analytic.UpperConfidenceBound( - self.model_list, + elif acq_func_name == "upper_confidence_bound": + config = ACQ_FUNC_CONFIG["upper_confidence_bound"] + beta = acq_func_kwargs.get("beta", config["default_args"]["beta"]) + + acq_func = acquisition.ConstrainedUpperConfidenceBound( + constraint=self.constraint, + model=self.model, beta=beta, - 
posterior_transform=self.task_scalarization, - **kwargs, + posterior_transform=ScalarizedPosteriorTransform(weights=self.task_weights, offset=0), ) - acq_func_meta = {"name": "upper confidence bound", "args": {"beta": beta}} + acq_func_meta = {"name": acq_func_name, "args": {"beta": beta}} - else: - raise ValueError(f'Unrecognized acquisition acq_func_identifier "{acq_func_identifier}".') + elif acq_func_name == "expected_mean": + acq_func = self.get_acquisition_function(acq_func_identifier="ucb", beta=0, return_metadata=False) + acq_func_meta = {"name": acq_func_name, "args": {}} return (acq_func, acq_func_meta) if return_metadata else acq_func @@ -584,8 +654,8 @@ def ask(self, acq_func_identifier="ei", n=1, route=True, return_metadata=False): active_X = np.concatenate(active_x_list, axis=0) self.forget(self.table.index[-(n - 1) :]) - if route: - active_X = active_X[utils.route(self._read_subset_devices(kind="active", mode="on"), active_X)] + if route: + active_X = active_X[utils.route(self._read_subset_devices(kind="active", mode="on"), active_X)] return (active_X, acq_func_meta) if return_metadata else active_X @@ -605,7 +675,7 @@ def ask_single( ) BATCH_SIZE = 1 - NUM_RESTARTS = 8 + NUM_RESTARTS = 4 RAW_SAMPLES = 256 candidates, _ = botorch.optim.optimize_acqf( @@ -639,7 +709,10 @@ def acquire(self, active_inputs): passive_devices = [*self._subset_devices(kind="passive"), *self._subset_devices(kind="active", mode="off")] uid = yield from self.acquisition_plan( - active_devices, active_inputs.astype(float), [*self.dets, *passive_devices] + active_devices, + active_inputs.astype(float), + [*self.dets, *passive_devices], + delay=self.trigger_delay, ) products = self.digestion(self.db, uid) @@ -668,6 +741,7 @@ def learn( n_iter=1, n_per_iter=1, reuse_hypers=True, + train=True, upsample=1, verbose=True, plots=[], @@ -678,40 +752,33 @@ def learn( It should be passed to a Bluesky RunEngine. 
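Each iteration asks for n_per_iter inputs, acquires them, and tells the agent about the digested results.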
""" - for iteration in range(n_iter): + for i in range(n_iter): x, acq_func_meta = self.ask( n=n_per_iter, acq_func_identifier=acq_func_identifier, return_metadata=True, **kwargs ) new_table = yield from self.acquire(x) - new_table.loc[:, "acq_func"] = acq_func_meta["name"] - - self.tell(new_table=new_table, reuse_hypers=reuse_hypers) + self.tell(new_table=new_table, train=train) @property def inputs(self): return self.table.loc[:, self._subset_dof_names(mode="on")].astype(float) @property - def fitness_variance(self): - return torch.tensor(np.nanvar(self.task_fitnesses.values, axis=0)) - - @property - def scalarized_fitness(self): - return self.task_fitnesses.sum(axis=1) + def best_inputs(self): + return self.inputs.values[np.nanargmax(self.scalarized_fitness)] - # @property - # def best_sum_of_tasks_inputs(self): - # return self.inputs[np.nanargmax(self.task_fitnesses.sum(axis=1))] - - @property def go_to(self, inputs): - yield from bps.mv(*[_ for items in zip(self._subset_dofs(kind="active"), np.atleast_1d(inputs).T) for _ in items]) + args = [] + for dof, value in zip(self._subset_dofs(mode="on"), np.atleast_1d(inputs).T): + if dof["kind"] == "active": + args.append(dof["device"]) + args.append(value) + yield from bps.mv(*args) - # @property - # def go_to_best_sum_of_tasks(self): - # yield from self.go_to(self.best_sum_of_tasks_inputs) + def go_to_best(self): + yield from self.go_to(self.best_inputs) def plot_tasks(self, **kwargs): if self._len_subset_dofs(kind="active", mode="on") == 1: @@ -721,9 +788,9 @@ def plot_tasks(self, **kwargs): def _plot_tasks_one_dof(self, size=16, lw=1e0): self.task_fig, self.task_axes = plt.subplots( - self.n_tasks, + self.num_tasks, 1, - figsize=(6, 4 * self.n_tasks), + figsize=(6, 4 * self.num_tasks), sharex=True, constrained_layout=True, ) @@ -731,6 +798,8 @@ def _plot_tasks_one_dof(self, size=16, lw=1e0): self.task_axes = np.atleast_1d(self.task_axes) for itask, task in enumerate(self.tasks): + task_plots = self.plots["tasks"][task["name"]] = {} + color = DEFAULT_COLOR_LIST[itask] self.task_axes[itask].set_ylabel(task["key"]) @@ -740,12 +809,7 @@ def _plot_tasks_one_dof(self, size=16, lw=1e0): task_mean = task_posterior.mean.detach().numpy() task_sigma = task_posterior.variance.sqrt().detach().numpy() - self.task_axes[itask].scatter( - self.inputs.loc[:, self._subset_dof_names(kind="active", mode="on")], - self.table.loc[:, f'{task["key"]}_fitness'], - s=size, - color=color, - ) + task_plots["sampled"] = self.task_axes[itask].scatter([], [], s=size, color=color) on_dofs_are_active_mask = [dof["kind"] == "active" for dof in self._subset_dofs(mode="on")] @@ -766,9 +830,9 @@ def _plot_tasks_many_dofs(self, axes=[0, 1], shading="nearest", cmap=DEFAULT_COL gridded = self._len_subset_dofs(kind="active", mode="on") == 2 self.task_fig, self.task_axes = plt.subplots( - self.n_tasks, + len(self.tasks), 3, - figsize=(10, 4 * self.n_tasks), + figsize=(10, 4 * len(self.tasks)), sharex=True, sharey=True, constrained_layout=True, @@ -777,10 +841,8 @@ def _plot_tasks_many_dofs(self, axes=[0, 1], shading="nearest", cmap=DEFAULT_COL self.task_axes = np.atleast_2d(self.task_axes) # self.task_fig.suptitle(f"(x,y)=({self.dofs[axes[0]].name},{self.dofs[axes[1]].name})") - feasible = ~self.task_fitnesses.isna().any(axis=1) - for itask, task in enumerate(self.tasks): - sampled_fitness = np.where(feasible, self.table.loc[:, f'{task["key"]}_fitness'].values, np.nan) + sampled_fitness = np.where(self.all_tasks_valid, self.table.loc[:, f'{task["key"]}_fitness'].values, np.nan) 
task_vmin, task_vmax = np.nanpercentile(sampled_fitness, q=[1, 99]) task_norm = mpl.colors.Normalize(task_vmin, task_vmax) @@ -808,16 +870,16 @@ def _plot_tasks_many_dofs(self, axes=[0, 1], shading="nearest", cmap=DEFAULT_COL if not x.ndim == 3: raise ValueError() self.task_axes[itask, 1].pcolormesh( - x[..., 0], - x[..., 1], + x[..., 0].detach().numpy(), + x[..., 1].detach().numpy(), task_mean[..., 0].detach().numpy(), shading=shading, cmap=cmap, norm=task_norm, ) sigma_ax = self.task_axes[itask, 2].pcolormesh( - x[..., 0], - x[..., 1], + x[..., 0].detach().numpy(), + x[..., 1].detach().numpy(), task_sigma[..., 0].detach().numpy(), shading=shading, cmap=cmap, @@ -917,8 +979,8 @@ def _plot_acq_many_dofs( if gridded: self.acq_axes[iacq_func].set_title(acq_func_meta["name"]) obj_ax = self.acq_axes[iacq_func].pcolormesh( - x[..., 0], - x[..., 1], + x[..., 0].detach().numpy(), + x[..., 1].detach().numpy(), obj.detach().numpy(), shading=shading, cmap=cmap, @@ -940,37 +1002,39 @@ def _plot_acq_many_dofs( ax.set_xlim(*self._subset_dofs(kind="active", mode="on")[axes[0]]["limits"]) ax.set_ylim(*self._subset_dofs(kind="active", mode="on")[axes[1]]["limits"]) - def plot_feasibility(self, **kwargs): + def plot_validity(self, **kwargs): if self._len_subset_dofs(kind="active", mode="on") == 1: - self._plot_feas_one_dof(**kwargs) + self._plot_valid_one_dof(**kwargs) else: - self._plot_feas_many_dofs(**kwargs) + self._plot_valid_many_dofs(**kwargs) - def _plot_feas_one_dof(self, size=16, lw=1e0): - self.feas_fig, self.feas_ax = plt.subplots(1, 1, figsize=(4, 4), sharex=True, constrained_layout=True) + def _plot_valid_one_dof(self, size=16, lw=1e0): + self.valid_fig, self.valid_ax = plt.subplots(1, 1, figsize=(4, 4), sharex=True, constrained_layout=True) x = self.test_inputs_grid *input_shape, input_dim = x.shape - log_prob = self.classifier.log_prob(x.reshape(-1, 1, input_dim)).reshape(input_shape) + constraint = self.classifier.probabilities(x.reshape(-1, 1, input_dim))[..., -1].reshape(input_shape) - self.feas_ax.scatter(self.inputs.values, ~self.task_fitnesses.isna().any(axis=1), s=size) + self.valid_ax.scatter(self.inputs.values, self.all_tasks_valid, s=size) on_dofs_are_active_mask = [dof["kind"] == "active" for dof in self._subset_dofs(mode="on")] - self.feas_ax.plot(x[..., on_dofs_are_active_mask].squeeze(), log_prob.exp().detach().numpy(), lw=lw) + self.valid_ax.plot(x[..., on_dofs_are_active_mask].squeeze(), constraint.detach().numpy(), lw=lw) - self.feas_ax.set_xlim(*self._subset_dofs(kind="active", mode="on")[0]["limits"]) + self.valid_ax.set_xlim(*self._subset_dofs(kind="active", mode="on")[0]["limits"]) - def _plot_feas_many_dofs(self, axes=[0, 1], shading="nearest", cmap=DEFAULT_COLORMAP, size=16, gridded=None): - self.feas_fig, self.feas_axes = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True, constrained_layout=True) + def _plot_valid_many_dofs(self, axes=[0, 1], shading="nearest", cmap=DEFAULT_COLORMAP, size=16, gridded=None): + self.valid_fig, self.valid_axes = plt.subplots( + 1, 2, figsize=(8, 4), sharex=True, sharey=True, constrained_layout=True + ) if gridded is None: gridded = self._len_subset_dofs(kind="active", mode="on") == 2 - data_ax = self.feas_axes[0].scatter( + data_ax = self.valid_axes[0].scatter( *self.inputs.values.T[:2], - c=~self.task_fitnesses.isna().any(axis=1), + c=self.all_tasks_valid, s=size, vmin=0, vmax=1, @@ -979,32 +1043,32 @@ def _plot_feas_many_dofs(self, axes=[0, 1], shading="nearest", cmap=DEFAULT_COLO x = self.test_inputs_grid.squeeze() if 
gridded else self.test_inputs(n=MAX_TEST_INPUTS) *input_shape, input_dim = x.shape - log_prob = self.classifier.log_prob(x.reshape(-1, 1, input_dim)).reshape(input_shape) + constraint = self.classifier.probabilities(x.reshape(-1, 1, input_dim))[..., -1].reshape(input_shape) if gridded: - self.feas_axes[1].pcolormesh( - x[..., 0], - x[..., 1], - log_prob.exp().detach().numpy(), + self.valid_axes[1].pcolormesh( + x[..., 0].detach().numpy(), + x[..., 1].detach().numpy(), + constraint.detach().numpy(), shading=shading, cmap=cmap, vmin=0, vmax=1, ) - # self.acq_fig.colorbar(obj_ax, ax=self.feas_axes[iacq_func], location="bottom", aspect=32, shrink=0.8) + # self.acq_fig.colorbar(obj_ax, ax=self.valid_axes[iacq_func], location="bottom", aspect=32, shrink=0.8) else: - # self.feas_axes.set_title(acq_func_meta["name"]) - self.feas_axes[1].scatter( + # self.valid_axes.set_title(acq_func_meta["name"]) + self.valid_axes[1].scatter( x.detach().numpy()[..., axes[0]], x.detach().numpy()[..., axes[1]], - c=log_prob.exp().detach().numpy(), + c=constraint.detach().numpy(), ) - self.feas_fig.colorbar(data_ax, ax=self.feas_axes[:2], location="bottom", aspect=32, shrink=0.8) + self.valid_fig.colorbar(data_ax, ax=self.valid_axes[:2], location="bottom", aspect=32, shrink=0.8) - for ax in self.feas_axes.ravel(): + for ax in self.valid_axes.ravel(): ax.set_xlim(*self._subset_dofs(kind="active", mode="on")[axes[0]]["limits"]) ax.set_ylim(*self._subset_dofs(kind="active", mode="on")[axes[1]]["limits"]) @@ -1031,9 +1095,9 @@ def plot_history(self, x_key="index", show_all_tasks=False): num_task_plots = 1 if show_all_tasks: - num_task_plots = self.n_tasks + 1 + num_task_plots = self.num_tasks + 1 - self.n_tasks + 1 if self.n_tasks > 1 else 1 + self.num_tasks + 1 if self.num_tasks > 1 else 1 hist_fig, hist_axes = plt.subplots( num_task_plots, 1, figsize=(6, 4 * num_task_plots), sharex=True, constrained_layout=True, dpi=200 @@ -1053,7 +1117,7 @@ def plot_history(self, x_key="index", show_all_tasks=False): hist_axes[itask].plot(x, y, lw=5e-1, c="k") hist_axes[itask].set_ylabel(task["key"]) - y = self.table.total_fitness + y = self.scalarized_fitness cummax_y = np.array([np.nanmax(y[: i + 1]) for i in range(len(y))]) diff --git a/bloptools/bayesian/acquisition.py b/bloptools/bayesian/acquisition.py index 3354d19..e6d3835 100644 --- a/bloptools/bayesian/acquisition.py +++ b/bloptools/bayesian/acquisition.py @@ -1,7 +1,151 @@ +import math + +import bluesky.plan_stubs as bps import bluesky.plans as bp import numpy as np +import torch +from botorch.acquisition.analytic import LogExpectedImprovement, LogProbabilityOfImprovement, UpperConfidenceBound +from botorch.acquisition.max_value_entropy_search import qLowerBoundMaxValueEntropy +from botorch.acquisition.multi_objective.monte_carlo import qNoisyExpectedHypervolumeImprovement + + +def list_scan_with_delay(*args, delay=0, **kwargs): + "Accepts all the normal 'scan' parameters, plus an optional delay." + + def one_nd_step_with_delay(detectors, step, pos_cache): + "This is a copy of bluesky.plan_stubs.one_nd_step with a sleep added." 
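+ # move the motors to the next point, sleep for `delay` seconds, then trigger and read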
+ motors = step.keys() + yield from bps.move_per_step(step, pos_cache) + yield from bps.sleep(delay) + yield from bps.trigger_and_read(list(detectors) + list(motors)) + + kwargs.setdefault("per_step", one_nd_step_with_delay) + uid = yield from bp.list_scan(*args, **kwargs) + return uid -def default_acquisition_plan(dofs, inputs, dets): - uid = yield from bp.list_scan(dets, *[_ for items in zip(dofs, np.atleast_2d(inputs).T) for _ in items]) +def default_acquisition_plan(dofs, inputs, dets, **kwargs): + delay = kwargs.get("delay", 0) + args = [] + for dof, points in zip(dofs, np.atleast_2d(inputs).T): + args.append(dof) + args.append(list(points)) + + uid = yield from list_scan_with_delay(dets, *args, delay=delay) return uid + + +# def sleepy_acquisition_plan(dofs, inputs, dets): + +# args = [] +# for dof, points in zip(dofs, np.atleast_2d(inputs).T): +# args.append(dof) +# args.append(list(points)) + +# for point in inputs: +# args = [] +# for dof, value in zip(dofs, point): +# args.append(dof) +# args.append(value) + +# yield from bps.mv(*args) +# yield from bps.count([*dets, *dofs]) +# yield from bps.sleep(1) + +# return uid + + +ACQ_FUNC_CONFIG = { + "quasi-random": { + "identifiers": ["qr", "quasi-random"], + "pretty_name": "Quasi-random", + "description": "Sobol-sampled quasi-random points.", + "multitask_only": False, + }, + "expected_mean": { + "identifiers": ["em", "expected_mean"], + "pretty_name": "Expected mean", + "multitask_only": False, + "description": "The expected value at each input.", + }, + "expected_improvement": { + "identifiers": ["ei", "expected_improvement"], + "pretty_name": "Expected improvement", + "multitask_only": False, + "description": r"The expected value of max(f(x) - \nu, 0), where \nu is the current maximum.", + }, + "noisy_expected_hypervolume_improvement": { + "identifiers": ["nehvi", "noisy_expected_hypervolume_improvement"], + "pretty_name": "Noisy expected hypervolume improvement", + "multitask_only": True, + "description": r"It's like a big box. 
How big is the box?", + }, + "lower_bound_max_value_entropy": { + "identifiers": ["lbmve", "lbmes", "gibbon", "lower_bound_max_value_entropy"], + "pretty_name": "Lower bound max value entropy", + "multitask_only": False, + "description": r"Max entropy search, basically", + }, + "probability_of_improvement": { + "identifiers": ["pi", "probability_of_improvement"], + "pretty_name": "Probability of improvement", + "multitask_only": False, + "description": "The probability that this input improves on the current maximum.", + }, + "upper_confidence_bound": { + "identifiers": ["ucb", "upper_confidence_bound"], + "default_args": {"beta": 4}, + "pretty_name": "Upper confidence bound", + "multitask_only": False, + "description": r"The expected value, plus some multiple of the uncertainty (typically \mu + 2\sigma).", + }, +} + + +class ConstrainedUpperConfidenceBound(UpperConfidenceBound): + def __init__(self, constraint, *args, **kwargs): + super().__init__(*args, **kwargs) + self.constraint = constraint + + def forward(self, x): + mean, sigma = self._mean_and_sigma(x) + + p_eff = 0.5 * (1 + torch.special.erf(self.beta.sqrt() / math.sqrt(2))) * torch.clamp(self.constraint(x), min=1e-6) + + return (mean if self.maximize else -mean) + sigma * np.sqrt(2) * torch.special.erfinv(2 * p_eff - 1) + + +class ConstrainedLogExpectedImprovement(LogExpectedImprovement): + def __init__(self, constraint, *args, **kwargs): + super().__init__(*args, **kwargs) + self.constraint = constraint + + def forward(self, x): + return super().forward(x) + self.constraint(x).log() + + +class ConstrainedLogProbabilityOfImprovement(LogProbabilityOfImprovement): + def __init__(self, constraint, *args, **kwargs): + super().__init__(*args, **kwargs) + self.constraint = constraint + + def forward(self, x): + return super().forward(x) + self.constraint(x).log() + + +class qConstrainedNoisyExpectedHypervolumeImprovement(qNoisyExpectedHypervolumeImprovement): + def __init__(self, constraint, *args, **kwargs): + super().__init__(*args, **kwargs) + self.constraint = constraint + + def forward(self, x): + return super().forward(x) * self.constraint(x) + + +class qConstrainedLowerBoundMaxValueEntropy(qLowerBoundMaxValueEntropy): + def __init__(self, constraint, *args, **kwargs): + super().__init__(*args, **kwargs) + self.constraint = constraint + + def forward(self, x): + return super().forward(x) * self.constraint(x) diff --git a/bloptools/bayesian/digestion.py b/bloptools/bayesian/digestion.py index 147d2d3..eed3199 100644 --- a/bloptools/bayesian/digestion.py +++ b/bloptools/bayesian/digestion.py @@ -1,2 +1,4 @@ def default_digestion_function(db, uid): - return db[uid].table(fill=True) + products = db[uid].table(fill=True) + print(products) + return products diff --git a/bloptools/bayesian/models.py b/bloptools/bayesian/models.py index a160703..8733ac8 100644 --- a/bloptools/bayesian/models.py +++ b/bloptools/bayesian/models.py @@ -21,21 +21,14 @@ def __init__(self, train_inputs, train_targets, skew_dims=True, *args, **kwargs) ) -class LatentDirichletClassifier(botorch.models.gp_regression.SingleTaskGP): +class LatentDirichletClassifier(LatentGP): def __init__(self, train_inputs, train_targets, skew_dims=True, *args, **kwargs): - super().__init__(train_inputs, train_targets, *args, **kwargs) - - self.mean_module = gpytorch.means.ConstantMean() - self.covar_module = kernels.LatentKernel( - num_inputs=train_inputs.shape[-1], - num_outputs=train_targets.shape[-1], - skew_dims=skew_dims, - diag_prior=True, - scale=True, - **kwargs - ) + 
super().__init__(train_inputs, train_targets, skew_dims, *args, **kwargs) - def log_prob(self, x, n_samples=256): + def probabilities(self, x, n_samples=256): + """ + Takes in a (..., m) dimension tensor and returns a (..., n_classes) tensor + """ *input_shape, n_dim = x.shape samples = self.posterior(x.reshape(-1, n_dim)).sample(torch.Size((n_samples,))).exp() - return torch.log((samples / samples.sum(-1, keepdim=True)).mean(0)[:, 1]).reshape(*input_shape, 1) + return (samples / samples.sum(-1, keepdim=True)).mean(0).reshape(*input_shape, -1) diff --git a/bloptools/functions.py b/bloptools/functions.py new file mode 100644 index 0000000..2d7eff3 --- /dev/null +++ b/bloptools/functions.py @@ -0,0 +1,5 @@ +import numpy as np + + +def sigmoid(x): + return 1 / (1 + np.exp(-x)) diff --git a/bloptools/test_functions.py b/bloptools/test_functions.py index d61b7f8..377cf16 100644 --- a/bloptools/test_functions.py +++ b/bloptools/test_functions.py @@ -29,6 +29,34 @@ def constrained_himmelblau(x1, x2): return np.where(x1**2 + x2**2 < 50, himmelblau(x1, x2), np.nan) +def binh_korn(x1, x2): + """ + Binh and Korn function + """ + f1 = 4 * x1**2 + 4 * x2**2 + f2 = (x1 - 5) ** 2 + (x2 - 5) ** 2 + g1 = (x1 - 5) ** 2 + x2**2 <= 25 + g2 = (x1 - 8) ** 2 + (x2 + 3) ** 2 >= 7.7 + + c = g1 & g2 + + return np.where(c, f1, np.nan), np.where(c, f2, np.nan) + + +def binh_korn_digestion(db, uid): + """ + Digests the Binh and Korn function into the feedback. + """ + products = db[uid].table() + + for index, entry in products.iterrows(): + f1, f2 = binh_korn(entry.x1, entry.x2) + products.loc[index, "f1"] = f1 + products.loc[index, "f2"] = f2 + + return products + + def skewed_himmelblau(x1, x2): """ Himmelblau's function, with skewed coordinates """ @@ -77,19 +105,57 @@ def ackley(*x): def gaussian_beam_waist(x1, x2): """ - Simulating a misaligned Gaussian beam. The optimum is at (1, 1, 1, 1) + Simulating a misaligned Gaussian beam. The optimum is at (1, 1) """ return np.sqrt(1 + 0.25 * (x1 - x2) ** 2 + 16 * (x1 + x2 - 2) ** 2) -def himmelblau_digestion(db, uid): +def hartmann6(*x): + X = np.c_[x] + + alpha = np.array([1.0, 1.2, 3.0, 3.2]) + + A = np.array( + [[10, 3, 17, 3.5, 1.7, 8], [0.05, 10, 17, 0.1, 8, 14], [3, 3.5, 1.7, 10, 17, 8], [17, 8, 0.05, 10, 0.1, 14]] + ) + + P = 1e-4 * np.array( + [ + [1312, 1696, 5569, 124, 8283, 5886], + [2329, 4135, 8307, 3736, 1004, 9991], + [2348, 1451, 3522, 2883, 3047, 6650], + [4047, 8828, 8732, 5743, 1091, 381], + ] + ) + + return -(alpha * np.exp(-(A * np.square(X - P)).sum(axis=1))).sum() + + +def kb_tradeoff_2d(x1, x2): + width = np.sqrt(1 + 0.25 * (x1 - x2) ** 2 + 16 * (x1 + x2 - 2) ** 2) + d = np.sqrt(x1**2 + x2**2) + flux = np.exp(-0.5 * np.where(d < 5, np.where(d > -5, 0, d + 5), d - 5) ** 2) + + return width, flux + + +def kb_tradeoff_4d(x1, x2, x3, x4): + x_width = np.sqrt(1 + 0.25 * (x1 - x2) ** 2 + 16 * (x1 + x2 - 2) ** 2) + y_width = np.sqrt(1 + 0.25 * (x3 - x4) ** 2 + 16 * (x3 + x4 - 2) ** 2) + d = np.sqrt(x1**2 + x2**2 + x3**2 + x4**2) + flux = np.exp(-0.5 * np.where(d < 5, np.where(d > -5, 0, d + 5), d - 5) ** 2) + + return x_width, y_width, flux + + +def constrained_himmelblau_digestion(db, uid): """ Digests Himmelblau's function into the feedback.
""" products = db[uid].table() for index, entry in products.iterrows(): - products.loc[index, "himmelblau"] = himmelblau(entry.x1, entry.x2) + products.loc[index, "himmelblau"] = constrained_himmelblau(entry.x1, entry.x2) return products diff --git a/bloptools/tests/conftest.py b/bloptools/tests/conftest.py index a02f9ca..07cc958 100644 --- a/bloptools/tests/conftest.py +++ b/bloptools/tests/conftest.py @@ -7,10 +7,6 @@ from bluesky.run_engine import RunEngine from databroker import Broker from ophyd.utils import make_dir_tree -from sirepo_bluesky.madx_handler import MADXFileHandler -from sirepo_bluesky.shadow_handler import ShadowFileHandler -from sirepo_bluesky.sirepo_bluesky import SirepoBluesky -from sirepo_bluesky.srw_handler import SRWFileHandler from bloptools.bayesian import Agent @@ -19,6 +15,41 @@ @pytest.fixture(scope="function") def db(): + """Return a data broker""" + # MongoDB backend: + db = Broker.named("temp") # mongodb backend + try: + databroker.assets.utils.install_sentinels(db.reg.config, version=1) + except Exception: + pass + + return db + + +@pytest.fixture(scope="function") +def RE(db): + loop = asyncio.new_event_loop() + loop.set_debug(True) + RE = RunEngine({}, loop=loop) + RE.subscribe(db.insert) + + bec = best_effort.BestEffortCallback() + RE.subscribe(bec) + + bec.disable_baseline() + bec.disable_heading() + bec.disable_table() + bec.disable_plots() + + return RE + + +@pytest.fixture(scope="function") +def db_with_bluesky(): + from sirepo_bluesky.madx_handler import MADXFileHandler + from sirepo_bluesky.shadow_handler import ShadowFileHandler + from sirepo_bluesky.srw_handler import SRWFileHandler + """Return a data broker""" # MongoDB backend: db = Broker.named("local") # mongodb backend @@ -36,11 +67,11 @@ def db(): @pytest.fixture(scope="function") -def RE(db): +def RE_with_bluesky(db_with_bluesky): loop = asyncio.new_event_loop() loop.set_debug(True) RE = RunEngine({}, loop=loop) - RE.subscribe(db.insert) + RE.subscribe(db_with_bluesky.insert) bec = best_effort.BestEffortCallback() RE.subscribe(bec) @@ -71,7 +102,7 @@ def agent(db): agent = Agent( dofs=dofs, tasks=tasks, - digestion=test_functions.himmelblau_digestion, + digestion=test_functions.constrained_himmelblau_digestion, db=db, verbose=True, tolerate_acquisition_errors=False, @@ -123,15 +154,17 @@ def make_dirs(): _ = make_dir_tree(datetime.datetime.now().year, base_path=root_dir) -@pytest.fixture(scope="function") -def srw_tes_simulation(make_dirs): - connection = SirepoBluesky("http://localhost:8000") - data, _ = connection.auth("srw", "00000002") - return connection +# @pytest.fixture(scope="function") +# def srw_tes_simulation(make_dirs): +# from sirepo_bluesky.sirepo_bluesky import SirepoBluesky +# connection = SirepoBluesky("http://localhost:8000") +# data, _ = connection.auth("srw", "00000002") +# return connection -@pytest.fixture(scope="function") -def shadow_tes_simulation(make_dirs): - connection = SirepoBluesky("http://localhost:8000") - data, _ = connection.auth("shadow", "00000002") - return connection +# @pytest.fixture(scope="function") +# def shadow_tes_simulation(make_dirs): +# from sirepo_bluesky.sirepo_bluesky import SirepoBluesky +# connection = SirepoBluesky("http://localhost:8000") +# data, _ = connection.auth("shadow", "00000002") +# return connection diff --git a/bloptools/tests/test_acq_funcs.py b/bloptools/tests/test_acq_funcs.py new file mode 100644 index 0000000..0dcead6 --- /dev/null +++ b/bloptools/tests/test_acq_funcs.py @@ -0,0 +1,27 @@ +import pytest + +from 
bloptools import devices, test_functions +from bloptools.bayesian import Agent + + +@pytest.mark.test_func +def test_acq_funcs_single_task(RE, db): + dofs = [ + {"device": devices.DOF(name="x1"), "limits": (-8, 8), "kind": "active"}, + {"device": devices.DOF(name="x2"), "limits": (-8, 8), "kind": "active"}, + ] + + tasks = [ + {"key": "himmelblau", "kind": "minimize"}, + ] + + agent = Agent( + dofs=dofs, + tasks=tasks, + digestion=test_functions.constrained_himmelblau_digestion, + db=db, + ) + + RE(agent.initialize("qr", n_init=64)) + RE(agent.learn("ei", n_iter=2)) + RE(agent.learn("pi", n_iter=2)) diff --git a/bloptools/tests/test_bayesian_shadow.py b/bloptools/tests/test_bayesian_shadow.py deleted file mode 100644 index 001f6e8..0000000 --- a/bloptools/tests/test_bayesian_shadow.py +++ /dev/null @@ -1,41 +0,0 @@ -import pytest -from sirepo_bluesky.sirepo_ophyd import create_classes - -import bloptools -from bloptools.experiments.sirepo.tes import w9_digestion - - -@pytest.mark.shadow -def test_bayesian_agent_tes_shadow(RE, db, shadow_tes_simulation): - data, schema = shadow_tes_simulation.auth("shadow", "00000002") - classes, objects = create_classes(connection=shadow_tes_simulation) - globals().update(**objects) - - data["models"]["simulation"]["npoint"] = 100000 - data["models"]["watchpointReport12"]["histogramBins"] = 32 - - dofs = [ - {"device": kbv.x_rot, "limits": (-0.1, 0.1), "kind": "active"}, - {"device": kbv.offz, "limits": (-0.5, 0.5), "kind": "active"}, - ] - - tasks = [ - {"key": "flux", "kind": "maximize"}, - {"key": "w9_fwhm_x", "kind": "minimize"}, - {"key": "w9_fwhm_y", "kind": "minimize"}, - ] - - agent = bloptools.bayesian.Agent( - dofs=dofs, - tasks=tasks, - dets=[w9], - digestion=w9_digestion, - db=db, - ) - - RE(agent.initialize("qr", n_init=4)) - - RE(agent.learn("ei", n_iter=2)) - RE(agent.learn("pi", n_iter=2)) - - agent.plot_tasks() diff --git a/bloptools/tests/test_passive_dofs.py b/bloptools/tests/test_passive_dofs.py index f170a79..d6aa6b7 100644 --- a/bloptools/tests/test_passive_dofs.py +++ b/bloptools/tests/test_passive_dofs.py @@ -20,7 +20,7 @@ def test_passive_dofs(RE, db): agent = Agent( dofs=dofs, tasks=tasks, - digestion=test_functions.himmelblau_digestion, + digestion=test_functions.constrained_himmelblau_digestion, db=db, verbose=True, tolerate_acquisition_errors=False, @@ -30,4 +30,4 @@ def test_passive_dofs(RE, db): agent.plot_tasks() agent.plot_acquisition() - agent.plot_feasibility() + agent.plot_validity() diff --git a/bloptools/tests/test_plots.py b/bloptools/tests/test_plots.py index 4675b01..aa1aa42 100644 --- a/bloptools/tests/test_plots.py +++ b/bloptools/tests/test_plots.py @@ -7,4 +7,5 @@ def test_plots(RE, agent): agent.plot_tasks() agent.plot_acquisition() - agent.plot_feasibility() + agent.plot_validity() + agent.plot_history() diff --git a/bloptools/utils.py b/bloptools/utils/__init__.py similarity index 67% rename from bloptools/utils.py rename to bloptools/utils/__init__.py index 67e1ec1..6f3bd85 100644 --- a/bloptools/utils.py +++ b/bloptools/utils/__init__.py @@ -50,7 +50,14 @@ def route(start_point, points): """ total_points = np.r_[np.atleast_2d(start_point), points] - normalized_points = (total_points - total_points.min(axis=0)) / total_points.ptp(axis=0) + points_scale = total_points.ptp(axis=0) + dim_mask = points_scale > 0 + + if dim_mask.sum() == 0: + return np.arange(len(points)) + + normalized_points = (total_points - total_points.min(axis=0))[:, dim_mask] / points_scale[dim_mask] + delay_matrix = 
np.sqrt(np.square(normalized_points[:, None, :] - normalized_points[None, :, :]).sum(axis=-1)) delay_matrix = (1e4 * delay_matrix).astype(int) # it likes integers idk @@ -137,3 +144,84 @@ def get_principal_component_bounds(image, beam_prop=0.5): y_max, separability, ) + + +def get_beam_bounding_box(image, thresh=0.5): + """ + Returns the bounding box (x_min, x_max, y_min, y_max) of the beam in pixel units. + This should go off without a hitch as long as thresh is less than 1. + """ + + n_y, n_x = image.shape + + if image.sum() == 0: + return np.nan, np.nan, np.nan, np.nan + + # filter the image + zim = sp.ndimage.median_filter(image.astype(float), size=3) + zim -= np.median(zim, axis=0) + zim -= np.median(zim, axis=1)[:, None] + + x_sum = zim.sum(axis=0) + y_sum = zim.sum(axis=1) + + x_sum_min_val = thresh * x_sum.max() + y_sum_min_val = thresh * y_sum.max() + + gtt_x = x_sum > x_sum_min_val + gtt_y = y_sum > y_sum_min_val + + i_x_min_start = np.where(~gtt_x[:-1] & gtt_x[1:])[0][0] + i_x_max_start = np.where(gtt_x[:-1] & ~gtt_x[1:])[0][-1] + i_y_min_start = np.where(~gtt_y[:-1] & gtt_y[1:])[0][0] + i_y_max_start = np.where(gtt_y[:-1] & ~gtt_y[1:])[0][-1] + + x_min = ( + 0 + if gtt_x[0] + else np.interp(x_sum_min_val, x_sum[[i_x_min_start, i_x_min_start + 1]], [i_x_min_start, i_x_min_start + 1]) + ) + y_min = ( + 0 + if gtt_y[0] + else np.interp(y_sum_min_val, y_sum[[i_y_min_start, i_y_min_start + 1]], [i_y_min_start, i_y_min_start + 1]) + ) + x_max = ( + n_x - 2 + if gtt_x[-1] + else np.interp(x_sum_min_val, x_sum[[i_x_max_start + 1, i_x_max_start]], [i_x_max_start + 1, i_x_max_start]) + ) + y_max = ( + n_y - 2 + if gtt_y[-1] + else np.interp(y_sum_min_val, y_sum[[i_y_max_start + 1, i_y_max_start]], [i_y_max_start + 1, i_y_max_start]) + ) + + return ( + x_min, + x_max, + y_min, + y_max, + ) + + +def best_image_feedback(image): + n_y, n_x = image.shape + + fim = sp.ndimage.median_filter(image, size=3) + + masked_image = fim * (fim - fim.mean() > 0.5 * fim.ptp()) + + x_weight = masked_image.sum(axis=0) + y_weight = masked_image.sum(axis=1) + + x = np.arange(n_x) + y = np.arange(n_y) + + x0 = np.sum(x_weight * x) / np.sum(x_weight) + y0 = np.sum(y_weight * y) / np.sum(y_weight) + + xw = 2 * np.sqrt((np.sum(x_weight * x**2) / np.sum(x_weight) - x0**2)) + yw = 2 * np.sqrt((np.sum(y_weight * y**2) / np.sum(y_weight) - y0**2)) + + return x0, xw, y0, yw diff --git a/bloptools/utils/prepare_re_env.py b/bloptools/utils/prepare_re_env.py new file mode 100644 index 0000000..1a7950b --- /dev/null +++ b/bloptools/utils/prepare_re_env.py @@ -0,0 +1,99 @@ +import argparse +import datetime +import json # noqa F401 + +import bluesky.plan_stubs as bps # noqa F401 +import bluesky.plans as bp # noqa F401 +import databroker +import matplotlib.pyplot as plt +import numpy as np # noqa F401 +from bluesky.callbacks import best_effort +from bluesky.run_engine import RunEngine +from databroker import Broker +from ophyd.utils import make_dir_tree + +DEFAULT_DB_TYPE = "local" +DEFAULT_ROOT_DIR = "/tmp/sirepo-bluesky-data" +DEFAULT_ENV_TYPE = "stepper" +DEFAULT_USE_SIREPO = False + + +def re_env(db_type=DEFAULT_DB_TYPE, root_dir=DEFAULT_ROOT_DIR): + RE = RunEngine({}) + bec = best_effort.BestEffortCallback() + RE.subscribe(bec) + + db = Broker.named(db_type) + try: + databroker.assets.utils.install_sentinels(db.reg.config, version=1) + except Exception: + pass + RE.subscribe(db.insert) + + _ = make_dir_tree(datetime.datetime.now().year, base_path=root_dir) + + return dict(RE=RE,
db=db, bec=bec) + + +def register_handlers(db, handlers): + for handler_spec, handler_class in handlers.items(): + db.reg.register_handler(handler_spec, handler_class, overwrite=True) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Prepare bluesky environment") + parser.add_argument( + "-d", + "--db-type", + dest="db_type", + default=DEFAULT_DB_TYPE, + help="Type of databroker ('local', 'temp', etc.)", + ) + parser.add_argument( + "-r", + "--root-dir", + dest="root_dir", + default=DEFAULT_ROOT_DIR, + help="The root dir to create YYYY/MM/DD dir structure.", + ) + + parser.add_argument( + "-s", + "--use-sirepo", + dest="use_sirepo", + default=DEFAULT_USE_SIREPO, + help="Whether to use Sirepo simulations (requires sirepo-bluesky).", + ) + + env_choices = ["stepper", "flyer"] + parser.add_argument( + "-e", + "--env-type", + dest="env_type", + choices=env_choices, + default=DEFAULT_ENV_TYPE, + help="Type of RE environment.", + ) + + args = parser.parse_args() + kwargs_re = dict(db_type=args.db_type, root_dir=args.root_dir) + ret = re_env(**kwargs_re) + globals().update(**ret) + + if args.use_sirepo: + from sirepo_bluesky.srw_handler import SRWFileHandler + + if args.env_type == "stepper": + from sirepo_bluesky.shadow_handler import ShadowFileHandler + + handlers = {"srw": SRWFileHandler, "SIREPO_FLYER": SRWFileHandler, "shadow": ShadowFileHandler} + plt.ion() + elif args.env_type == "flyer": + from sirepo_bluesky.madx_handler import MADXFileHandler + + handlers = {"srw": SRWFileHandler, "SIREPO_FLYER": SRWFileHandler, "madx": MADXFileHandler} + bec.disable_plots() # noqa: F821 + else: + raise RuntimeError(f"Unknown environment type: {args.env_type}.\nAvailable environment types: {env_choices}") + + register_handlers(db, handlers) # noqa: F821 diff --git a/docs/source/tutorials.rst b/docs/source/tutorials.rst index 3a57df0..297b303 100644 --- a/docs/source/tutorials.rst +++ b/docs/source/tutorials.rst @@ -8,5 +8,3 @@ Tutorials tutorials/constrained-himmelblau.ipynb tutorials/hyperparameters.ipynb tutorials/passive-dofs.ipynb - tutorials/latent-toroid-dimensions.ipynb - tutorials/multi-task-sirepo.ipynb diff --git a/docs/source/tutorials/constrained-himmelblau.ipynb b/docs/source/tutorials/constrained-himmelblau.ipynb index 01c06d8..95f863d 100644 --- a/docs/source/tutorials/constrained-himmelblau.ipynb +++ b/docs/source/tutorials/constrained-himmelblau.ipynb @@ -87,7 +87,9 @@ }, "outputs": [], "source": [ - "%run -i ../../../examples/prepare_bluesky.py # prepare the bluesky environment\n", + "from bloptools.utils import prepare_re_env\n", + "\n", + "%run -i $prepare_re_env.__file__ --db-type=temp\n", "\n", "from bloptools import devices\n", "from bloptools.bayesian import Agent\n", @@ -131,7 +133,7 @@ }, "outputs": [], "source": [ - "agent.plot_feasibility()" + "agent.plot_validity()" ] }, { @@ -210,7 +212,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3.11.4 ('bluesky')", "language": "python", "name": "python3" }, @@ -224,11 +226,11 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.16" + "version": "3.11.4" }, "vscode": { "interpreter": { - "hash": "9aced674e98d511b4f654e147532c84d38dc986fe042b1e92785fb9d8df41f75" + "hash": "eee21ccc240bdddd7cf04478199e20f7257541e2f592ca1a4d34ebdc0225d742" } } }, diff --git a/docs/source/tutorials/hyperparameters.ipynb b/docs/source/tutorials/hyperparameters.ipynb index d37b5c1..e3908f5 100MISSING ---
+++ b/docs/source/tutorials/hyperparameters.ipynb
@@ -68,7 +68,9 @@
    },
    "outputs": [],
    "source": [
-    "%run -i ../../../examples/prepare_bluesky.py # prepare the bluesky environment\n",
+    "from bloptools.utils import prepare_re_env\n",
+    "\n",
+    "%run -i $prepare_re_env.__file__ --db-type=temp\n",
     "\n",
     "from bloptools import devices\n",
     "from bloptools.bayesian import Agent\n",
@@ -129,7 +131,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
+   "display_name": "Python 3.11.4 64-bit",
    "language": "python",
    "name": "python3"
   },
@@ -143,11 +145,11 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.11.5"
   },
   "vscode": {
    "interpreter": {
-    "hash": "9aced674e98d511b4f654e147532c84d38dc986fe042b1e92785fb9d8df41f75"
+    "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e"
   }
  }
 },
diff --git a/docs/source/tutorials/introduction.ipynb b/docs/source/tutorials/introduction.ipynb
index 1d4eef1..9ccc3b0 100644
--- a/docs/source/tutorials/introduction.ipynb
+++ b/docs/source/tutorials/introduction.ipynb
@@ -118,7 +118,9 @@
    },
    "outputs": [],
    "source": [
-    "%run -i ../../../examples/prepare_bluesky.py # prepare the bluesky environment\n",
+    "from bloptools.utils import prepare_re_env\n",
+    "\n",
+    "%run -i $prepare_re_env.__file__ --db-type=temp\n",
     "\n",
     "from bloptools.bayesian import Agent\n",
     "\n",
@@ -213,7 +215,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
+   "display_name": "Python 3.10.12 ('bluesky')",
    "language": "python",
    "name": "python3"
   },
@@ -227,11 +229,11 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.11.4"
   },
   "vscode": {
    "interpreter": {
-    "hash": "9aced674e98d511b4f654e147532c84d38dc986fe042b1e92785fb9d8df41f75"
+    "hash": "eee21ccc240bdddd7cf04478199e20f7257541e2f592ca1a4d34ebdc0225d742"
   }
  }
 },
diff --git a/docs/source/tutorials/passive-dofs.ipynb b/docs/source/tutorials/passive-dofs.ipynb
index 6f5adea..2ce4245 100644
--- a/docs/source/tutorials/passive-dofs.ipynb
+++ b/docs/source/tutorials/passive-dofs.ipynb
@@ -26,7 +26,9 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "%run -i ../../../examples/prepare_bluesky.py # prepare the bluesky environment\n",
+    "from bloptools.utils import prepare_re_env\n",
+    "\n",
+    "%run -i $prepare_re_env.__file__ --db-type=temp\n",
     "\n",
     "from bloptools import devices, test_functions\n",
     "from bloptools.bayesian import Agent\n",
@@ -65,7 +67,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
+   "display_name": "Python 3.11.4 64-bit",
    "language": "python",
    "name": "python3"
   },
@@ -79,11 +81,11 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.11.5"
   },
   "vscode": {
    "interpreter": {
-    "hash": "9aced674e98d511b4f654e147532c84d38dc986fe042b1e92785fb9d8df41f75"
+    "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e"
   }
  }
 },
diff --git a/docs/source/tutorials/latent-toroid-dimensions.ipynb b/docs/wip/latent-toroid-dimensions.ipynb
similarity index 90%
rename from docs/source/tutorials/latent-toroid-dimensions.ipynb
rename to docs/wip/latent-toroid-dimensions.ipynb
index 7814d33..dd93bd1 100644
--- a/docs/source/tutorials/latent-toroid-dimensions.ipynb
+++ b/docs/wip/latent-toroid-dimensions.ipynb
@@ -20,7 +20,9 @@
    },
    "outputs": [],
    "source": [
-    "%run -i ../../../examples/prepare_bluesky.py\n",
+    "from bloptools.utils import prepare_re_env\n",
+    "\n",
+    "%run -i $prepare_re_env.__file__ --db-type=temp\n",
     "%run -i ../../../examples/prepare_tes_shadow.py"
    ]
   },
@@ -83,14 +85,14 @@
    "outputs": [],
    "source": [
     "agent.plot_tasks()\n",
-    "agent.plot_feasibility()\n",
+    "agent.plot_validity()\n",
     "agent.plot_acquisition(strategy=[\"ei\", \"pi\", \"ucb\"])"
    ]
   }
  ],
 "metadata": {
  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
+   "display_name": "Python 3.11.5 64-bit",
    "language": "python",
    "name": "python3"
  },
@@ -104,11 +106,11 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.11.5"
  },
  "vscode": {
   "interpreter": {
-    "hash": "9aced674e98d511b4f654e147532c84d38dc986fe042b1e92785fb9d8df41f75"
+    "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e"
   }
  }
 },
diff --git a/docs/source/tutorials/multi-task-sirepo.ipynb b/docs/wip/multi-task-sirepo.ipynb
similarity index 91%
rename from docs/source/tutorials/multi-task-sirepo.ipynb
rename to docs/wip/multi-task-sirepo.ipynb
index b965327..906e457 100644
--- a/docs/source/tutorials/multi-task-sirepo.ipynb
+++ b/docs/wip/multi-task-sirepo.ipynb
@@ -22,20 +22,12 @@
    },
    "outputs": [],
    "source": [
-    "%run -i ../../../examples/prepare_bluesky.py\n",
+    "from bloptools.utils import prepare_re_env\n",
+    "\n",
+    "%run -i $prepare_re_env.__file__ --db-type=temp\n",
     "%run -i ../../../examples/prepare_tes_shadow.py"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "8672bf9e",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "toroid.offz"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -67,7 +59,17 @@
     "    db=db,\n",
     ")\n",
     "\n",
-    "RE(agent.initialize(\"qr\", n_init=16))"
+    "RE(agent.initialize(\"qr\", n_init=4))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2409e012",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "RE(agent.learn(\"ei\"))"
    ]
   },
  {
@@ -126,7 +128,7 @@
  ],
 "metadata": {
  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
+   "display_name": "Python 3.11.4 64-bit",
    "language": "python",
    "name": "python3"
  },
@@ -140,11 +142,11 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.11.5"
  },
  "vscode": {
   "interpreter": {
-    "hash": "9aced674e98d511b4f654e147532c84d38dc986fe042b1e92785fb9d8df41f75"
+    "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e"
   }
  }
 },
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 6782e7c..2ec27e7 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -19,5 +19,6 @@ matplotlib
 nbsphinx
 numpydoc
 pandoc
+# sirepo-bluesky
 sphinx-copybutton
 sphinx_rtd_theme
diff --git a/requirements.txt b/requirements.txt
index 0977478..e48f1be 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,12 +1,13 @@
-bluesky>=1.6.1
+bluesky
 botorch
 databroker
 gpytorch
+h5py
 matplotlib
 numpy
 ophyd
 ortools
 scipy
-sirepo-bluesky>=0.7.0
 tables
 torch
+zict<3.0.0
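
Note: for reviewers who want to try the new prepare_re_env module outside of the notebooks, a minimal usage sketch (illustrative only, not part of the patch) could look like the following; the "temp" databroker profile used by the updated tutorials avoids any MongoDB or config-file setup.

    # sketch: build a bluesky environment with the helper module added above
    from bloptools.utils.prepare_re_env import re_env, register_handlers

    env = re_env(db_type="temp", root_dir="/tmp/sirepo-bluesky-data")
    RE, db, bec = env["RE"], env["db"], env["bec"]

    # file handlers only need to be registered when --use-sirepo is enabled;
    # the notebooks get the same setup via:
    #   %run -i $prepare_re_env.__file__ --db-type=temp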
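
The new image helpers can likewise be exercised on a synthetic beam. The import path below is an assumption (it presumes the functions land in bloptools.utils next to get_principal_component_bounds, as the hunk above suggests); the Gaussian test image is purely illustrative.

    import numpy as np

    from bloptools.utils import best_image_feedback, get_beam_bounding_box

    # synthetic beam: a 2D Gaussian centered at (x, y) = (120, 80) on a 256 x 192 detector
    ny, nx = 192, 256
    yy, xx = np.mgrid[0:ny, 0:nx]
    image = np.exp(-0.5 * (((xx - 120) / 20) ** 2 + ((yy - 80) / 10) ** 2))

    # bounding box of the region whose projected intensity exceeds half its maximum
    x_min, x_max, y_min, y_max = get_beam_bounding_box(image, thresh=0.5)

    # centroid and width estimates from the same image
    x0, xw, y0, yw = best_image_feedback(image)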