diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 03d23b4..a72ee2d 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -11,20 +11,20 @@ jobs:
     steps:
       - name: Checkout Repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Install Python
-        uses: actions/setup-python@v3
+        uses: actions/setup-python@v5
         with:
-          python-version: "3.9"
+          python-version: "3.10"

-      - name: Install Twine
-        run: sudo $pythonLocation/bin/python3 -m pip install twine wheel
+      - name: Install Twine and Build
+        run: sudo pip install twine build

       - name: Create the distribution
         run: |
           git fetch --prune --unshallow --tags
-          sudo $pythonLocation/bin/python3 setup.py sdist bdist_wheel
+          sudo python3 -m build

       - name: Publish package distributions to PyPI
         uses: pypa/gh-action-pypi-publish@release/v1
@@ -38,12 +38,12 @@ jobs:
         shell: bash -l {0}
     steps:
       - name: Checkout Repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Install Python
-        uses: actions/setup-python@v3
+        uses: actions/setup-python@v5
         with:
-          python-version: "3.9"
+          python-version: "3.10"

       - name: Install Miniconda
         uses: conda-incubator/setup-miniconda@v2
@@ -52,28 +52,27 @@ jobs:
           activate-environment: ""
           miniconda-version: "latest"

-      - name: Install the Mamba Dependencies
-        run: |
+      - name: Install the Conda Dependencies
+        run: |
           conda config --set always_yes yes --set auto_update_conda false
           conda update conda
-          conda install mamba -n base -c conda-forge
           conda install -n base conda-libmamba-solver
-          mamba install python=3.9 "conda-build=3.21" colorama pip ruamel ruamel.yaml rich mamba jsonschema -c conda-forge
-          $pythonLocation/bin/python3 -m pip install -e .
+          conda install python=3.10 conda-build colorama pip ruamel ruamel.yaml rich jsonschema -c conda-forge
+          git fetch --prune --unshallow --tags
+          pip install -e .

 # run install twice due to client-size to ensure all files downloaded
 # echo yes before login to prevent anaconda bug breaking automation
 # git tags MUST be fetched otherwise output will be blank
 # bash variables cannot be used in github actions, must use actions specific syntax and methods
       - name: Build the Anaconda Package
-        id: mambabuild
+        id: condabuild
         run: |
-          mamba install anaconda-client
-          conda config --set anaconda_upload no
+          conda install anaconda-client
+          conda config --set anaconda_upload no --set solver libmamba
           echo yes | anaconda login --username ${{ secrets.ANACONDA_CLOUD_USERNAME }} --password ${{ secrets.ANACONDA_CLOUD_PASSWORD }}
-          git fetch --prune --unshallow --tags
-          VERSION_FROM_GIT_TAG=$(git tag --list "v*[0-9]" --sort=version:refname | tail -1 | cut -c 2-) conda build . -c conda-forge -c stanfordcvxgrp --numpy 1.16.4
-          echo '::set-output name=gitversion::$(git tag --list "v*[0-9]" --sort=version:refname | tail -1 | cut -c 2-)'
+          VERSION_FROM_GIT_TAG=$(git tag --list "v*[0-9]" --sort=version:refname | tail -1 | cut -c 2-) conda build . -c anaconda -c conda-forge -c stanfordcvxgrp
+          echo "gitversion=$(git tag --list "v*[0-9]" --sort=version:refname | tail -1 | cut -c 2-)" >> $GITHUB_OUTPUT

       - name: Upload the Anaconda Package
         id: condaload
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 1ae0e00..9ebbf4c 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,56 +1,59 @@
 name: Main Test
+
 on:
-  pull_request:
-  push:
-    branches:
-      - master
-    tags:
-      - '*'
+  push:
+  pull_request:
+    branches: [main]
+    types: [opened, reopened]

 jobs:
   run-tests:
     runs-on: ubuntu-latest
+    environment: test
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: [ "3.10", "3.11", "3.12" ]
     steps:
       - name: Checkout Repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Install Python
-        uses: actions/setup-python@v3
+        uses: actions/setup-python@v5
         with:
-          python-version: '3.9'
+          python-version: ${{ matrix.python-version }}

-      - name: Install Dependencies
-        run: |
-          sudo $pythonLocation/bin/python3 -m pip install -r requirements.txt
-          sudo $pythonLocation/bin/python3 -m pip install pytest
-          sudo $pythonLocation/bin/python3 -m pip install -e .
+      - name: Install Python Dependencies
+        run: |
+          curl -LsSf https://astral.sh/uv/install.sh | sh
+          uv pip install --system --break-system-packages -r requirements.txt
+          uv pip install --system --break-system-packages pytest pytest-cov pytest-github-report pytest-github-actions-annotate-failures

-# Current unit test is not consistent. Occasionally fails despite usually passing. Needs to be fixed.
       - name: Run Unit Tests
-        run: |
-          $pythonLocation/bin/python3 -m pytest --import-mode=append tests/
+        env:
+          pytest_github_report: true
+          pytest_verbosity: 2
+        run: pytest -v --cov=src --cov-report=xml --cov-report=term-missing --color=yes tests/

   test-build-pypi:
-    needs: run-tests
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Install Python
-        uses: actions/setup-python@v3
+        uses: actions/setup-python@v5
         with:
-          python-version: "3.9"
+          python-version: "3.10"

-      - name: Install Twine
-        run: sudo $pythonLocation/bin/python3 -m pip install twine wheel
+      - name: Install Twine and Build
+        run: sudo pip install twine build

       - name: Create the distribution
         run: |
           git fetch --prune --unshallow --tags
-          sudo $pythonLocation/bin/python3 setup.py sdist bdist_wheel
+          sudo python3 -m build

   test-build-conda:
-    needs: run-tests
     runs-on: ubuntu-latest
     # sets default shell to remove need for source to run the conda shell
     defaults:
@@ -58,28 +61,28 @@ jobs:
       shell: bash -l {0}
     steps:
       - name: Checkout Repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Install Python
-        uses: actions/setup-python@v3
+        uses: actions/setup-python@v5
         with:
-          python-version: "3.9"
+          python-version: "3.10"

       - name: Install Miniconda
-        uses: conda-incubator/setup-miniconda@v2
+        uses: conda-incubator/setup-miniconda@v3
         with:
           auto-activate-base: true
           activate-environment: ""
           miniconda-version: "latest"

-      - name: Install the Mamba Dependencies
-        run: |
+      - name: Install the Conda Dependencies
+        run: |
           conda config --set always_yes yes --set auto_update_conda false
           conda update conda
-          conda install mamba -n base -c conda-forge
           conda install -n base conda-libmamba-solver
-          mamba install python=3.9 "conda-build=3.21" colorama pip ruamel ruamel.yaml rich mamba jsonschema -c conda-forge
-          $pythonLocation/bin/python3 -m pip install -e .
+          conda install python=3.10 conda-build colorama pip ruamel ruamel.yaml rich jsonschema -c conda-forge
+          git fetch --prune --unshallow --tags
+          pip install -e .

 # run install twice due to client-size to ensure all files downloaded
@@ -87,10 +90,10 @@ jobs:
 # git tags MUST be fetched otherwise output will be blank
 # bash variables cannot be used in github actions, must use actions specific syntax and methods
       - name: Build the Anaconda Package
-        id: mambabuild
+        id: condabuild
         run: |
-          mamba install anaconda-client
-          conda config --set anaconda_upload no
-          git fetch --prune --unshallow --tags
-          VERSION_FROM_GIT_TAG=$(git tag --list "v*[0-9]" --sort=version:refname | tail -1 | cut -c 2-) conda build . -c conda-forge -c stanfordcvxgrp --numpy 1.16.4
-          echo '::set-output name=gitversion::$(git tag --list "v*[0-9]" --sort=version:refname | tail -1 | cut -c 2-)'
+          conda install anaconda-client
+          conda clean --all
+          conda config --set anaconda_upload no --set solver libmamba
+          VERSION_FROM_GIT_TAG=$(git tag --list "v*[0-9]" --sort=version:refname | tail -1 | cut -c 2-) conda build . -c conda-forge -c stanfordcvxgrp
+          echo "gitversion=$(git tag --list "v*[0-9]" --sort=version:refname | tail -1 | cut -c 2-)" >> $GITHUB_OUTPUT
diff --git a/conda_recipe/meta.yaml b/conda_recipe/meta.yaml
index 6bad3ea..99b3783 100644
--- a/conda_recipe/meta.yaml
+++ b/conda_recipe/meta.yaml
@@ -5,7 +5,7 @@ package:
   version: {{ environ.get('VERSION_FROM_GIT_TAG') }}

 source:
-  git_url: https://github.com/cvxgrp/signal-decomposition
+  path: ..  # checked out repo instead of main repo branch

 build:
   noarch: python
@@ -15,32 +15,37 @@ build:
 requirements:
   host:
     - pip
-    - python >=3.9
-    - scipy
-    - numpy >=1.20
-    - scikit-learn
-    - cvxpy
-    - matplotlib
+    - python >=3.6
   run:
     - pip
     - python
     - scipy
-    - numpy >=1.20
+    - numpy
     - scikit-learn
     - cvxpy
     - matplotlib
+    - qss
+    - clarabel
+    - pandas
+
+test:
+  imports:
+    - gfosd
+  commands:
+    - pip check
+  requires:
+    - pip

 about:
   home: https://github.com/cvxgrp/signal-decomposition
-  license: BSD
+  license: BSD-3-Clause
+  license_file: LICENSE
   license_family: BSD
-  license_file:
-  summary: Optimzation-based signal decomposition
-  doc_url:
-  dev_url:
+  summary: Optimization-based signal decomposition

 extra:
   recipe-maintainers:
-    - Bennet Myers
-    - Thistleman
+    - bmeyers
+    - thistleman
+    - pluflou
diff --git a/gfosd/__init__.py b/gfosd/__init__.py
index 04ff0f2..71cdf70 100644
--- a/gfosd/__init__.py
+++ b/gfosd/__init__.py
@@ -1 +1,2 @@
+from ._version import version as __version__
 from gfosd.problem import Problem
\ No newline at end of file
diff --git a/gfosd/_version.py b/gfosd/_version.py
new file mode 100644
index 0000000..1841dac
--- /dev/null
+++ b/gfosd/_version.py
@@ -0,0 +1,16 @@
+# file generated by setuptools_scm
+# don't change, don't track in version control
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+    from typing import Tuple, Union
+    VERSION_TUPLE = Tuple[Union[int, str], ...]
+else:
+    VERSION_TUPLE = object
+
+version: str
+__version__: str
+__version_tuple__: VERSION_TUPLE
+version_tuple: VERSION_TUPLE
+
+__version__ = version = '0.0.2.dev30+gac101bd'
+__version_tuple__ = version_tuple = (0, 0, 2, 'dev30', 'gac101bd')
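With _version.py generated by setuptools_scm and re-exported from gfosd/__init__.py, the package version can be read at runtime. A minimal sketch; the printed value depends on the checked-out git tag, so the output shown is illustrative only:

    # Query the version that setuptools_scm wrote into gfosd/_version.py.
    import gfosd

    print(gfosd.__version__)  # e.g. '0.0.2.dev30+gac101bd'

    # Equivalent lookup through the installed package metadata.
    from importlib.metadata import version
    print(version("sig-decomp"))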
diff --git a/gfosd/components/__init__.py b/gfosd/components/__init__.py
index 5070441..c21e168 100644
--- a/gfosd/components/__init__.py
+++ b/gfosd/components/__init__.py
@@ -2,9 +2,10 @@
                                    SumAbs,
                                    SumHuber,
                                    SumQuantile,
-                                   SumCard)
+                                   SumCard,
+                                   SumSquareReference)
 from gfosd.components.inequality_constraints import Inequality
-from gfosd.components.basis_constraints import Basis, Periodic
+from gfosd.components.basis_constraints import Basis, Periodic, Fourier
 from gfosd.components.finite_set import FiniteSet, Boolean
 from gfosd.components.aggregate import Aggregate
 from gfosd.components.equality_constraints import (FirstValEqual,
diff --git a/gfosd/components/aggregate.py b/gfosd/components/aggregate.py
index 8a621cb..50c95a8 100644
--- a/gfosd/components/aggregate.py
+++ b/gfosd/components/aggregate.py
@@ -37,7 +37,10 @@ def prepare_attributes(self, T, p=1):
         self._Pz = sp.block_diag([
             c._Pz for c in self._gf_list
         ])
-        self._make_q()
+        # same logic as above but for q
+        qx = self._gf_list[g_ix]._q[:self._gf_list[g_ix].x_size]
+        qz = np.concatenate([c._q[self._gf_list[g_ix].x_size:] for c in self._gf_list])
+        self._q = np.concatenate([qx, qz])
         self._make_r()
         self._gx = self._make_gx()
         self._gz = self._make_gz()
@@ -53,6 +56,9 @@ def _make_P(self):
             c._Pz for c in self._gf_list
         ])

+    def _make_r(self):
+        self._r = np.sum([c._r for c in self._gf_list])
+
     def _make_gx(self):
         gx = []
         for ix, component in enumerate(self._gf_list):
diff --git a/gfosd/components/base_graph_class.py b/gfosd/components/base_graph_class.py
index c1eceec..fe2d3ad 100644
--- a/gfosd/components/base_graph_class.py
+++ b/gfosd/components/base_graph_class.py
@@ -94,10 +94,10 @@ def _make_P(self, size):
         return sp.dok_matrix(2 * (size,))

     def _make_q(self):
-        self._q = None
+        self._q = np.zeros(self.x_size + self.z_size)

     def _make_r(self):
-        self._r = None
+        self._r = 0

     def _make_g(self, size):
         return []
diff --git a/gfosd/components/basis_constraints.py b/gfosd/components/basis_constraints.py
index 9fcdcad..75851ee 100644
--- a/gfosd/components/basis_constraints.py
+++ b/gfosd/components/basis_constraints.py
@@ -11,10 +11,11 @@
 """
-
 import numpy as np
 import scipy.sparse as sp
 from gfosd.components.base_graph_class import GraphComponent
+from gfosd.components.utilities import make_basis_matrix, make_regularization_matrix
+

 class Basis(GraphComponent):
     def __init__(self, basis, penalty=None, *args, **kwargs):
@@ -51,8 +52,8 @@ def _make_g(self, size):
         else:
             # typically 'abs', 'huber', or 'quantile'
             g = [{'g': self._penalty,
-                 'args': {'weight': self.weight},
-                 'range': (0, size)}]
+                  'args': {'weight': self.weight},
+                  'range': (0, size)}]
         return g

     def _make_P(self, size):
@@ -65,8 +66,32 @@ def _make_P(self, size):
             P = sp.dok_matrix(2 * (size,))
         return P

+
 class Periodic(Basis):
     def __init__(self, period, *args, **kwargs):
         self._period = period
         M = sp.eye(period)
         super().__init__(M, *args, **kwargs)
+
+
+class Fourier(Basis):
+    def __init__(self, num_harmonics, length, periods, standing_wave=False, trend=False, max_cross_k=None,
+                 custom_basis=None, weight=1, **kwargs):
+        _B = make_basis_matrix(
+            num_harmonics,
+            length,
+            periods,
+            standing_wave,
+            trend,
+            max_cross_k,
+            custom_basis)
+        _D = make_regularization_matrix(
+            num_harmonics,
+            weight,
+            periods,
+            standing_wave,
+            trend,
+            max_cross_k,
+            custom_basis
+        )
+        super().__init__(basis=_B, penalty=_D, weight=weight, **kwargs)
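For illustration, a minimal sketch of how the new Fourier component might be combined with an existing loss term, assuming the Problem(data, components) constructor and decompose() entry point used elsewhere in this repository; the toy signal, period, and weights below are illustrative assumptions, not part of the patch:

    import numpy as np
    from gfosd import Problem
    from gfosd.components import SumSquare, Fourier

    T = 500
    t = np.arange(T)
    y = np.sin(2 * np.pi * t / 97) + 0.1 * np.random.randn(T)  # toy signal

    components = [
        SumSquare(weight=1 / T),                         # residual term
        Fourier(num_harmonics=3, length=T, periods=97),  # smooth periodic term
    ]
    problem = Problem(y, components)
    problem.decompose()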
diff --git a/gfosd/components/equality_constraints.py b/gfosd/components/equality_constraints.py
index 8e89bdf..fabf41e 100644
--- a/gfosd/components/equality_constraints.py
+++ b/gfosd/components/equality_constraints.py
@@ -53,6 +53,34 @@ def _make_B(self):
     def _make_c(self):
         self._c = np.concatenate([np.atleast_1d(self._c[-1]),
                                   [self._last_val]])
+
+
+class ValsEqual(GraphComponent):
+    def __init__(self, indices=0, value=0, *args, **kwargs):
+        self.indices = np.atleast_1d(indices)
+        self._val = value
+        super().__init__(*args, **kwargs)
+        # always retain helper variable
+        self._has_helpers = True
+
+    def _make_A(self):
+        super()._make_A()
+        super()._make_B()
+        super()._make_c()
+        self._A = sp.bmat([
+            [self._A.tocsr()[-1]],
+            [sp.dok_matrix((len(self.indices), self._A.shape[1]))]
+        ])
+
+    def _make_B(self):
+        # TODO: update this matrix-generating function to identify all indices that are to be set equal
+        self._B = sp.bmat([
+            [self._B.tocsr()[-1]],
+            [sp.coo_matrix(([1], ([0], [self._B.shape[1]-1])), shape=(len(self.indices), self._B.shape[1]))]
+        ])
+
+    def _make_c(self):
+        self._c = np.concatenate([np.atleast_1d(self._c[-1]),
+                                  [self._val] * len(self.indices)])

 class AverageEqual(GraphComponent):
     def __init__(self, value=0, period=None, *args, **kwargs):
diff --git a/gfosd/components/sums.py b/gfosd/components/sums.py
index 7b83a79..4656f4c 100644
--- a/gfosd/components/sums.py
+++ b/gfosd/components/sums.py
@@ -1,6 +1,8 @@
+import numpy as np
 import scipy.sparse as sp
 from gfosd.components.base_graph_class import GraphComponent

+
 class SumSquare(GraphComponent):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
@@ -9,17 +11,65 @@ def __init__(self, *args, **kwargs):
     def _make_P(self, size):
         return self.weight * 2 * sp.eye(size)  # note the (1/2) in canonical form!

+
+class SumSquareReference(GraphComponent):
+    """
+    phi(x; v) = ||x - v||_2^2 = ||x||_2^2 - 2 v^T x + ||v||_2^2
+    """
+
+    def __init__(self, vec, mask=None, *args, **kwargs):
+        self._vec = vec
+        self._mask = mask
+        super().__init__(*args, **kwargs)
+        if self._mask is not None:
+            self._has_helpers = True
+
+    def _set_z_size(self):
+        if self._mask is None:
+            self._z_size = (self._T - self._diff) * self._p
+        else:
+            self._z_size = self._mask.shape[0]
+
+    def _make_P(self, size):
+        return self.weight * 2 * sp.eye(size)  # note the (1/2) in canonical form!
+
+    def _make_q(self):
+        if self._mask is None:
+            qx = self.weight * (-2) * self._vec
+            self._q = np.r_[qx, np.zeros(self.z_size)]
+        else:
+            qz = (-2) * self.weight * self._mask @ self._vec
+            self._q = np.r_[np.zeros(self.x_size), qz]
+
+    def _make_r(self):
+        if self._mask is None:
+            self._r = self.weight * np.sum(np.square(self._vec))
+        else:
+            self._r = self.weight * np.sum(np.square(self._mask @ self._vec))
+
+    def _make_A(self):
+        self._A = self._mask
+
+
 class SumAbs(GraphComponent):
-    def __init__(self, *args, **kwargs):
+    def __init__(self, weighting_mat=None, *args, **kwargs):
+        self.weighting_mat = weighting_mat
         super().__init__(*args, **kwargs)
         return

+    def _make_B(self):
+        if self.weighting_mat is None:
+            self._B = sp.eye(self._A.shape[0], self.z_size) * -1
+        else:
+            self._B = self.weighting_mat
+
     def _make_g(self, size):
         g = [{'g': 'abs',
               'args': {'weight': self.weight},
               'range': (0, size)}]
         return g

+
 class SumHuber(GraphComponent):
     def __init__(self, M=1, *args, **kwargs):
         self._M = M
@@ -32,6 +82,7 @@ def _make_g(self, size):
               'range': (0, size)}]
         return g

+
 class SumQuantile(GraphComponent):
     def __init__(self, tau, *args, **kwargs):
         self.tau = tau
@@ -44,6 +95,7 @@ def _make_g(self, size):
               'range': (0, size)}]
         return g

+
 class SumCard(GraphComponent):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
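The SumSquareReference docstring expands phi(x; v) = ||x - v||_2^2 into the quadratic, linear, and constant pieces that become P, q, and r in graph form. A small numerical check of that identity, in plain NumPy and independent of the component class:

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.standard_normal(20)
    v = rng.standard_normal(20)

    lhs = np.sum((x - v) ** 2)
    # ||x||^2 - 2 v^T x + ||v||^2, i.e. the P, q, r pieces used by the component
    rhs = x @ x - 2 * v @ x + v @ v
    assert np.isclose(lhs, rhs)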
diff --git a/gfosd/components/utilities.py b/gfosd/components/utilities.py
new file mode 100644
index 0000000..102c1c1
--- /dev/null
+++ b/gfosd/components/utilities.py
@@ -0,0 +1,270 @@
+"""
+This module contains functions to create basis and regularization matrices for
+a time series with one or multiple periods.
+:author: Bennet Meyers, Mehmet Giray, Aramis Dufour
+"""
+
+import numpy as np
+from scipy.sparse import spdiags
+from itertools import combinations
+
+
+def make_basis_matrix(
+    num_harmonics,
+    length,
+    periods,
+    standing_wave=False,
+    trend=False,
+    max_cross_k=None,
+    custom_basis=None,
+):
+    """
+    This function creates a basis matrix for a time series with one or multiple
+    periods. It creates a Fourier basis matrix for each period, then combines
+    them into a single matrix, including cross-terms. It also includes an offset
+    term, and can include a linear trend.
+
+    :param num_harmonics: number of harmonics to use for each period.
+    :type num_harmonics: int or 1D iterable of ints
+    :param length: length of the time series.
+    :type length: int
+    :param periods: periods of the signal, each period yields a Fourier basis matrix.
+    :type periods: float or 1D iterable of floats
+    :param standing_wave: whether to use the standing wave basis for each period.
+        A standing wave basis only includes sine terms and starts at T/2. Defaults
+        to False.
+    :type standing_wave: bool or 1D iterable of bools
+    :param trend: whether to include a linear trend in the basis matrix. Defaults to
+        False.
+    :type trend: bool
+    :param max_cross_k: maximum number of cross terms. Defaults to None.
+    :type max_cross_k: int
+    :param custom_basis: allows the user to pass a dictionary of custom basis
+        matrices. The key is the index of the period and the value is the basis
+        matrix. The user can input a shorter basis than length, but the associated
+        period must be an int. Defaults to None.
+    :type custom_basis: dict
+    :return: basis matrix, its shape depends on the parameters.
+    :rtype: 2D array of floats
+    """
+    sort_idx, Ps, num_harmonics, standing_wave = initialize_arrays(
+        num_harmonics, periods, standing_wave, custom_basis
+    )
+    # Make the basis
+    t_values = np.arange(length)  # Time stamps (row vector)
+    B_fourier = []
+    for ix, P in enumerate(Ps):
+        i_values = np.arange(1, num_harmonics[ix] + 1)[
+            :, np.newaxis
+        ]  # Harmonic indices (column vector)
+        if standing_wave[ix]:
+            w = 2 * np.pi / (P * 2)
+            B_sin = np.sin(i_values * w * np.mod(t_values, P))
+            B_f = np.empty((length, num_harmonics[ix]), dtype=float)
+            B_f[:] = B_sin.T
+        else:
+            w = 2 * np.pi / P
+            B_cos = np.cos(i_values * w * t_values)
+            B_sin = np.sin(i_values * w * t_values)
+            B_f = np.empty((length, 2 * num_harmonics[ix]), dtype=float)
+            B_f[:, ::2] = B_cos.T
+            B_f[:, 1::2] = B_sin.T
+        B_fourier.append(B_f)
+    # Use custom basis if provided
+    if custom_basis is not None:
+        for ix, val in custom_basis.items():
+            # check length
+            if val.shape[0] != length:
+                # extend to cover future time period if necessary
+                multiplier = max(1, length // val.shape[0] + 1)
+                new_val = np.tile(val, (multiplier, 1))[:length]
+            else:
+                new_val = val[:length]
+            # also reorder index of custom basis, if necessary
+            ixt = np.where(sort_idx == ix)[0][0]
+            B_fourier[ixt] = new_val
+    # Add offset and linear terms
+    if trend is False:
+        B_P0 = np.ones((length, 1))
+        B0 = [B_P0]
+    else:
+        v = np.sqrt(3)
+        B_PL = np.linspace(-v, v, length).reshape(-1, 1)
+        B_P0 = np.ones((length, 1))
+        B0 = [B_PL, B_P0]
+    # Cross terms, this handles the case of no cross terms gracefully
+    # (empty list)
+    C = [
+        cross_bases(*base_tuple, max_k=max_cross_k)
+        for base_tuple in combinations(B_fourier, 2)
+    ]
+    B_list = B0 + B_fourier + C
+    B = np.hstack(B_list)
+    return B
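As a rough illustration of the column layout produced by make_basis_matrix (offset column, optional trend, per-period Fourier blocks, then cross terms), under the assumption that the module is importable at the path added by this patch:

    from gfosd.components.utilities import make_basis_matrix

    B = make_basis_matrix(num_harmonics=3, length=1000, periods=[365.2425, 7])
    # 1 offset column + (2 * 3) columns per period + (6 * 6) cross-term columns
    print(B.shape)  # expected: (1000, 49)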
+
+
+def make_regularization_matrix(
+    num_harmonics,
+    weight,
+    periods,
+    standing_wave=False,
+    trend=False,
+    max_cross_k=None,
+    custom_basis=None,
+):
+    """
+    This function creates a regularization matrix for a time series with one or
+    multiple periods. It creates a regularization matrix for each period, then
+    combines them into a single matrix. The weights are determined using the
+    Dirichlet energy of the basis functions.
+
+    :param num_harmonics: number of harmonics to use for each period.
+    :type num_harmonics: int or 1D iterable of ints
+    :param weight: weight for the regularization matrix.
+    :type weight: float
+    :param periods: periods of the signal, each period will yield a block.
+    :type periods: float or 1D iterable of floats
+    :param standing_wave: whether to use the standing wave basis for each period.
+    :type standing_wave: bool or 1D iterable of bools
+    :param trend: whether to include a linear trend in the basis matrix. Defaults
+        to False.
+    :type trend: bool
+    :param max_cross_k: maximum number of cross terms. Defaults to None.
+    :type max_cross_k: int
+    :param custom_basis: allows the user to pass a dictionary of custom basis
+        matrices.
+    :type custom_basis: dict
+    :return: regularization matrix, its shape depends on the parameters.
+    :rtype: 2D array of floats
+    """
+    sort_idx, Ps, num_harmonics, standing_wave = initialize_arrays(
+        num_harmonics, periods, standing_wave, custom_basis
+    )
+    ls_original = [weight * (2 * np.pi) / np.sqrt(P) for P in Ps]
+    # Create a sequence of values from 1 to K (repeated for cosine
+    # and sine when not standing wave)
+    i_value_list = []
+    for ix, nh in enumerate(num_harmonics):
+        if standing_wave[ix]:
+            i_value_list.append(np.arange(1, nh + 1))
+        else:
+            i_value_list.append(np.repeat(np.arange(1, nh + 1), 2))
+    # Create blocks of coefficients
+    blocks_original = [iv * lx for iv, lx in zip(i_value_list, ls_original)]
+    if custom_basis is not None:
+        for ix, val in custom_basis.items():
+            ixt = np.where(sort_idx == ix)[0][0]
+            blocks_original[ixt] = ls_original[ixt] * np.arange(1, val.shape[1] + 1)
+    if max_cross_k is not None:
+        max_cross_k *= 2
+    # This assumes that the list of periods is ordered, which is ensured
+    # when calling initialize_arrays.
+    blocks_cross = [
+        [l2 for l1 in c[0][:max_cross_k] for l2 in c[1][:max_cross_k]]
+        for c in combinations(blocks_original, 2)
+    ]
+    # Combine the blocks to form the coefficient array
+    if trend is False:
+        first_block = [np.zeros(1)]
+    else:
+        first_block = [np.zeros(2)]
+    coeff_i = np.concatenate(first_block + blocks_original + blocks_cross)
+    # Create the diagonal matrix
+    D = spdiags(coeff_i, 0, coeff_i.size, coeff_i.size)
+    return D
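The regularization weights scale like the harmonic index times 2*pi/sqrt(P), per the Dirichlet-energy argument in the docstring, so the diagonal for a single period can be inspected directly; a quick sketch under the same import-path assumption:

    import numpy as np
    from gfosd.components.utilities import make_regularization_matrix

    D = make_regularization_matrix(num_harmonics=3, weight=1.0, periods=144)
    d = D.diagonal()
    # first entry is the unpenalized offset; each cosine/sine pair shares a weight
    print(np.round(d, 4))
    # expected pattern: [0, 1*c, 1*c, 2*c, 2*c, 3*c, 3*c] with c = 2*pi/sqrt(144)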
+
+
+def initialize_arrays(num_harmonics, periods, standing_wave, custom_basis):
+    """
+    This function initializes the arrays for periods, harmonics, and standing wave.
+    It sorts everything in descending order of periods. It also performs some basic
+    checks on the input parameters.
+
+    :param num_harmonics: number of harmonics to use for each period.
+    :type num_harmonics: int or 1D iterable of ints
+    :param periods: periods of the signal, each period yields a block.
+    :type periods: float or 1D iterable of floats
+    :param standing_wave: whether to use standing wave basis for each period.
+    :type standing_wave: bool or 1D iterable of bools
+    :param custom_basis: allows the user to pass a dictionary of custom basis
+        matrices.
+    :type custom_basis: dict
+    :raises TypeError: raised if custom_basis is not a dictionary or None.
+    :raises ValueError: raised if num_harmonics and periods have different lengths.
+    :raises ValueError: raised if standing_wave and periods have different lengths.
+    :return: tuple with sorted indices, sorted periods, sorted number of harmonics,
+        sorted standing wave.
+    :rtype: tuple
+    """
+    # Custom basis
+    if not (isinstance(custom_basis, dict) or custom_basis is None):
+        raise TypeError(
+            "custom_basis should be a dictionary where the key is the index\n"
+            + "of the period and the value is a list containing the basis and the weights"
+        )
+    # Periods
+    Ps = np.atleast_1d(periods)
+    # Number of harmonics
+    num_harmonics = np.atleast_1d(num_harmonics)
+    if len(num_harmonics) == 1:
+        num_harmonics = np.tile(num_harmonics, len(Ps))
+    elif len(num_harmonics) != len(Ps):
+        raise ValueError(
+            "Please pass a single number of harmonics for all periods or a number\n"
+            + "for each period"
+        )
+    # Standing wave
+    standing_wave = np.atleast_1d(standing_wave)
+    if len(standing_wave) == 1:
+        standing_wave = np.tile(standing_wave, len(Ps))
+    elif len(standing_wave) != len(Ps):
+        raise ValueError(
+            "Please pass a single boolean for standing_wave for all periods or a\n"
+            + "boolean for each period"
+        )
+    # Sort the periods, harmonics, and standing wave
+    sort_idx = np.argsort(-Ps)
+    Ps = -np.sort(-Ps)  # Sort in descending order
+    num_harmonics = num_harmonics[sort_idx]
+    standing_wave = standing_wave[sort_idx]
+    return sort_idx, Ps, num_harmonics, standing_wave
+
+
+def cross_bases(B_P1, B_P2, max_k=None):
+    """
+    This function computes the cross terms between two basis matrices.
+
+    :param B_P1: basis matrix for the first period.
+    :type B_P1: 2D array of floats
+    :param B_P2: basis matrix for the second period.
+    :type B_P2: 2D array of floats
+    :param max_k: maximum number of cross terms. Defaults to None.
+    :type max_k: int
+    :return: cross terms matrix.
+    :rtype: 2D array of floats
+    """
+    if max_k is None:
+        # Reshape both arrays to introduce a new axis for broadcasting
+        B_P1_new = B_P1[:, :, None]
+        B_P2_new = B_P2[:, None, :]
+    else:
+        B_P1_new = B_P1[:, : 2 * max_k, None]
+        B_P2_new = B_P2[:, None, : 2 * max_k]
+    # Use broadcasting to compute the outer product for each row
+    result = B_P1_new * B_P2_new
+    # Reshape the result to the desired shape
+    result = result.reshape(result.shape[0], -1)
+    return result
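cross_bases forms, row by row, the outer product of two basis blocks and flattens it; a small shape check under the same assumption about the import path:

    import numpy as np
    from gfosd.components.utilities import cross_bases

    B1 = np.random.randn(50, 6)  # e.g. 3 harmonics -> 6 columns
    B2 = np.random.randn(50, 4)  # e.g. 2 harmonics -> 4 columns
    C = cross_bases(B1, B2)
    print(C.shape)  # (50, 24): every column of B1 times every column of B2

    # row-wise check against an explicit outer product
    assert np.allclose(C[0], np.outer(B1[0], B2[0]).ravel())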
+ """ + percentiles = np.asarray(quantiles) + a = quantiles - 0.5 + b = (0.5) * np.ones((len(a),)) + return a, b \ No newline at end of file diff --git a/gfosd/problem.py b/gfosd/problem.py index 4992774..b83b2b3 100644 --- a/gfosd/problem.py +++ b/gfosd/problem.py @@ -38,6 +38,8 @@ def make_graph_form(self): Px = sp.block_diag([d['Px'] for d in dicts]) Pz = sp.block_diag([d['Pz'] for d in dicts]) P = sp.block_diag([Px, Pz]) + q = np.concatenate([d['q'] for d in dicts]) # not currently used + r = np.sum([d['r'] for d in dicts]) Al = sp.block_diag([d['A'] for d in dicts]) Ar = sp.block_diag([d['B'] for d in dicts]) A = sp.bmat([[Al, Ar]]) @@ -75,7 +77,7 @@ def make_graph_form(self): g.append(new_d) out = { 'P': P, - 'q': np.zeros(P.shape[0]), # not currently used + 'q': np.zeros(P.shape[0]), 'r': 0, # not currently used 'A': A, 'b': b, diff --git a/osd/masking.py b/osd/masking.py index 6bbded0..28418e2 100644 --- a/osd/masking.py +++ b/osd/masking.py @@ -11,18 +11,19 @@ class Mask(): def __init__(self, use_set): - if len(use_set.shape) == 1: + us = np.atleast_1d(use_set) + if len(us.shape) == 1: p = 1 - T = len(use_set) + T = len(us) else: - T, p, = use_set.shape - self.use_set = use_set + T, p, = us.shape + self.use_set = us self.T = T self.p = p - self.q = np.sum(use_set) - self.M = make_mask_matrix(use_set) - self.M_star = make_inverse_mask_matrix(use_set) - self.MstM = make_masked_identity_matrix(use_set) + self.q = np.sum(us) + self.M = make_mask_matrix(us) + self.M_star = make_inverse_mask_matrix(us) + self.MstM = make_masked_identity_matrix(us) def mask(self, v): if self.p == 1: diff --git a/pyproject.toml b/pyproject.toml index a7a6804..79df062 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,52 @@ [build-system] -requires = ["setuptools>=61.0.0", "wheel"] -build-backend = "setuptools.build_meta" \ No newline at end of file +requires = ["setuptools>=64.0", "setuptools-scm[toml]"] +build-backend = "setuptools.build_meta" + +[project] +name = "sig-decomp" +description = "Optimization-based signal decomposition" +readme = "README.md" +license.file = "LICENSE" +authors = [ + { name = "Bennet Meyers", email = "bennetm@stanford.edu" }, +] +maintainers = [ + { name = "Sara Miskovich", email = "smiskov@slac.stanford.edu" }, +] +keywords = ["convex optimization", "optimization"] +requires-python = ">=3.6" + +dependencies = [ + "cvxpy", + "matplotlib", + "numpy", + "scipy", + "scikit-learn", + "qss", + "pandas" # may want to remove once osd is deprecated +] + +classifiers = [ + "Development Status :: 4 - Beta", + "License :: OSI Approved :: BSD License", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Intended Audience :: Science/Research", + "Topic :: Scientific/Engineering", +] +dynamic = ["version"] +[tool.setuptools_scm] +write_to = "gfosd/_version.py" + +[tool.setuptools.packages.find] +include = ["gfosd*", "osd*"] + +[project.urls] +Homepage = "https://github.com/cvxgrp/signal-decomposition" +"Bug Tracker" = "https://github.com/cvxgrp/signal-decomposition/issues" diff --git a/requirements.txt b/requirements.txt index 62f8eea..a842e08 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,3 +5,4 @@ matplotlib scikit-learn qss clarabel +pandas diff --git a/setup.py 
diff --git a/pyproject.toml b/pyproject.toml
index a7a6804..79df062 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,3 +1,52 @@
 [build-system]
-requires = ["setuptools>=61.0.0", "wheel"]
-build-backend = "setuptools.build_meta"
\ No newline at end of file
+requires = ["setuptools>=64.0", "setuptools-scm[toml]"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "sig-decomp"
+description = "Optimization-based signal decomposition"
+readme = "README.md"
+license.file = "LICENSE"
+authors = [
+  { name = "Bennet Meyers", email = "bennetm@stanford.edu" },
+]
+maintainers = [
+  { name = "Sara Miskovich", email = "smiskov@slac.stanford.edu" },
+]
+keywords = ["convex optimization", "optimization"]
+requires-python = ">=3.6"
+
+dependencies = [
+    "cvxpy",
+    "matplotlib",
+    "numpy",
+    "scipy",
+    "scikit-learn",
+    "qss",
+    "pandas" # may want to remove once osd is deprecated
+]
+
+classifiers = [
+  "Development Status :: 4 - Beta",
+  "License :: OSI Approved :: BSD License",
+  "Programming Language :: Python :: 3 :: Only",
+  "Programming Language :: Python :: 3.6",
+  "Programming Language :: Python :: 3.7",
+  "Programming Language :: Python :: 3.8",
+  "Programming Language :: Python :: 3.9",
+  "Programming Language :: Python :: 3.10",
+  "Programming Language :: Python :: 3.11",
+  "Programming Language :: Python :: 3.12",
+  "Intended Audience :: Science/Research",
+  "Topic :: Scientific/Engineering",
+]
+dynamic = ["version"]
+[tool.setuptools_scm]
+write_to = "gfosd/_version.py"
+
+[tool.setuptools.packages.find]
+include = ["gfosd*", "osd*"]
+
+[project.urls]
+Homepage = "https://github.com/cvxgrp/signal-decomposition"
+"Bug Tracker" = "https://github.com/cvxgrp/signal-decomposition/issues"
diff --git a/requirements.txt b/requirements.txt
index 62f8eea..a842e08 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,3 +5,4 @@ matplotlib
 scikit-learn
 qss
 clarabel
+pandas
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 30273df..0000000
--- a/setup.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from setuptools import find_packages, setup
-import subprocess
-
-with open("README.md", "r") as fh:
-    long_description = fh.read()
-
-# get all the git tags from the cmd line that follow our versioning pattern
-git_tags = subprocess.Popen(
-    ["git", "tag", "--list", "v*[0-9]", "--sort=version:refname"],
-    stdout=subprocess.PIPE,
-)
-tags = git_tags.stdout.read()
-git_tags.stdout.close()
-tags = tags.decode("utf-8").split("\n")
-tags.sort()
-
-# PEP 440 won't accept the v in front, so here we remove it, strip the new line and decode the byte stream
-VERSION_FROM_GIT_TAG = tags[-1][1:]
-
-setup(
-    name="sig-decomp",
-    version=VERSION_FROM_GIT_TAG,
-    description="Optimzation-based signal decomposition",
-    long_description=long_description,
-    long_description_content_type="text/markdown",
-    setup_requires=["setuptools>=18.0"],
-    install_requires=[
-        "cvxpy",
-        "matplotlib",
-        "numpy",
-        "pandas",
-        "scipy",
-        "scikit-learn",
-        "qss"
-    ],
-    packages=find_packages(),
-    classifiers=[
-        "Programming Language :: Python :: 3",
-    ],
-)
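The git-tag parsing that setup.py used to do by hand is now handled by setuptools_scm through the [tool.setuptools_scm] table. If the same derived version needs to be queried directly in a checkout, a sketch like the following should work; the exact output depends on the local tags and commit:

    # Requires a git checkout with tags fetched (cf. `git fetch --prune --unshallow --tags`
    # in the workflows); otherwise setuptools_scm falls back to a default version.
    from setuptools_scm import get_version

    print(get_version())  # e.g. '0.0.2.dev30+gac101bd' for an untagged dev commit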