Update papers.bib

louissharrock committed Jul 11, 2024
1 parent e4cc25a commit c549d15
Showing 1 changed file with 43 additions and 19 deletions.

_bibliography/papers.bib
@@ -74,22 +74,8 @@ @article{Sharrock2020a
html={https://projecteuclid.org/journals/bernoulli/volume-29/issue-2/Two-timescale-stochastic-gradient-descent-in-continuous-time-with-applications/10.3150/22-BEJ1493.full},
}

-@article{Sharrock2022d,
-author = {Sharrock, Louis and Simons, Jack and Liu, Song and Beaumont, Mark},
-journal = {arXiv preprint},
-title = {{Sequential Neural Score Estimation: Likelihood-Free Inference with Conditional Score Based Diffusion Models}},
-year = {2022},
-altmetric = {false},
-abbr={arXiv},
-abstract = {We introduce Sequential Neural Posterior Score Estimation (SNPSE) and Sequential Neural Likelihood Score Estimation (SNLSE), two new score-based methods for Bayesian inference in simulator-based models. Our methods, inspired by the success of score-based methods in generative modelling, leverage conditional score-based diffusion models to generate samples from the posterior distribution of interest. These models can be trained using one of two possible objective functions, one of which approximates the score of the intractable likelihood, while the other directly estimates the score of the posterior. We embed these models into a sequential training procedure, which guides simulations using the current approximation of the posterior at the observation of interest, thereby reducing the simulation cost. We validate our methods, as well as their amortised, non-sequential variants, on several numerical examples, demonstrating comparable or superior performance to existing state-of-the-art methods such as Sequential Neural Posterior Estimation (SNPE) and Sequential Neural Likelihood Estimation (SNLE).},
-arxiv={2210.04872},
-}
-
@article{Sharrock2023,
archivePrefix = {arXiv},
arxivId = {2301.11294},
author = {Sharrock, Louis and Nemeth, Christopher},
eprint = {2301.11294},
journal = {Proceedings of the 40th International Conference on Machine Learning (ICML 2023)},
title = {{Coin Sampling: Gradient-Based Bayesian Inference without Learning Rates}},
year = {2023},
@@ -118,14 +104,52 @@ @article{Sharrock2023a
code = {https://github.com/louissharrock/constrained-coin-sampling},
}

-@article{Sharrock2023b,
+@article{Sharrock2024,
author = {Sharrock, Louis and Dodd, Daniel and Nemeth, Christopher},
-journal = {arXiv preprint},
-title = {{CoinEM: Tuning-Free Particle-Based Variational Inference for Latent Variable Models}},
-year = {2023},
+journal = {Proceedings of The 27th International Conference on Artificial Intelligence and Statistics (AISTATS 2024)},
+title = {{Tuning-Free Maximum Likelihood Training of Latent Variable Models via Coin Betting}},
+year = {2024},
altmetric = {false},
-abbr={arXiv},
+abbr={AISTATS},
abstract = {We introduce two new particle-based algorithms for learning latent variable models via marginal maximum likelihood estimation, including one which is entirely tuning-free. Our methods are based on the perspective of marginal maximum likelihood estimation as an optimization problem: namely, as the minimization of a free energy functional. One way to solve this problem is to consider the discretization of a gradient flow associated with the free energy. We study one such approach, which resembles an extension of the popular Stein variational gradient descent algorithm. In particular, we establish a descent lemma for this algorithm, which guarantees that the free energy decreases at each iteration. This method, and any other obtained as the discretization of the gradient flow, will necessarily depend on a learning rate which must be carefully tuned by the practitioner in order to ensure convergence at a suitable rate. With this in mind, we also propose another algorithm for optimizing the free energy which is entirely learning rate free, based on coin betting techniques from convex optimization. We validate the performance of our algorithms across a broad range of numerical experiments, including several high-dimensional settings. Our results are competitive with existing particle-based methods, without the need for any hyperparameter tuning.},
arxiv={2305.14916},
+html={https://proceedings.mlr.press/v238/sharrock24a.html},
+selected = {true},
+code = {https://github.com/chris-nemeth/CoinEM},
}

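The coin-betting device named in the title and abstract above replaces a tuned learning rate with a gambling analogy: each clipped negative gradient is the outcome of a coin flip, and the next iterate is the amount currently wagered by a Krichevsky-Trofimov (KT) bettor. Below is a minimal sketch of that idea on a toy one-dimensional objective — the quadratic, the clipping to [-1, 1], the iterate averaging, and the iteration count are illustrative assumptions, not the CoinEM algorithm from the paper:

```python
import numpy as np

def grad(x):
    # Gradient of the toy objective f(x) = (x - 2)^2, minimized at x = 2.
    return 2.0 * (x - 2.0)

x0, wealth, sum_c = 0.0, 1.0, 0.0    # initial point, initial wealth (eps = 1), sum of outcomes
avg, T = 0.0, 2000

for t in range(1, T + 1):
    bet = (sum_c / t) * wealth        # KT bet: a data-driven fraction of current wealth
    x = x0 + bet                      # the iterate is the amount currently wagered
    c = -np.clip(grad(x), -1.0, 1.0)  # "coin flip" outcome; KT betting assumes |c| <= 1
    wealth += c * bet                 # win or lose in proportion to the bet
    sum_c += c
    avg += (x - avg) / t              # running average of iterates

print(avg)  # settles near the minimizer 2.0, with no learning rate anywhere
```

Because the bet is always a fraction of accumulated wealth, no step-size sequence has to be chosen, which is the sense in which the method is tuning-free.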
+@article{Sharrock2024a,
+author = {Sharrock, Louis and Simons, Jack and Liu, Song and Beaumont, Mark},
+journal = {Proceedings of the 41st International Conference on Machine Learning (ICML 2024)},
+title = {{Sequential Neural Score Estimation: Likelihood-Free Inference with Conditional Score Based Diffusion Models}},
+year = {2024},
+altmetric = {false},
+abbr={ICML},
+abstract = {We introduce Sequential Neural Posterior Score Estimation (SNPSE) and Sequential Neural Likelihood Score Estimation (SNLSE), two new score-based methods for Bayesian inference in simulator-based models. Our methods, inspired by the success of score-based methods in generative modelling, leverage conditional score-based diffusion models to generate samples from the posterior distribution of interest. These models can be trained using one of two possible objective functions, one of which approximates the score of the intractable likelihood, while the other directly estimates the score of the posterior. We embed these models into a sequential training procedure, which guides simulations using the current approximation of the posterior at the observation of interest, thereby reducing the simulation cost. We validate our methods, as well as their amortised, non-sequential variants, on several numerical examples, demonstrating comparable or superior performance to existing state-of-the-art methods such as Sequential Neural Posterior Estimation (SNPE) and Sequential Neural Likelihood Estimation (SNLE).},
+arxiv={2210.04872},
+html={https://openreview.net/forum?id=8viuf9PdzU},
+selected={true},
+code={https://github.com/jacksimons15327/snpse_icml},
+}

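The conditional score-based diffusion models in the abstract above rest on denoising score matching: perturb each sample with Gaussian noise of scale sigma and regress a score model onto the target -(x_noisy - x) / sigma^2. A minimal sketch with toy Gaussian "posterior" samples and a linear score model fitted by least squares — both illustrative assumptions in place of the paper's conditional neural network:

```python
import numpy as np

rng = np.random.default_rng(0)
theta = rng.normal(size=5000)                 # toy "posterior" samples from N(0, 1)
sigma = 0.1
theta_noisy = theta + sigma * rng.normal(size=theta.shape)
target = -(theta_noisy - theta) / sigma**2    # denoising score matching regression target

# Least-squares fit of a linear score model s(x) = a * x + b.
A = np.stack([theta_noisy, np.ones_like(theta_noisy)], axis=1)
a, b = np.linalg.lstsq(A, target, rcond=None)[0]

# The fit recovers the score of the noise-perturbed density N(0, 1 + sigma^2),
# whose true score is -x / (1 + sigma^2).
print(a, b)  # a close to -1 / (1 + sigma**2) ~ -0.99, b close to 0
```

In the paper's setting, a neural score network conditioned on the observation of interest plays the role of this linear model, and sampling runs the learned diffusion in reverse to draw from the posterior.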
+@article{Dodd2024,
+author = {Dodd, Daniel and Sharrock, Louis and Nemeth, Christopher},
+journal = {Proceedings of the 41st International Conference on Machine Learning (ICML 2024)},
+title = {{Learning-Rate-Free Stochastic Optimization over Riemannian Manifolds}},
+year = {2024},
+altmetric = {false},
+abbr = {ICML},
+arxiv = {2406.02296},
+html = {https://openreview.net/forum?id=eY98MVffrD},
+}

+@article{Cabezas2024,
+author = {Cabezas, Alberto and Sharrock, Louis and Nemeth, Christopher},
+journal = {arXiv preprint},
+title = {{Markovian Flow Matching: Accelerating MCMC with Continuous Normalizing Flows}},
+year = {2024},
+altmetric = {false},
+abbr = {arXiv},
+arxiv = {2405.14392},
+}
