diff --git a/.credo.exs b/.credo.exs index 6537d951..db0f4c3c 100644 --- a/.credo.exs +++ b/.credo.exs @@ -117,9 +117,9 @@ ## Refactoring Opportunities # {Credo.Check.Refactor.CondStatements, []}, - {Credo.Check.Refactor.CyclomaticComplexity, []}, + {Credo.Check.Refactor.CyclomaticComplexity, [max_complexity: 40]}, {Credo.Check.Refactor.FunctionArity, []}, - {Credo.Check.Refactor.LongQuoteBlocks, [max_line_count: 300, ignore_comments: true]}, + {Credo.Check.Refactor.LongQuoteBlocks, [max_line_count: 200]}, # {Credo.Check.Refactor.MapInto, []}, {Credo.Check.Refactor.MatchInCondition, []}, {Credo.Check.Refactor.NegatedConditionsInUnless, []}, diff --git a/.dialyzer_ignore.exs b/.dialyzer_ignore.exs deleted file mode 100644 index 34636911..00000000 --- a/.dialyzer_ignore.exs +++ /dev/null @@ -1,6 +0,0 @@ -[ - ~r/Function :persistent_term.get\/1\ does\ not\ exist\./, - ~r/Function :persistent_term.get\/2\ does\ not\ exist\./, - ~r/Function :persistent_term.put\/2\ does\ not\ exist\./, - ~r/Function :persistent_term.erase\/1\ does\ not\ exist\./ -] diff --git a/.doctor.exs b/.doctor.exs index b4c0477b..5c82f208 100644 --- a/.doctor.exs +++ b/.doctor.exs @@ -1,17 +1,14 @@ %Doctor.Config{ ignore_modules: [ - Nebulex.Adapter, - Nebulex.Adapters.Local.Metadata, - Nebulex.Adapters.Partitioned.Bootstrap, - Nebulex.Helpers, - Nebulex.Telemetry, - Nebulex.Cluster, - Nebulex.NodeCase, - Nebulex.TestCache.Common, + Nebulex.Cache.Impl, + Nebulex.Cache.Options, + Nebulex.Cache.QuerySpec, + Nebulex.Caching.Options, + Nebulex.Adapter.Transaction.Options, Nebulex.Dialyzer.CachingDecorators ], ignore_paths: [], - min_module_doc_coverage: 30, + min_module_doc_coverage: 40, min_module_spec_coverage: 0, min_overall_doc_coverage: 80, min_overall_moduledoc_coverage: 100, diff --git a/.formatter.exs b/.formatter.exs index bdf866e8..e2ac7370 100644 --- a/.formatter.exs +++ b/.formatter.exs @@ -1,5 +1,21 @@ locals_without_parens = [ + # Nebulex.Utils + unwrap_or_raise: 1, + wrap_ok: 1, + 
wrap_error: 1, + wrap_error: 2, + + # Nebulex.Cache.Utils + defcacheapi: 2, + + # Nebulex.Adapter + defcommand: 1, + defcommand: 2, + defcommandp: 1, + defcommandp: 2, + # Nebulex.Caching + dynamic_cache: 2, keyref: 1, keyref: 2, diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 45bf2899..60a0d3d4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -29,10 +29,7 @@ jobs: - elixir: 1.15.x otp: 26.x os: 'ubuntu-latest' - - elixir: 1.14.x - otp: 25.x - os: 'ubuntu-latest' - - elixir: 1.13.x + - elixir: 1.15.x otp: 24.x os: 'ubuntu-latest' - elixir: 1.12.x @@ -90,19 +87,15 @@ jobs: if: ${{ matrix.style }} - name: Run tests - run: | - epmd -daemon - mix test --trace + run: mix test if: ${{ !matrix.coverage }} - name: Run tests with coverage - run: | - epmd -daemon - mix coveralls.github + run: mix coveralls.github if: ${{ matrix.coverage }} - name: Run sobelow - run: mix sobelow --exit --skip + run: mix sobelow --skip --exit Low if: ${{ matrix.sobelow }} - name: Restore PLT Cache @@ -110,7 +103,7 @@ jobs: id: plt-cache with: path: priv/plts - key: ${{ runner.os }}-${{ matrix.otp }}-${{ matrix.elixir }}-plt-v1 + key: ${{ runner.os }}-${{ matrix.otp }}-${{ matrix.elixir }}-plt-v3-1 if: ${{ matrix.dialyzer }} - name: Create PLTs @@ -123,10 +116,6 @@ jobs: run: mix dialyzer --format github if: ${{ matrix.dialyzer && steps.plt-cache.outputs.cache-hit != 'true' }} - - name: Doc coverage report - run: MIX_ENV=docs mix inch.report - if: ${{ matrix.inch-report }} - - name: Run documentation health check run: mix doctor if: ${{ matrix.doctor }} diff --git a/.gitignore b/.gitignore index 076b29cc..20270cae 100644 --- a/.gitignore +++ b/.gitignore @@ -37,3 +37,4 @@ erl_crash.dump /priv .sobelow* /config +Elixir* diff --git a/.tool-versions b/.tool-versions index 829d6939..587170bc 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,2 +1,2 @@ -elixir 1.16.0 -erlang 26.1 +elixir 1.16.1 +erlang 26.2 diff --git a/CHANGELOG.md b/CHANGELOG.md 
index 752685f6..4bffdcbd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,51 +4,6 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [v2.6.1](https://github.com/cabol/nebulex/tree/v2.6.1) (2024-02-24) - -[Full Changelog](https://github.com/cabol/nebulex/compare/v2.6.0...v2.6.1) - -**Merged pull requests:** - -- Improve variable handing in key generators - [#221](https://github.com/cabol/nebulex/pull/221) - ([hissssst](https://github.com/hissssst)) - -## [v2.6.0](https://github.com/cabol/nebulex/tree/v2.6.0) (2024-01-21) - -[Full Changelog](https://github.com/cabol/nebulex/compare/v2.5.2...v2.6.0) - -**Fixed bugs:** - -- Fix compatibility with Elixir 1.15 and 1.16 - [#220](https://github.com/cabol/nebulex/issues/220) - -**Closed issues:** - -- `Multilevel` inclusive cache doesn't duplicate entries backwards on - `get_all/2` - [#219](https://github.com/cabol/nebulex/issues/219) -- Empty arguments list passed to `generate/3` in Elixir 1.16 - [#218](https://github.com/cabol/nebulex/issues/218) -- Regression on decorated functions and Elixir 1.16 - [#216](https://github.com/cabol/nebulex/issues/216) -- Bug on Local adapter when using `delete_all` and keys are nested tuples: - not a valid match specification - [#211](https://github.com/cabol/nebulex/issues/211) -- `Nebulex.RegistryLookupError` - [#207](https://github.com/cabol/nebulex/issues/207) -- Docs on Migrating to v2 from Nebulex.Adapters.Dist.Cluster - [#198](https://github.com/cabol/nebulex/issues/198) - -**Merged pull requests:** - -- Partitioned Adapter supports two-item tuples as keys - [#214](https://github.com/cabol/nebulex/pull/214) - ([twinn](https://github.com/twinn)) -- Adds nebulex Ecto adapter - [#212](https://github.com/cabol/nebulex/pull/212) - ([hissssst](https://github.com/hissssst)) - ## [v2.5.2](https://github.com/cabol/nebulex/tree/v2.5.2) (2023-07-14) [Full 
Changelog](https://github.com/cabol/nebulex/compare/v2.5.1...v2.5.2) diff --git a/README.md b/README.md index cc7a53c9..07a53126 100644 --- a/README.md +++ b/README.md @@ -8,15 +8,15 @@ [![License](https://img.shields.io/hexpm/l/nebulex.svg)](LICENSE) Nebulex provides support for transparently adding caching into an existing -Elixir application. Similar to [Ecto][ecto], the caching abstraction allows -consistent use of various caching solutions with minimal impact on the code. +Elixir application. Like [Ecto][ecto], the caching abstraction allows consistent +use of various caching solutions with minimal impact on the code. Nebulex cache abstraction shields developers from directly dealing with the underlying caching implementations, such as [Redis][redis], [Memcached][memcached], or even other Elixir cache implementations like -[Cachex][cachex]. Additionally, it provides totally out-of-box features such as -[cache usage patterns][cache_patterns], -[declarative annotation-based caching][nbx_caching], and +[Cachex][cachex]. Additionally, it provides out-of-box features such as +[declarative decorator-based caching][nbx_caching], +[cache usage patterns][cache_patterns], and [distributed cache topologies][cache_topologies], among others. See the [getting started guide](http://hexdocs.pm/nebulex/getting-started.html) @@ -27,74 +27,61 @@ for more information. [cachex]: https://github.com/whitfin/cachex [redis]: https://redis.io/ [memcached]: https://memcached.org/ -[nbx_caching]: http://hexdocs.pm/nebulex/Nebulex.Caching.html +[nbx_caching]: http://hexdocs.pm/nebulex/Nebulex.Caching.Decorators.html [cache_patterns]: http://hexdocs.pm/nebulex/cache-usage-patterns.html [cache_topologies]: https://docs.oracle.com/middleware/1221/coherence/develop-applications/cache_intro.htm ## Usage -You need to add `nebulex` as a dependency to your `mix.exs` file. 
However, in -the case you want to use an external (a non built-in adapter) cache adapter, -you also have to add the proper dependency to your `mix.exs` file. - -The supported caches and their adapters are: +You need to add both Nebulex and the cache adapter as a dependency to your +`mix.exs` file. The supported caches and their adapters are: Cache | Nebulex Adapter | Dependency :-----| :---------------| :--------- -Generational Local Cache | [Nebulex.Adapters.Local][la] | Built-In -Partitioned | [Nebulex.Adapters.Partitioned][pa] | Built-In -Replicated | [Nebulex.Adapters.Replicated][ra] | Built-In -Multilevel | [Nebulex.Adapters.Multilevel][ma] | Built-In -Nil (special adapter that disables the cache) | [Nebulex.Adapters.Nil][nil] | Built-In -Cachex | Nebulex.Adapters.Cachex | [nebulex_adapters_cachex][nbx_cachex] +Nil (special adapter to disable caching) | [Nebulex.Adapters.Nil][nil] | Built-In +Generational Local Cache | Nebulex.Adapters.Local | [nebulex_adapters_local][la] +Partitioned | Nebulex.Adapters.Partitioned | [nebulex_adapters_partitioned][pa] +Replicated | Nebulex.Adapters.Replicated | [nebulex_adapters_replicated][ra] +Multilevel | Nebulex.Adapters.Multilevel | [nebulex_adapters_multilevel][ma] Redis | NebulexRedisAdapter | [nebulex_redis_adapter][nbx_redis] +Cachex | Nebulex.Adapters.Cachex | [nebulex_adapters_cachex][nbx_cachex] Distributed with Horde | Nebulex.Adapters.Horde | [nebulex_adapters_horde][nbx_horde] Multilevel with cluster broadcasting | NebulexLocalMultilevelAdapter | [nebulex_local_multilevel_adapter][nbx_local_multilevel] -Ecto Postgres table | Nebulex.Adapters.Ecto | [nebulex_adapters_ecto][nbx_ecto_postgres] -[la]: http://hexdocs.pm/nebulex/Nebulex.Adapters.Local.html -[pa]: http://hexdocs.pm/nebulex/Nebulex.Adapters.Partitioned.html -[ra]: http://hexdocs.pm/nebulex/Nebulex.Adapters.Replicated.html -[ma]: http://hexdocs.pm/nebulex/Nebulex.Adapters.Multilevel.html [nil]: http://hexdocs.pm/nebulex/Nebulex.Adapters.Nil.html 
-[nbx_cachex]: https://github.com/cabol/nebulex_adapters_cachex +[la]: https://github.com/elixir-nebulex/nebulex_adapters_local +[pa]: https://github.com/elixir-nebulex/nebulex_adapters_partitioned +[ra]: https://github.com/elixir-nebulex/nebulex_adapters_replicated +[ma]: https://github.com/elixir-nebulex/nebulex_adapters_multilevel [nbx_redis]: https://github.com/cabol/nebulex_redis_adapter +[nbx_cachex]: https://github.com/cabol/nebulex_adapters_cachex [nbx_horde]: https://github.com/eliasdarruda/nebulex_adapters_horde [nbx_local_multilevel]: https://github.com/slab/nebulex_local_multilevel_adapter -[nbx_ecto_postgres]: https://github.com/hissssst/nebulex_adapters_ecto - -For example, if you want to use a built-in cache, add to your `mix.exs` file: +For example, if you want to use the Nebulex Generational Local Cache +(`Nebulex.Adapters.Local` adapter), add to your `mix.exs` file: ```elixir def deps do [ - {:nebulex, "~> 2.6"}, - {:shards, "~> 1.1"}, #=> When using :shards as backend - {:decorator, "~> 1.4"}, #=> When using Caching Annotations - {:telemetry, "~> 1.0"} #=> When using the Telemetry events (Nebulex stats) + {:nebulex, "~> 3.0"}, + {:nebulex_adapters_local, "~> 3.0"}, + {:decorator, "~> 1.4"}, #=> For Caching decorators (recommended adding it) + {:telemetry, "~> 1.2"} #=> For Telemetry events (recommended adding it) ] end ``` -In order to give more flexibility and fetch only needed dependencies, Nebulex -makes all dependencies optional. For example: - - * For intensive workloads, you may want to use `:shards` as the backend for - the local adapter and having partitioned tables. In such a case, you have - to add `:shards` to the dependency list. +To give more flexibility and load only needed dependencies, Nebulex makes all +dependencies optional. For example: - * For enabling the usage of - [declarative annotation-based caching via decorators][nbx_caching], - you have to add `:decorator` to the dependency list. 
+ * For enabling [declarative decorator-based caching][nbx_caching], you + have to add `:decorator` to the dependency list (recommended adding it). - * For enabling Telemetry events to be dispatched when using Nebulex, - you have to add `:telemetry` to the dependency list. + * For enabling Telemetry events dispatched by Nebulex, you have to add + `:telemetry` to the dependency list (recommended adding it). See [telemetry guide][telemetry]. - * If you want to use an external adapter (e.g: Cachex or Redis adapter), you - have to add the adapter dependency too. - [telemetry]: http://hexdocs.pm/nebulex/telemetry.html Then run `mix deps.get` in your shell to fetch the dependencies. If you want to @@ -102,7 +89,7 @@ use another cache adapter, just choose the proper dependency from the table above. Finally, in the cache definition, you will need to specify the `adapter:` -respective to the chosen dependency. For the local built-in cache it is: +respective to the chosen dependency. For the local cache, it would be: ```elixir defmodule MyApp.Cache do @@ -112,35 +99,37 @@ defmodule MyApp.Cache do end ``` -## Quickstart example +## Quickstart example using caching decorators Assuming you are using `Ecto` and you want to use declarative caching: ```elixir # In the config/config.exs file -config :my_app, MyApp.PartitionedCache, - primary: [ - gc_interval: :timer.hours(12), - backend: :shards, - partitions: 2 - ] - -# Defining a Cache with a partitioned topology -defmodule MyApp.PartitionedCache do +config :my_app, MyApp.Cache, + # Create new generation every 12 hours + gc_interval: :timer.hours(12), + # Max 1M entries + max_size: 1_000_000, + # Max 2GB of memory + allocated_memory: 2_000_000_000, + # Run size and memory checks every 10 seconds + gc_memory_check_interval: :timer.seconds(10) + +# Defining the cache +defmodule MyApp.Cache do use Nebulex.Cache, otp_app: :my_app, adapter: 
Nebulex.Adapters.Local end -# Some Ecto schema +# Ecto schema defmodule MyApp.Accounts.User do use Ecto.Schema schema "users" do - field(:username, :string) - field(:password, :string) - field(:role, :string) + field :username, :string + field :password, :string + field :role, :string end def changeset(user, attrs) do @@ -152,28 +141,26 @@ end # The Accounts context defmodule MyApp.Accounts do - use Nebulex.Caching + use Nebulex.Caching, cache: MyApp.Cache alias MyApp.Accounts.User - alias MyApp.PartitionedCache, as: Cache alias MyApp.Repo @ttl :timer.hours(1) - @decorate cacheable(cache: Cache, key: {User, id}, opts: [ttl: @ttl]) + @decorate cacheable(key: {User, id}, opts: [ttl: @ttl]) def get_user!(id) do Repo.get!(User, id) end - @decorate cacheable(cache: Cache, key: {User, username}, opts: [ttl: @ttl]) + @decorate cacheable(key: {User, username}, references: & &1.id) def get_user_by_username(username) do Repo.get_by(User, [username: username]) end @decorate cache_put( - cache: Cache, - keys: [{User, user.id}, {User, user.username}], - match: &match_update/1, + key: {User, user.id}, + match: &__MODULE__.match_update/1, opts: [ttl: @ttl] ) def update_user(%User{} = user, attrs) do @@ -182,10 +169,7 @@ defmodule MyApp.Accounts do |> Repo.update() end - @decorate cache_evict( - cache: Cache, - keys: [{User, user.id}, {User, user.username}] - ) + @decorate cache_evict(key: {User, user.id}) def delete_user(%User{} = user) do Repo.delete(user) end @@ -196,8 +180,8 @@ defmodule MyApp.Accounts do |> Repo.insert() end - defp match_update({:ok, value}), do: {true, value} - defp match_update({:error, _}), do: false + def match_update({:ok, value}), do: {true, value} + def match_update({:error, _}), do: false end ``` @@ -205,29 +189,29 @@ See more [Nebulex examples](https://github.com/cabol/nebulex_examples). 
## Important links - * [Getting Started](http://hexdocs.pm/nebulex/getting-started.html) - * [Documentation](http://hexdocs.pm/nebulex/Nebulex.html) - * [Cache Usage Patterns](http://hexdocs.pm/nebulex/cache-usage-patterns.html) - * [Instrumenting the Cache with Telemetry](http://hexdocs.pm/nebulex/telemetry.html) - * [Migrating to v2.x](http://hexdocs.pm/nebulex/migrating-to-v2.html) - * [Examples](https://github.com/cabol/nebulex_examples) +* [Getting Started](http://hexdocs.pm/nebulex/getting-started.html) +* [Documentation](http://hexdocs.pm/nebulex/Nebulex.html) +* [Migrating to v3.x](http://hexdocs.pm/nebulex/migrating-to-v3.html) +* [Cache Usage Patterns](http://hexdocs.pm/nebulex/cache-usage-patterns.html) +* [Instrumenting the Cache with Telemetry](http://hexdocs.pm/nebulex/telemetry.html) +* [Examples](https://github.com/cabol/nebulex_examples) ## Testing -Testing by default spawns nodes internally for distributed tests. To run tests -that do not require clustering, exclude the `clustered` tag: +To run only the tests: ``` -$ mix test --exclude clustered +$ mix test ``` -If you have issues running the clustered tests try running: +Additionally, to run all Nebulex checks run: ``` -$ epmd -daemon +$ mix check ``` -before running the tests. +The `mix check` will run the tests, coverage, credo, dialyzer, etc. This is the +recommended way to test Nebulex. ## Benchmarks @@ -238,27 +222,12 @@ the directory [benchmarks](./benchmarks). To run a benchmark test you have to run: ``` -$ MIX_ENV=test mix run benchmarks/{BENCH_TEST_FILE} -``` - -Where `BENCH_TEST_FILE` can be any of: - - * `local_with_ets_bench.exs`: benchmark for the local adapter using - `:ets` backend. - * `local_with_shards_bench.exs`: benchmark for the local adapter using - `:shards` backend. - * `partitioned_bench.exs`: benchmark for the partitioned adapter. 
- -For example, for running the benchmark for the local adapter using `:shards` -backend: - -``` -$ MIX_ENV=test mix run benchmarks/local_with_shards_bench.exs +$ mix run benchmarks/benchmark.exs ``` -Additionally, you can also run performance tests using `:basho_bench`. -See [nebulex_bench example](https://github.com/cabol/nebulex_examples/tree/master/nebulex_bench) -for more information. +> The benchmark uses the `Nebulex.Adapters.Nil` adapter; it is more focused on +> measuring the Nebulex abstraction layer performance rather than a specific +> adapter. ## Contributing diff --git a/benchmarks/bench_helper.exs b/benchmarks/bench_helper.exs deleted file mode 100644 index 18c816bd..00000000 --- a/benchmarks/bench_helper.exs +++ /dev/null @@ -1,77 +0,0 @@ -defmodule BenchHelper do - @moduledoc """ - Benchmark commons. - """ - - @doc false - def benchmarks(cache) do - %{ - "get" => fn input -> - cache.get(input) - end, - "get_all" => fn input -> - cache.get_all([input, "foo", "bar"]) - end, - "put" => fn input -> - cache.put(input, input) - end, - "put_new" => fn input -> - cache.put_new(input, input) - end, - "replace" => fn input -> - cache.replace(input, input) - end, - "put_all" => fn input -> - cache.put_all([{input, input}, {"foo", "bar"}]) - end, - "delete" => fn input -> - cache.delete(input) - end, - "take" => fn input -> - cache.take(input) - end, - "has_key?" 
=> fn input -> - cache.has_key?(input) - end, - "count_all" => fn _input -> - cache.count_all() - end, - "ttl" => fn input -> - cache.ttl(input) - end, - "expire" => fn input -> - cache.expire(input, 1) - end, - "incr" => fn _input -> - cache.incr(:counter, 1) - end, - "update" => fn input -> - cache.update(input, 1, &Kernel.+(&1, 1)) - end, - "all" => fn _input -> - cache.all() - end - } - end - - @doc false - def run(benchmarks, opts \\ []) do - Benchee.run( - benchmarks, - Keyword.merge( - [ - inputs: %{"rand" => 100_000}, - before_each: fn n -> :rand.uniform(n) end, - formatters: [ - {Benchee.Formatters.Console, comparison: false, extended_statistics: true}, - {Benchee.Formatters.HTML, extended_statistics: true, auto_open: false} - ], - print: [ - fast_warning: false - ] - ], - opts - ) - ) - end -end diff --git a/benchmarks/benchmark.exs b/benchmarks/benchmark.exs new file mode 100644 index 00000000..1cd6f52a --- /dev/null +++ b/benchmarks/benchmark.exs @@ -0,0 +1,75 @@ +## Benchmarks + +_ = Application.start(:telemetry) + +defmodule Cache do + @moduledoc false + use Nebulex.Cache, + otp_app: :nebulex, + adapter: Nebulex.Adapters.Nil +end + +benchmarks = %{ + "fetch" => fn -> + Cache.fetch("foo") + end, + "put" => fn -> + Cache.put("foo", "bar") + end, + "get" => fn -> + Cache.get("foo") + end, + "get_all" => fn -> + Cache.get_all(in: ["foo", "bar"]) + end, + "put_new" => fn -> + Cache.put_new("foo", "bar") + end, + "replace" => fn -> + Cache.replace("foo", "bar") + end, + "put_all" => fn -> + Cache.put_all([{"foo", "bar"}]) + end, + "delete" => fn -> + Cache.delete("foo") + end, + "take" => fn -> + Cache.take("foo") + end, + "has_key?" 
=> fn -> + Cache.has_key?("foo") + end, + "count_all" => fn -> + Cache.count_all() + end, + "ttl" => fn -> + Cache.ttl("foo") + end, + "expire" => fn -> + Cache.expire("foo", 1) + end, + "incr" => fn -> + Cache.incr(:counter, 1) + end, + "update" => fn -> + Cache.update(1, 1, &Kernel.+(&1, 1)) + end +} + +# Start cache +{:ok, pid} = Cache.start_link() + +Benchee.run( + benchmarks, + formatters: [ + {Benchee.Formatters.Console, comparison: false, extended_statistics: true}, + {Benchee.Formatters.HTML, extended_statistics: true, auto_open: false} + ], + print: [ + fast_warning: false + ] +) + +# Stop cache +if Process.alive?(pid), do: Supervisor.stop(pid) diff --git a/benchmarks/local_with_ets_bench.exs b/benchmarks/local_with_ets_bench.exs deleted file mode 100644 index e75b7deb..00000000 --- a/benchmarks/local_with_ets_bench.exs +++ /dev/null @@ -1,21 +0,0 @@ -## Benchmarks - -:ok = Application.start(:telemetry) -Code.require_file("bench_helper.exs", __DIR__) - -defmodule Cache do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local -end - -# start local cache -{:ok, local} = Cache.start_link(telemetry: false) - -Cache -|> BenchHelper.benchmarks() -|> BenchHelper.run() - -# stop cache -if Process.alive?(local), do: Supervisor.stop(local) diff --git a/benchmarks/local_with_shards_bench.exs b/benchmarks/local_with_shards_bench.exs deleted file mode 100644 index 586013d1..00000000 --- a/benchmarks/local_with_shards_bench.exs +++ /dev/null @@ -1,21 +0,0 @@ -## Benchmarks - -:ok = Application.start(:telemetry) -Code.require_file("bench_helper.exs", __DIR__) - -defmodule Cache do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local -end - -# start local cache -{:ok, local} = Cache.start_link(backend: :shards) - -Cache -|> BenchHelper.benchmarks() -|> BenchHelper.run() - -# stop cache -if Process.alive?(local), do: Supervisor.stop(local) diff --git a/benchmarks/partitioned_bench.exs 
b/benchmarks/partitioned_bench.exs deleted file mode 100644 index 6b1163f1..00000000 --- a/benchmarks/partitioned_bench.exs +++ /dev/null @@ -1,22 +0,0 @@ -## Benchmarks - -:ok = Application.start(:telemetry) -Code.require_file("bench_helper.exs", __DIR__) - -nodes = [:"node1@127.0.0.1", :"node2@127.0.0.1"] -Nebulex.Cluster.spawn(nodes) - -alias Nebulex.NodeCase -alias Nebulex.TestCache.Partitioned - -# start distributed caches -{:ok, dist} = Partitioned.start_link(primary: [backend: :shards]) -node_pid_list = NodeCase.start_caches(Node.list(), [{Partitioned, primary: [backend: :shards]}]) - -Partitioned -|> BenchHelper.benchmarks() -|> BenchHelper.run(parallel: 4, time: 30) - -# stop caches -if Process.alive?(dist), do: Supervisor.stop(dist) -NodeCase.stop_caches(node_pid_list) diff --git a/guides/cache-info.md b/guides/cache-info.md new file mode 100644 index 00000000..f31fcc68 --- /dev/null +++ b/guides/cache-info.md @@ -0,0 +1,300 @@ +# Cache Info + +Since Nebulex v3, the adapter's Info API is introduced. This is a more generic +API to get information about the cache, including the stats. Adapters are +responsible for implementing the Info API and are also free to add the +information specification keys they want. Therefore, it is highly recommended +to review the adapter's documentation you're using. + +> See `c:Nebulex.Cache.info/2` for more information. + +Nebulex also provides a simple implementation +[`Nebulex.Adapters.Common.Info`][nbx_common_info], which is used by the +`Nebulex.Adapters.Local` adapter. This implementation uses a Telemetry +handler to aggregate the stats and keep them updated, therefore, it requires +`:telemetry` to be available. 
+ +[nbx_common_info]: https://hexdocs.pm/nebulex/Nebulex.Adapters.Common.Info.html + +## Usage + +Let's define our cache: + +```elixir +defmodule MyApp.Cache do + use Nebulex.Cache, + otp_app: :my_app, + adapter: Nebulex.Adapters.Local +end +``` + +And the configuration: + +```elixir +config :my_app, MyApp.Cache, + gc_interval: :timer.hours(12), + max_size: 1_000_000, + allocated_memory: 1_000_000, + gc_cleanup_min_timeout: :timer.seconds(10), + gc_cleanup_max_timeout: :timer.minutes(10) +``` + +Once you have set up the `MyApp.Cache` within the application's supervision +tree, you can get the cache info like so: + +```elixir +iex> MyApp.Cache.info!() +%{ + server: %{ + nbx_version: "3.0.0", + cache_module: "MyCache", + cache_adapter: "Nebulex.Adapters.Local", + cache_name: "MyCache", + cache_pid: #PID<0.111.0> + }, + memory: %{ + total: 1_000_000, + used: 0 + }, + stats: %{ + deletions: 0, + evictions: 0, + expirations: 0, + hits: 0, + misses: 0, + updates: 0, + writes: 0 + } +} +``` + +You could also request for a specific item or items: + +```elixir +iex> MyApp.Cache.info!(:stats) +%{ + deletions: 0, + evictions: 0, + expirations: 0, + hits: 0, + misses: 0, + updates: 0, + writes: 0 +} + +iex> MyApp.Cache.info!([:stats, :memory]) +%{ + memory: %{ + total: 1_000_000, + used: 0 + }, + stats: %{ + deletions: 0, + evictions: 0, + expirations: 0, + hits: 0, + misses: 0, + updates: 0, + writes: 0 + } +} +``` + +## Telemetry Metrics + +Now, let's see how we can provide metrics out of the info data. + +First of all, make sure you have added `:telemetry`, `:telemetry_metrics`, and +`:telemetry_poller` packages as dependencies to your `mix.exs` file. 
+ +Create your Telemetry supervisor at `lib/my_app/telemetry.ex`: + +```elixir +# lib/my_app/telemetry.ex +defmodule MyApp.Telemetry do + use Supervisor + import Telemetry.Metrics + + def start_link(arg) do + Supervisor.start_link(__MODULE__, arg, name: __MODULE__) + end + + def init(_arg) do + children = [ + # Configure `:telemetry_poller` for reporting the cache stats + {:telemetry_poller, measurements: periodic_measurements(), period: 10_000}, + + # For example, we use the console reporter, but you can change it. + # See `:telemetry_metrics` for more information. + {Telemetry.Metrics.ConsoleReporter, metrics: metrics()} + ] + + Supervisor.init(children, strategy: :one_for_one) + end + + defp metrics do + [ + # Stats + last_value("nebulex.cache.info.stats.hits", tags: [:cache]), + last_value("nebulex.cache.info.stats.misses", tags: [:cache]), + last_value("nebulex.cache.info.stats.writes", tags: [:cache]), + last_value("nebulex.cache.info.stats.evictions", tags: [:cache]), + + # Memory + last_value("nebulex.cache.info.memory.used", tags: [:cache]), + last_value("nebulex.cache.info.memory.total", tags: [:cache]) + ] + end + + defp periodic_measurements do + [ + {__MODULE__, :cache_stats, []}, + {__MODULE__, :cache_memory, []} + ] + end + + def cache_stats do + with {:ok, info} <- MyApp.Cache.info([:server, :stats]) do + :telemetry.execute( + [:nebulex, :cache, :info, :stats], + info.stats, + %{cache: info.server[:cache_name]} + ) + end + + :ok + end + + def cache_memory do + with {:ok, info} <- MyApp.Cache.info([:server, :memory]) do + :telemetry.execute( + [:nebulex, :cache, :info, :memory], + info.memory, + %{cache: info.server[:cache_name]} + ) + end + + :ok + end +end +``` + +Then add it to your main application's supervision tree +(usually in `lib/my_app/application.ex`): + +```elixir +children = [ + MyApp.Cache, + MyApp.Telemetry, + ... 
+] +``` + +Now start an IEx session and you should see something like the following output: + +``` +[Telemetry.Metrics.ConsoleReporter] Got new event! +Event name: nebulex.cache.info.stats +All measurements: %{evictions: 2, hits: 1, misses: 2, writes: 2} +All metadata: %{cache: MyApp.Cache} + +Metric measurement: :hits (last_value) +With value: 1 +Tag values: %{cache: MyApp.Cache} + +Metric measurement: :misses (last_value) +With value: 2 +Tag values: %{cache: MyApp.Cache} + +Metric measurement: :writes (last_value) +With value: 2 +Tag values: %{cache: MyApp.Cache} + +Metric measurement: :evictions (last_value) +With value: 2 +Tag values: %{cache: MyApp.Cache} + +[Telemetry.Metrics.ConsoleReporter] Got new event! +Event name: nebulex.cache.info.memory +All measurements: %{total: 2000000, used: 0} +All metadata: %{cache: MyApp.Cache} + +Metric measurement: :total (last_value) +With value: 2000000 +Tag values: %{cache: MyApp.Cache} + +Metric measurement: :used (last_value) +With value: 0 +Tag values: %{cache: MyApp.Cache} +``` + +### Custom metrics + +In the same way, you can add another periodic measurement for reporting the +cache size: + +```elixir +defmodule MyApp.Cache do + use Nebulex.Cache, + otp_app: :my_app, + adapter: Nebulex.Adapters.Local + + def dispatch_cache_size do + :telemetry.execute( + [:nebulex, :cache, :size], + %{value: count_all()}, + %{cache: __MODULE__, node: node()} + ) + end +end +``` + +Now let's add a new periodic measurement to invoke `dispatch_cache_size()` +through `:telemetry_poller`: + +```elixir +defp periodic_measurements do + [ + {__MODULE__, :cache_stats, []}, + {__MODULE__, :cache_memory, []}, + {MyApp.Cache, :dispatch_cache_size, []} + ] +end +``` + +> Notice the node name was added to the metadata so we can use it in the +> metric tags. 
+ +Metrics: + +```elixir +defp metrics do + [ + # Stats + last_value("nebulex.cache.info.stats.hits", tags: [:cache]), + last_value("nebulex.cache.info.stats.misses", tags: [:cache]), + last_value("nebulex.cache.info.stats.writes", tags: [:cache]), + last_value("nebulex.cache.info.stats.evictions", tags: [:cache]), + + # Memory + last_value("nebulex.cache.info.memory.used", tags: [:cache]), + last_value("nebulex.cache.info.memory.total", tags: [:cache]), + + # Nebulex custom Metrics + last_value("nebulex.cache.size.value", tags: [:cache, :node]) + ] +end +``` + +If you start an IEx session like previously, you should see the new metric too: + +``` +[Telemetry.Metrics.ConsoleReporter] Got new event! +Event name: nebulex.cache.size +All measurements: %{value: 0} +All metadata: %{cache: MyApp.Cache, node: :nonode@nohost} + +Metric measurement: :value (last_value) +With value: 0 +Tag values: %{cache: MyApp.Cache, node: :nonode@nohost} +``` diff --git a/guides/cache-usage-patterns.md b/guides/cache-usage-patterns.md index ba730f5e..330b25ad 100644 --- a/guides/cache-usage-patterns.md +++ b/guides/cache-usage-patterns.md @@ -1,9 +1,10 @@ -# Cache Usage Patterns via Nebulex.Caching +# Cache Usage Patterns via Nebulex.Caching.Decorators There are several common access patterns when using a cache. **Nebulex** -supports most of these patterns by means of [Nebulex.Caching][nbx_caching]. +supports most of these patterns by means of +[Nebulex.Caching.Decorators][nbx_caching]. -[nbx_caching]: http://hexdocs.pm/nebulex/Nebulex.Caching.html +[nbx_caching]: http://hexdocs.pm/nebulex/Nebulex.Caching.Decorators.html > Most of the following documentation about caching patterns it based on [EHCache Docs][EHCache] @@ -24,11 +25,9 @@ the system-of-record. ### Reading values ```elixir -if value = MyCache.get(key) do - value -else - value = SoR.get(key) # maybe Ecto? 
- :ok = MyCache.put(key, value) +with {:error, _reason} <- MyCache.fetch(key) do + value = SoR.get(key) # maybe Ecto.Repo + MyCache.put(key, value) value end ``` @@ -36,8 +35,8 @@ end ### Writing values ```elixir -:ok = MyCache.put(key, value) -SoR.insert(key, value) # maybe Ecto? +MyCache.put(key, value) +SoR.insert(key, value) # maybe Ecto.Repo ``` As you may have noticed, this is the default behavior for most of the caches, @@ -71,11 +70,11 @@ A disadvantage of using the cache-as-SoR pattern is: * Less directly visible code-path -But how to get all this out-of-box? This is where declarative annotation-based +But how to get all this out-of-box? This is where declarative decorator-based caching comes in. Nebulex provides a set of annotation to abstract most of the logic behind **Read-through** and **Write-through** patterns and make the implementation extremely easy. But let's go over these patterns more in detail -and how to implement them by using [Nebulex annotations][nbx_caching]. +and how to implement them by using [Nebulex decorators][nbx_caching]. ## Read-through @@ -90,27 +89,26 @@ The next time the cache is asked for the value for the same key it can be returned from the cache without using the loader (unless the entry has been evicted or expired). 
-This pattern can be easily implemented using `cache` decorator as follows: +This pattern can be easily implemented using the `cacheable` decorator +as follows: ```elixir defmodule MyApp.Example do - use Nebulex.Caching - - alias MyApp.Cache + use Nebulex.Caching, cache: MyApp.Cache @ttl :timer.hours(1) - @decorate cacheable(cache: Cache, key: name) + @decorate cacheable(key: name) def get_by_name(name) do # your logic (the loader to retrieve the value from the SoR) end - @decorate cacheable(cache: Cache, key: age, opts: [ttl: @ttl]) + @decorate cacheable(key: age, opts: [ttl: @ttl]) def get_by_age(age) do # your logic (the loader to retrieve the value from the SoR) end - @decorate cacheable(cache: Cache) + @decorate cacheable() def all(query) do # your logic (the loader to retrieve the value from the SoR) end @@ -128,25 +126,23 @@ that knows how to write data to the system-of-record (SoR). When the cache is asked to store a value for a key, the cache invokes the writer to store the value in the SoR, as well as updating (or deleting) the cache. -This pattern can be implemented using `defevict` or `defupdatable`. When the -data is written to the system-of-record (SoR), you can update the cached value -associated with the given key using `defupdatable`, or just delete it using -`defevict`. +This pattern can be implemented using `cache_evict` or `cache_put` decorators. +When the data is written to the system-of-record (SoR), you can update the +cached value associated with the given key using `cache_put`, or just delete +it using `cache_evict`. 
```elixir defmodule MyApp.Example do - use Nebulex.Caching - - alias MyApp.Cache + use Nebulex.Caching, cache: MyApp.Cache # When the data is written to the SoR, it is updated in the cache - @decorate cache_put(cache: Cache, key: something) + @decorate cache_put(key: something) def update(something) do # Write data to the SoR (most likely the Database) end # When the data is written to the SoR, it is deleted (evicted) from the cache - @decorate cache_evict(cache: Cache, key: something) + @decorate cache_evict(key: something) def update_something(something) do # Write data to the SoR (most likely the Database) end diff --git a/guides/creating-new-adapter.md b/guides/creating-new-adapter.md index 6daca250..c1e2e522 100644 --- a/guides/creating-new-adapter.md +++ b/guides/creating-new-adapter.md @@ -23,14 +23,14 @@ Now let's modify `mix.exs` so that we could fetch Nebulex repository. defmodule NebulexMemoryAdapter.MixProject do use Mix.Project - @nbx_vsn "2.5.2" + @nbx_vsn "3.0.0" @version "0.1.0" def project do [ app: :nebulex_memory_adapter, version: @version, - elixir: "~> 1.13", + elixir: "~> 1.15", elixirc_paths: elixirc_paths(Mix.env()), aliases: aliases(), deps: deps(), @@ -108,7 +108,7 @@ end We won't be writing tests ourselves. Instead, we will use shared tests from the Nebulex parent repo. To do so, we will create a helper module in `test/shared/cache_test.exs` that will `use` test suites for behaviour we are -going to implement. The minimal set of behaviours is `Entry` and `Queryable` so +going to implement. The minimal set of behaviours is `KV` and `Queryable` so we'll go with them. 
```elixir @@ -119,7 +119,7 @@ defmodule NebulexMemoryAdapter.CacheTest do defmacro __using__(_opts) do quote do - use Nebulex.Cache.EntryTest + use Nebulex.Cache.KVTest use Nebulex.Cache.QueryableTest end end @@ -143,13 +143,16 @@ defmodule NebulexMemoryAdapterTest do Cache.delete_all() :ok - on_exit(fn -> - :ok = Process.sleep(100) - if Process.alive?(pid), do: Cache.stop(pid) - end) + on_exit(fn -> safe_stop(pid) end) {:ok, cache: Cache, name: Cache} end + + defp safe_stop(pid) do + Cache.stop(pid) + catch + :exit, _ -> :ok + end end ``` @@ -187,7 +190,7 @@ Another try ```console mix test == Compilation error in file test/nebulex_memory_adapter_test.exs == -** (CompileError) test/nebulex_memory_adapter_test.exs:3: module Nebulex.Cache.EntryTest is not loaded and could not be found +** (CompileError) test/nebulex_memory_adapter_test.exs:3: module Nebulex.Cache.KVTest is not loaded and could not be found (elixir 1.13.2) expanding macro: Kernel.use/1 test/nebulex_memory_adapter_test.exs:3: NebulexMemoryAdapterTest (module) expanding macro: NebulexMemoryAdapter.CacheTest.__using__/1 @@ -256,21 +259,25 @@ defmodule NebulexMemoryAdapter do @behaviour Nebulex.Adapter @behaviour Nebulex.Adapter.Queryable + import Nebulex.Utils + @impl Nebulex.Adapter defmacro __before_compile__(_env), do: :ok @impl Nebulex.Adapter def init(_opts) do child_spec = Supervisor.child_spec({Agent, fn -> %{} end}, id: {Agent, 1}) + {:ok, child_spec, %{}} end @impl Nebulex.Adapter.Queryable - def execute(adapter_meta, :delete_all, query, opts) do + def execute(adapter_meta, %{op: :delete_all} = query_meta, opts) do deleted = Agent.get(adapter_meta.pid, &map_size/1) + Agent.update(adapter_meta.pid, fn _state -> %{} end) - deleted + wrap_ok deleted end end ``` @@ -301,29 +308,29 @@ one-by-one or define them all in bulk. 
For posterity, we put a complete ```elixir defmodule NebulexMemoryAdapter do @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry + @behaviour Nebulex.Adapter.KV @behaviour Nebulex.Adapter.Queryable + import Nebulex.Utils + @impl Nebulex.Adapter defmacro __before_compile__(_env), do: :ok @impl Nebulex.Adapter def init(_opts) do child_spec = Supervisor.child_spec({Agent, fn -> %{} end}, id: {Agent, 1}) + {:ok, child_spec, %{}} end - @impl Nebulex.Adapter.Entry - def get(adapter_meta, key, _opts) do - Agent.get(adapter_meta.pid, &Map.get(&1, key)) + @impl Nebulex.Adapter.KV + def fetch(adapter_meta, key, _opts) do + wrap_ok Agent.get(adapter_meta.pid, &Map.get(&1, key)) end - @impl Nebulex.Adapter.Entry - def get_all(adapter_meta, keys, _opts) do - Agent.get(adapter_meta.pid, &Map.take(&1, keys)) - end + @impl Nebulex.Adapter.KV + def put(adapter_meta, key, value, ttl, op, opts) - @impl Nebulex.Adapter.Entry def put(adapter_meta, key, value, ttl, :put_new, opts) do if get(adapter_meta, key, []) do false @@ -331,103 +338,128 @@ defmodule NebulexMemoryAdapter do put(adapter_meta, key, value, ttl, :put, opts) true end + |> wrap_ok() end def put(adapter_meta, key, value, ttl, :replace, opts) do if get(adapter_meta, key, []) do put(adapter_meta, key, value, ttl, :put, opts) + true else false end + |> wrap_ok() end def put(adapter_meta, key, value, _ttl, _on_write, _opts) do Agent.update(adapter_meta.pid, &Map.put(&1, key, value)) - true + + {:ok, true} end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV + def put_all(adapter_meta, entries, ttl, op, opts) + def put_all(adapter_meta, entries, ttl, :put_new, opts) do if get_all(adapter_meta, Map.keys(entries), []) == %{} do put_all(adapter_meta, entries, ttl, :put, opts) + true else false end + |> wrap_ok() end def put_all(adapter_meta, entries, _ttl, _on_write, _opts) do entries = Map.new(entries) + Agent.update(adapter_meta.pid, &Map.merge(&1, entries)) - true + + {:ok, true} end - @impl 
Nebulex.Adapter.Entry
+  @impl Nebulex.Adapter.KV
   def delete(adapter_meta, key, _opts) do
-    Agent.update(adapter_meta.pid, &Map.delete(&1, key))
+    wrap_ok Agent.update(adapter_meta.pid, &Map.delete(&1, key))
   end
 
-  @impl Nebulex.Adapter.Entry
+  @impl Nebulex.Adapter.KV
   def take(adapter_meta, key, _opts) do
     value = get(adapter_meta, key, [])
+
     delete(adapter_meta, key, [])
-    value
+
+    {:ok, value}
   end
 
-  @impl Nebulex.Adapter.Entry
+  @impl Nebulex.Adapter.KV
   def update_counter(adapter_meta, key, amount, _ttl, default, _opts) do
     Agent.update(adapter_meta.pid, fn state ->
       Map.update(state, key, default + amount, fn v -> v + amount end)
     end)
 
-    get(adapter_meta, key, [])
+    wrap_ok get(adapter_meta, key, [])
   end
 
-  @impl Nebulex.Adapter.Entry
-  def has_key?(adapter_meta, key) do
-    Agent.get(adapter_meta.pid, &Map.has_key?(&1, key))
+  @impl Nebulex.Adapter.KV
+  def has_key?(adapter_meta, key, _opts) do
+    wrap_ok Agent.get(adapter_meta.pid, &Map.has_key?(&1, key))
   end
 
-  @impl Nebulex.Adapter.Entry
-  def ttl(_adapter_meta, _key) do
-    nil
+  @impl Nebulex.Adapter.KV
+  def ttl(_adapter_meta, _key, _opts) do
+    {:ok, nil}
   end
 
-  @impl Nebulex.Adapter.Entry
-  def expire(_adapter_meta, _key, _ttl) do
-    true
+  @impl Nebulex.Adapter.KV
+  def expire(_adapter_meta, _key, _ttl, _opts) do
+    {:ok, true}
   end
 
-  @impl Nebulex.Adapter.Entry
-  def touch(_adapter_meta, _key) do
-    true
+  @impl Nebulex.Adapter.KV
+  def touch(_adapter_meta, _key, _opts) do
+    {:ok, true}
   end
 
   @impl Nebulex.Adapter.Queryable
-  def execute(adapter_meta, :delete_all, _query, _opts) do
-    deleted = execute(adapter_meta, :count_all, nil, [])
+  def execute(adapter_meta, query_meta, _opts) do
+    do_execute(adapter_meta.pid, query_meta)
+  end
+
+  def do_execute(pid, %{op: :delete_all} = query_meta) do
+    {:ok, deleted} = do_execute(pid, %{query_meta | op: :count_all})
+
     Agent.update(pid, fn _state -> %{} end)
-    deleted
+    {:ok, deleted}
+  end
+
+  def do_execute(pid, %{op: :count_all}) do
+    wrap_ok Agent.get(pid, &map_size/1)
end
 
-  def execute(adapter_meta, :count_all, _query, _opts) do
-    Agent.get(adapter_meta.pid, &map_size/1)
+  def do_execute(pid, %{op: :get_all, query: {:q, nil}}) do
+    wrap_ok Agent.get(pid, &Map.values/1)
   end
 
-  def execute(adapter_meta, :all, _query, _opts) do
-    Agent.get(adapter_meta.pid, &Map.values/1)
+  # Fetching multiple keys
+  def do_execute(pid, %{op: :get_all, query: {:in, keys}}) do
+    pid
+    |> Agent.get(&Map.take(&1, keys))
+    |> Map.to_list()
+    |> wrap_ok()
   end
 
   @impl Nebulex.Adapter.Queryable
-  def stream(_adapter_meta, :invalid_query, _opts) do
-    raise Nebulex.QueryError, message: "foo", query: :invalid_query
+  def stream(adapter_meta, query_meta, _opts) do
+    do_stream(adapter_meta.pid, query_meta)
   end
 
-  def stream(adapter_meta, _query, opts) do
+  def do_stream(pid, %{query: {:q, q}, select: select}) when q in [nil, :all] do
     fun =
-      case Keyword.get(opts, :return) do
+      case select do
         :value ->
           &Map.values/1
 
@@ -438,10 +470,14 @@ defmodule NebulexMemoryAdapter do
           &Map.keys/1
       end
 
-    Agent.get(adapter_meta.pid, fun)
+    wrap_ok Agent.get(pid, fun)
+  end
+
+  def do_stream(_pid, query) do
+    wrap_error Nebulex.QueryError, query: query
   end
 end
 ```
 
 Of course, this isn't a useful adapter in any sense but it should be enough to
-get you started with your own.
\ No newline at end of file
+get you started with your own.
diff --git a/guides/getting-started.md b/guides/getting-started.md
index 64f76243..0ab54b04 100644
--- a/guides/getting-started.md
+++ b/guides/getting-started.md
@@ -3,8 +3,8 @@
 This guide is an introduction to [Nebulex](https://github.com/cabol/nebulex),
 a local and distributed caching toolkit for Elixir. Nebulex API is pretty much
 inspired by [Ecto](https://github.com/elixir-ecto/ecto), taking advantage of
-its simplicity, flexibility and pluggable architecture. In the same way
-as Ecto, developers can provide their own cache (adapter) implementations.
+its simplicity, flexibility and pluggable architecture. 
Same as Ecto, +developers can provide their own cache (adapter) implementations. In this guide, we're going to learn some basics about Nebulex, such as insert, retrieve and destroy cache entries. @@ -23,39 +23,39 @@ which will be needed by Nebulex later on. To add Nebulex to this application, there are a few steps that we need to take. -The first step will be adding Nebulex to our `mix.exs` file, which we'll do by -changing the `deps` definition in that file to this: +The first step will be adding both Nebulex and the cache adapter as a dependency +to our `mix.exs` file, which we'll do by changing the `deps` definition in that +file to this: ```elixir defp deps do [ - {:nebulex, "~> 2.6"}, - {:shards, "~> 1.0"}, #=> When using :shards as backend - {:decorator, "~> 1.4"}, #=> When using Caching Annotations - {:telemetry, "~> 1.0"} #=> When using the Telemetry events (Nebulex stats) + {:nebulex, "~> 3.0"}, + {:nebulex_adapters_local, "~> 3.0"}, + #=> When using :shards as backend for local adapter + {:shards, "~> 1.1"}, + #=> When using Caching decorators (recommended adding it) + {:decorator, "~> 1.4"}, + #=> When using the Telemetry events (recommended adding it) + {:telemetry, "~> 1.0"} ] end ``` -In order to give more flexibility and loading only needed dependencies, Nebulex -makes all its dependencies as optional. For example: +To give more flexibility and load only needed dependencies, Nebulex makes all +dependencies optional, including the adapters. For example: - * For intensive workloads, you may want to use `:shards` as the backend for - the local adapter and having partitioned tables. In such a case, you have - to add `:shards` to the dependency list. + * For intensive workloads when using `Nebulex.Adapters.Local` adapter, you may + want to use `:shards` as the backend for partitioned ETS tables. In such a + case, you have to add `:shards` to the dependency list. 
- * For enabling the usage of - [declarative annotation-based caching via decorators][nbx_caching], - you have to add `:decorator` to the dependency list. + * For enabling [declarative decorator-based caching][nbx_caching], you have + to add `:decorator` to the dependency list. - * For enabling Telemetry events to be dispatched when using Nebulex, - you have to add `:telemetry` to the dependency list. - See [telemetry guide][telemetry]. + * For enabling Telemetry events dispatched by Nebulex, you have to add + `:telemetry` to the dependency list. See [telemetry guide][telemetry]. - * If you want to use an external adapter (e.g: Cachex or Redis adapter), you - have to add the adapter dependency too. - -[nbx_caching]: http://hexdocs.pm/nebulex/Nebulex.Caching.html +[nbx_caching]: http://hexdocs.pm/nebulex/Nebulex.Caching.Decorators.html [telemetry]: http://hexdocs.pm/nebulex/telemetry.html To install these dependencies, we will run this command: @@ -92,8 +92,7 @@ config :blog, Blog.Cache, gc_cleanup_max_timeout: :timer.minutes(10) ``` -Assuming we will use `:shards` as backend, can add uncomment the first line in -the config +Assuming you want to use `:shards` as backend, uncomment the `backend:` option: ```elixir config :blog, Blog.Cache, @@ -156,9 +155,9 @@ against our cache. **IMPORTANT:** Make sure the cache is put in first place within the children list, or at least before the process or processes using it. Otherwise, there -could be race conditions causing `Nebulex.RegistryLookupError` errors; -processes attempting to use the cache and this one hasn't been even -started. +could be race conditions causing exceptions or errors `Nebulex.Error` +(with reason `:registry_lookup_error`); processes attempting to use +the cache and this one hasn't been even started. 
## Inserting entries @@ -200,13 +199,14 @@ Let's try `put_new` and `put_new!` functions: ```elixir iex> new_user = %{id: 4, first_name: "John", last_name: "Doe"} iex> Blog.Cache.put_new(new_user.id, new_user, ttl: 900) -true +{:ok, true} iex> Blog.Cache.put_new(new_user.id, new_user) -false +{:ok, false} -# same as previous one but raises `Nebulex.KeyAlreadyExistsError` +# same as previous one but raises `Nebulex.Error` in case of error iex> Blog.Cache.put_new!(new_user.id, new_user) +false ``` Now `replace` and `replace!` functions: @@ -214,16 +214,20 @@ Now `replace` and `replace!` functions: ```elixir iex> existing_user = %{id: 5, first_name: "John", last_name: "Doe2"} iex> Blog.Cache.replace(existing_user.id, existing_user) -false +{:ok, false} iex> Blog.Cache.put_new(existing_user.id, existing_user) -true +{:ok, true} iex> Blog.Cache.replace(existing_user.id, existing_user, ttl: 900) +{:ok, true} + +# same as previous one but raises `Nebulex.Error` in case of error +iex> Blog.Cache.replace!(existing_user.id, existing_user) true -# same as previous one but raises `KeyError` -iex> Blog.Cache.replace!(100, existing_user) +iex> Blog.Cache.replace!("unknown", existing_user) +false ``` It is also possible to insert multiple new entries at once: @@ -234,10 +238,14 @@ iex> new_users = %{ ...> 7 => %{id: 7, first_name: "Marie", last_name: "Curie"} ...> } iex> Blog.Cache.put_new_all(new_users) -true +{:ok, true} # none of the entries is inserted if at least one key already exists iex> Blog.Cache.put_new_all(new_users) +{:ok, false} + +# same as previous one but raises `Nebulex.Error` in case of error +iex> Blog.Cache.put_new_all!(new_users) false ``` @@ -247,11 +255,43 @@ Let’s start off with fetching data by the key, which is the most basic and common operation to retrieve data from a cache. 
```elixir -iex> Blog.Cache.get(1) -_user_1 +# Using `fetch` callback +iex> {:ok, user1} = Blog.Cache.fetch(1) +iex> user1.id +1 + +# If the key doesn't exist an error tuple is returned +iex> {:error, %Nebulex.KeyError{} = e} = Blog.Cache.fetch("unknown") +iex> e.key +"unknown" + +# Using `fetch!` (same as `fetch` but raises an exception in case of error) +iex> user1 = Blog.Cache.fetch!(1) +iex> user1.id +1 + +# Using `get` callback (returns the default in case the key doesn't exist) +iex> {:ok, user1} = Blog.Cache.get(1) +iex> user1.id +1 + +# Returns the default because the key doesn't exist +iex> Blog.Cache.get("unknown") +{:ok, nil} +iex> Blog.Cache.get("unknown", "default") +{:ok, "default"} + +# Using `get!` (same as `get` but raises an exception in case of error) +iex> user1 = Blog.Cache.get!(1) +iex> user1.id +1 +iex> Blog.Cache.get!("unknown") +nil +iex> Blog.Cache.get!("unknown", "default") +"default" iex> for key <- 1..3 do -...> user = Blog.Cache.get(key) +...> user = Blog.Cache.get!(key) ...> user.first_name ...> end ["Galileo", "Charles", "Albert"] @@ -261,17 +301,10 @@ There is a function `has_key?` to check if a key exist in cache: ```elixir iex> Blog.Cache.has_key?(1) -true +{:ok, true} iex> Blog.Cache.has_key?(10) -false -``` - -Retrieving multiple entries - -```elixir -iex> Blog.Cache.get_all([1, 2, 3]) -_users +{:ok, false} ``` ## Updating entries @@ -286,13 +319,16 @@ iex> initial = %{id: 1, first_name: "", last_name: ""} iex> Blog.Cache.get_and_update(1, fn v -> ...> if v, do: {v, %{v | first_name: "X"}}, else: {v, initial} ...> iex> end) -{_old, _updated} +{:ok, {_old, _updated}} # using `update` iex> Blog.Cache.update(1, initial, &(%{&1 | first_name: "Y"})) -_updated +{:ok, _updated} ``` +> You can also use the version with the trailing bang (`!`) `get_and_update!` +> and `!update`. + ## Counters The function `incr` is provided to increment or decrement a counter; by default, @@ -301,15 +337,19 @@ a counter is initialized to `0`. 
Let's see how counters works: ```elixir # by default, the counter is incremented by 1 iex> Blog.Cache.incr(:my_counter) -1 +{:ok, 1} # but we can also provide a custom increment value iex> Blog.Cache.incr(:my_counter, 5) -6 +{:ok, 6} # to decrement the counter, just pass a negative value iex> Blog.Cache.incr(:my_counter, -5) -1 +{:ok, 1} + +# using `incr!` +iex> Blog.Cache.incr!(:my_counter) +2 ``` ## Deleting entries @@ -320,6 +360,10 @@ delete an entry using Nebulex. ```elixir iex> Blog.Cache.delete(1) :ok + +# or `delete!` +iex> Blog.Cache.delete!(1) +:ok ``` ### Take @@ -329,29 +373,49 @@ before its delete it: ```elixir iex> Blog.Cache.take(1) -_entry +{:ok, _entry} -# returns `nil` if `key` doesn't exist -iex> Blog.Cache.take("nonexistent") -nil +# If the key doesn't exist an error tuple is returned +iex> {:error, %Nebulex.KeyError{} = e} = Blog.Cache.take("nonexistent") +iex> e.key +"nonexistent" -# same as previous one but raises `KeyError` +# same as previous one but raises `Nebulex.KeyError` iex> Blog.Cache.take!("nonexistent") ``` -## Info - -The last thing we’ll cover in this guide is how to retrieve information about -cached objects or the cache itself. 
+## Entry expiration -### Remaining TTL +You can get the remaining TTL or expiration time for a key like so: ```elixir +# If no TTL is set when the entry is created, `:infinity` is set by default iex> Blog.Cache.ttl(1) -_remaining_ttl +{:ok, :infinity} -iex> Blog.Cache.ttl("nonexistent") -nil +# If the key doesn't exist an error tuple is returned +iex> {:error, %Nebulex.KeyError{} = e} = Blog.Cache.ttl("nonexistent") +iex> e.key +"nonexistent" + +# Same as `ttl` but an exception is raised if an error occurs +iex> Blog.Cache.ttl!(1) +:infinity +``` + +You could also change or update the expiration time using `expire`, like so: + +```elixir +iex> Blog.Cache.expire(1, :timer.hours(1)) +{:ok, true} + +# When the key doesn't exist false is returned +iex> Blog.Cache.expire("nonexistent", :timer.hours(1)) +{:ok, false} + +# Same as `expire` but an exception is raised if an error occurs +iex> Blog.Cache.expire!(1, :timer.hours(1)) +true ``` ## Query and/or Stream entries @@ -362,34 +426,49 @@ cache matching the given query. 
### Fetch all entries from cache matching the given query ```elixir -# by default, returns all keys -iex> Blog.Cache.all() -_all_entries +# by default, returns all entries +iex> Blog.Cache.get_all() #=> The query is set to nil by default +{:ok, _all_entries} # fetch all entries and return the keys -iex> Blog.Cache.all(nil, return: :key) -_keys +iex> Blog.Cache.get_all(select: :key) +{:ok, _all_keys} + +# fetch all entries and return the values +iex> Blog.Cache.get_all(select: :value) +{:ok, _all_values} + +# fetch entries associated to the requested keys +iex> Blog.Cache.get_all(in: [1, 2]) +{:ok, _fetched_entries} + +# raises an exception in case of error +iex> Blog.Cache.get_all!() +_all_entries + +# raises an exception in case of error +iex> Blog.Cache.get_all!(in: [1, 2]) +_fetched_entries # built-in queries in `Nebulex.Adapters.Local` adapter -iex> Blog.Cache.all(nil) -iex> Blog.Cache.all(:unexpired) -iex> Blog.Cache.all(:expired) +iex> Blog.Cache.get_all() #=> Equivalent to Blog.Cache.get_all(query: nil) +iex> Blog.Cache.get_all(query: :expired) # if we are using `Nebulex.Adapters.Local` adapter, the stored entry # is a tuple `{:entry, key, value, touched, ttl}`, then the match spec # could be something like: -iex> spec = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 10}], [{{:"$1", :"$2"}}]}] -iex> Blog.Cache.all(spec) -_all_matched +iex> spec = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$1", 10}], [{{:"$1", :"$2"}}]}] +iex> Blog.Cache.get_all(query: spec) +{:ok, _all_matched} # using Ex2ms iex> import Ex2ms iex> spec = ...> fun do -...> {_, key, value, _, _} when value > 10 -> {key, value} +...> {_, key, value, _, _} when key > 10 -> {key, value} ...> end -iex> Blog.Cache.all(spec) -_all_matched +iex> Blog.Cache.get_all(query: spec) +{:ok, _all_matched} ``` ### Count all entries from cache matching the given query @@ -397,36 +476,17 @@ _all_matched For example, to get the total number of cached objects (cache size): ```elixir -iex> Blog.Cache.count_all() 
-_num_cached_entries -``` - -> By default, since none query is given to `count_all/2`, all entries - in cache match. +# by default, counts all entries +iex> Blog.Cache.count_all() #=> The query is set to nil by default +{:ok, _num_cached_entries} -In the same way as `all/2`, you can pass a query to count only the matched -entries: - -```elixir -# using Ex2ms -iex> import Ex2ms -iex> spec = -...> fun do -...> {_, value, _, _} when rem(value, 2) == 0 -> true -...> end -iex> Blog.Cache.count_all(spec) -_num_of_matched_entries +# raises an exception in case of error +iex> Blog.Cache.count_all!() +_num_cached_entries ``` -> The previous example assumes you are using the built-in local adapter. - -Also, if you are using the built-in local adapter, you can use the queries -`:expired` and `:unexpired` too, like so: - -```elixir -iex> expired_entries = Blog.Cache.count_all(:expired) -iex> unexpired_entries = Blog.Cache.count_all(:unexpired) -``` +Similar to `get_all`, you can pass a query to count only the matched entries. +For example, `Blog.Cache.count_all(query: query)`. ### Delete all entries from cache matching the given query @@ -439,66 +499,94 @@ the default behavior when none query is provided): ```elixir iex> Blog.Cache.delete_all() +{:ok, _num_of_removed_entries} + +# raises an exception in case of error +iex> Blog.Cache.delete_all!() _num_of_removed_entries ``` -And just like `count_all/2`, you can also provide a custom query to delete only -the matched entries, or if you are using the built-in local adapter you can also -use the queries `:expired` and `:unexpired`. 
For example: +One may also delete a list of keys at once (like a bulk delete): ```elixir -iex> expired_entries = Blog.Cache.delete_all(:expired) -iex> unexpired_entries = Blog.Cache.delete_all(:unexpired) +iex> Blog.Cache.delete_all(in: ["k1", "k2"]) +{:ok, _num_of_removed_entries} -# using Ex2ms -iex> import Ex2ms -iex> spec = -...> fun do -...> {_, value, _, _} when rem(value, 2) == 0 -> true -...> end -iex> Blog.Cache.delete_all(spec) -_num_of_matched_entries +# raises an exception in case of error +iex> Blog.Cache.delete_all!(in: ["k1", "k2"]) +_num_of_removed_entries ``` -> These examples assumes you are using the built-in local adapter. - ### Stream all entries from cache matching the given query -Similar to `all/2` but returns a lazy enumerable that emits all entries from the -cache matching the provided query. +Similar to `get_all` but returns a lazy enumerable that emits all entries from +the cache matching the provided query. If the query is `nil`, then all entries in cache match and are returned when the -stream is evaluated; based on the `:return` option. +stream is evaluated (based on the `:select` option). 
```elixir -iex> Blog.Cache.stream() -iex> Blog.Cache.stream(nil, page_size: 100, return: :value) -iex> Blog.Cache.stream(nil, page_size: 100, return: :entry) +iex> {:ok, stream} = Blog.Cache.stream() +iex> Enum.to_list(stream) +_all_matched + +iex> {:ok, stream} = Blog.Cache.stream(select: :key) +iex> Enum.to_list(stream) +_all_matched + +iex> {:ok, stream} = Blog.Cache.stream([select: :value], max_entries: 100) +iex> Enum.to_list(stream) +_all_matched + +# raises an exception in case of error +iex> stream = Blog.Cache.stream!() +iex> Enum.to_list(stream) +_all_matched # using `Nebulex.Adapters.Local` adapter -iex> spec = [{{:"$1", :"$2", :_, :_}, [{:>, :"$2", 10}], [{{:"$1", :"$2"}}]}] -iex> Blog.Cache.stream(spec) +iex> spec = [{{:entry, :"$1", :"$2", :_, :_}, [{:<, :"$1", 3}], [{{:"$1", :"$2"}}]}] +iex> {:ok, stream} = Blog.Cache.stream(query: spec) +iex> Enum.to_list(stream) _all_matched # using Ex2ms iex> import Ex2ms iex> spec = ...> fun do -...> {key, value, _, _} when value > 10 -> {key, value} +...> {:entry, key, value, _, _} when key < 3 -> {key, value} ...> end -iex> Blog.Cache.stream(spec) +iex> {:ok, stream} = Blog.Cache.stream(query: spec) +iex> Enum.to_list(stream) _all_matched ``` ## Partitioned Cache Nebulex provides the adapter `Nebulex.Adapters.Partitioned`, which allows to -set up a partitioned cache topology. +set up a partitioned cache topology. 
First of all, we need to add +`:nebulex_adapters_partitioned` to the dependencies in the `mix.exs`: + +```elixir +defp deps do + [ + {:nebulex, "~> 3.0"}, + {:nebulex_adapters_local, "~> 3.0"}, + {:nebulex_adapters_partitioned, "~> 3.0"}, + #=> When using :shards as backend for local adapter + {:shards, "~> 1.0"}, + #=> When using Caching decorators (recommended adding it) + {:decorator, "~> 1.4"}, + #=> When using the Telemetry events (recommended adding it) + {:telemetry, "~> 1.0"} + ] +end +``` -Let's set up the partitioned cache by using the `mix` task `mix nbx.gen.cache`: +Let's set up the partitioned cache by using the `mix` task +`mix nbx.gen.cache.partitioned`: ``` -mix nbx.gen.cache -c Blog.PartitionedCache -a Nebulex.Adapters.Partitioned +mix nbx.gen.cache.partitioned -c Blog.PartitionedCache ``` As we saw previously, this command will generate the cache in @@ -558,11 +646,11 @@ milliseconds for the command that will be executed. ```elixir iex> Blog.PartitionedCache.get("foo", timeout: 10) -_value +#=> {:ok, value} -# if the timeout is exceeded, then the current process will exit +# when the command's call timed out an error is returned iex> Blog.PartitionedCache.put("foo", "bar", timeout: 10) -# ** (EXIT) time out +#=> {:error, %Nebulex.Error{reason: :timeout}} ``` To learn more about how partitioned cache works, please check @@ -574,16 +662,36 @@ To learn more about how partitioned cache works, please check Nebulex also provides the adapter `Nebulex.Adapters.Multilevel`, which allows to setup a multi-level caching hierarchy. 
-First, let's set up the multi-level cache by using the `mix` task -`mix nbx.gen.cache`: +Same as any other adapter, we have to add `:nebulex_adapters_multilevel` to the +dependencies in the `mix.exs`: + +```elixir +defp deps do + [ + {:nebulex, "~> 3.0"}, + {:nebulex_adapters_local, "~> 3.0"}, + {:nebulex_adapters_partitioned, "~> 3.0"}, + {:nebulex_adapters_multilevel, "~> 3.0"}, + #=> When using :shards as backend for local adapter + {:shards, "~> 1.0"}, + #=> When using Caching decorators (recommended adding it) + {:decorator, "~> 1.4"}, + #=> When using the Telemetry events (recommended adding it) + {:telemetry, "~> 1.0"} + ] +end +``` + +Let's set up the multilevel cache by using the `mix` task +`mix nbx.gen.cache.multilevel`: ``` -mix nbx.gen.cache -c Blog.NearCache -a Nebulex.Adapters.Multilevel +mix nbx.gen.cache.multilevel -c Blog.NearCache ``` By default, the command generates a 2-level near-cache topology. The first -level or `L1` using the built-in local adapter, and the second one or `L2` -using the built-in partitioned adapter. +level or `L1` using `Nebulex.Adapters.Local` adapter, and the second one or `L2` +using `Nebulex.Adapters.Partitioned` adapter. The generated cache module `lib/blog/near_cache.ex`: @@ -660,9 +768,9 @@ Let's try it out! ```elixir iex> Blog.NearCache.put("foo", "bar", ttl: :timer.hours(1)) -"bar" +:ok -iex> Blog.NearCache.get("foo") +iex> Blog.NearCache.get!("foo") "bar" ``` @@ -672,5 +780,6 @@ To learn more about how multilevel-cache works, please check ## Next - * [Cache Usage Patterns via Nebulex.Caching](http://hexdocs.pm/nebulex/cache-usage-patterns.html) - - Annotations-based DSL to implement different cache usage patterns. +* [Decorators-based DSL for cache usage patterns][cache-usage-patterns]. 
+ +[cache-usage-patterns]: http://hexdocs.pm/nebulex/cache-usage-patterns.html diff --git a/guides/migrating-to-v2.md b/guides/migrating-to-v2.md deleted file mode 100644 index 7d8184aa..00000000 --- a/guides/migrating-to-v2.md +++ /dev/null @@ -1,94 +0,0 @@ -# Migrating to v2.x - -For the v2, Nebulex introduces several breaking changes, so this guide aims to -highlight most of these changes to make easier the transition to v2. Be aware -this guide won't focus on every change, just the most significant ones that can -affect how your application code interacts with the cache. Also, it is not a -detailed guide about how to translate the current code from older versions to -v2, just pointing out the areas the new documentation should be consulted on. - -## Configuration - -This is one of the biggest changes. Version 1.x, most of the configuration -options are resolved in compile-time, which has a lot of limitations. -Since version 2.x, only few arguments are configured in compile-time when -defining a cache, e.g.: `otp_app:`, `adapter:`, and `primary_storage_adapter:` -(for partitioned and replicated adapters). The rest of configuration parameters -are given via config file or at startup time. For more information and examples, -see `Nebulex.Cache`, `Nebulex.Adapters.Local`, `Nebulex.Adapters.Partitioned`, -`Nebulex.Adapters.Replicated`, `Nebulex.Adapters.Multilevel`. - -## Cache API - -There are several changes on the `Nebulex.Cache` API: - - * The `:return` option is not available anymore, so it has to be removed. - * The `:version` option is not available anymore, so it has to be removed. - * Callback `set/3` was refactored to `put/3`. - * Callback `set_many/2` was refactored to `put_all/2`. - * Callback `get_many/2` was refactored to `get_all/2`. - * Callbacks `add/3` and `add!/3` were refactored to `put_new/3` and - `put_new!/3`. - * Callback `update_counter/3` was refactored to `incr/3` and `decr/3`. - * Callback `add_or_replace/3` was removed. 
- * Callback `object_info/2` was removed, and callbacks `ttl/1` and - `touch/1` were added instead. - -## Declarative annotation-based caching via decorators - - * Module `Nebulex.Caching.Decorators` was refactored to `Nebulex.Caching` – - Keep in mind that since v1.2.x the caching decorators were included instead - of the previous macros or DSL (this applies for version lower than v1.1.x). - * Decorator `cache/3` was refactored to `cacheable/3`. - * Decorator `evict/3` was refactored to `cache_evict/3`. - * Decorator `update/3` was refactored to `cache_put/3`. - * Improved the `:match` option to return not only a boolean but return a - specific value to be cached `(term -> boolean | {true, term})` – If `true` - the code-block evaluation result is cached as it is (the default). If - `{true, value}` is returned, then the `value` is what is cached. - -## Hooks - -Since v2.x, pre/post hooks are deprecated and won't be longer supported by -`Nebulex`, at least not directly. Mainly, because the hooks feature is not a -common use-case and also it is something that can be be easily implemented -on top of the Cache at the application level. However, to keep backward -compatibility somehow, `Nebulex` provides decorators for implementing -pre/post hooks very easily. For that reason, it is highly recommended -to removed all pre/post hooks related code and adapt it to the new way. -See `Nebulex.Hook` for more information. - -## Built-In Adapters - -There have been several and significant improvements on the built-in adapters, -so it is also highly recommended to take a look at them; -`Nebulex.Adapters.Local`, `Nebulex.Adapters.Partitioned`, -`Nebulex.Adapters.Replicated`, and `Nebulex.Adapters.Multilevel`. - -In case of using a distributed adapter, the module `Nebulex.Adapter.HashSlot` -was refactored to `Nebulex.Adapter.Keyslot` and the callback `keyslot /2` to -`hash_slot/2`. 
- -## Statistics - -For older versions (<= 1.x), the stats were implemented via a post-hook and the -measurements were oriented for counting the number of times a cache function is -called. But what is interesting and useful to see is, for instance, the number -of writes, hits, misses, evictions, etc. Therefore, the whole stats' -functionality was refactored entirely. - - 1. This feature is not longer using pre/post hooks. Besides, pre/post hooks - are deprecated in v2.x. - 2. The stats support is optional by implementing the `Nebulex.Adapter.Stats` - behaviour from the adapter. However, Nebulex provides a default - implementation using [Erlang counters][https://erlang.org/doc/man/counters.html] - which is supported by the local built-in adapter. - See the [Telemetry guide](http://hexdocs.pm/nebulex/telemetry.html) for - more information. - 3. Since Nebulex 2.x on-wards, enabling stats is a matter of setting the - option `:stats` to `true`. See `Nebulex.Cache` for more information. - -## Mix Tasks - - * `mix nebulex.gen.cache` was refactored to `mix nbx.gen.cache`. - * `mix nebulex` was refactored to `mix nbx`. diff --git a/guides/migrating-to-v3.md b/guides/migrating-to-v3.md new file mode 100644 index 00000000..ce8847ab --- /dev/null +++ b/guides/migrating-to-v3.md @@ -0,0 +1,172 @@ +# Migrating to v3.x + +For the v3, Nebulex introduces several breaking changes, including the Cache +API itself. This guide aims to highlight most of these changes to make easier +the transition to v3. Be aware this guide won't focus on every change, just +the most significant ones that can affect how your application code interacts +with the cache. Also, it is not a detailed guide about how to translate the +current code from older versions to v3, just pointing out the areas the new +documentation should be consulted on. 
+ +## Built-In Adapters + +All previously built-in adapters (`Nebulex.Adapters.Local`, +`Nebulex.Adapters.Partitioned`, `Nebulex.Adapters.Replicated`, and +`Nebulex.Adapters.Multilevel`) have been moved to separate repositories. +Therefore, you must add the adapter dependency to the list of dependencies +in your `mix.exs` file. + +## Cache API + +The most significant change is on the [Cache API][cache_api]. Nebulex v3 has a +new API based on ok/error tuples. + +Nebulex v3 brings a new API with two flavors: + +* An ok/error tuple API for all cache functions. This new approach is preferred + when you want to handle different outcomes using pattern-matching. +* An alternative API version with trailing bang (`!`) functions. This approach + is preferred if you expect the outcome to always be successful. + +[cache_api]: https://hexdocs.pm/nebulex/Nebulex.Cache.html + +### Migrating to the new API + +There are two ways to address the API changes. The first is to review all the +cache calls in your code and handle the new ok/error tuple response. +For example, wherever you were calling: + +```elixir +:ok = MyApp.Cache.put("key", "value") + +value = MyApp.Cache.get("key") +``` + +Now you should change it to handle the ok/error response, like so: + +```elixir +case MyApp.Cache.put("key", "value") do + :ok -> + #=> your logic handling success + + {:error, reason} -> + #=> your logic handling the error +end + +case MyApp.Cache.get("key") do + {:ok, value} -> + #=> your logic handling success + + {:error, reason} -> + #=> your logic handling the error +end +``` + +The same applies to ALL the Cache API functions. For this reason, it is highly +recommended you check your code and add the proper changes based on the new +ok/error tuple API. + +The second way to address the API changes (and perhaps the easiest one) is to +replace your cache calls by using the function version with the trailing bang +(`!`). 
For example, wherever you were calling:
+
+```elixir
+:ok = MyApp.Cache.put("key", "value")
+
+value = MyApp.Cache.get("key")
+```
+
+You could change it to:
+
+```elixir
+:ok = MyApp.Cache.put!("key", "value")
+
+value = MyApp.Cache.get!("key")
+```
+
+As you may notice, the new Cache API exposes ok/error functions, as well as
+bang functions (`!`) to give more flexibility to the users; now you can decide
+whether you like to handle the errors or not.
+
+> #### DISCLAIMER {: .warning}
+> Although this fix of using bang functions (`!`) may work for most cases, there
+> may be a few where the outcome is not the same or the patch is just not
+> applicable. Hence, it is recommended to review carefully the
+> [Cache API docs][cache_api] anyway.
+
+### API changes
+
+The following are some of the changes you should be aware of:
+
+* The stats callbacks `stats/0` and `dispatch_stats/1` are deprecated.
+  See the **"Info API"** section down below for more information.
+* The callback `flush/0` is deprecated, you should use `delete_all/2`
+  instead (e.g., `MyApp.Cache.delete_all()`).
+* The callback `all/2` was merged into `get_all/2`; only the latter can be
+  used now. The new version of `get_all/2` accepts a query-spec as a first
+  argument. For example, wherever you were doing `MyApp.Cache.get_all([k1, k2, ...])`,
+  you must change it to `MyApp.Cache.get_all(in: [k1, k2, ...])`. Similarly, wherever
+  you were doing `MyApp.Cache.all(some_query)`, you must change it to
+  `MyApp.Cache.get_all(query: some_query)`.
+* Regarding the previous change, the option `:return` is deprecated. Overall,
+  for all "Query API" callbacks, it is recommended to see the documentation to be
+  aware of the new query specification and available options.
+* The previous callback `get/2` has changed the semantics a bit (aside from
+  the ok/error tuple API). Previously, it returned `nil` when the given key
+  wasn't in the cache. Now, the callback accepts an argument to specify the
+  default value when the key is not found.
+
+### Info API
+
+Since Nebulex v3, the adapter's Info API is introduced. This is a more generic
+API to get information about the cache, including the stats. Adapters are
+responsible for implementing the Info API and are also free to add the
+information specification keys they want. See
+[c:Nebulex.Cache.info/2][info_cb] and the ["Cache Info Guide"][cache_info_guide]
+for more information.
+
+[info_cb]: https://hexdocs.pm/nebulex/Nebulex.Cache.html#c:info/2
+[cache_info_guide]: https://hexdocs.pm/nebulex/cache-info.html
+
+## `nil` values
+
+Previously, Nebulex used to skip storing `nil` values in the cache. The main
+reason was the semantics behind the `nil` value, which was used to validate
+whether a key existed in the cache or not. However, this is a limitation too.
+
+Since Nebulex v3, any Elixir term can be stored in the cache (including `nil`);
+Nebulex doesn't perform any validation whatsoever. Any meaning or semantics
+behind `nil` (or any other term) is up to the user.
+
+Additionally, a new callback `fetch/2` is introduced, which is the base or
+main function for retrieving a key from the cache; in fact, the `get` callback
+is implemented using `fetch` underneath.
+
+## Caching decorators (declarative caching)
+
+Nebulex v3 introduces some changes and new features to the Declarative Caching
+API (a.k.a. caching decorators). We will highlight mostly the changes and perhaps
+a few new features. However, it is highly recommended you check the
+documentation for more information about all the new features and changes.
+
+* The `:cache` option can be set globally for all decorated functions in a
+  module when defining the caching usage via `use Nebulex.Caching`. For example:
+  `use Nebulex.Caching, cache: MyApp.Cache`. In that way, you don't need to add
+  the cache to all decorated functions, unless the cache for some of them is
+  different or when using references to an external cache.
+* The `:cache` option doesn't support MFA tuples anymore.
The possible values are + a cache module, a dynamic cache spec, or an anonymous function that receives + the decorator's context as an argument and must return the cache to use + (a cache module or a dynamic cache spec). +* The `:key_generator` option is deprecated. Instead, you can use the `:key` + option with an anonymous function that receives the decorator's context as an + argument and must return the key to use. +* The `:default_key_generator` option must be defined when using + `Nebulex.Caching`. For example: + `use Nebulex.Caching, default_key_generator: MyApp.MyKeygen`. +* The `:references` option in the `cacheable` decorator supports a reference + to a dynamic cache. + +> See the ["Caching Decorators Docs"][caching_decorators] for more info. + +[caching_decorators]: https://hexdocs.pm/nebulex/Nebulex.Caching.Decorators.html diff --git a/guides/telemetry.md b/guides/telemetry.md index 059a81b8..7ea2f91f 100644 --- a/guides/telemetry.md +++ b/guides/telemetry.md @@ -17,58 +17,10 @@ Many Elixir libraries (including Nebulex) are already using the `:telemetry` package as a way to give users more insight into the behavior of their applications, by emitting events at key moments in the application lifecycle. -### Nebulex built-in events +See ["Telemetry Events"][nbx_telemetry_events] documentation for more +information about the emitted events, their measurements, and metadata. -The following events are emitted by all Nebulex caches: - - * `[:nebulex, :cache, :init]` - it is dispatched whenever a cache starts. - - * Measurement: `%{system_time: System.monotonic_time()}` - * Metadata: `%{cache: atom, opts: [term]}` - -### Adapter-specific events - -Nebulex currently suggests the adapters to dispatch the following Telemetry -events: - - * `[:my_app, :cache, :command, :start]` - Dispatched by the underlying cache - adapter before an adapter callback is executed. 
- - * Measurement: `%{system_time: System.monotonic_time()}` - * Metadata: `%{adapter_meta: map, function_name: atom, args: [term]}` - - * `[:my_app, :cache, :command, :stop]` - Dispatched by the underlying cache - adapter after an adapter callback has been successfully executed. - - * Measurement: `%{duration: native_time}` - * Metadata: - - ```elixir - %{ - adapter_meta: map, - function_name: atom, - args: [term], - result: term - } - ``` - - * `[:my_app, :cache, :command, :exception]` - Dispatched by the underlying - cache adapter when an exception is raised while the adapter callback is - executed. - - * Measurement: `%{duration: native_time}` - * Metadata: - - ```elixir - %{ - adapter_meta: map, - function_name: atom, - args: [term], - kind: :error | :exit | :throw, - reason: term, - stacktrace: term - } - ``` +[nbx_telemetry_events]: https://hexdocs.pm/nebulex/Nebulex.Cache.html#module-telemetry-events ## Nebulex Metrics @@ -77,7 +29,7 @@ Assuming you have defined the cache `MyApp.Cache` with the default counter metric, which counts how many cache commands were completed: ```elixir -Telemetry.Metrics.counter("my_app.cache.command.stop.duration") +Telemetry.Metrics.counter("nebulex.cache.command.stop.duration") ``` or you could use a distribution metric to see how many commands were completed @@ -85,7 +37,7 @@ in particular time buckets: ```elixir Telemetry.Metrics.distribution( - "my_app.cache.command.stop.duration", + "nebulex.cache.command.stop.duration", buckets: [100, 200, 300] ) ``` @@ -97,386 +49,30 @@ or callback name? 
In this case, one could define a summary metric like so: ```elixir Telemetry.Metrics.summary( - "my_app.cache.command.stop.duration", + "nebulex.cache.command.stop.duration", unit: {:native, :millisecond}, - tags: [:function_name] + tags: [:command] ) ``` -As it is described above in the **"Adapter-specific events"** section, the event -includes the invoked callback name into the metadata as `:function_name`, then -we can add it to the metric's tags. - ### Extracting tag values from adapter's metadata -Let's add another metric for the command event, this time to group by **cache**, -**adapter**, and **function_name** (adapter's callback): +Let's add another metric for the command event, this time to group by +**command**, **cache**, and **name** (in case of dynamic caches): ```elixir Telemetry.Metrics.summary( - "my_app.cache.command.stop.duration", + "nebulex.cache.command.stop.duration", unit: {:native, :millisecond}, - tags: [:cache, :adapter, :function_name], + tags: [:command, :cache, :name], tag_values: - &Map.merge(&1, %{ + &%{ cache: &1.adapter_meta.cache, - adapter: &1.adapter_meta.cache.__adapter__() - }) + name: &1.adapter_meta.name, + command: &1.command + } ) ``` We've introduced the `:tag_values` option here, because we need to perform a transformation on the event metadata in order to get to the values we need. - -## Cache Stats - -Each adapter is responsible for providing stats by implementing -`Nebulex.Adapter.Stats` behaviour. However, Nebulex provides a simple default -implementation using [Erlang counters][erl_counters], which is used by -the built-in local adapter. The local adapter uses -`Nebulex.Telemetry.StatsHandler` to aggregate the stats and keep -them updated, therefore, it requires the Telemetry events are dispatched -by the adapter, otherwise, it won't work properly. 
- -[erl_counters]: https://erlang.org/doc/man/counters.html - -Furthermore, when the `:stats` option is enabled, we can use Telemetry for -emitting the current stat values. - -First of all, make sure you have added `:telemetry`, `:telemetry_metrics`, and -`:telemetry_poller` packages as dependencies to your `mix.exs` file. - -Let's define out cache module: - -```elixir -defmodule MyApp.Cache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Local -end -``` - -Make sure the `:stats` option is set to `true`, for example in the -configuration: - -```elixir -config :my_app, MyApp.Cache, - stats: true, - backend: :shards, - gc_interval: :timer.hours(12), - max_size: 1_000_000, - gc_cleanup_min_timeout: :timer.seconds(10), - gc_cleanup_max_timeout: :timer.minutes(10) -``` - -Create your Telemetry supervisor at `lib/my_app/telemetry.ex`: - -```elixir -# lib/my_app/telemetry.ex -defmodule MyApp.Telemetry do - use Supervisor - import Telemetry.Metrics - - def start_link(arg) do - Supervisor.start_link(__MODULE__, arg, name: __MODULE__) - end - - def init(_arg) do - children = [ - # Configure `:telemetry_poller` for reporting the cache stats - {:telemetry_poller, measurements: periodic_measurements(), period: 10_000}, - - # For example, we use the console reporter, but you can change it. - # See `:telemetry_metrics` for for information. 
- {Telemetry.Metrics.ConsoleReporter, metrics: metrics()} - ] - - Supervisor.init(children, strategy: :one_for_one) - end - - defp metrics do - [ - # Nebulex Stats Metrics - last_value("my_app.cache.stats.hits", tags: [:cache]), - last_value("my_app.cache.stats.misses", tags: [:cache]), - last_value("my_app.cache.stats.writes", tags: [:cache]), - last_value("my_app.cache.stats.updates", tags: [:cache]), - last_value("my_app.cache.stats.evictions", tags: [:cache]), - last_value("my_app.cache.stats.expirations", tags: [:cache]) - ] - end - - defp periodic_measurements do - [ - {MyApp.Cache, :dispatch_stats, []} - ] - end -end -``` - -Then add to your main application's supervision tree -(usually in `lib/my_app/application.ex`): - -```elixir -children = [ - MyApp.Cache, - MyApp.Telemetry, - ... -] -``` - -Now start an IEx session and call the server: - -``` -iex(1)> MyApp.Cache.get 1 -nil -iex(2)> MyApp.Cache.put 1, 1, ttl: 10 -:ok -iex(3)> MyApp.Cache.get 1 -1 -iex(4)> MyApp.Cache.put 2, 2 -:ok -iex(5)> MyApp.Cache.delete 2 -:ok -iex(6)> Process.sleep(20) -:ok -iex(7)> MyApp.Cache.get 1 -nil -iex(2)> MyApp.Cache.replace 1, 11 -true -``` - -and you should see something like the following output: - -``` -[Telemetry.Metrics.ConsoleReporter] Got new event! 
-Event name: my_app.cache.stats -All measurements: %{evictions: 2, expirations: 1, hits: 1, misses: 2, updates: 1, writes: 2} -All metadata: %{cache: MyApp.Cache} - -Metric measurement: :hits (last_value) -With value: 1 -Tag values: %{cache: MyApp.Cache} - -Metric measurement: :misses (last_value) -With value: 2 -Tag values: %{cache: MyApp.Cache} - -Metric measurement: :writes (last_value) -With value: 2 -Tag values: %{cache: MyApp.Cache} - -Metric measurement: :updates (last_value) -With value: 1 -Tag values: %{cache: MyApp.Cache} - -Metric measurement: :evictions (last_value) -With value: 2 -Tag values: %{cache: MyApp.Cache} - -Metric measurement: :expirations (last_value) -With value: 1 -Tag values: %{cache: MyApp.Cache} -``` - -### Custom metrics - -In the same way, for instance, you can add another periodic measurement for -reporting the cache size: - -```elixir -defmodule MyApp.Cache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Local - - def dispatch_cache_size do - :telemetry.execute( - [:my_app, :cache, :size], - %{value: size()}, - %{cache: __MODULE__, node: node()} - ) - end -end -``` - -Now let's add a new periodic measurement to invoke `dispatch_cache_size()` -through `:telemetry_poller`: - -```elixir -defp periodic_measurements do - [ - {MyApp.Cache, :dispatch_stats, [[metadata: %{node: node()}]]}, - {MyApp.Cache, :dispatch_cache_size, []} - ] -end -``` - -> Notice the node name was added to the metadata so we can use it in the - metric's tags. 
- -Metrics: - -```elixir -defp metrics do - [ - # Nebulex Stats Metrics - last_value("my_app.cache.stats.hits", tags: [:cache, :node]), - last_value("my_app.cache.stats.misses", tags: [:cache, :node]), - last_value("my_app.cache.stats.writes", tags: [:cache, :node]), - last_value("my_app.cache.stats.updates", tags: [:cache, :node]), - last_value("my_app.cache.stats.evictions", tags: [:cache, :node]), - last_value("my_app.cache.stats.expirations", tags: [:cache, :node]), - - # Nebulex custom Metrics - last_value("my_app.cache.size.value", tags: [:cache, :node]) - ] -end -``` - -If you start an IEx session like previously, you should see the new metric too: - -``` -[Telemetry.Metrics.ConsoleReporter] Got new event! -Event name: my_app.cache.stats -All measurements: %{evictions: 0, expirations: 0, hits: 0, misses: 0, updates: 0, writes: 0} -All metadata: %{cache: MyApp.Cache, node: :nonode@nohost} - -Metric measurement: :hits (last_value) -With value: 0 -Tag values: %{cache: MyApp.Cache, node: :nonode@nohost} - -Metric measurement: :misses (last_value) -With value: 0 -Tag values: %{cache: MyApp.Cache, node: :nonode@nohost} - -Metric measurement: :writes (last_value) -With value: 0 -Tag values: %{cache: MyApp.Cache, node: :nonode@nohost} - -Metric measurement: :updates (last_value) -With value: 0 -Tag values: %{cache: MyApp.Cache, node: :nonode@nohost} - -Metric measurement: :evictions (last_value) -With value: 0 -Tag values: %{cache: MyApp.Cache, node: :nonode@nohost} - -Metric measurement: :expirations (last_value) -With value: 0 -Tag values: %{cache: MyApp.Cache, node: :nonode@nohost} - -[Telemetry.Metrics.ConsoleReporter] Got new event! 
-Event name: my_app.cache.size -All measurements: %{value: 0} -All metadata: %{cache: MyApp.Cache, node: :nonode@nohost} - -Metric measurement: :value (last_value) -With value: 0 -Tag values: %{cache: MyApp.Cache, node: :nonode@nohost} -``` - -## Multi-level Cache Stats - -When using the multi-level adapter the returned stats measurements may look -a bit different, because the multi-level adapter works as a wrapper for the -configured cache levels, so the returned measurements are grouped and -consolidated by level. For example, suppose you have the cache: - -```elixir -defmodule MyApp.Multilevel do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Multilevel - - defmodule L1 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule L2 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - end -end -``` - -Then, when you run `MyApp.Multilevel.stats()` you get something like: - -```elixir -%Nebulex.Stats{ - measurements: %{ - l1: %{evictions: 0, expirations: 0, hits: 0, misses: 0, updates: 0, writes: 0}, - l2: %{evictions: 0, expirations: 0, hits: 0, misses: 0, updates: 0, writes: 0} - }, - metadata: %{ - l1: %{ - cache: NMyApp.Multilevel.L1, - started_at: ~U[2021-01-10 13:06:04.075084Z] - }, - l2: %{ - cache: MyApp.Multilevel.L2.Primary, - started_at: ~U[2021-01-10 13:06:04.089888Z] - }, - cache: MyApp.Multilevel, - started_at: ~U[2021-01-10 13:06:04.066750Z] - } -} -``` - -As you can see, the measurements map has the stats grouped by level, every key -is an atom specifying the level and the value is a map with the stats and/or -measurements for that level. 
Based on that, you could define the Telemetry -metrics in this way: - -```elixir -[ - # L1 metrics - last_value("my_app.cache.stats.l1.hits", - event_name: "my_app.cache.stats", - measurement: &get_in(&1, [:l1, :hits]), - tags: [:cache] - ), - last_value("my_app.cache.stats.l1.misses", - event_name: "my_app.cache.stats", - measurement: &get_in(&1, [:l1, :misses]), - tags: [:cache] - ), - last_value("my_app.cache.stats.l1.writes", - event_name: "my_app.cache.stats", - measurement: &get_in(&1, [:l1, :writes]), - tags: [:cache] - ), - last_value("my_app.cache.stats.l1.updates", - event_name: "my_app.cache.stats", - measurement: &get_in(&1, [:l1, :updates]), - tags: [:cache] - ), - last_value("my_app.cache.stats.l1.evictions", - event_name: "my_app.cache.stats", - measurement: &get_in(&1, [:l1, :evictions]), - tags: [:cache] - ), - last_value("my_app.cache.stats.l1.expirations", - event_name: "my_app.cache.stats", - measurement: &get_in(&1, [:l1, :expirations]), - tags: [:cache] - ), - - # L2 metrics - last_value("my_app.cache.stats.l2.hits", - event_name: "my_app.cache.stats", - measurement: &get_in(&1, [:l2, :hits]), - tags: [:cache] - ), - ... -] -``` - -If what you need is the aggregated stats for all levels, you can always define -your own function to emit the Telemetry events. You just need to call -`MyApp.Multilevel.stats()` and then you add the logic to process the results -in the way you need. On the other hand, if you are using Datadog through the -StatsD reporter, you could do the aggregation directly in Datadog. 
diff --git a/lib/mix/tasks/nbx.ex b/lib/mix/tasks/nbx.ex index 23424e50..788b700b 100644 --- a/lib/mix/tasks/nbx.ex +++ b/lib/mix/tasks/nbx.ex @@ -24,7 +24,8 @@ defmodule Mix.Tasks.Nbx do defp general do _ = Application.ensure_all_started(:nebulex) - Mix.shell().info("Nebulex v#{Application.spec(:nebulex, :vsn)}") + + Mix.shell().info("Nebulex v#{Nebulex.vsn()}") Mix.shell().info("In-Process and Distributed Cache Toolkit for Elixir.") Mix.shell().info( diff --git a/lib/mix/tasks/nbx.gen.cache.ex b/lib/mix/tasks/nbx.gen.cache.ex index 54e31c1f..9c92b8a5 100644 --- a/lib/mix/tasks/nbx.gen.cache.ex +++ b/lib/mix/tasks/nbx.gen.cache.ex @@ -83,6 +83,8 @@ defmodule Mix.Tasks.Nbx.Gen.Cache do {#{inspect(cache)}, []} + And for more information about configuration options, check + adapters documentation and Nebulex.Cache shared options. """) end diff --git a/lib/nebulex.ex b/lib/nebulex.ex index 540e5156..35becd92 100644 --- a/lib/nebulex.ex +++ b/lib/nebulex.ex @@ -1,23 +1,33 @@ defmodule Nebulex do @moduledoc ~S""" - Nebulex is split into 2 main components: - - * `Nebulex.Cache` - caches are wrappers around the in-memory data store. - Via the cache, we can put, get, update, delete and query existing entries. - A cache needs an adapter to communicate to the in-memory data store. - - * `Nebulex.Caching` - Declarative annotation-based caching via - **`Nebulex.Caching.Decorators`**. Decorators provide n elegant way of - annotating functions to be cached or evicted. Caching decorators also - enable the usage and/or implementation of cache usage patterns like - **Read-through**, **Write-through**, **Cache-as-SoR**, etc. - See [Cache Usage Patters Guide](http://hexdocs.pm/nebulex/cache-usage-patterns.html). - - In the following sections, we will provide an overview of those components and - how they interact with each other. Feel free to access their respective module - documentation for more specific examples, options and configuration. 
- - If you want to quickly check a sample application using Nebulex, please check + Nebulex is split into two main components: + + * `Nebulex.Cache` - Defines a standard Cache API for caching data. + This API implementation is intended to create a way for different + technologies to provide a common caching interface. It defines the + mechanism for creating, accessing, updating, and removing information + from a cache. This common interface makes it easier for software + developers to leverage various technologies as caches since the + software they write using the Nebulex Cache API does not need + to be rewritten to work with different underlying technologies. + + * `Nebulex.Caching` - Defines a Cache Abstraction for transparently adding + caching into an existing Elixir application. The caching abstraction + allows consistent use of various caching solutions with minimal impact + on the code. This Cache Abstraction enables declarative decorator-based + caching via **`Nebulex.Caching.Decorators`**. Decorators provide an + elegant way of annotating functions to be cached or evicted. Caching + decorators also enable the adoption or implementation of cache usage + patterns such as **Read-through**, **Write-through**, **Cache-as-SoR**, + etc. See the [Cache Usage Patterns][cache-patterns] guide. + + [cache-patterns]: http://hexdocs.pm/nebulex/cache-usage-patterns.html + + The following sections will provide an overview of those components and their + usage. Feel free to access their respective module documentation for more + specific examples, options, and configurations. + + If you want to check a sample application using Nebulex quickly, please check the [getting started guide](http://hexdocs.pm/nebulex/getting-started.html). 
## Caches @@ -35,7 +45,7 @@ defmodule Nebulex do environment, usually defined in your `config/config.exs`: config :my_app, MyApp.MyCache, - gc_interval: 3_600_000, #=> 1 hr + gc_interval: :timer.hours(1), backend: :shards, partitions: 2 @@ -61,8 +71,19 @@ defmodule Nebulex do Otherwise, you can start and stop the cache directly at any time by calling `MyApp.Cache.start_link/1` and `MyApp.Cache.stop/1`. - ## Declarative annotation-based caching + ## Declarative decorator-based caching See [Nebulex.Caching](http://hexdocs.pm/nebulex/Nebulex.Caching.html). """ + + ## API + + @doc """ + Returns the current Nebulex version. + """ + @spec vsn() :: binary() + def vsn do + Application.spec(:nebulex, :vsn) + |> to_string() + end end diff --git a/lib/nebulex/adapter.ex b/lib/nebulex/adapter.ex index 8be20bc4..9bb724b7 100644 --- a/lib/nebulex/adapter.ex +++ b/lib/nebulex/adapter.ex @@ -3,25 +3,27 @@ defmodule Nebulex.Adapter do Specifies the minimal API required from adapters. """ + alias Nebulex.Cache.Options alias Nebulex.Telemetry @typedoc "Adapter" @type t :: module - @typedoc "Metadata type" - @type metadata :: %{optional(atom) => term} - @typedoc """ The metadata returned by the adapter `c:init/1`. - It must be a map and Nebulex itself will always inject two keys into - the meta: + It must be a map and Nebulex itself will always inject + the following keys into the meta: * `:cache` - The cache module. - * `:pid` - The PID returned by the child spec returned in `c:init/1` + * `:name` - The name of the cache supervisor process. + * `:pid` - The PID returned by the child spec returned in `c:init/1`. + * `:adapter` - The defined cache adapter. """ - @type adapter_meta :: metadata + @type adapter_meta() :: %{optional(term) => term} + + ## Callbacks @doc """ The callback invoked in case the adapter needs to inject code. 
@@ -29,9 +31,31 @@ defmodule Nebulex.Adapter do @macrocallback __before_compile__(env :: Macro.Env.t()) :: Macro.t() @doc """ - Initializes the adapter supervision tree by returning the children. + Initializes the adapter supervision tree by returning the children + and adapter metadata. """ - @callback init(config :: Keyword.t()) :: {:ok, :supervisor.child_spec(), adapter_meta} + @callback init(config :: keyword()) :: {:ok, :supervisor.child_spec(), adapter_meta()} + + # Define optional callbacks + @optional_callbacks __before_compile__: 1 + + ## API + + # Inline common instructions + @compile {:inline, lookup_meta: 1} + + @doc """ + Returns the adapter metadata from its `c:init/1` callback. + + It expects a process name of the cache. The name is either + an atom or a PID. For a given cache, you often want to call + this function based on the dynamic cache: + + Nebulex.Adapter.lookup_meta(cache.get_dynamic_cache()) + + """ + @spec lookup_meta(atom() | pid()) :: {:ok, adapter_meta()} | {:error, Nebulex.Error.t()} + defdelegate lookup_meta(name_or_pid), to: Nebulex.Cache.Registry, as: :lookup @doc """ Executes the function `fun` passing as parameters the adapter and metadata @@ -39,81 +63,116 @@ defmodule Nebulex.Adapter do It expects a name or a PID representing the cache. """ - @spec with_meta(atom | pid, (module, adapter_meta -> term)) :: term + @spec with_meta(atom() | pid(), (adapter_meta() -> any())) :: any() | {:error, Nebulex.Error.t()} def with_meta(name_or_pid, fun) do - {adapter, adapter_meta} = Nebulex.Cache.Registry.lookup(name_or_pid) - fun.(adapter, adapter_meta) + with {:ok, adapter_meta} <- lookup_meta(name_or_pid) do + fun.(adapter_meta) + end end - # FIXME: ExCoveralls does not mark most of this section as covered - # coveralls-ignore-start + ## Helpers @doc """ - Helper macro for the adapters so they can add the logic for emitting the - recommended Telemetry events. + Builds up a public wrapper function for invoking an adapter command. 
- See the built-in adapters for more information on how to use this macro. + **NOTE:** Internal purposes only. """ - defmacro defspan(fun, opts \\ [], do: block) do - {name, [adapter_meta | args_tl], as, [_ | as_args_tl] = as_args} = build_defspan(fun, opts) - - quote do - def unquote(name)(unquote_splicing(as_args)) + defmacro defcommand(fun, opts \\ []) do + build_defcommand(:public, fun, opts) + end - def unquote(name)(%{telemetry: false} = unquote(adapter_meta), unquote_splicing(args_tl)) do - unquote(block) - end + @doc """ + Builds up a private wrapper function for invoking an adapter command. - def unquote(name)(unquote_splicing(as_args)) do - metadata = %{ - adapter_meta: unquote(adapter_meta), - function_name: unquote(as), - args: unquote(as_args_tl) - } - - Telemetry.span( - unquote(adapter_meta).telemetry_prefix ++ [:command], - metadata, - fn -> - result = - unquote(name)( - Map.merge(unquote(adapter_meta), %{telemetry: false, in_span?: true}), - unquote_splicing(as_args_tl) - ) - - {result, Map.put(metadata, :result, result)} - end - ) - end - end + **NOTE:** Internal purposes only. 
+ """ + defmacro defcommandp(fun, opts \\ []) do + build_defcommand(:private, fun, opts) end - ## Private Functions + defp build_defcommand(public_or_private, fun, opts) do + # Decompose the function call + {function_name, [name_or_pid | args_tl] = args} = Macro.decompose_call(fun) - defp build_defspan(fun, opts) when is_list(opts) do - {name, args} = - case Macro.decompose_call(fun) do - {_, _} = pair -> pair - _ -> raise ArgumentError, "invalid syntax in defspan #{Macro.to_string(fun)}" - end + # Get options + command = Keyword.get(opts, :command, function_name) + l_args = Keyword.get(opts, :largs, []) + r_args = Keyword.get(opts, :rargs, []) - as = Keyword.get(opts, :as, name) - as_args = build_as_args(args) + # Build command args + command_args = l_args ++ args_tl ++ r_args - {name, args, as, as_args} + # Build the function + case public_or_private do + :public -> + quote do + def unquote(function_name)(unquote_splicing(args)) do + unquote(command_call(name_or_pid, command, command_args)) + end + end + + :private -> + quote do + defp unquote(function_name)(unquote_splicing(args)) do + unquote(command_call(name_or_pid, command, command_args)) + end + end + end end - defp build_as_args(args) do - for {arg, idx} <- Enum.with_index(args) do - arg - |> Macro.to_string() - |> build_as_arg({arg, idx}) + defp command_call(name_or_pid, command, args) do + quote do + with {:ok, adapter_meta} <- unquote(__MODULE__).lookup_meta(unquote(name_or_pid)) do + unquote(__MODULE__).run_command(adapter_meta, unquote(command), unquote(args)) + end end end - # sobelow_skip ["DOS.BinToAtom"] - defp build_as_arg("_" <> _, {{_e1, e2, e3}, idx}), do: {:"var#{idx}", e2, e3} - defp build_as_arg(_, {arg, _idx}), do: arg + @doc """ + Convenience function for invoking the adapter running a command. + + **NOTE:** Internal purposes only. 
+ """ + @spec run_command(adapter_meta(), atom(), [any()]) :: any() + def run_command(adapter_meta, command, args) + + def run_command( + %{ + telemetry: true, + telemetry_prefix: telemetry_prefix, + adapter: adapter + } = adapter_meta, + command, + args + ) do + opts = + args + # TODO: Replace with `List.last/2` when required Elixir version is >= 1.12 + |> List.last() + |> Kernel.||([]) + |> Keyword.take([:telemetry_event, :telemetry_metadata]) + |> Options.validate_runtime_shared_opts!() + + metadata = %{ + adapter_meta: adapter_meta, + command: command, + args: args, + extra_metadata: Keyword.fetch!(opts, :telemetry_metadata) + } + + opts + |> Keyword.get(:telemetry_event, telemetry_prefix ++ [:command]) + |> Telemetry.span( + metadata, + fn -> + result = apply(adapter, command, [adapter_meta | args]) + + {result, Map.put(metadata, :result, result)} + end + ) + end - # coveralls-ignore-stop + def run_command(%{adapter: adapter} = adapter_meta, command, args) do + apply(adapter, command, [adapter_meta | args]) + end end diff --git a/lib/nebulex/adapter/entry.ex b/lib/nebulex/adapter/entry.ex deleted file mode 100644 index ccf6efff..00000000 --- a/lib/nebulex/adapter/entry.ex +++ /dev/null @@ -1,165 +0,0 @@ -defmodule Nebulex.Adapter.Entry do - @moduledoc """ - Specifies the entry API required from adapters. - - This behaviour specifies all read/write key-based functions, - the ones applied to a specific cache entry. 
- """ - - @typedoc "Proxy type to the adapter meta" - @type adapter_meta :: Nebulex.Adapter.adapter_meta() - - @typedoc "Proxy type to the cache key" - @type key :: Nebulex.Cache.key() - - @typedoc "Proxy type to the cache value" - @type value :: Nebulex.Cache.value() - - @typedoc "Proxy type to the cache options" - @type opts :: Nebulex.Cache.opts() - - @typedoc "Proxy type to the cache entries" - @type entries :: Nebulex.Cache.entries() - - @typedoc "TTL for a cache entry" - @type ttl :: timeout - - @typedoc "Write command" - @type on_write :: :put | :put_new | :replace - - @doc """ - Gets the value for a specific `key` in `cache`. - - See `c:Nebulex.Cache.get/2`. - """ - @callback get(adapter_meta, key, opts) :: value - - @doc """ - Gets a collection of entries from the Cache, returning them as `Map.t()` of - the values associated with the set of keys requested. - - For every key that does not hold a value or does not exist, that key is - simply ignored. Because of this, the operation never fails. - - See `c:Nebulex.Cache.get_all/2`. - """ - @callback get_all(adapter_meta, [key], opts) :: map - - @doc """ - Puts the given `value` under `key` into the `cache`. - - Returns `true` if the `value` with key `key` is successfully inserted; - otherwise `false` is returned. - - The `ttl` argument sets the time-to-live for the stored entry. If it is not - set, it means the entry hasn't a time-to-live, then it shouldn't expire. - - ## OnWrite - - The `on_write` argument supports the following values: - - * `:put` - If the `key` already exists, it is overwritten. Any previous - time-to-live associated with the key is discarded on successful `write` - operation. - - * `:put_new` - It only stores the entry if the `key` does not already exist, - otherwise, `false` is returned. - - * `:replace` - Alters the value stored under the given `key`, but only - if the key already exists into the cache, otherwise, `false` is - returned. 
- - See `c:Nebulex.Cache.put/3`, `c:Nebulex.Cache.put_new/3`, - `c:Nebulex.Cache.replace/3`. - """ - @callback put(adapter_meta, key, value, ttl, on_write, opts) :: boolean - - @doc """ - Puts the given `entries` (key/value pairs) into the `cache`. - - Returns `true` if all the keys were inserted. If no key was inserted - (at least one key already existed), `false` is returned. - - The `ttl` argument sets the time-to-live for the stored entry. If it is not - set, it means the entry hasn't a time-to-live, then it shouldn't expire. - The given `ttl` is applied to all keys. - - ## OnWrite - - The `on_write` argument supports the following values: - - * `:put` - If the `key` already exists, it is overwritten. Any previous - time-to-live associated with the key is discarded on successful `write` - operation. - - * `:put_new` - It only stores the entry if the `key` does not already exist, - otherwise, `false` is returned. - - Ideally, this operation should be atomic, so all given keys are set at once. - But it depends purely on the adapter's implementation and the backend used - internally by the adapter. Hence, it is recommended to checkout the - adapter's documentation. - - See `c:Nebulex.Cache.put_all/2`. - """ - @callback put_all(adapter_meta, entries, ttl, on_write, opts) :: boolean - - @doc """ - Deletes a single entry from cache. - - See `c:Nebulex.Cache.delete/2`. - """ - @callback delete(adapter_meta, key, opts) :: :ok - - @doc """ - Returns and removes the entry with key `key` in the cache. - - See `c:Nebulex.Cache.take/2`. - """ - @callback take(adapter_meta, key, opts) :: value - - @doc """ - Updates the counter mapped to the given `key`. - - If `amount` > 0, the counter is incremented by the given `amount`. - If `amount` < 0, the counter is decremented by the given `amount`. - If `amount` == 0, the counter is not updated. - - See `c:Nebulex.Cache.incr/3`. - See `c:Nebulex.Cache.decr/3`. 
- """ - @callback update_counter(adapter_meta, key, amount, ttl, default, opts) :: - integer - when amount: integer, default: integer - - @doc """ - Returns whether the given `key` exists in cache. - - See `c:Nebulex.Cache.has_key?/1`. - """ - @callback has_key?(adapter_meta, key) :: boolean - - @doc """ - Returns the TTL (time-to-live) for the given `key`. If the `key` does not - exist, then `nil` is returned. - - See `c:Nebulex.Cache.ttl/1`. - """ - @callback ttl(adapter_meta, key) :: ttl | nil - - @doc """ - Returns `true` if the given `key` exists and the new `ttl` was successfully - updated, otherwise, `false` is returned. - - See `c:Nebulex.Cache.expire/2`. - """ - @callback expire(adapter_meta, key, ttl) :: boolean - - @doc """ - Returns `true` if the given `key` exists and the last access time was - successfully updated, otherwise, `false` is returned. - - See `c:Nebulex.Cache.touch/1`. - """ - @callback touch(adapter_meta, key) :: boolean -end diff --git a/lib/nebulex/adapter/info.ex b/lib/nebulex/adapter/info.ex new file mode 100644 index 00000000..361b6de8 --- /dev/null +++ b/lib/nebulex/adapter/info.ex @@ -0,0 +1,38 @@ +defmodule Nebulex.Adapter.Info do + @moduledoc """ + Specifies the adapter Info API. + + See `Nebulex.Adapters.Common.Info`. + """ + + @doc """ + Returns `{:ok, info}` where `info` contains the requested cache information, + as specified by the `spec`. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + The `spec` (information specification key) can be: + + * **The atom `:all`**: returns a map with all information items. + * **An atom**: returns the value for the requested information item. + * **A list of atoms**: returns a map only with the requested information + items. 
+ + The adapters are free to add the information specification keys they want, + however, Nebulex suggests the adapters add the following specs: + + * `:server` - General information about the cache server (e.g., cache name, + adapter, PID, etc.). + * `:memory` - Memory consumption information (e.g., used memory, + allocated memory, etc.). + * `:stats` - Cache statistics (e.g., hits, misses, etc.). + + See `c:Nebulex.Cache.info/2`. + """ + @callback info( + Nebulex.Adapter.adapter_meta(), + Nebulex.Cache.info_spec(), + Nebulex.Cache.opts() + ) :: Nebulex.Cache.ok_error_tuple(Nebulex.Cache.info_data()) +end diff --git a/lib/nebulex/adapter/keyslot.ex b/lib/nebulex/adapter/keyslot.ex deleted file mode 100644 index 58d94930..00000000 --- a/lib/nebulex/adapter/keyslot.ex +++ /dev/null @@ -1,51 +0,0 @@ -defmodule Nebulex.Adapter.Keyslot do - @moduledoc """ - This behaviour provides a callback to compute the hash slot for a specific - key based on the number of slots (partitions, nodes, ...). - - The purpose of this module is to allow users to implement a custom - hash-slot function to distribute the keys. It can be used to select - the node/slot where a specific key is supposed to be. - - > It is highly recommended to use a **Consistent Hashing** algorithm. - - ## Example - - defmodule MyApp.Keyslot do - use Nebulex.Adapter.Keyslot - - @impl true - def hash_slot(key, range) do - key - |> :erlang.phash2() - |> :jchash.compute(range) - end - end - - This example uses [Jumping Consistent Hash](https://github.com/cabol/jchash). - """ - - @doc """ - Returns an integer within the range `0..range-1` identifying the hash slot - the specified `key` hashes to. 
- - ## Example - - iex> MyKeyslot.hash_slot("mykey", 10) - 2 - - """ - @callback hash_slot(key :: any, range :: pos_integer) :: non_neg_integer - - @doc false - defmacro __using__(_opts) do - quote do - @behaviour Nebulex.Adapter.Keyslot - - @impl true - defdelegate hash_slot(key, range), to: :erlang, as: :phash2 - - defoverridable hash_slot: 2 - end - end -end diff --git a/lib/nebulex/adapter/kv.ex b/lib/nebulex/adapter/kv.ex new file mode 100644 index 00000000..b46b4d87 --- /dev/null +++ b/lib/nebulex/adapter/kv.ex @@ -0,0 +1,197 @@ +defmodule Nebulex.Adapter.KV do + @moduledoc """ + Specifies the adapter Key/Value API. + + This behaviour specifies all read/write key-based functions, + applied to a specific cache key. + """ + + @typedoc "Proxy type to the adapter meta" + @type adapter_meta() :: Nebulex.Adapter.adapter_meta() + + @typedoc "Proxy type to the cache key" + @type key() :: Nebulex.Cache.key() + + @typedoc "Proxy type to the cache value" + @type value() :: Nebulex.Cache.value() + + @typedoc "Proxy type to the cache options" + @type opts() :: Nebulex.Cache.opts() + + @typedoc "Proxy type to the cache entries" + @type entries() :: Nebulex.Cache.entries() + + @typedoc "TTL for a cache entry" + @type ttl() :: timeout() + + @typedoc "Write command type" + @type on_write() :: :put | :put_new | :replace + + @doc """ + Fetches the value for a specific `key` in the cache. + + If the cache contains the given `key`, then its value is returned + in the shape of `{:ok, value}`. + + If there's an error with executing the command, `{:error, reason}` + is returned. `reason` is the cause of the error and can be + `Nebulex.KeyError` if the cache does not contain `key`, + `Nebulex.Error` otherwise. + + See `c:Nebulex.Cache.fetch/2`. + """ + @callback fetch(adapter_meta(), key(), opts()) :: + Nebulex.Cache.ok_error_tuple(value, Nebulex.Cache.fetch_error_reason()) + + @doc """ + Puts the given `value` under `key` into the `cache`. 
+
+  Returns `{:ok, true}` if the `value` with key `key` is successfully inserted.
+  Otherwise, `{:ok, false}` is returned.
+
+  If there's an error with executing the command, `{:error, reason}`
+  is returned, where `reason` is the cause of the error.
+
+  The `ttl` argument defines the time-to-live for the stored entry. If it is not
+  set, it means the entry doesn't have a time-to-live, so it shouldn't expire.
+
+  ## `on_write` argument
+
+  The `on_write` argument supports the following values:
+
+    * `:put` - If the `key` already exists, it is overwritten. Any previous
+      time-to-live associated with the key is discarded on a successful `write`
+      operation.
+
+    * `:put_new` - It only stores the entry if the `key` does not exist.
+      Otherwise, `{:ok, false}` is returned.
+
+    * `:replace` - Alters the value stored under the given `key`, but only
+      if the key already exists in the cache. Otherwise, `{:ok, false}` is
+      returned.
+
+  See `c:Nebulex.Cache.put/3`, `c:Nebulex.Cache.put_new/3`,
+  `c:Nebulex.Cache.replace/3`.
+  """
+  @callback put(adapter_meta(), key(), value(), ttl(), on_write(), opts()) ::
+              Nebulex.Cache.ok_error_tuple(boolean())
+
+  @doc """
+  Puts the given `entries` (key/value pairs) into the `cache` atomically
+  or fails otherwise.
+
+  If there's an error with executing the command, `{:error, reason}`
+  is returned, where `reason` is the cause of the error.
+
+  The `ttl` argument works the same as `c:put/6` but applies to all keys.
+
+  ## `on_write` argument
+
+  The `on_write` argument supports the following values:
+
+    * `:put` - If the `key` already exists, it is overwritten. Any previous
+      time-to-live associated with the key is discarded on a successful `write`
+      operation.
+
+    * `:put_new` - Insert all entries only if none exist in the cache, and it
+      returns `{:ok, true}`. Otherwise, `{:ok, false}` is returned.
+
+  See `c:Nebulex.Cache.put_all/2` and `c:Nebulex.Cache.put_new_all/2`.
+ """ + @callback put_all(adapter_meta(), entries(), ttl(), on_write(), opts()) :: + Nebulex.Cache.ok_error_tuple(boolean()) + + @doc """ + Deletes a single entry from cache. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + See `c:Nebulex.Cache.delete/2`. + """ + @callback delete(adapter_meta(), key(), opts()) :: :ok | Nebulex.Cache.error_tuple() + + @doc """ + Removes and returns the value associated with `key` in the cache. + + If `key` is present in the cache, its value is removed and returned as + `{:ok, value}`. + + If there's an error with executing the command, `{:error, reason}` + is returned. `reason` is the cause of the error and can be + `Nebulex.KeyError` if the cache does not contain `key` or + `Nebulex.Error` otherwise. + + See `c:Nebulex.Cache.take/2`. + """ + @callback take(adapter_meta(), key(), opts()) :: + Nebulex.Cache.ok_error_tuple(value(), Nebulex.Cache.fetch_error_reason()) + + @doc """ + Updates the counter mapped to the given `key`. + + If `amount` > 0, the counter is incremented by the given `amount`. + If `amount` < 0, the counter is decremented by the given `amount`. + If `amount` == 0, the counter is not updated. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + See `c:Nebulex.Cache.incr/3`. + See `c:Nebulex.Cache.decr/3`. + """ + @callback update_counter(adapter_meta(), key(), amount, ttl(), default, opts()) :: + Nebulex.Cache.ok_error_tuple(integer()) + when amount: integer(), default: integer() + + @doc """ + Determines if the cache contains an entry for the specified `key`. + + More formally, it returns `{:ok, true}` if the cache contains the given `key`. + If the cache doesn't contain `key`, `{:ok, false}` is returned. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. 
+
+  See `c:Nebulex.Cache.has_key?/2`.
+  """
+  @callback has_key?(adapter_meta(), key(), opts()) :: Nebulex.Cache.ok_error_tuple(boolean())
+
+  @doc """
+  Returns the remaining time-to-live for the given `key`.
+
+  If `key` is present in the cache, then its remaining TTL is returned
+  in the shape of `{:ok, ttl}`.
+
+  If there's an error with executing the command, `{:error, reason}`
+  is returned. `reason` is the cause of the error and can be
+  `Nebulex.KeyError` if the cache does not contain `key`,
+  `Nebulex.Error` otherwise.
+
+  See `c:Nebulex.Cache.ttl/2`.
+  """
+  @callback ttl(adapter_meta(), key(), opts()) ::
+              Nebulex.Cache.ok_error_tuple(ttl(), Nebulex.Cache.fetch_error_reason())
+
+  @doc """
+  Returns `{:ok, true}` if the given `key` exists and the new `ttl` is
+  successfully updated; otherwise, `{:ok, false}` is returned.
+
+  If there's an error with executing the command, `{:error, reason}`
+  is returned, where `reason` is the cause of the error.
+
+  See `c:Nebulex.Cache.expire/3`.
+  """
+  @callback expire(adapter_meta(), key(), ttl(), opts()) :: Nebulex.Cache.ok_error_tuple(boolean())
+
+  @doc """
+  Returns `{:ok, true}` if the given `key` exists and the last access time is
+  successfully updated; otherwise, `{:ok, false}` is returned.
+
+  If there's an error with executing the command, `{:error, reason}`
+  is returned, where `reason` is the cause of the error.
+
+  See `c:Nebulex.Cache.touch/2`.
+  """
+  @callback touch(adapter_meta(), key(), opts()) :: Nebulex.Cache.ok_error_tuple(boolean())
+end
diff --git a/lib/nebulex/adapter/persistence.ex b/lib/nebulex/adapter/persistence.ex
index c2f9f5fa..bfa906f1 100644
--- a/lib/nebulex/adapter/persistence.ex
+++ b/lib/nebulex/adapter/persistence.ex
@@ -1,106 +1,25 @@
 defmodule Nebulex.Adapter.Persistence do
   @moduledoc """
-  Specifies the adapter persistence API.
-
-  ## Default implementation
-
-  This module provides a default implementation that uses `File` and `Stream`
-  under-the-hood.
For dumping a cache to a file, the entries are streamed from - the cache and written in chunks (one chunk per line), and each chunk contains - N number of entries. For loading the entries from a file, the file is read - and streamed line-by-line, so that the entries collected on each line are - inserted in streaming fashion as well. - - The default implementation accepts the following options only for `dump` - operation (there are not options for `load`): - - * `entries_per_line` - The number of entries to be written per line in the - file. Defaults to `10`. - - * `compression` - The compression level. The values are the same as - `:erlang.term_to_binary /2`. Defaults to `6`. - - See `c:Nebulex.Cache.dump/2` and `c:Nebulex.Cache.load/2` for more - information. + Specifies the adapter Persistence API. """ @doc """ Dumps a cache to the given file `path`. - Returns `:ok` if successful, or `{:error, reason}` if an error occurs. + Returns `:ok` if successful, `{:error, reason}` otherwise. See `c:Nebulex.Cache.dump/2`. """ @callback dump(Nebulex.Adapter.adapter_meta(), Path.t(), Nebulex.Cache.opts()) :: - :ok | {:error, term} + :ok | Nebulex.Cache.error_tuple() @doc """ Loads a dumped cache from the given `path`. - Returns `:ok` if successful, or `{:error, reason}` if an error occurs. + Returns `:ok` if successful, `{:error, reason}` otherwise. See `c:Nebulex.Cache.load/2`. 
""" @callback load(Nebulex.Adapter.adapter_meta(), Path.t(), Nebulex.Cache.opts()) :: - :ok | {:error, term} - - alias Nebulex.Entry - - @doc false - defmacro __using__(_opts) do - quote do - @behaviour Nebulex.Adapter.Persistence - - # sobelow_skip ["Traversal.FileModule"] - @impl true - def dump(%{cache: cache}, path, opts) do - path - |> File.open([:read, :write], fn io_dev -> - nil - |> cache.stream(return: :entry) - |> Stream.filter(&(not Entry.expired?(&1))) - |> Stream.map(&{&1.key, &1.value}) - |> Stream.chunk_every(Keyword.get(opts, :entries_per_line, 10)) - |> Enum.each(fn entries -> - bin = Entry.encode(entries, get_compression(opts)) - :ok = IO.puts(io_dev, bin) - end) - end) - |> handle_response() - end - - # sobelow_skip ["Traversal.FileModule"] - @impl true - def load(%{cache: cache}, path, opts) do - path - |> File.open([:read], fn io_dev -> - io_dev - |> IO.stream(:line) - |> Stream.map(&String.trim/1) - |> Enum.each(fn line -> - entries = Entry.decode(line, [:safe]) - cache.put_all(entries, opts) - end) - end) - |> handle_response() - end - - defoverridable dump: 3, load: 3 - - ## Helpers - - defp handle_response({:ok, _}), do: :ok - defp handle_response({:error, _} = error), do: error - - defp get_compression(opts) do - case Keyword.get(opts, :compression) do - value when is_integer(value) and value >= 0 and value < 10 -> - [compressed: value] - - _ -> - [:compressed] - end - end - end - end + :ok | Nebulex.Cache.error_tuple() end diff --git a/lib/nebulex/adapter/queryable.ex b/lib/nebulex/adapter/queryable.ex index d4a7c530..1c0c3b10 100644 --- a/lib/nebulex/adapter/queryable.ex +++ b/lib/nebulex/adapter/queryable.ex @@ -1,66 +1,110 @@ defmodule Nebulex.Adapter.Queryable do @moduledoc """ - Specifies the query API required from adapters. + Specifies the adapter Query API. 
+ """ + + @typedoc "Proxy type to the adapter meta" + @type adapter_meta() :: Nebulex.Adapter.adapter_meta() + + @typedoc "Proxy type to the cache options" + @type opts() :: Nebulex.Cache.opts() + + @typedoc """ + Query metadata fields. - ## Query values + ## Operation - There are two types of query values. The ones shared and implemented - by all adapters and the ones that are adapter specific. + The field `:op` defines the type of query operation. The possible values are: - ### Common queries + * `:get_all` - Returns a list with all entries from cache matching the given + query. + * `:count_all` - Returns the number of matched entries with the given query. + * `:delete_all` - Deletes all entries matching the given query. It returns + the number of deleted entries. + * `:stream` - Similar to `:get_all` but returns a lazy enumerable that emits + all entries from the cache matching the given query. - The following query values are shared and/or supported for all adapters: + ## Query - * `nil` - Matches all cached entries. + The field `:query` defines the entries in the cache to match. There are two + types of query values. The ones recommended by Nebulex for all adapters to + implement (shared queries) and the adapter-specific ones. + + ### Shared queries + + The following query values are the ones recommended by Nebulex + for all adapters to implement: + + * `{:in, keys}` - Query the entries associated with the set of `keys` + requested. + * `{:q, nil}` - Query all the entries in the cache. ### Adapter-specific queries - The `query` value depends entirely on the adapter implementation; it could + For any other query the value must be: + + * `{:q, query_value}` - Queries the entries in the cache matching with the + given `query_value`. + + The `query_value` depends entirely on the adapter implementation, it could any term. Therefore, it is highly recommended to see adapters' documentation - for more information about building queries. 
For example, the built-in - `Nebulex.Adapters.Local` adapter uses `:ets.match_spec()` for queries, - as well as other pre-defined ones like `:unexpired` and `:expired`. - """ + for more information about building queries. For example, the + `Nebulex.Adapters.Local` adapter supports + [**"ETS Match Spec"**](https://www.erlang.org/doc/man/ets#match_spec). - @typedoc "Proxy type to the adapter meta" - @type adapter_meta :: Nebulex.Adapter.adapter_meta() + ## Select fields to return - @typedoc "Proxy type to the cache options" - @type opts :: Nebulex.Cache.opts() + The field `:select` defines the fields of the cached entry to return. + + See [Nebulex.Cache.get_all/2 options][get_all_opts] for the possible values. + + [get_all_opts]: https://hexdocs.pm/nebulex/Nebulex.Cache.html#c:get_all/2-options + """ + @type query_meta() :: %{ + op: :get_all | :count_all | :delete_all, + query: {:in, [keys :: any()]} | {:q, query_value :: any()}, + select: {:key, :value} | :key | :value | :entry + } @doc """ - Executes the `query` according to the given `operation`. + Executes a previously prepared query. + + The `query_meta` field is a map containing some fields found in the query + options after they have been normalized. For example, the query value and + the selected entry fields to return can be found in the `query_meta`. - Raises `Nebulex.QueryError` if query is invalid. + Finally, `opts` is a keyword list of options given to the `Nebulex.Cache` + operation that triggered the adapter call. Any option is allowed, as this + is a mechanism to customize the adapter behavior per operation. - In the the adapter does not support the given `operation`, an `ArgumentError` - exception should be raised. + This callback returns: - ## Operations + * `{:ok, result}` - The query was successfully executed. The `result` + could be a list with the matched entries or its count. - * `:all` - Returns a list with all entries from cache matching the given - `query`. 
- * `:count_all` - Returns the number of matched entries with the given - `query`. - * `:delete_all` - Deletes all entries matching the given `query`. - It returns the number of deleted entries. + * `{:error, reason}` - An error occurred executing the command. + `reason` is the cause of the error. - It is used on `c:Nebulex.Cache.all/2`, `c:Nebulex.Cache.count_all/2`, + It is used on `c:Nebulex.Cache.get_all/2`, `c:Nebulex.Cache.count_all/2`, and `c:Nebulex.Cache.delete_all/2`. """ - @callback execute( - adapter_meta, - operation :: :all | :count_all | :delete_all, - query :: term, - opts - ) :: [term] | integer + @callback execute(adapter_meta(), query_meta(), opts()) :: + Nebulex.Cache.ok_error_tuple([any()] | non_neg_integer()) @doc """ - Streams the given `query`. + Streams a previously prepared query. + + See `c:execute/3` for a description of arguments. + + This callback returns: + + * `{:ok, stream}` - The query is valid, then the stream is returned. - Raises `Nebulex.QueryError` if query is invalid. + * `{:error, reason}` - An error occurred executing the command. + `reason` is the cause of the error. See `c:Nebulex.Cache.stream/2`. """ - @callback stream(adapter_meta, query :: term, opts) :: Enumerable.t() + @callback stream(adapter_meta(), query_meta(), opts()) :: + Nebulex.Cache.ok_error_tuple(Enumerable.t()) end diff --git a/lib/nebulex/adapter/stats.ex b/lib/nebulex/adapter/stats.ex deleted file mode 100644 index 0cc983c8..00000000 --- a/lib/nebulex/adapter/stats.ex +++ /dev/null @@ -1,109 +0,0 @@ -defmodule Nebulex.Adapter.Stats do - @moduledoc """ - Specifies the stats API required from adapters. - - Each adapter is responsible for providing support for stats by implementing - this behaviour. However, this module brings with a default implementation - using [Erlang counters][https://erlang.org/doc/man/counters.html], with all - callbacks overridable, which is supported by the built-in adapters. 
- - See `Nebulex.Adapters.Local` for more information about how this can be used - from the adapter, and also [Nebulex Telemetry Guide][telemetry_guide] to learn - how to use the Cache with Telemetry. - - [telemetry_guide]: http://hexdocs.pm/nebulex/telemetry.html - """ - - @doc """ - Returns `Nebulex.Stats.t()` with the current stats values. - - If the stats are disabled for the cache, then `nil` is returned. - - The adapter may also include additional custom measurements, - as well as metadata. - - See `c:Nebulex.Cache.stats/0`. - """ - @callback stats(Nebulex.Adapter.adapter_meta()) :: Nebulex.Stats.t() | nil - - @doc false - defmacro __using__(_opts) do - quote do - @behaviour Nebulex.Adapter.Stats - - @impl true - def stats(adapter_meta) do - if counter_ref = adapter_meta[:stats_counter] do - %Nebulex.Stats{ - measurements: %{ - hits: :counters.get(counter_ref, 1), - misses: :counters.get(counter_ref, 2), - writes: :counters.get(counter_ref, 3), - updates: :counters.get(counter_ref, 4), - evictions: :counters.get(counter_ref, 5), - expirations: :counters.get(counter_ref, 6) - }, - metadata: %{ - cache: adapter_meta[:name] || adapter_meta[:cache] - } - } - end - end - - defoverridable stats: 1 - end - end - - import Nebulex.Helpers - - @doc """ - Initializes the Erlang's counter to be used by the adapter. See the module - documentation for more information about the stats default implementation. - - Returns `nil` is the option `:stats` is set to `false` or it is not set at - all; the stats will be skipped. - - ## Example - - Nebulex.Adapter.Stats.init(opts) - - > **NOTE:** This function is usually called by the adapter in case it uses - the default implementation; the adapter should feed `Nebulex.Stats.t()` - counters. - - See adapters documentation for more information about stats implementation. 
- """ - @spec init(Keyword.t()) :: :counters.counters_ref() | nil - def init(opts) do - case get_boolean_option(opts, :stats, false) do - true -> :counters.new(6, [:write_concurrency]) - false -> nil - end - end - - @doc """ - Increments the `counter`'s `stat_name` by the given `incr` value. - - ## Examples - - Nebulex.Adapter.Stats.incr(stats_counter, :hits) - - Nebulex.Adapter.Stats.incr(stats_counter, :writes, 10) - - > **NOTE:** This function is usually called by the adapter in case it uses - the default implementation; the adapter should feed `Nebulex.Stats.t()` - counters. - - See adapters documentation for more information about stats implementation. - """ - @spec incr(:counters.counters_ref() | nil, atom, integer) :: :ok - def incr(counter, stat_name, incr \\ 1) - - def incr(nil, _stat, _incr), do: :ok - def incr(ref, :hits, incr), do: :counters.add(ref, 1, incr) - def incr(ref, :misses, incr), do: :counters.add(ref, 2, incr) - def incr(ref, :writes, incr), do: :counters.add(ref, 3, incr) - def incr(ref, :updates, incr), do: :counters.add(ref, 4, incr) - def incr(ref, :evictions, incr), do: :counters.add(ref, 5, incr) - def incr(ref, :expirations, incr), do: :counters.add(ref, 6, incr) -end diff --git a/lib/nebulex/adapter/transaction.ex b/lib/nebulex/adapter/transaction.ex index f17fd9e6..0e3e7f92 100644 --- a/lib/nebulex/adapter/transaction.ex +++ b/lib/nebulex/adapter/transaction.ex @@ -1,6 +1,6 @@ defmodule Nebulex.Adapter.Transaction do @moduledoc """ - Specifies the adapter transactions API. + Specifies the adapter Transaction API. ## Default implementation @@ -9,22 +9,7 @@ defmodule Nebulex.Adapter.Transaction do This implementation accepts the following options: - * `:keys` - The list of the keys that will be locked. 
Since the lock id is - generated based on the key, if this option is not set, a fixed/constant - lock id is used to perform the transaction, then all further transactions - (without this option set) are serialized and the performance is affected - significantly. For that reason it is recommended to pass the list of keys - involved in the transaction. - - * `:nodes` - The list of the nodes where the lock will be set, or on - all nodes if none are specified. - - * `:retries` - If the key has been locked by other process already, and - `:retries` is not equal to 0, the process sleeps for a while and tries - to execute the action later. When `:retries` attempts have been made, - an exception is raised. If `:retries` is `:infinity` (the default), - the function will eventually be executed (unless the lock is never - released). + #{Nebulex.Adapter.Transaction.Options.options_docs()} Let's see an example: @@ -35,66 +20,93 @@ defmodule Nebulex.Adapter.Transaction do Locking only the involved key (recommended): - MyCache.transaction [keys: [:counter]], fn -> - counter = MyCache.get(:counter) - MyCache.set(:counter, counter + 1) - end - - MyCache.transaction [keys: [:alice, :bob]], fn -> - alice = MyCache.get(:alice) - bob = MyCache.get(:bob) - MyCache.set(:alice, %{alice | balance: alice.balance + 100}) - MyCache.set(:bob, %{bob | balance: bob.balance + 100}) - end + MyCache.transaction( + fn -> + counter = MyCache.get(:counter) + MyCache.set(:counter, counter + 1) + end, + [keys: [:counter]] + ) + + MyCache.transaction( + fn -> + alice = MyCache.get(:alice) + bob = MyCache.get(:bob) + MyCache.set(:alice, %{alice | balance: alice.balance + 100}) + MyCache.set(:bob, %{bob | balance: bob.balance + 100}) + end, + [keys: [:alice, :bob]] + ) """ @doc """ Runs the given function inside a transaction. - A successful transaction returns the value returned by the function. + If an Elixir exception occurs, the exception will bubble up from the + transaction function. 
If the cache aborts the transaction, it returns + `{:error, reason}`. + + A successful transaction returns the value returned by the function wrapped + in a tuple as `{:ok, value}`. See `c:Nebulex.Cache.transaction/2`. """ - @callback transaction(Nebulex.Adapter.adapter_meta(), Nebulex.Cache.opts(), fun) :: any + @callback transaction(Nebulex.Adapter.adapter_meta(), fun(), Nebulex.Cache.opts()) :: + Nebulex.Cache.ok_error_tuple(any()) @doc """ - Returns `true` if the given process is inside a transaction. + Returns `{:ok, true}` if the current process is inside a transaction; + otherwise, `{:ok, false}` is returned. - See `c:Nebulex.Cache.in_transaction?/0`. + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + See `c:Nebulex.Cache.in_transaction?/1`. """ - @callback in_transaction?(Nebulex.Adapter.adapter_meta()) :: boolean + @callback in_transaction?(Nebulex.Adapter.adapter_meta(), Nebulex.Cache.opts()) :: + Nebulex.Cache.ok_error_tuple(boolean()) @doc false defmacro __using__(_opts) do quote do @behaviour Nebulex.Adapter.Transaction + import Nebulex.Utils, only: [wrap_ok: 1, wrap_error: 2] + + alias Nebulex.Adapter.Transaction.Options + @impl true - def transaction(%{cache: cache, pid: pid} = adapter_meta, opts, fun) do + def transaction(%{cache: cache, pid: pid} = adapter_meta, fun, opts) do + opts = Options.validate!(opts) + adapter_meta - |> in_transaction?() + |> do_in_transaction?() |> do_transaction( pid, adapter_meta[:name] || cache, - Keyword.get(opts, :keys, []), + Keyword.fetch!(opts, :keys), Keyword.get(opts, :nodes, [node()]), - Keyword.get(opts, :retries, :infinity), + Keyword.fetch!(opts, :retries), fun ) end @impl true - def in_transaction?(%{pid: pid}) do - !!Process.get({pid, self()}) + def in_transaction?(adapter_meta, _opts) do + wrap_ok do_in_transaction?(adapter_meta) end - defoverridable transaction: 3, in_transaction?: 1 + defoverridable transaction: 3, 
in_transaction?: 2 ## Helpers + defp do_in_transaction?(%{pid: pid}) do + !!Process.get({pid, self()}) + end + defp do_transaction(true, _pid, _name, _keys, _nodes, _retries, fun) do - fun.() + {:ok, fun.()} end defp do_transaction(false, pid, name, keys, nodes, retries, fun) do @@ -105,7 +117,7 @@ defmodule Nebulex.Adapter.Transaction do try do _ = Process.put({pid, self()}, %{keys: keys, nodes: nodes}) - fun.() + {:ok, fun.()} after _ = Process.delete({pid, self()}) @@ -113,7 +125,7 @@ defmodule Nebulex.Adapter.Transaction do end false -> - raise "transaction aborted" + wrap_error Nebulex.Error, reason: :transaction_aborted, nodes: nodes, cache: name end end diff --git a/lib/nebulex/adapter/transaction/options.ex b/lib/nebulex/adapter/transaction/options.ex new file mode 100644 index 00000000..97d2939d --- /dev/null +++ b/lib/nebulex/adapter/transaction/options.ex @@ -0,0 +1,62 @@ +defmodule Nebulex.Adapter.Transaction.Options do + @moduledoc false + + # Transaction options + opts_defs = [ + keys: [ + type: {:list, :any}, + required: false, + default: [], + doc: """ + The list of keys the transaction will lock. Since the lock ID is generated + based on the key, the transaction uses a fixed lock ID if the option is + not provided or is an empty list. Then, all subsequent transactions + without this option (or set to an empty list) are serialized, and + performance is significantly affected. For that reason, it is recommended + to pass the list of keys involved in the transaction. + """ + ], + nodes: [ + type: {:list, :atom}, + required: false, + doc: """ + The list of the nodes where to set the lock. + + The default value is `[node()]`. + """ + ], + retries: [ + type: :timeout, + required: false, + default: :infinity, + doc: """ + If the key has already been locked by another process and retries are not + equal to 0, the process sleeps for a while and tries to execute the action + later. When `:retries` attempts have been made, an exception is raised. 
If + `:retries` is `:infinity` (the default), the function will eventually be + executed (unless the lock is never released). + """ + ] + ] + + # Transaction options schema + @opts_schema NimbleOptions.new!(opts_defs) + + ## Docs API + + # coveralls-ignore-start + + @spec options_docs() :: binary() + def options_docs do + NimbleOptions.docs(@opts_schema) + end + + # coveralls-ignore-stop + + ## Validation API + + @spec validate!(keyword()) :: keyword() + def validate!(opts) do + NimbleOptions.validate!(opts, @opts_schema) + end +end diff --git a/lib/nebulex/adapters/common/info.ex b/lib/nebulex/adapters/common/info.ex new file mode 100644 index 00000000..de2234d0 --- /dev/null +++ b/lib/nebulex/adapters/common/info.ex @@ -0,0 +1,111 @@ +defmodule Nebulex.Adapters.Common.Info do + @moduledoc """ + A simple/default implementation for `Nebulex.Adapter.Info` behaviour. + + The implementation defines the following information specifications: + + * `:server` - A map with general information about the cache server. + Includes the following keys: + + * `:nbx_version` - The Nebulex version. + * `:cache_module` - The defined cache module. + * `:cache_adapter` - The cache adapter. + * `:cache_name` - The cache name. + * `:cache_pid` - The cache PID. + + * `:stats` - A map with the cache statistics keys, as specified by + `Nebulex.Adapters.Common.Info.Stats`. 
+ + The info data will look like this: + + %{ + server: %{ + nbx_version: "3.0.0", + cache_module: "MyCache", + cache_adapter: "Nebulex.Adapters.Local", + cache_name: "MyCache", + cache_pid: #PID<0.111.0> + }, + stats: %{ + deletions: 0, + evictions: 0, + expirations: 0, + hits: 0, + misses: 0, + updates: 0, + writes: 0 + } + } + + """ + + @doc false + defmacro __using__(_opts) do + quote do + @behaviour Nebulex.Adapter.Info + + alias Nebulex.Adapters.Common.Info + + @impl true + def info(adapter_meta, path, _opts) do + {:ok, Info.info(adapter_meta, path)} + end + + defoverridable info: 3 + end + end + + alias __MODULE__.Stats + + @doc false + def info(adapter_meta, spec) + + def info(adapter_meta, spec) when is_list(spec) do + for i <- spec, into: %{} do + {i, do_info!(adapter_meta, i)} + end + end + + def info(adapter_meta, spec) do + do_info!(adapter_meta, spec) + end + + defp do_info!(adapter_meta, :all) do + %{ + server: server_info(adapter_meta), + stats: stats(adapter_meta) + } + end + + defp do_info!(adapter_meta, :server) do + server_info(adapter_meta) + end + + defp do_info!(adapter_meta, :stats) do + stats(adapter_meta) + end + + defp do_info!(_adapter_meta, spec) do + raise ArgumentError, "invalid information specification key #{inspect(spec)}" + end + + ## Helpers + + defp server_info(adapter_meta) do + %{ + nbx_version: Nebulex.vsn(), + cache_module: adapter_meta[:cache], + cache_adapter: adapter_meta[:adapter], + cache_name: adapter_meta[:name], + cache_pid: adapter_meta[:pid] + } + end + + defp stats(%{stats_counter: ref}) when not is_nil(ref) do + Stats.count(ref) + end + + defp stats(_adapter_meta) do + Stats.new() + end +end diff --git a/lib/nebulex/adapters/common/info/stats.ex b/lib/nebulex/adapters/common/info/stats.ex new file mode 100644 index 00000000..2fd0acad --- /dev/null +++ b/lib/nebulex/adapters/common/info/stats.ex @@ -0,0 +1,181 @@ +defmodule Nebulex.Adapters.Common.Info.Stats do + @moduledoc """ + Stats implementation using [Erlang 
counters][erl_counters]. + + Adapters are directly responsible for implementing the `Nebulex.Adapter.Info` + behaviour and adding an info spec for stats. However, this module provides a + simple implementation for stats using [Erlang counters][erl_counters]. + + An info specification `stats` is added to the info data, which is a map + with the following keys or measurements: + + * `:hits` - The requested data is successfully retrieved from the cache. + + * `:misses` - When a system or application makes a request to retrieve + data from a cache, but that specific data is not currently in cache + memory. A cache miss occurs either because the data was never placed + in the cache, or because the data was removed (“evicted”) from the + cache by either the caching system itself or an external application + that specifically made that eviction request. + + * `:evictions` - Eviction by the caching system itself occurs when + space needs to be freed up to add new data to the cache, or if + the time-to-live policy on the data expired. + + * `:expirations` - When the time-to-live policy on the data expired. + + * `:updates` - When existing data is successfully updated. + + * `:writes` - When data is inserted or overwritten. + + * `:deletions` - The data was intentionally removed by either the + caching system or an external application that specifically made + that deletion request. + + See the `Nebulex.Adapters.Local` adapter and `Nebulex.Adapters.Common.Info` + for more information about the usage. 
+ + [erl_counters]: https://erlang.org/doc/man/counters.html + """ + + alias __MODULE__.TelemetryHandler + alias Nebulex.Telemetry + + ## Types & Constants + + @typedoc "The stat type" + @type stat() :: + :hits + | :misses + | :evictions + | :expirations + | :writes + | :updates + | :deletions + + @typedoc "Stats type" + @type stats() :: %{required(stat()) => integer()} + + # Supported stats + @stats [ + :hits, + :misses, + :evictions, + :expirations, + :writes, + :updates, + :deletions + ] + + ## API + + @doc """ + Returns the Erlang's counter to be used by the adapter for keeping the cache + stats. It also initiates the Telemetry handler for handling and/or updating + the cache stats in runtime under the hood. + + Any adapter using `Nebulex.Adapters.Common.Info` implementation must call + this init function in the `c:Nebulex.Adapter.init/1` callback and include + the returned counter within the adapter metadata under the key + `:stats_counter`. See the `Nebulex.Adapters.Nil` for example. + + ## Example + + Nebulex.Adapters.Common.Info.Stats.init([:telemetry, :prefix]) + + > **NOTE:** This function is usually called by the adapter in case it uses + > the default implementation; the adapter should feed the stats counters. + """ + @spec init(telemetry_prefix :: [atom()]) :: :counters.counters_ref() + def init(telemetry_prefix) do + stats_counter = :counters.new(7, [:write_concurrency]) + + _ = + Telemetry.attach_many( + stats_counter, + [telemetry_prefix ++ [:command, :stop]], + &TelemetryHandler.handle_event/4, + stats_counter + ) + + stats_counter + end + + @doc """ + Increments counter(s) for the given stat(s) by `incr`. 
+ + ## Examples + + Nebulex.Adapters.Common.Info.Stats.incr(stats_counter, :hits) + + Nebulex.Adapters.Common.Info.Stats.incr(stats_counter, :writes, 10) + + Nebulex.Adapters.Common.Info.Stats.incr(stats_counter, [:misses, :deletions]) + + > **NOTE:** This function is usually called by the adapter in case it uses + > the default implementation; the adapter should feed the stats counters. + """ + @spec incr(:counters.counters_ref(), atom() | [atom()], integer()) :: :ok + def incr(counter, stats, incr \\ 1) + + def incr(ref, :hits, incr), do: :counters.add(ref, 1, incr) + def incr(ref, :misses, incr), do: :counters.add(ref, 2, incr) + def incr(ref, :evictions, incr), do: :counters.add(ref, 3, incr) + def incr(ref, :expirations, incr), do: :counters.add(ref, 4, incr) + def incr(ref, :writes, incr), do: :counters.add(ref, 5, incr) + def incr(ref, :updates, incr), do: :counters.add(ref, 6, incr) + def incr(ref, :deletions, incr), do: :counters.add(ref, 7, incr) + def incr(ref, l, incr) when is_list(l), do: Enum.each(l, &incr(ref, &1, incr)) + + @doc """ + Returns a map with all counters/stats count. + + ## Examples + + Nebulex.Adapters.Common.Info.Stats.count(stats_counter) + + > **NOTE:** This function is usually called by the adapter in case it uses + > the default implementation; the adapter should feed the stats counters. + """ + @spec count(:counters.counters_ref()) :: stats() + def count(ref) do + for s <- @stats, into: %{}, do: {s, count(ref, s)} + end + + @doc """ + Returns the current count for the stats counter given by `stat`. + + ## Examples + + Nebulex.Adapters.Common.Info.Stats.count(stats_counter, :hits) + + > **NOTE:** This function is usually called by the adapter in case it uses + > the default implementation; the adapter should feed the stats counters. 
+ """ + @spec count(:counters.counters_ref(), stat()) :: integer() + def count(ref, stat) + + def count(ref, :hits), do: :counters.get(ref, 1) + def count(ref, :misses), do: :counters.get(ref, 2) + def count(ref, :evictions), do: :counters.get(ref, 3) + def count(ref, :expirations), do: :counters.get(ref, 4) + def count(ref, :writes), do: :counters.get(ref, 5) + def count(ref, :updates), do: :counters.get(ref, 6) + def count(ref, :deletions), do: :counters.get(ref, 7) + + @doc """ + Convenience function for returning a map with all stats set to `0`. + """ + @spec new() :: stats() + def new do + %{ + deletions: 0, + evictions: 0, + expirations: 0, + hits: 0, + misses: 0, + updates: 0, + writes: 0 + } + end +end diff --git a/lib/nebulex/adapters/common/info/stats/telemetry_handler.ex b/lib/nebulex/adapters/common/info/stats/telemetry_handler.ex new file mode 100644 index 00000000..b743a775 --- /dev/null +++ b/lib/nebulex/adapters/common/info/stats/telemetry_handler.ex @@ -0,0 +1,145 @@ +defmodule Nebulex.Adapters.Common.Info.Stats.TelemetryHandler do + # Telemetry handler for aggregating cache stats; it relies on the default + # `Nebulex.Adapters.Common.Info` implementation based on Erlang counters. + # See `Nebulex.Adapters.Common.Info.Stats`. + @moduledoc false + + alias Nebulex.Adapters.Common.Info.Stats + + ## Handler + + @doc false + def handle_event( + _event, + _measurements, + %{adapter_meta: %{stats_counter: ref}} = metadata, + ref + ) + when not is_nil(ref) do + update_stats(metadata) + end + + # coveralls-ignore-start + + def handle_event(_event, _measurements, _metadata, _ref) do + :ok + end + + # coveralls-ignore-stop + + defp update_stats(%{ + command: action, + result: {:error, %Nebulex.KeyError{reason: :expired}}, + adapter_meta: %{stats_counter: ref} + }) + when action in [:fetch, :take, :ttl, :has_key?] 
do + :ok = Stats.incr(ref, [:misses, :evictions, :expirations, :deletions]) + end + + defp update_stats(%{ + command: action, + result: {:error, %Nebulex.KeyError{reason: :not_found}}, + adapter_meta: %{stats_counter: ref} + }) + when action in [:fetch, :take, :ttl, :has_key?] do + :ok = Stats.incr(ref, :misses) + end + + defp update_stats(%{ + command: action, + result: {:ok, _}, + adapter_meta: %{stats_counter: ref} + }) + when action in [:fetch, :ttl, :has_key?] do + :ok = Stats.incr(ref, :hits) + end + + defp update_stats(%{ + command: :take, + result: {:ok, _}, + adapter_meta: %{stats_counter: ref} + }) do + :ok = Stats.incr(ref, [:hits, :deletions]) + end + + defp update_stats(%{ + command: :put, + args: [_, _, _, :replace, _], + result: {:ok, true}, + adapter_meta: %{stats_counter: ref} + }) do + :ok = Stats.incr(ref, :updates) + end + + defp update_stats(%{ + command: :put, + result: {:ok, true}, + adapter_meta: %{stats_counter: ref} + }) do + :ok = Stats.incr(ref, :writes) + end + + defp update_stats(%{ + command: :put_all, + result: {:ok, true}, + args: [entries | _], + adapter_meta: %{stats_counter: ref} + }) do + :ok = Stats.incr(ref, :writes, Enum.count(entries)) + end + + defp update_stats(%{ + command: :delete, + result: :ok, + adapter_meta: %{stats_counter: ref} + }) do + :ok = Stats.incr(ref, :deletions) + end + + defp update_stats(%{ + command: :execute, + args: [%{op: :get_all, query: {:in, keys}} | _], + result: {:ok, list}, + adapter_meta: %{stats_counter: ref} + }) do + len = length(list) + + :ok = Stats.incr(ref, :hits, len) + :ok = Stats.incr(ref, :misses, Enum.count(keys) - len) + end + + defp update_stats(%{ + command: :execute, + args: [%{op: :delete_all} | _], + result: {:ok, result}, + adapter_meta: %{stats_counter: ref} + }) do + :ok = Stats.incr(ref, :deletions, result) + end + + defp update_stats(%{ + command: action, + result: {:ok, true}, + adapter_meta: %{stats_counter: ref} + }) + when action in [:expire, :touch] do + :ok = 
Stats.incr(ref, :updates) + end + + defp update_stats(%{ + command: :update_counter, + args: [_, amount, _, default, _], + result: {:ok, result}, + adapter_meta: %{stats_counter: ref} + }) do + offset = if amount >= 0, do: -1, else: 1 + + if result + amount * offset === default do + :ok = Stats.incr(ref, :writes) + else + :ok = Stats.incr(ref, :updates) + end + end + + defp update_stats(_), do: :ok +end diff --git a/lib/nebulex/adapters/local.ex b/lib/nebulex/adapters/local.ex deleted file mode 100644 index 409fd145..00000000 --- a/lib/nebulex/adapters/local.ex +++ /dev/null @@ -1,1010 +0,0 @@ -defmodule Nebulex.Adapters.Local do - @moduledoc ~S""" - Adapter module for Local Generational Cache; inspired by - [epocxy](https://github.com/duomark/epocxy). - - Generational caching using an ets table (or multiple ones when used with - `:shards`) for each generation of cached data. Accesses hit the newer - generation first, and migrate from the older generation to the newer - generation when retrieved from the stale table. When a new generation - is started, the oldest one is deleted. This is a form of mass garbage - collection which avoids using timers and expiration of individual - cached elements. - - This implementation of generation cache uses only two generations - (which is more than enough) also referred like the `newer` and - the `older`. - - ## Overall features - - * Configurable backend (`ets` or `:shards`). - * Expiration – A status based on TTL (Time To Live) option. To maintain - cache performance, expired entries may not be immediately removed or - evicted, they are expired or evicted on-demand, when the key is read. - * Eviction – [Generational Garbage Collection][gc]. - * Sharding – For intensive workloads, the Cache may also be partitioned - (by using `:shards` backend and specifying the `:partitions` option). - * Support for transactions via Erlang global name registration facility. - * Support for stats. 
- - [gc]: http://hexdocs.pm/nebulex/Nebulex.Adapters.Local.Generation.html - - ## Options - - This adapter supports the following options and all of them can be given via - the cache configuration: - - * `:backend` - Defines the backend or storage to be used for the adapter. - Supported backends are: `:ets` and `:shards`. Defaults to `:ets`. - - * `:read_concurrency` - (boolean) Since this adapter uses ETS tables - internally, this option is used when a new table is created; see - `:ets.new/2`. Defaults to `true`. - - * `:write_concurrency` - (boolean) Since this adapter uses ETS tables - internally, this option is used when a new table is created; see - `:ets.new/2`. Defaults to `true`. - - * `:compressed` - (boolean) This option is used when a new ETS table is - created and it defines whether or not it includes X as an option; see - `:ets.new/2`. Defaults to `false`. - - * `:backend_type` - This option defines the type of ETS to be used - (Defaults to `:set`). However, it is highly recommended to keep the - default value, since there are commands not supported (unexpected - exception may be raised) for types like `:bag` or `: duplicate_bag`. - Please see the [ETS](https://erlang.org/doc/man/ets.html) docs - for more information. - - * `:partitions` - If it is set, an integer > 0 is expected, otherwise, - it defaults to `System.schedulers_online()`. This option is only - available for `:shards` backend. - - * `:gc_interval` - If it is set, an integer > 0 is expected defining the - interval time in milliseconds to garbage collection to run, delete the - oldest generation and create a new one. If this option is not set, - garbage collection is never executed, so new generations must be - created explicitly, e.g.: `MyCache.new_generation(opts)`. - - * `:max_size` - If it is set, an integer > 0 is expected defining the - max number of cached entries (cache limit). If it is not set (`nil`), - the check to release memory is not performed (the default). 
- - * `:allocated_memory` - If it is set, an integer > 0 is expected defining - the max size in bytes allocated for a cache generation. When this option - is set and the configured value is reached, a new cache generation is - created so the oldest is deleted and force releasing memory space. - If it is not set (`nil`), the cleanup check to release memory is - not performed (the default). - - * `:gc_cleanup_min_timeout` - An integer > 0 defining the min timeout in - milliseconds for triggering the next cleanup and memory check. This will - be the timeout to use when either the max size or max allocated memory - is reached. Defaults to `10_000` (10 seconds). - - * `:gc_cleanup_max_timeout` - An integer > 0 defining the max timeout in - milliseconds for triggering the next cleanup and memory check. This is - the timeout used when the cache starts and there are few entries or the - consumed memory is near to `0`. Defaults to `600_000` (10 minutes). - - * `:gc_flush_delay` - If it is set, an integer > 0 is expected defining the - delay in milliseconds before objects from the oldest generation are - flushed. Defaults to `10_000` (10 seconds). - - ## Usage - - `Nebulex.Cache` is the wrapper around the cache. We can define a - local cache as follows: - - defmodule MyApp.LocalCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Local - end - - Where the configuration for the cache must be in your application - environment, usually defined in your `config/config.exs`: - - config :my_app, MyApp.LocalCache, - gc_interval: :timer.hours(12), - max_size: 1_000_000, - allocated_memory: 2_000_000_000, - gc_cleanup_min_timeout: :timer.seconds(10), - gc_cleanup_max_timeout: :timer.minutes(10) - - For intensive workloads, the Cache may also be partitioned using `:shards` - as cache backend (`backend: :shards`) and configuring the desired number of - partitions via the `:partitions` option. Defaults to - `System.schedulers_online()`. 
- - config :my_app, MyApp.LocalCache, - gc_interval: :timer.hours(12), - max_size: 1_000_000, - allocated_memory: 2_000_000_000, - gc_cleanup_min_timeout: :timer.seconds(10), - gc_cleanup_max_timeout: :timer.minutes(10), - backend: :shards, - partitions: System.schedulers_online() * 2 - - If your application was generated with a supervisor (by passing `--sup` - to `mix new`) you will have a `lib/my_app/application.ex` file containing - the application start callback that defines and starts your supervisor. - You just need to edit the `start/2` function to start the cache as a - supervisor on your application's supervisor: - - def start(_type, _args) do - children = [ - {MyApp.LocalCache, []}, - ... - ] - - See `Nebulex.Cache` for more information. - - ## Eviction configuration - - This section is to understand a bit better how the different configuration - options work and have an idea what values to set; especially if it is the - first time using Nebulex. - - ### `:ttl` option - - The `:ttl` option that is used to set the expiration time for a key, it - doesn't work as eviction mechanism, since the local adapter implements a - generational cache, the options that control the eviction process are: - `:gc_interval`, `:gc_cleanup_min_timeout`, `:gc_cleanup_max_timeout`, - `:max_size` and `:allocated_memory`. The `:ttl` is evaluated on-demand - when a key is retrieved, and at that moment if it s expired, then remove - it from the cache, hence, it can not be used as eviction method, it is - more for keep the integrity and consistency in the cache. For this reason, - it is highly recommended to configure always the eviction options mentioned - before. - - ### Caveats when using `:ttl` option: - - * When using the `:ttl` option, ensure it is less than `:gc_interval`, - otherwise, there may be a situation where the key is evicted and the - `:ttl` hasn't happened yet (maybe because the garbage collector ran - before the key had been fetched). 
- * Assuming you have `:gc_interval` set to 2 hrs, then you put a new key - with `:ttl` set to 1 hr, and 1 minute later the GC runs, that key will - be moved to the older generation so it can be yet retrieved. On the other - hand, if the key is never fetched till the next GC cycle (causing moving - it to the newer generation), since the key is already in the oldest - generation it will be evicted from the cache so it won't be retrievable - anymore. - - ### Garbage collection or eviction options - - This adapter implements a generational cache, which means its main eviction - mechanism is pushing a new cache generation and remove the oldest one. In - this way, we ensure only the most frequently used keys are always available - in the newer generation and the the least frequently used are evicted when - the garbage collector runs, and the garbage collector is triggered upon - these conditions: - - * When the time interval defined by `:gc_interval` is completed. - This makes the garbage-collector process to run creating a new - generation and forcing to delete the oldest one. - * When the "cleanup" timeout expires, and then the limits `:max_size` - and `:allocated_memory` are checked, if one of those is reached, - then the garbage collector runs (a new generation is created and - the oldest one is deleted). The cleanup timeout is controlled by - `:gc_cleanup_min_timeout` and `:gc_cleanup_max_timeout`, it works - with an inverse linear backoff, which means the timeout is inverse - proportional to the memory growth; the bigger the cache size is, - the shorter the cleanup timeout will be. - - ### First-time configuration - - For configuring the cache with accurate and/or good values it is important - to know several things in advance, like for example the size of an entry - in average so we can calculate a good value for max size and/or allocated - memory, how intensive will be the load in terms of reads and writes, etc. 
- The problem is most of these aspects are unknown when it is a new app or - we are using the cache for the first time. Therefore, the following - recommendations will help you to configure the cache for the first time: - - * When configuring the `:gc_interval`, think about how that often the - least frequently used entries should be evicted, or what is the desired - retention period for the cached entries. For example, if `:gc_interval` - is set to 1 hr, it means you will keep in cache only those entries that - are retrieved periodically within a 2 hr period; `gc_interval * 2`, - being 2 the number of generations. Longer than that, the GC will - ensure is always evicted (the oldest generation is always deleted). - If it is the first time using Nebulex, perhaps you can start with - `gc_interval: :timer.hours(12)` (12 hrs), so the max retention - period for the keys will be 1 day; but ensure you also set either the - `:max_size` or `:allocated_memory`. - * It is highly recommended to set either `:max_size` or `:allocated_memory` - to ensure the oldest generation is deleted (least frequently used keys - are evicted) when one of these limits is reached and also to avoid - running out of memory. For example, for the `:allocated_memory` we can - set 25% of the total memory, and for the `:max_size` something between - `100_000` and `1_000_000`. - * For `:gc_cleanup_min_timeout` we can set `10_000`, which means when the - cache is reaching the size or memory limit, the polling period for the - cleanup process will be 10 seconds. And for `:gc_cleanup_max_timeout` - we can set `600_000`, which means when the cache is almost empty the - polling period will be close to 10 minutes. - - ## Stats - - This adapter does support stats by using the default implementation - provided by `Nebulex.Adapter.Stats`. The adapter also uses the - `Nebulex.Telemetry.StatsHandler` to aggregate the stats and keep - them updated. 
Therefore, it requires the Telemetry events are emitted - by the adapter (the `:telemetry` option should not be set to `false` - so the Telemetry events can be dispatched), otherwise, stats won't - work properly. - - ## Queryable API - - Since this adapter is implemented on top of ETS tables, the query must be - a valid match spec given by `:ets.match_spec()`. However, there are some - predefined and/or shorthand queries you can use. See the section - "Predefined queries" below for for information. - - Internally, an entry is represented by the tuple - `{:entry, key, value, touched, ttl}`, which means the match pattern within - the `:ets.match_spec()` must be something like: - `{:entry, :"$1", :"$2", :"$3", :"$4"}`. - In order to make query building easier, you can use `Ex2ms` library. - - ### Predefined queries - - * `nil` - All keys are returned. - - * `:unexpired` - All unexpired keys/entries. - - * `:expired` - All expired keys/entries. - - * `{:in, [term]}` - Only the keys in the given key list (`[term]`) - are returned. This predefined query is only supported for - `c:Nebulex.Cache.delete_all/2`. This is the recommended - way of doing bulk delete of keys. - - ## Examples - - # built-in queries - MyCache.all() - MyCache.all(:unexpired) - MyCache.all(:expired) - MyCache.all({:in, ["foo", "bar"]}) - - # using a custom match spec (all values > 10) - spec = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 10}], [{{:"$1", :"$2"}}]}] - MyCache.all(spec) - - # using Ex2ms - import Ex2ms - - spec = - fun do - {_, key, value, _, _} when value > 10 -> {key, value} - end - - MyCache.all(spec) - - The `:return` option applies only for built-in queries, such as: - `nil | :unexpired | :expired`, if you are using a custom `:ets.match_spec()`, - the return value depends on it. - - The same applies to the `stream` function. - - ## Extended API (convenience functions) - - This adapter provides some additional convenience functions to the - `Nebulex.Cache` API. 
- - Creating new generations: - - MyCache.new_generation() - MyCache.new_generation(reset_timer: false) - - Retrieving the current generations: - - MyCache.generations() - - Retrieving the newer generation: - - MyCache.newer_generation() - - """ - - # Provide Cache Implementation - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - - # Inherit default transaction implementation - use Nebulex.Adapter.Transaction - - # Inherit default persistence implementation - use Nebulex.Adapter.Persistence - - # Inherit default stats implementation - use Nebulex.Adapter.Stats - - import Nebulex.Adapter - import Nebulex.Helpers - import Record - - alias Nebulex.Adapter.Stats - alias Nebulex.Adapters.Local.{Backend, Generation, Metadata} - alias Nebulex.{Entry, Time} - - # Cache Entry - defrecord(:entry, - key: nil, - value: nil, - touched: nil, - ttl: nil - ) - - # Supported Backends - @backends ~w(ets shards)a - - # Inline common instructions - @compile {:inline, list_gen: 1, newer_gen: 1, test_ms: 0} - - ## Nebulex.Adapter - - @impl true - defmacro __before_compile__(_env) do - quote do - @doc """ - A convenience function for creating new generations. - """ - def new_generation(opts \\ []) do - Generation.new(get_dynamic_cache(), opts) - end - - @doc """ - A convenience function for reset the GC timer. - """ - def reset_generation_timer do - Generation.reset_timer(get_dynamic_cache()) - end - - @doc """ - A convenience function for retrieving the current generations. - """ - def generations do - Generation.list(get_dynamic_cache()) - end - - @doc """ - A convenience function for retrieving the newer generation. 
- """ - def newer_generation do - Generation.newer(get_dynamic_cache()) - end - end - end - - @impl true - def init(opts) do - # Required options - cache = Keyword.fetch!(opts, :cache) - telemetry = Keyword.fetch!(opts, :telemetry) - telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) - - # Init internal metadata table - meta_tab = opts[:meta_tab] || Metadata.init() - - # Init stats_counter - stats_counter = Stats.init(opts) - - # Resolve the backend to be used - backend = - opts - |> Keyword.get(:backend, :ets) - |> case do - val when val in @backends -> - val - - val -> - raise "expected backend: option to be one of the supported backends " <> - "#{inspect(@backends)}, got: #{inspect(val)}" - end - - # Internal option for max nested match specs based on number of keys - purge_batch_size = - get_option( - opts, - :purge_batch_size, - "an integer > 0", - &(is_integer(&1) and &1 > 0), - 100 - ) - - # Build adapter metadata - adapter_meta = %{ - cache: cache, - telemetry: telemetry, - telemetry_prefix: telemetry_prefix, - meta_tab: meta_tab, - stats_counter: stats_counter, - backend: backend, - purge_batch_size: purge_batch_size, - started_at: DateTime.utc_now() - } - - # Build adapter child_spec - child_spec = Backend.child_spec(backend, [adapter_meta: adapter_meta] ++ opts) - - {:ok, child_spec, adapter_meta} - end - - ## Nebulex.Adapter.Entry - - @impl true - def get(adapter_meta, key, _opts) do - adapter_meta - |> get_(key) - |> handle_expired() - end - - defspan get_(adapter_meta, key), as: :get do - adapter_meta.meta_tab - |> list_gen() - |> do_get(key, adapter_meta.backend) - |> return(:value) - end - - defp do_get([newer], key, backend) do - gen_fetch(newer, key, backend) - end - - defp do_get([newer, older], key, backend) do - with nil <- gen_fetch(newer, key, backend), - entry(key: ^key) = cached <- gen_fetch(older, key, backend, &pop_entry/4) do - true = backend.insert(newer, cached) - cached - end - end - - defp gen_fetch(gen, key, backend, fun \\ 
&get_entry/4) do - gen - |> fun.(key, nil, backend) - |> validate_ttl(gen, backend) - end - - @impl true - defspan get_all(adapter_meta, keys, _opts) do - adapter_meta = %{adapter_meta | telemetry: Map.get(adapter_meta, :in_span?, false)} - - Enum.reduce(keys, %{}, fn key, acc -> - case get(adapter_meta, key, []) do - nil -> acc - obj -> Map.put(acc, key, obj) - end - end) - end - - @impl true - defspan put(adapter_meta, key, value, ttl, on_write, _opts) do - do_put( - on_write, - adapter_meta.meta_tab, - adapter_meta.backend, - entry( - key: key, - value: value, - touched: Time.now(), - ttl: ttl - ) - ) - end - - defp do_put(:put, meta_tab, backend, entry) do - put_entries(meta_tab, backend, entry) - end - - defp do_put(:put_new, meta_tab, backend, entry) do - put_new_entries(meta_tab, backend, entry) - end - - defp do_put(:replace, meta_tab, backend, entry(key: key, value: value)) do - update_entry(meta_tab, backend, key, [{3, value}]) - end - - @impl true - defspan put_all(adapter_meta, entries, ttl, on_write, _opts) do - entries = - for {key, value} <- entries, value != nil do - entry(key: key, value: value, touched: Time.now(), ttl: ttl) - end - - do_put_all( - on_write, - adapter_meta.meta_tab, - adapter_meta.backend, - adapter_meta.purge_batch_size, - entries - ) - end - - defp do_put_all(:put, meta_tab, backend, batch_size, entries) do - put_entries(meta_tab, backend, entries, batch_size) - end - - defp do_put_all(:put_new, meta_tab, backend, batch_size, entries) do - put_new_entries(meta_tab, backend, entries, batch_size) - end - - @impl true - defspan delete(adapter_meta, key, _opts) do - adapter_meta.meta_tab - |> list_gen() - |> Enum.each(&adapter_meta.backend.delete(&1, key)) - end - - @impl true - def take(adapter_meta, key, _opts) do - adapter_meta - |> take_(key) - |> handle_expired() - end - - defspan take_(adapter_meta, key), as: :take do - adapter_meta.meta_tab - |> list_gen() - |> Enum.reduce_while(nil, fn gen, acc -> - case pop_entry(gen, key, 
nil, adapter_meta.backend) do - nil -> - {:cont, acc} - - res -> - value = - res - |> validate_ttl(gen, adapter_meta.backend) - |> return(:value) - - {:halt, value} - end - end) - end - - @impl true - defspan update_counter(adapter_meta, key, amount, ttl, default, _opts) do - # Get needed metadata - meta_tab = adapter_meta.meta_tab - backend = adapter_meta.backend - - # Verify if the key has expired - _ = - meta_tab - |> list_gen() - |> do_get(key, backend) - - # Run the counter operation - meta_tab - |> newer_gen() - |> backend.update_counter( - key, - {3, amount}, - entry(key: key, value: default, touched: Time.now(), ttl: ttl) - ) - end - - @impl true - def has_key?(adapter_meta, key) do - case get(adapter_meta, key, []) do - nil -> false - _ -> true - end - end - - @impl true - defspan ttl(adapter_meta, key) do - adapter_meta.meta_tab - |> list_gen() - |> do_get(key, adapter_meta.backend) - |> return() - |> entry_ttl() - end - - defp entry_ttl(nil), do: nil - defp entry_ttl(:"$expired"), do: nil - defp entry_ttl(entry(ttl: :infinity)), do: :infinity - - defp entry_ttl(entry(ttl: ttl, touched: touched)) do - ttl - (Time.now() - touched) - end - - defp entry_ttl(entries) when is_list(entries) do - for entry <- entries, do: entry_ttl(entry) - end - - @impl true - defspan expire(adapter_meta, key, ttl) do - update_entry(adapter_meta.meta_tab, adapter_meta.backend, key, [{4, Time.now()}, {5, ttl}]) - end - - @impl true - defspan touch(adapter_meta, key) do - update_entry(adapter_meta.meta_tab, adapter_meta.backend, key, [{4, Time.now()}]) - end - - ## Nebulex.Adapter.Queryable - - @impl true - defspan execute(adapter_meta, operation, query, opts) do - do_execute(adapter_meta, operation, query, opts) - end - - defp do_execute(%{meta_tab: meta_tab, backend: backend}, :count_all, nil, _opts) do - meta_tab - |> list_gen() - |> Enum.reduce(0, fn gen, acc -> - gen - |> backend.info(:size) - |> Kernel.+(acc) - end) - end - - defp do_execute(%{meta_tab: meta_tab}, 
:delete_all, nil, _opts) do - Generation.delete_all(meta_tab) - end - - defp do_execute(%{meta_tab: meta_tab} = adapter_meta, :delete_all, {:in, keys}, _opts) - when is_list(keys) do - meta_tab - |> list_gen() - |> Enum.reduce(0, fn gen, acc -> - do_delete_all(adapter_meta.backend, gen, keys, adapter_meta.purge_batch_size) + acc - end) - end - - defp do_execute(%{meta_tab: meta_tab, backend: backend}, operation, query, opts) do - query = - query - |> validate_match_spec(opts) - |> maybe_match_spec_return_true(operation) - - {reducer, acc_in} = - case operation do - :all -> {&(backend.select(&1, query) ++ &2), []} - :count_all -> {&(backend.select_count(&1, query) + &2), 0} - :delete_all -> {&(backend.select_delete(&1, query) + &2), 0} - end - - meta_tab - |> list_gen() - |> Enum.reduce(acc_in, reducer) - end - - @impl true - defspan stream(adapter_meta, query, opts) do - query - |> validate_match_spec(opts) - |> do_stream(adapter_meta, Keyword.get(opts, :page_size, 20)) - end - - defp do_stream(match_spec, %{meta_tab: meta_tab, backend: backend}, page_size) do - Stream.resource( - fn -> - [newer | _] = generations = list_gen(meta_tab) - result = backend.select(newer, match_spec, page_size) - {result, generations} - end, - fn - {:"$end_of_table", [_gen]} -> - {:halt, []} - - {:"$end_of_table", [_gen | generations]} -> - result = - generations - |> hd() - |> backend.select(match_spec, page_size) - - {[], {result, generations}} - - {{elements, cont}, [_ | _] = generations} -> - {elements, {backend.select(cont), generations}} - end, - & &1 - ) - end - - ## Nebulex.Adapter.Persistence - - @impl true - defspan dump(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - @impl true - defspan load(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - ## Nebulex.Adapter.Transaction - - @impl true - defspan transaction(adapter_meta, opts, fun) do - super(adapter_meta, opts, fun) - end - - @impl true - defspan in_transaction?(adapter_meta) 
do - super(adapter_meta) - end - - ## Nebulex.Adapter.Stats - - @impl true - defspan stats(adapter_meta) do - if stats = super(adapter_meta) do - %{stats | metadata: Map.put(stats.metadata, :started_at, adapter_meta.started_at)} - end - end - - ## Helpers - - defp list_gen(meta_tab) do - Metadata.fetch!(meta_tab, :generations) - end - - defp newer_gen(meta_tab) do - meta_tab - |> Metadata.fetch!(:generations) - |> hd() - end - - defp get_entry(tab, key, default, backend) do - case backend.lookup(tab, key) do - [] -> default - [entry] -> entry - entries -> entries - end - end - - defp pop_entry(tab, key, default, backend) do - case backend.take(tab, key) do - [] -> default - [entry] -> entry - entries -> entries - end - end - - defp put_entries(meta_tab, backend, entries, batch_size \\ 0) - - defp put_entries(meta_tab, backend, entries, batch_size) when is_list(entries) do - do_put_entries(meta_tab, backend, entries, fn older_gen -> - keys = Enum.map(entries, fn entry(key: key) -> key end) - - do_delete_all(backend, older_gen, keys, batch_size) - end) - end - - defp put_entries(meta_tab, backend, entry(key: key) = entry, _batch_size) do - do_put_entries(meta_tab, backend, entry, fn older_gen -> - true = backend.delete(older_gen, key) - end) - end - - defp do_put_entries(meta_tab, backend, entry_or_entries, purge_fun) do - case list_gen(meta_tab) do - [newer_gen] -> - backend.insert(newer_gen, entry_or_entries) - - [newer_gen, older_gen] -> - _ = purge_fun.(older_gen) - - backend.insert(newer_gen, entry_or_entries) - end - end - - defp put_new_entries(meta_tab, backend, entries, batch_size \\ 0) - - defp put_new_entries(meta_tab, backend, entries, batch_size) when is_list(entries) do - do_put_new_entries(meta_tab, backend, entries, fn newer_gen, older_gen -> - with true <- backend.insert_new(older_gen, entries) do - keys = Enum.map(entries, fn entry(key: key) -> key end) - - _ = do_delete_all(backend, older_gen, keys, batch_size) - - backend.insert_new(newer_gen, 
entries) - end - end) - end - - defp put_new_entries(meta_tab, backend, entry(key: key) = entry, _batch_size) do - do_put_new_entries(meta_tab, backend, entry, fn newer_gen, older_gen -> - with true <- backend.insert_new(older_gen, entry) do - true = backend.delete(older_gen, key) - - backend.insert_new(newer_gen, entry) - end - end) - end - - defp do_put_new_entries(meta_tab, backend, entry_or_entries, purge_fun) do - case list_gen(meta_tab) do - [newer_gen] -> - backend.insert_new(newer_gen, entry_or_entries) - - [newer_gen, older_gen] -> - purge_fun.(newer_gen, older_gen) - end - end - - defp update_entry(meta_tab, backend, key, updates) do - case list_gen(meta_tab) do - [newer_gen] -> - backend.update_element(newer_gen, key, updates) - - [newer_gen, older_gen] -> - with false <- backend.update_element(newer_gen, key, updates), - entry() = entry <- pop_entry(older_gen, key, false, backend) do - entry = - Enum.reduce(updates, entry, fn - {3, value}, acc -> entry(acc, value: value) - {4, value}, acc -> entry(acc, touched: value) - {5, value}, acc -> entry(acc, ttl: value) - end) - - backend.insert(newer_gen, entry) - end - end - end - - defp do_delete_all(backend, tab, keys, batch_size) do - do_delete_all(backend, tab, keys, batch_size, 0) - end - - defp do_delete_all(backend, tab, [key], _batch_size, deleted) do - true = backend.delete(tab, key) - - deleted + 1 - end - - defp do_delete_all(backend, tab, [k1, k2 | keys], batch_size, deleted) do - k1 = if is_tuple(k1), do: tuple_to_match_spec(k1), else: k1 - k2 = if is_tuple(k2), do: tuple_to_match_spec(k2), else: k2 - - do_delete_all( - backend, - tab, - keys, - batch_size, - deleted, - 2, - {:orelse, {:==, :"$1", k1}, {:==, :"$1", k2}} - ) - end - - defp do_delete_all(backend, tab, [], _batch_size, deleted, _count, acc) do - backend.select_delete(tab, delete_all_match_spec(acc)) + deleted - end - - defp do_delete_all(backend, tab, keys, batch_size, deleted, count, acc) - when count >= batch_size do - deleted = 
backend.select_delete(tab, delete_all_match_spec(acc)) + deleted - - do_delete_all(backend, tab, keys, batch_size, deleted) - end - - defp do_delete_all(backend, tab, [k | keys], batch_size, deleted, count, acc) do - k = if is_tuple(k), do: tuple_to_match_spec(k), else: k - - do_delete_all( - backend, - tab, - keys, - batch_size, - deleted, - count + 1, - {:orelse, acc, {:==, :"$1", k}} - ) - end - - defp tuple_to_match_spec(data) do - data - |> :erlang.tuple_to_list() - |> tuple_to_match_spec([]) - end - - defp tuple_to_match_spec([], acc) do - {acc |> Enum.reverse() |> :erlang.list_to_tuple()} - end - - defp tuple_to_match_spec([e | tail], acc) do - e = if is_tuple(e), do: tuple_to_match_spec(e), else: e - - tuple_to_match_spec(tail, [e | acc]) - end - - defp return(entry_or_entries, field \\ nil) - - defp return(nil, _field), do: nil - defp return(:"$expired", _field), do: :"$expired" - defp return(entry(value: value), :value), do: value - defp return(entry(key: _) = entry, _field), do: entry - - defp return(entries, field) when is_list(entries) do - Enum.map(entries, &return(&1, field)) - end - - defp validate_ttl(nil, _, _), do: nil - defp validate_ttl(entry(ttl: :infinity) = entry, _, _), do: entry - - defp validate_ttl(entry(key: key, touched: touched, ttl: ttl) = entry, gen, backend) do - if Time.now() - touched >= ttl do - true = backend.delete(gen, key) - :"$expired" - else - entry - end - end - - defp validate_ttl(entries, gen, backend) when is_list(entries) do - Enum.filter(entries, fn entry -> - not is_nil(validate_ttl(entry, gen, backend)) - end) - end - - defp handle_expired(:"$expired"), do: nil - defp handle_expired(result), do: result - - defp validate_match_spec(spec, opts) when spec in [nil, :unexpired, :expired] do - [ - { - entry(key: :"$1", value: :"$2", touched: :"$3", ttl: :"$4"), - if(spec = comp_match_spec(spec), do: [spec], else: []), - ret_match_spec(opts) - } - ] - end - - defp validate_match_spec(spec, _opts) do - case 
:ets.test_ms(test_ms(), spec) do - {:ok, _result} -> - spec - - {:error, _result} -> - raise Nebulex.QueryError, message: "invalid match spec", query: spec - end - end - - defp comp_match_spec(nil), - do: nil - - defp comp_match_spec(:unexpired), - do: {:orelse, {:==, :"$4", :infinity}, {:<, {:-, Time.now(), :"$3"}, :"$4"}} - - defp comp_match_spec(:expired), - do: {:not, comp_match_spec(:unexpired)} - - defp ret_match_spec(opts) do - case Keyword.get(opts, :return, :key) do - :key -> [:"$1"] - :value -> [:"$2"] - {:key, :value} -> [{{:"$1", :"$2"}}] - :entry -> [%Entry{key: :"$1", value: :"$2", touched: :"$3", ttl: :"$4"}] - end - end - - defp maybe_match_spec_return_true([{pattern, conds, _ret}], operation) - when operation in [:delete_all, :count_all] do - [{pattern, conds, [true]}] - end - - defp maybe_match_spec_return_true(match_spec, _operation) do - match_spec - end - - defp delete_all_match_spec(conds) do - [ - { - entry(key: :"$1", value: :"$2", touched: :"$3", ttl: :"$4"), - [conds], - [true] - } - ] - end - - defp test_ms, do: entry(key: 1, value: 1, touched: Time.now(), ttl: 1000) -end diff --git a/lib/nebulex/adapters/local/backend.ex b/lib/nebulex/adapters/local/backend.ex deleted file mode 100644 index abea8f88..00000000 --- a/lib/nebulex/adapters/local/backend.ex +++ /dev/null @@ -1,81 +0,0 @@ -defmodule Nebulex.Adapters.Local.Backend do - @moduledoc false - - @doc false - defmacro __using__(_opts) do - quote do - import Nebulex.Helpers - - alias Nebulex.Adapters.Local.Generation - - defp generation_spec(opts) do - %{ - id: Module.concat([__MODULE__, GC]), - start: {Generation, :start_link, [opts]} - } - end - - defp sup_spec(children) do - %{ - id: Module.concat([__MODULE__, Supervisor]), - start: {Supervisor, :start_link, [children, [strategy: :one_for_all]]}, - type: :supervisor - } - end - - defp parse_opts(opts, extra \\ []) do - type = get_option(opts, :backend_type, "an atom", &is_atom/1, :set) - - compressed = - case get_option(opts, 
:compressed, "boolean", &is_boolean/1, false) do - true -> [:compressed] - false -> [] - end - - backend_opts = - [ - type, - :public, - {:keypos, 2}, - {:read_concurrency, - get_option(opts, :read_concurrency, "boolean", &is_boolean/1, true)}, - {:write_concurrency, - get_option(opts, :write_concurrency, "boolean", &is_boolean/1, true)}, - compressed, - extra - ] - |> List.flatten() - |> Enum.filter(&(&1 != :named_table)) - - Keyword.put(opts, :backend_opts, backend_opts) - end - end - end - - @doc """ - Helper function for returning the child spec for the given backend. - """ - def child_spec(backend, opts) do - get_mod(backend).child_spec(opts) - end - - @doc """ - Helper function for creating a new table for the given backend. - """ - def new(backend, meta_tab, tab_opts) do - get_mod(backend).new(meta_tab, tab_opts) - end - - @doc """ - Helper function for deleting a table for the given backend. - """ - def delete(backend, meta_tab, gen_tab) do - get_mod(backend).delete(meta_tab, gen_tab) - end - - defp get_mod(:ets), do: Nebulex.Adapters.Local.Backend.ETS - - if Code.ensure_loaded?(:shards) do - defp get_mod(:shards), do: Nebulex.Adapters.Local.Backend.Shards - end -end diff --git a/lib/nebulex/adapters/local/backend/ets.ex b/lib/nebulex/adapters/local/backend/ets.ex deleted file mode 100644 index d552447d..00000000 --- a/lib/nebulex/adapters/local/backend/ets.ex +++ /dev/null @@ -1,25 +0,0 @@ -defmodule Nebulex.Adapters.Local.Backend.ETS do - @moduledoc false - use Nebulex.Adapters.Local.Backend - - ## API - - @doc false - def child_spec(opts) do - opts - |> parse_opts() - |> generation_spec() - |> List.wrap() - |> sup_spec() - end - - @doc false - def new(_meta_tab, tab_opts) do - :ets.new(__MODULE__, tab_opts) - end - - @doc false - def delete(_meta_tab, gen_tab) do - :ets.delete(gen_tab) - end -end diff --git a/lib/nebulex/adapters/local/backend/shards.ex b/lib/nebulex/adapters/local/backend/shards.ex deleted file mode 100644 index d69ce924..00000000 --- 
a/lib/nebulex/adapters/local/backend/shards.ex +++ /dev/null @@ -1,87 +0,0 @@ -if Code.ensure_loaded?(:shards) do - defmodule Nebulex.Adapters.Local.Backend.Shards do - @moduledoc false - - defmodule ShardsDynamicSupervisor do - @moduledoc false - use DynamicSupervisor - - alias Nebulex.Adapters.Local.Metadata - - ## API - - @doc false - def start_link(tab) do - DynamicSupervisor.start_link(__MODULE__, tab) - end - - ## DynamicSupervisor Callbacks - - @impl true - def init(meta_tab) do - :ok = Metadata.put(meta_tab, :shards_sup, self()) - DynamicSupervisor.init(strategy: :one_for_one) - end - end - - use Nebulex.Adapters.Local.Backend - - alias Nebulex.Adapters.Local.Metadata - - ## API - - @doc false - def child_spec(opts) do - partitions = - get_option( - opts, - :partitions, - "an integer > 0", - &(is_integer(&1) and &1 > 0), - System.schedulers_online() - ) - - meta_tab = - opts - |> Keyword.fetch!(:adapter_meta) - |> Map.fetch!(:meta_tab) - - sup_spec([ - {ShardsDynamicSupervisor, meta_tab}, - generation_spec(parse_opts(opts, partitions: partitions)) - ]) - end - - @doc false - def new(meta_tab, tab_opts) do - {:ok, _pid, tab} = - meta_tab - |> Metadata.get(:shards_sup) - |> DynamicSupervisor.start_child(table_spec(tab_opts)) - - tab - end - - @doc false - def delete(meta_tab, gen_tab) do - meta_tab - |> Metadata.get(:shards_sup) - |> DynamicSupervisor.terminate_child(:shards_meta.tab_pid(gen_tab)) - end - - @doc false - def start_table(opts) do - tab = :shards.new(__MODULE__, opts) - pid = :shards_meta.tab_pid(tab) - {:ok, pid, tab} - end - - defp table_spec(opts) do - %{ - id: __MODULE__, - start: {__MODULE__, :start_table, [opts]}, - type: :supervisor - } - end - end -end diff --git a/lib/nebulex/adapters/local/generation.ex b/lib/nebulex/adapters/local/generation.ex deleted file mode 100644 index b986cbd5..00000000 --- a/lib/nebulex/adapters/local/generation.ex +++ /dev/null @@ -1,591 +0,0 @@ -defmodule Nebulex.Adapters.Local.Generation do - @moduledoc """ 
- Generational garbage collection process. - - The generational garbage collector manage the heap as several sub-heaps, - known as generations, based on age of the objects. An object is allocated - in the youngest generation, sometimes called the nursery, and is promoted - to an older generation if its lifetime exceeds the threshold of its current - generation (defined by option `:gc_interval`). Every time the GC runs - (triggered by `:gc_interval` timeout), a new cache generation is created - and the oldest one is deleted. - - The deletion of the oldest generation happens in two steps. First, the - underlying ets table is flushed to release space and only marked for deletion - as there may still be processes referencing it. The actual deletion of the - ets table happens at next GC run. - - However, flushing is a blocking operation, once started, processes wanting - to access the table will need to wait until it finishes. To circumvent this, - flushing can be delayed by configuring `:gc_flush_delay` to allow time for - these processes to finish their work without being accidentally blocked. - - The only way to create new generations is through this module (this server - is the metadata owner) calling `new/2` function. When a Cache is created, - a generational garbage collector is attached to it automatically, - therefore, this server MUST NOT be started directly. - - ## Options - - These options are configured through the `Nebulex.Adapters.Local` adapter: - - * `:gc_interval` - If it is set, an integer > 0 is expected defining the - interval time in milliseconds to garbage collection to run, delete the - oldest generation and create a new one. If this option is not set, - garbage collection is never executed, so new generations must be - created explicitly, e.g.: `MyCache.new_generation(opts)`. - - * `:max_size` - If it is set, an integer > 0 is expected defining the - max number of cached entries (cache limit). 
If it is not set (`nil`), - the check to release memory is not performed (the default). - - * `:allocated_memory` - If it is set, an integer > 0 is expected defining - the max size in bytes allocated for a cache generation. When this option - is set and the configured value is reached, a new cache generation is - created so the oldest is deleted and force releasing memory space. - If it is not set (`nil`), the cleanup check to release memory is - not performed (the default). - - * `:gc_cleanup_min_timeout` - An integer > 0 defining the min timeout in - milliseconds for triggering the next cleanup and memory check. This will - be the timeout to use when either the max size or max allocated memory - is reached. Defaults to `10_000` (10 seconds). - - * `:gc_cleanup_max_timeout` - An integer > 0 defining the max timeout in - milliseconds for triggering the next cleanup and memory check. This is - the timeout used when the cache starts and there are few entries or the - consumed memory is near to `0`. Defaults to `600_000` (10 minutes). - - * `:gc_flush_delay` - If it is set, an integer > 0 is expected defining the - delay in milliseconds before objects from the oldest generation are - flushed. Defaults to `10_000` (10 seconds). 
- - """ - - # State - defstruct [ - :cache, - :name, - :telemetry, - :telemetry_prefix, - :meta_tab, - :backend, - :backend_opts, - :stats_counter, - :gc_interval, - :gc_heartbeat_ref, - :max_size, - :allocated_memory, - :gc_cleanup_min_timeout, - :gc_cleanup_max_timeout, - :gc_cleanup_ref, - :gc_flush_delay - ] - - use GenServer - - import Nebulex.Helpers - - alias Nebulex.Adapter - alias Nebulex.Adapter.Stats - alias Nebulex.Adapters.Local - alias Nebulex.Adapters.Local.{Backend, Metadata} - alias Nebulex.Telemetry - alias Nebulex.Telemetry.StatsHandler - - @type t :: %__MODULE__{} - @type server_ref :: pid | atom | :ets.tid() - @type opts :: Nebulex.Cache.opts() - - ## API - - @doc """ - Starts the garbage collector for the built-in local cache adapter. - """ - @spec start_link(opts) :: GenServer.on_start() - def start_link(opts) do - GenServer.start_link(__MODULE__, opts) - end - - @doc """ - Creates a new cache generation. Once the max number of generations - is reached, when a new generation is created, the oldest one is - deleted. - - ## Options - - * `:reset_timer` - Indicates if the poll frequency time-out should - be reset or not (default: true). - - ## Example - - Nebulex.Adapters.Local.Generation.new(MyCache) - - Nebulex.Adapters.Local.Generation.new(MyCache, reset_timer: false) - """ - @spec new(server_ref, opts) :: [atom] - def new(server_ref, opts \\ []) do - reset_timer? = get_option(opts, :reset_timer, "boolean", &is_boolean/1, true) - do_call(server_ref, {:new_generation, reset_timer?}) - end - - @doc """ - Removes or flushes all entries from the cache (including all its generations). - - ## Example - - Nebulex.Adapters.Local.Generation.delete_all(MyCache) - """ - @spec delete_all(server_ref) :: integer - def delete_all(server_ref) do - do_call(server_ref, :delete_all) - end - - @doc """ - Reallocates the block of memory that was previously allocated for the given - `server_ref` with the new `size`. 
In other words, reallocates the max memory - size for a cache generation. - - ## Example - - Nebulex.Adapters.Local.Generation.realloc(MyCache, 1_000_000) - """ - @spec realloc(server_ref, pos_integer) :: :ok - def realloc(server_ref, size) do - do_call(server_ref, {:realloc, size}) - end - - @doc """ - Returns the memory info in a tuple form `{used_mem, total_mem}`. - - ## Example - - Nebulex.Adapters.Local.Generation.memory_info(MyCache) - """ - @spec memory_info(server_ref) :: {used_mem :: non_neg_integer, total_mem :: non_neg_integer} - def memory_info(server_ref) do - do_call(server_ref, :memory_info) - end - - @doc """ - Resets the timer for pushing new cache generations. - - ## Example - - Nebulex.Adapters.Local.Generation.reset_timer(MyCache) - """ - def reset_timer(server_ref) do - server_ref - |> server() - |> GenServer.cast(:reset_timer) - end - - @doc """ - Returns the list of the generations in the form `[newer, older]`. - - ## Example - - Nebulex.Adapters.Local.Generation.list(MyCache) - """ - @spec list(server_ref) :: [:ets.tid()] - def list(server_ref) do - server_ref - |> get_meta_tab() - |> Metadata.get(:generations, []) - end - - @doc """ - Returns the newer generation. - - ## Example - - Nebulex.Adapters.Local.Generation.newer(MyCache) - """ - @spec newer(server_ref) :: :ets.tid() - def newer(server_ref) do - server_ref - |> get_meta_tab() - |> Metadata.get(:generations, []) - |> hd() - end - - @doc """ - Returns the PID of the GC server for the given `server_ref`. - - ## Example - - Nebulex.Adapters.Local.Generation.server(MyCache) - """ - @spec server(server_ref) :: pid - def server(server_ref) do - server_ref - |> get_meta_tab() - |> Metadata.fetch!(:gc_pid) - end - - @doc """ - A convenience function for retrieving the state. 
- """ - @spec get_state(server_ref) :: t - def get_state(server_ref) do - server_ref - |> server() - |> GenServer.call(:get_state) - end - - defp do_call(tab, message) do - tab - |> server() - |> GenServer.call(message) - end - - defp get_meta_tab(server_ref) when is_atom(server_ref) or is_pid(server_ref) do - Adapter.with_meta(server_ref, fn _, %{meta_tab: meta_tab} -> - meta_tab - end) - end - - defp get_meta_tab(server_ref), do: server_ref - - ## GenServer Callbacks - - @impl true - def init(opts) do - # Trap exit signals to run cleanup process - _ = Process.flag(:trap_exit, true) - - # Initial state - state = struct(__MODULE__, parse_opts(opts)) - - # Init cleanup timer - cleanup_ref = - if state.max_size || state.allocated_memory, - do: start_timer(state.gc_cleanup_max_timeout, nil, :cleanup) - - # Timer ref - {:ok, ref} = - if state.gc_interval, - do: {new_gen(state), start_timer(state.gc_interval)}, - else: {new_gen(state), nil} - - # Update state - state = %{state | gc_cleanup_ref: cleanup_ref, gc_heartbeat_ref: ref} - - {:ok, state, {:continue, :attach_stats_handler}} - end - - defp parse_opts(opts) do - # Get adapter metadata - adapter_meta = Keyword.fetch!(opts, :adapter_meta) - - # Add the GC PID to the meta table - meta_tab = Map.fetch!(adapter_meta, :meta_tab) - :ok = Metadata.put(meta_tab, :gc_pid, self()) - - # Common validators - pos_integer = &(is_integer(&1) and &1 > 0) - pos_integer_or_nil = &((is_integer(&1) and &1 > 0) or is_nil(&1)) - - Map.merge(adapter_meta, %{ - backend_opts: Keyword.get(opts, :backend_opts, []), - gc_interval: get_option(opts, :gc_interval, "an integer > 0", pos_integer_or_nil), - max_size: get_option(opts, :max_size, "an integer > 0", pos_integer_or_nil), - allocated_memory: get_option(opts, :allocated_memory, "an integer > 0", pos_integer_or_nil), - gc_cleanup_min_timeout: - get_option(opts, :gc_cleanup_min_timeout, "an integer > 0", pos_integer, 10_000), - gc_cleanup_max_timeout: - get_option(opts, 
:gc_cleanup_max_timeout, "an integer > 0", pos_integer, 600_000), - gc_flush_delay: get_option(opts, :gc_flush_delay, "an integer > 0", pos_integer, 10_000) - }) - end - - @impl true - def handle_continue(:attach_stats_handler, %__MODULE__{stats_counter: nil} = state) do - {:noreply, state} - end - - def handle_continue(:attach_stats_handler, %__MODULE__{stats_counter: stats_counter} = state) do - _ = - Telemetry.attach_many( - stats_counter, - [state.telemetry_prefix ++ [:command, :stop]], - &StatsHandler.handle_event/4, - stats_counter - ) - - {:noreply, state} - end - - @impl true - def terminate(_reason, state) do - if ref = state.stats_counter, do: Telemetry.detach(ref) - end - - @impl true - def handle_call(:delete_all, _from, %__MODULE__{} = state) do - # Get current size - size = - state - |> Map.from_struct() - |> Local.execute(:count_all, nil, []) - - # Create new generation - :ok = new_gen(state) - - # Delete all objects - :ok = - state.meta_tab - |> list() - |> Enum.each(&state.backend.delete_all_objects(&1)) - - {:reply, size, %{state | gc_heartbeat_ref: maybe_reset_timer(true, state)}} - end - - def handle_call({:new_generation, reset_timer?}, _from, state) do - # Create new generation - :ok = new_gen(state) - - # Maybe reset heartbeat timer - heartbeat_ref = maybe_reset_timer(reset_timer?, state) - - {:reply, :ok, %{state | gc_heartbeat_ref: heartbeat_ref}} - end - - def handle_call( - :memory_info, - _from, - %__MODULE__{backend: backend, meta_tab: meta_tab, allocated_memory: allocated} = state - ) do - {:reply, {memory_info(backend, meta_tab), allocated}, state} - end - - def handle_call({:realloc, mem_size}, _from, state) do - {:reply, :ok, %{state | allocated_memory: mem_size}} - end - - def handle_call(:get_state, _from, state) do - {:reply, state, state} - end - - @impl true - def handle_cast(:reset_timer, state) do - {:noreply, %{state | gc_heartbeat_ref: maybe_reset_timer(true, state)}} - end - - @impl true - def handle_info( - :heartbeat, - 
%__MODULE__{ - gc_interval: gc_interval, - gc_heartbeat_ref: heartbeat_ref - } = state - ) do - # Create new generation - :ok = new_gen(state) - - # Reset heartbeat timer - heartbeat_ref = start_timer(gc_interval, heartbeat_ref) - - {:noreply, %{state | gc_heartbeat_ref: heartbeat_ref}} - end - - def handle_info(:cleanup, state) do - # Check size first, if the cleanup is done, skip checking the memory, - # otherwise, check the memory too. - {_, state} = - with {false, state} <- check_size(state) do - check_memory(state) - end - - {:noreply, state} - end - - def handle_info( - :flush_older_gen, - %__MODULE__{ - meta_tab: meta_tab, - backend: backend - } = state - ) do - if deprecated = Metadata.get(meta_tab, :deprecated) do - true = backend.delete_all_objects(deprecated) - end - - {:noreply, state} - end - - defp check_size(%__MODULE__{max_size: max_size} = state) when not is_nil(max_size) do - maybe_cleanup(:size, state) - end - - defp check_size(state) do - {false, state} - end - - defp check_memory(%__MODULE__{allocated_memory: allocated} = state) when not is_nil(allocated) do - maybe_cleanup(:memory, state) - end - - defp check_memory(state) do - {false, state} - end - - defp maybe_cleanup( - info, - %__MODULE__{ - cache: cache, - name: name, - gc_cleanup_ref: cleanup_ref, - gc_cleanup_min_timeout: min_timeout, - gc_cleanup_max_timeout: max_timeout, - gc_interval: gc_interval, - gc_heartbeat_ref: heartbeat_ref - } = state - ) do - case cleanup_info(info, state) do - {size, max_size} when size >= max_size -> - # Create a new generation - :ok = new_gen(state) - - # Purge expired entries - _ = cache.delete_all(:expired, dynamic_cache: name) - - # Reset the heartbeat timer - heartbeat_ref = start_timer(gc_interval, heartbeat_ref) - - # Reset the cleanup timer - cleanup_ref = - info - |> cleanup_info(state) - |> elem(0) - |> reset_cleanup_timer(max_size, min_timeout, max_timeout, cleanup_ref) - - {true, %{state | gc_heartbeat_ref: heartbeat_ref, gc_cleanup_ref: 
cleanup_ref}} - - {size, max_size} -> - # Reset the cleanup timer - cleanup_ref = reset_cleanup_timer(size, max_size, min_timeout, max_timeout, cleanup_ref) - - {false, %{state | gc_cleanup_ref: cleanup_ref}} - end - end - - defp cleanup_info(:size, %__MODULE__{backend: mod, meta_tab: tab, max_size: max}) do - {size_info(mod, tab), max} - end - - defp cleanup_info(:memory, %__MODULE__{backend: mod, meta_tab: tab, allocated_memory: max}) do - {memory_info(mod, tab), max} - end - - ## Private Functions - - defp new_gen(%__MODULE__{ - meta_tab: meta_tab, - backend: backend, - backend_opts: backend_opts, - stats_counter: stats_counter, - gc_flush_delay: gc_flush_delay - }) do - # Create new generation - gen_tab = Backend.new(backend, meta_tab, backend_opts) - - # Update generation list - case list(meta_tab) do - [newer, older] -> - # Since the older generation is deleted, update evictions count - :ok = Stats.incr(stats_counter, :evictions, backend.info(older, :size)) - - # Update generations - :ok = Metadata.put(meta_tab, :generations, [gen_tab, newer]) - - # Process the older generation: - # - Delete previously stored deprecated generation - # - Flush the older generation - # - Deprecate it (mark it for deletion) - :ok = process_older_gen(meta_tab, backend, older, gc_flush_delay) - - [newer] -> - # Update generations - :ok = Metadata.put(meta_tab, :generations, [gen_tab, newer]) - - [] -> - # Update generations - :ok = Metadata.put(meta_tab, :generations, [gen_tab]) - end - end - - # The older generation cannot be removed immediately because there may be - # ongoing operations using it, then it may cause race-condition errors. - # Hence, the idea is to keep it alive till a new generation is pushed, but - # flushing its data before so that we release memory space. By the time a new - # generation is pushed, then it is safe to delete it completely. 
- defp process_older_gen(meta_tab, backend, older, gc_flush_delay) do - if deprecated = Metadata.get(meta_tab, :deprecated) do - # Delete deprecated generation if it does exist - _ = Backend.delete(backend, meta_tab, deprecated) - end - - # Flush older generation to release space so it can be marked for deletion - Process.send_after(self(), :flush_older_gen, gc_flush_delay) - - # Keep alive older generation reference into the metadata - Metadata.put(meta_tab, :deprecated, older) - end - - defp start_timer(time, ref \\ nil, event \\ :heartbeat) - - defp start_timer(nil, _, _), do: nil - - defp start_timer(time, ref, event) do - _ = if ref, do: Process.cancel_timer(ref) - Process.send_after(self(), event, time) - end - - defp maybe_reset_timer(_, %__MODULE__{gc_interval: nil} = state) do - state.gc_heartbeat_ref - end - - defp maybe_reset_timer(false, state) do - state.gc_heartbeat_ref - end - - defp maybe_reset_timer(true, %__MODULE__{} = state) do - start_timer(state.gc_interval, state.gc_heartbeat_ref) - end - - defp reset_cleanup_timer(size, max_size, min_timeout, max_timeout, cleanup_ref) do - size - |> linear_inverse_backoff(max_size, min_timeout, max_timeout) - |> start_timer(cleanup_ref, :cleanup) - end - - defp size_info(backend, meta_tab) do - meta_tab - |> list() - |> Enum.reduce(0, &(backend.info(&1, :size) + &2)) - end - - defp memory_info(backend, meta_tab) do - meta_tab - |> list() - |> Enum.reduce(0, fn gen, acc -> - gen - |> backend.info(:memory) - |> Kernel.*(:erlang.system_info(:wordsize)) - |> Kernel.+(acc) - end) - end - - defp linear_inverse_backoff(size, _max_size, _min_timeout, max_timeout) when size <= 0 do - max_timeout - end - - defp linear_inverse_backoff(size, max_size, min_timeout, _max_timeout) when size >= max_size do - min_timeout - end - - defp linear_inverse_backoff(size, max_size, min_timeout, max_timeout) do - round((min_timeout - max_timeout) / max_size * size + max_timeout) - end -end diff --git 
a/lib/nebulex/adapters/local/metadata.ex b/lib/nebulex/adapters/local/metadata.ex deleted file mode 100644 index be232bb7..00000000 --- a/lib/nebulex/adapters/local/metadata.ex +++ /dev/null @@ -1,28 +0,0 @@ -defmodule Nebulex.Adapters.Local.Metadata do - @moduledoc false - - @type tab :: :ets.tid() | atom - - @spec init :: tab - def init do - :ets.new(__MODULE__, [:public, read_concurrency: true]) - end - - @spec get(tab, term, term) :: term - def get(tab, key, default \\ nil) do - :ets.lookup_element(tab, key, 2) - rescue - ArgumentError -> default - end - - @spec fetch!(tab, term) :: term - def fetch!(tab, key) do - :ets.lookup_element(tab, key, 2) - end - - @spec put(tab, term, term) :: :ok - def put(tab, key, value) do - true = :ets.insert(tab, {key, value}) - :ok - end -end diff --git a/lib/nebulex/adapters/multilevel.ex b/lib/nebulex/adapters/multilevel.ex deleted file mode 100644 index e799e377..00000000 --- a/lib/nebulex/adapters/multilevel.ex +++ /dev/null @@ -1,632 +0,0 @@ -defmodule Nebulex.Adapters.Multilevel do - @moduledoc ~S""" - Adapter module for Multi-level Cache. - - This is just a simple layer on top of local or distributed cache - implementations that enables to have a cache hierarchy by levels. - Multi-level caches generally operate by checking the fastest, - level 1 (L1) cache first; if it hits, the adapter proceeds at - high speed. If that first cache misses, the next fastest cache - (level 2, L2) is checked, and so on, before accessing external - memory (that can be handled by a `cacheable` decorator). - - For write functions, the "Write Through" policy is applied by default; - this policy ensures that the data is stored safely as it is written - throughout the hierarchy. However, it is possible to force the write - operation in a specific level (although it is not recommended) via - `level` option, where the value is a positive integer greater than 0. 
- - We can define a multi-level cache as follows: - - defmodule MyApp.Multilevel do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Multilevel - - defmodule L1 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule L2 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - end - end - - Where the configuration for the cache and its levels must be in your - application environment, usually defined in your `config/config.exs`: - - config :my_app, MyApp.Multilevel, - model: :inclusive, - levels: [ - { - MyApp.Multilevel.L1, - gc_interval: :timer.hours(12), - backend: :shards - }, - { - MyApp.Multilevel.L2, - primary: [ - gc_interval: :timer.hours(12), - backend: :shards - ] - } - ] - - If your application was generated with a supervisor (by passing `--sup` - to `mix new`) you will have a `lib/my_app/application.ex` file containing - the application start callback that defines and starts your supervisor. - You just need to edit the `start/2` function to start the cache as a - supervisor on your application's supervisor: - - def start(_type, _args) do - children = [ - {MyApp.Multilevel, []}, - ... - ] - - See `Nebulex.Cache` for more information. - - ## Options - - This adapter supports the following options and all of them can be given via - the cache configuration: - - * `:levels` - This option is to define the levels, a list of tuples - `{cache_level :: Nebulex.Cache.t(), opts :: Keyword.t()}`, where - the first element is the module that defines the cache for that - level, and the second one is the options that will be passed to - that level in the `start/link/1` (which depends on the adapter - this level is using). The order in which the levels are defined - is the same the multi-level cache will use. For example, the first - cache in the list will be the L1 cache (level 1) and so on; - the Nth element will be the LN cache. 
This option is mandatory, - if it is not set or empty, an exception will be raised. - - * `:model` - Specifies the cache model: `:inclusive` or `:exclusive`; - defaults to `:inclusive`. In an inclusive cache, the same data can be - present in all caches/levels. In an exclusive cache, data can be present - in only one cache/level and a key cannot be found in the rest of caches - at the same time. This option applies to the `get` callabck only; if the - cache `:model` is `:inclusive`, when the key is found in a level N, - that entry is duplicated backwards (to all previous levels: 1..N-1). - However, when the mode is set to `:inclusive`, the `get_all` operation - is translated into multiple `get` calls underneath (which may be a - significant performance penalty) since is required to replicate the - entries properly with their current TTLs. It is possible to skip the - replication when calling `get_all` using the option `:replicate`. - - * `:replicate` - This option applies only to the `get_all` callback. - Determines whether the entries should be replicated to the backward - levels or not. Defaults to `true`. - - - ## Shared options - - Almost all of the cache functions outlined in `Nebulex.Cache` module - accept the following options: - - * `:level` - It may be an integer greater than 0 that specifies the cache - level where the operation will take place. By default, the evaluation - is performed throughout the whole cache hierarchy (all levels). - - ## Telemetry events - - This adapter emits all recommended Telemetry events, and documented - in `Nebulex.Cache` module (see **"Adapter-specific events"** section). - - Since the multi-level adapter is a layer/wrapper on top of other existing - adapters, each cache level may Telemetry emit events independently. 
- For example, for the cache defined before `MyApp.Multilevel`, the next - events will be emitted for the main multi-level cache: - - * `[:my_app, :multilevel, :command, :start]` - * `[:my_app, :multilevel, :command, :stop]` - * `[:my_app, :multilevel, :command, :exception]` - - For the L1 (configured with the local adapter): - - * `[:my_app, :multilevel, :l1, :command, :start]` - * `[:my_app, :multilevel, :l1, :command, :stop]` - * `[:my_app, :multilevel, :l1, :command, :exception]` - - For the L2 (configured with the partitioned adapter): - - * `[:my_app, :multilevel, :l2, :command, :start]` - * `[:my_app, :multilevel, :l2, :primary, :command, :start]` - * `[:my_app, :multilevel, :l2, :command, :stop]` - * `[:my_app, :multilevel, :l2, :primary, :command, :stop]` - * `[:my_app, :multilevel, :l2, :command, :exception]` - * `[:my_app, :multilevel, :l2, :primary, :command, :exception]` - - See also the [Telemetry guide](http://hexdocs.pm/nebulex/telemetry.html) - for more information and examples. - - ## Stats - - Since the multi-level adapter works as a wrapper for the configured cache - levels, the support for stats depends on the underlying levels. Also, the - measurements are consolidated per level, they are not aggregated. For example, - if we enable the stats for the multi-level cache defined previously and run: - - MyApp.Multilevel.stats() - - The returned stats will look like: - - %Nebulex.Stats{ - measurements: %{ - l1: %{evictions: 0, expirations: 0, hits: 0, misses: 0, writes: 0}, - l2: %{evictions: 0, expirations: 0, hits: 0, misses: 0, writes: 0} - }, - metadata: %{ - l1: %{ - cache: NMyApp.Multilevel.L1, - started_at: ~U[2021-01-10 13:06:04.075084Z] - }, - l2: %{ - cache: MyApp.Multilevel.L2.Primary, - started_at: ~U[2021-01-10 13:06:04.089888Z] - }, - cache: MyApp.Multilevel, - started_at: ~U[2021-01-10 13:06:04.066750Z] - } - } - - **IMPORTANT:** Those cache levels with stats disabled won't be included - into the returned stats (they are skipped). 
If a cache level is using - an adapter that does not support stats, you may get unexpected errors. - Therefore, and as overall recommendation, check out the documentation - for adapters used by the underlying cache levels and ensure they - implement the `Nebulex.Adapter.Stats` behaviour. - - ### Stats with Telemetry - - In case you are using Telemetry metrics, you can define the metrics per - level, for example: - - last_value("nebulex.cache.stats.l1.hits", - event_name: "nebulex.cache.stats", - measurement: &get_in(&1, [:l1, :hits]), - tags: [:cache] - ) - last_value("nebulex.cache.stats.l1.misses", - event_name: "nebulex.cache.stats", - measurement: &get_in(&1, [:l1, :misses]), - tags: [:cache] - ) - - > See the section **"Instrumenting Multi-level caches"** in the - [Telemetry guide](http://hexdocs.pm/nebulex/telemetry.html) - for more information. - - ## Extended API - - This adapter provides one additional convenience function for retrieving - the cache model for the given cache `name`: - - MyCache.model() - MyCache.model(:cache_name) - - ## Caveats of multi-level adapter - - Because this adapter reuses other existing/configured adapters, it inherits - all their limitations too. Therefore, it is highly recommended to check the - documentation of the adapters to use. - """ - - # Provide Cache Implementation - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - @behaviour Nebulex.Adapter.Stats - - # Inherit default transaction implementation - use Nebulex.Adapter.Transaction - - import Nebulex.Adapter - import Nebulex.Helpers - - alias Nebulex.Cache.Cluster - - # Multi-level Cache Models - @models [:inclusive, :exclusive] - - ## Nebulex.Adapter - - @impl true - defmacro __before_compile__(_env) do - quote do - @doc """ - A convenience function to get the cache model. 
- """ - def model(name \\ __MODULE__) do - with_meta(name, fn _adapter, %{model: model} -> - model - end) - end - end - end - - @impl true - def init(opts) do - # Required options - telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) - telemetry = Keyword.fetch!(opts, :telemetry) - cache = Keyword.fetch!(opts, :cache) - name = opts[:name] || cache - - # Maybe use stats - stats = get_boolean_option(opts, :stats) - - # Get cache levels - levels = - get_option( - opts, - :levels, - "a list with at least one level definition", - &(Keyword.keyword?(&1) && length(&1) > 0) - ) - - # Get multilevel-cache model - model = get_option(opts, :model, ":inclusive or :exclusive", &(&1 in @models), :inclusive) - - # Build multi-level specs - {children, meta_list, _} = children(levels, telemetry_prefix, telemetry, stats) - - # Build adapter spec - child_spec = - Nebulex.Adapters.Supervisor.child_spec( - name: normalize_module_name([name, Supervisor]), - strategy: :one_for_one, - children: children - ) - - adapter_meta = %{ - telemetry_prefix: telemetry_prefix, - telemetry: telemetry, - name: name, - levels: meta_list, - model: model, - stats: stats, - started_at: DateTime.utc_now() - } - - {:ok, child_spec, adapter_meta} - end - - # sobelow_skip ["DOS.BinToAtom"] - defp children(levels, telemetry_prefix, telemetry, stats) do - levels - |> Enum.reverse() - |> Enum.reduce({[], [], length(levels)}, fn {l_cache, l_opts}, {child_acc, meta_acc, n} -> - l_opts = - Keyword.merge( - [ - telemetry_prefix: telemetry_prefix ++ [:"l#{n}"], - telemetry: telemetry, - stats: stats - ], - l_opts - ) - - meta = %{cache: l_cache, name: l_opts[:name]} - - {[{l_cache, l_opts} | child_acc], [meta | meta_acc], n - 1} - end) - end - - ## Nebulex.Adapter.Entry - - @impl true - defspan get(adapter_meta, key, opts) do - opts - |> levels(adapter_meta.levels) - |> Enum.reduce_while({nil, []}, fn level, {default, prev} -> - value = with_dynamic_cache(level, :get, [key, opts]) - - if is_nil(value) do - 
{:cont, {default, [level | prev]}} - else - {:halt, {value, [level | prev]}} - end - end) - |> maybe_replicate(key, adapter_meta.model) - end - - @impl true - defspan get_all(adapter_meta, keys, opts) do - {replicate?, opts} = Keyword.pop(opts, :replicate, true) - - do_get_all(adapter_meta, keys, replicate?, opts) - end - - defp do_get_all(%{model: :inclusive} = adapter_meta, keys, true, opts) do - Enum.reduce(keys, %{}, fn key, acc -> - if obj = get(adapter_meta, key, opts), - do: Map.put(acc, key, obj), - else: acc - end) - end - - defp do_get_all(%{levels: levels}, keys, _replicate?, opts) do - opts - |> levels(levels) - |> Enum.reduce_while({keys, %{}}, fn level, {keys_acc, map_acc} -> - map = with_dynamic_cache(level, :get_all, [keys_acc, opts]) - map_acc = Map.merge(map_acc, map) - - case keys_acc -- Map.keys(map) do - [] -> {:halt, {[], map_acc}} - keys_acc -> {:cont, {keys_acc, map_acc}} - end - end) - |> elem(1) - end - - @impl true - defspan put(adapter_meta, key, value, _ttl, on_write, opts) do - case on_write do - :put -> - :ok = eval(adapter_meta, :put, [key, value, opts], opts) - true - - :put_new -> - eval(adapter_meta, :put_new, [key, value, opts], opts) - - :replace -> - eval(adapter_meta, :replace, [key, value, opts], opts) - end - end - - @impl true - defspan put_all(adapter_meta, entries, _ttl, on_write, opts) do - action = if on_write == :put_new, do: :put_new_all, else: :put_all - - reducer = fn level, {_, level_acc} -> - case with_dynamic_cache(level, action, [entries, opts]) do - :ok -> - {:cont, {true, [level | level_acc]}} - - true -> - {:cont, {true, [level | level_acc]}} - - false -> - _ = delete_from_levels(level_acc, entries) - {:halt, {on_write == :put, level_acc}} - end - end - - opts - |> levels(adapter_meta.levels) - |> Enum.reduce_while({true, []}, reducer) - |> elem(0) - end - - @impl true - defspan delete(adapter_meta, key, opts) do - eval(adapter_meta, :delete, [key, opts], Keyword.put(opts, :reverse, true)) - end - - @impl 
true - defspan take(adapter_meta, key, opts) do - opts - |> levels(adapter_meta.levels) - |> do_take(nil, key, opts) - end - - defp do_take([], result, _key, _opts), do: result - - defp do_take([l_meta | rest], nil, key, opts) do - result = with_dynamic_cache(l_meta, :take, [key, opts]) - do_take(rest, result, key, opts) - end - - defp do_take(levels, result, key, _opts) do - _ = eval(levels, :delete, [key, []], reverse: true) - result - end - - @impl true - defspan has_key?(adapter_meta, key) do - eval_while(adapter_meta, :has_key?, [key], false) - end - - @impl true - defspan update_counter(adapter_meta, key, amount, _ttl, _default, opts) do - eval(adapter_meta, :incr, [key, amount, opts], opts) - end - - @impl true - defspan ttl(adapter_meta, key) do - eval_while(adapter_meta, :ttl, [key], nil) - end - - @impl true - defspan expire(adapter_meta, key, ttl) do - Enum.reduce(adapter_meta.levels, false, fn l_meta, acc -> - with_dynamic_cache(l_meta, :expire, [key, ttl]) or acc - end) - end - - @impl true - defspan touch(adapter_meta, key) do - Enum.reduce(adapter_meta.levels, false, fn l_meta, acc -> - with_dynamic_cache(l_meta, :touch, [key]) or acc - end) - end - - ## Nebulex.Adapter.Queryable - - @impl true - defspan execute(adapter_meta, operation, query, opts) do - {levels, reducer, acc_in} = - case operation do - :all -> {adapter_meta.levels, &(&1 ++ &2), []} - :delete_all -> {Enum.reverse(adapter_meta.levels), &(&1 + &2), 0} - _ -> {adapter_meta.levels, &(&1 + &2), 0} - end - - Enum.reduce(levels, acc_in, fn level, acc -> - level - |> with_dynamic_cache(operation, [query, opts]) - |> reducer.(acc) - end) - end - - @impl true - defspan stream(adapter_meta, query, opts) do - Stream.resource( - fn -> - adapter_meta.levels - end, - fn - [] -> - {:halt, []} - - [level | levels] -> - elements = - level - |> with_dynamic_cache(:stream, [query, opts]) - |> Enum.to_list() - - {elements, levels} - end, - & &1 - ) - end - - ## Nebulex.Adapter.Transaction - - @impl true 
- defspan transaction(adapter_meta, opts, fun) do - # Perhaps one of the levels is a distributed adapter, - # then ensure the lock on the right cluster nodes. - nodes = - adapter_meta.levels - |> Enum.reduce([node()], fn %{name: name, cache: cache}, acc -> - if cache.__adapter__ in [Nebulex.Adapters.Partitioned, Nebulex.Adapters.Replicated] do - Cluster.get_nodes(name || cache) ++ acc - else - acc - end - end) - |> Enum.uniq() - - super(adapter_meta, Keyword.put(opts, :nodes, nodes), fun) - end - - @impl true - defspan in_transaction?(adapter_meta) do - super(adapter_meta) - end - - ## Nebulex.Adapter.Stats - - @impl true - defspan stats(adapter_meta) do - if adapter_meta.stats do - init_acc = %Nebulex.Stats{ - metadata: %{ - cache: adapter_meta.name || adapter_meta.cache, - started_at: adapter_meta.started_at - } - } - - adapter_meta.levels - |> Enum.with_index(1) - |> Enum.reduce(init_acc, &update_stats/2) - end - end - - # We can safely disable this warning since the atom created dynamically is - # always re-used; the number of levels is limited and known before hand. 
- # sobelow_skip ["DOS.BinToAtom"] - defp update_stats({meta, idx}, stats_acc) do - if stats = with_dynamic_cache(meta, :stats, []) do - level_idx = :"l#{idx}" - measurements = Map.put(stats_acc.measurements, level_idx, stats.measurements) - metadata = Map.put(stats_acc.metadata, level_idx, stats.metadata) - %{stats_acc | measurements: measurements, metadata: metadata} - else - stats_acc - end - end - - ## Helpers - - defp with_dynamic_cache(%{cache: cache, name: nil}, action, args) do - apply(cache, action, args) - end - - defp with_dynamic_cache(%{cache: cache, name: name}, action, args) do - cache.with_dynamic_cache(name, fn -> - apply(cache, action, args) - end) - end - - defp eval(%{levels: levels}, fun, args, opts) do - eval(levels, fun, args, opts) - end - - defp eval(levels, fun, args, opts) when is_list(levels) do - opts - |> levels(levels) - |> eval(fun, args) - end - - defp eval([level_meta | next], fun, args) do - Enum.reduce(next, with_dynamic_cache(level_meta, fun, args), fn l_meta, acc -> - ^acc = with_dynamic_cache(l_meta, fun, args) - end) - end - - defp levels(opts, levels) do - levels = - case Keyword.get(opts, :level) do - nil -> levels - level -> [Enum.at(levels, level - 1)] - end - - if Keyword.get(opts, :reverse) do - Enum.reverse(levels) - else - levels - end - end - - defp eval_while(%{levels: levels}, fun, args, init) do - Enum.reduce_while(levels, init, fn level_meta, acc -> - if return = with_dynamic_cache(level_meta, fun, args), - do: {:halt, return}, - else: {:cont, acc} - end) - end - - defp delete_from_levels(levels, entries) do - for level_meta <- levels, {key, _} <- entries do - with_dynamic_cache(level_meta, :delete, [key, []]) - end - end - - defp maybe_replicate({nil, _}, _, _) do - nil - end - - defp maybe_replicate({value, [level_meta | [_ | _] = levels]}, key, :inclusive) do - ttl = with_dynamic_cache(level_meta, :ttl, [key]) || :infinity - - :ok = - Enum.each(levels, fn l_meta -> - _ = with_dynamic_cache(l_meta, :put, [key, 
value, [ttl: ttl]]) - end) - - value - end - - defp maybe_replicate({value, _levels}, _key, _model) do - value - end -end diff --git a/lib/nebulex/adapters/nil.ex b/lib/nebulex/adapters/nil.ex index 1203f47a..f078e564 100644 --- a/lib/nebulex/adapters/nil.ex +++ b/lib/nebulex/adapters/nil.ex @@ -1,23 +1,23 @@ defmodule Nebulex.Adapters.Nil do @moduledoc """ - The **Nil adapter** is a special cache adapter that disables the cache; - it loses all the items saved on it and it returns `nil` for all the read - and `true` for all save operations. This adapter is mostly useful for tests. + The Nil adapter is a special cache adapter that turns off the cache. It loses + all the items saved on it and returns nil for all read operations and true for + all save operations. This adapter is mostly useful for tests. ## Example Suppose you have an application using Ecto for database access and Nebulex for caching. Then, you have defined a cache and a repo within it. Since you - are using a database, there might be some cases you may want to disable the - cache to avoid issues when running the test, for example, in some test cases, - when accessing the database you expect no data at all, but you could retrieve - the data from cache anyway because maybe it was cached in a previous test. - Therefore, you have to delete all entries from the cache before to run each - test to make sure the cache is always empty. This is where the Nil adapter - comes in, instead of adding code to flush the cache before each test, you - could define a test cache using the Nil adapter for the tests. - - One one hand, you have defined the cache in your application within + are using a database, there might be some cases where you may want to turn + off the cache to avoid issues when running the test. For example, in some + test cases, when accessing the database, you expect no data, but you can + still get unexpected data since the cache is not flushed. 
Therefore, you + must delete all entries from the cache before running each test to ensure + the cache is always empty. Here is where the Nil adapter comes in, instead + of adding code to flush the cache before each test, you could define a test + cache using the Nil adapter for the tests. + + On one hand, you have defined the cache in your application within `lib/my_app/cache.ex`: defmodule MyApp.Cache do @@ -26,7 +26,7 @@ defmodule Nebulex.Adapters.Nil do adapter: Nebulex.Adapters.Local end - And on the other hand, in the tests you have defined the test cache within + On the other hand, in the tests, you have defined the test cache within `test/support/test_cache.ex`: defmodule MyApp.TestCache do @@ -35,18 +35,17 @@ defmodule Nebulex.Adapters.Nil do adapter: Nebulex.Adapters.Nil end - Now, we have to tell the app what cache to use depending on the environment, - for tests we want `MyApp.TestCache`, otherwise it is always `MyApp.Cache`. - We can do this very easy by introducing a new config parameter to decide - what cache module to use. For tests you can define the config - `config/test.exs`: + You must tell the app what cache to use depending on the environment. For + tests, you configure `MyApp.TestCache`; otherwise, it is always `MyApp.Cache`. + You can do this by introducing a new config parameter to decide which cache + module to use. For tests, you can define the config `config/test.exs`: config :my_app, nebulex_cache: MyApp.TestCache, ... The final piece is to read the config parameter and start the cache properly. - Within `lib/my_app/application.ex` you could have: + Within `lib/my_app/application.ex`, you could have: def start(_type, _args) do children = [ @@ -55,75 +54,89 @@ defmodule Nebulex.Adapters.Nil do ... - As you can see, by default `MyApp.Cache` is always used, unless the - `:nebulex_cache` option points to a different module, which will be - when tests are executed (`:test` env). 
+ As you may notice, `MyApp.Cache` is used by default unless the + `:nebulex_cache` option points to a different module, which will + be when tests are executed (`test` env). """ # Provide Cache Implementation @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry + @behaviour Nebulex.Adapter.KV @behaviour Nebulex.Adapter.Queryable @behaviour Nebulex.Adapter.Persistence - @behaviour Nebulex.Adapter.Stats # Inherit default transaction implementation use Nebulex.Adapter.Transaction + # Inherit default info implementation + use Nebulex.Adapters.Common.Info + + import Nebulex.Utils, only: [wrap_error: 2] + + alias Nebulex.Adapters.Common.Info.Stats + ## Nebulex.Adapter @impl true defmacro __before_compile__(_env), do: :ok @impl true - def init(_opts) do - child_spec = Supervisor.child_spec({Agent, fn -> :ok end}, id: {Agent, 1}) - {:ok, child_spec, %{}} - end + def init(opts) do + telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) - ## Nebulex.Adapter.Entry + child_spec = Supervisor.child_spec({Agent, fn -> :ok end}, id: Agent) - @impl true - def get(_, _, _), do: nil + {:ok, child_spec, %{stats_counter: Stats.init(telemetry_prefix)}} + end + + ## Nebulex.Adapter.KV @impl true - def get_all(_, _, _), do: %{} + def fetch(_adapter_meta, key, _) do + wrap_error Nebulex.KeyError, key: key + end @impl true - def put(_, _, _, _, _, _), do: true + def put(_, _, _, _, _, _), do: {:ok, true} @impl true - def put_all(_, _, _, _, _), do: true + def put_all(_, _, _, _, _), do: {:ok, true} @impl true def delete(_, _, _), do: :ok @impl true - def take(_, _, _), do: nil + def take(_adapter_meta, key, _) do + wrap_error Nebulex.KeyError, key: key + end @impl true - def has_key?(_, _), do: false + def has_key?(_, _, _), do: {:ok, false} @impl true - def ttl(_, _), do: nil + def ttl(_adapter_meta, key, _opts) do + wrap_error Nebulex.KeyError, key: key + end @impl true - def expire(_, _, _), do: true + def expire(_, _, _, _), do: {:ok, false} @impl true - def touch(_, _), 
do: true + def touch(_, _, _), do: {:ok, false} @impl true - def update_counter(_, _, amount, _, default, _), do: default + amount + def update_counter(_, _, amount, _, default, _) do + {:ok, default + amount} + end ## Nebulex.Adapter.Queryable @impl true - def execute(_, :all, _, _), do: [] - def execute(_, _, _, _), do: 0 + def execute(_, %{op: :get_all}, _), do: {:ok, []} + def execute(_, _, _), do: {:ok, 0} @impl true - def stream(_, _, _), do: Stream.each([], & &1) + def stream(_, _, _), do: {:ok, Stream.each([], & &1)} ## Nebulex.Adapter.Persistence @@ -132,9 +145,4 @@ defmodule Nebulex.Adapters.Nil do @impl true def load(_, _, _), do: :ok - - ## Nebulex.Adapter.Stats - - @impl true - def stats(_), do: %Nebulex.Stats{} end diff --git a/lib/nebulex/adapters/partitioned.ex b/lib/nebulex/adapters/partitioned.ex deleted file mode 100644 index b8b67c15..00000000 --- a/lib/nebulex/adapters/partitioned.ex +++ /dev/null @@ -1,865 +0,0 @@ -defmodule Nebulex.Adapters.Partitioned do - @moduledoc ~S""" - Built-in adapter for partitioned cache topology. - - ## Overall features - - * Partitioned cache topology (Sharding Distribution Model). - * Configurable primary storage adapter. - * Configurable Keyslot to distributed the keys across the cluster members. - * Support for transactions via Erlang global name registration facility. - * Stats support rely on the primary storage adapter. - - ## Partitioned Cache Topology - - There are several key points to consider about a partitioned cache: - - * _**Partitioned**_: The data in a distributed cache is spread out over - all the servers in such a way that no two servers are responsible for - the same piece of cached data. This means that the size of the cache - and the processing power associated with the management of the cache - can grow linearly with the size of the cluster. Also, it means that - operations against data in the cache can be accomplished with a - "single hop," in other words, involving at most one other server. 
- - * _**Load-Balanced**_: Since the data is spread out evenly over the - servers, the responsibility for managing the data is automatically - load-balanced across the cluster. - - * _**Ownership**_: Exactly one node in the cluster is responsible for each - piece of data in the cache. - - * _**Point-To-Point**_: The communication for the partitioned cache is all - point-to-point, enabling linear scalability. - - * _**Location Transparency**_: Although the data is spread out across - cluster nodes, the exact same API is used to access the data, and the - same behavior is provided by each of the API methods. This is called - location transparency, which means that the developer does not have to - code based on the topology of the cache, since the API and its behavior - will be the same with a local cache, a replicated cache, or a distributed - cache. - - * _**Failover**_: Failover of a distributed cache involves promoting backup - data to be primary storage. When a cluster node fails, all remaining - cluster nodes determine what data each holds in backup that the failed - cluster node had primary responsible for when it died. Those data becomes - the responsibility of whatever cluster node was the backup for the data. - However, this adapter does not provide fault-tolerance implementation, - each piece of data is kept in a single node/machine (via sharding), then, - if a node fails, the data kept by this node won't be available for the - rest of the cluster members. - - > Based on **"Distributed Caching Essential Lessons"** by **Cameron Purdy** - and [Coherence Partitioned Cache Service][oracle-pcs]. - - [oracle-pcs]: https://docs.oracle.com/cd/E13924_01/coh.340/e13819/partitionedcacheservice.htm - - ## Additional implementation notes - - `:pg2` or `:pg` (>= OTP 23) is used under-the-hood by the adapter to manage - the cluster nodes. When the partitioned cache is started in a node, it creates - a group and joins it (the cache supervisor PID is joined to the group). 
Then, - when a function is invoked, the adapter picks a node from the group members, - and then the function is executed on that specific node. In the same way, - when a partitioned cache supervisor dies (the cache is stopped or killed for - some reason), the PID of that process is automatically removed from the PG - group; this is why it's recommended to use consistent hashing for distributing - the keys across the cluster nodes. - - > **NOTE:** `pg2` will be replaced by `pg` in future, since the `pg2` module - is deprecated as of OTP 23 and scheduled for removal in OTP 24. - - This adapter depends on a local cache adapter (primary storage), it adds - a thin layer on top of it in order to distribute requests across a group - of nodes, where is supposed the local cache is running already. However, - you don't need to define any additional cache module for the primary - storage, instead, the adapter initializes it automatically (it adds the - primary storage as part of the supervision tree) based on the given - options within the `primary_storage_adapter:` argument. - - ## Usage - - When used, the Cache expects the `:otp_app` and `:adapter` as options. - The `:otp_app` should point to an OTP application that has the cache - configuration. For example: - - defmodule MyApp.PartitionedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Partitioned - end - - Optionally, you can configure the desired primary storage adapter with the - option `:primary_storage_adapter`; defaults to `Nebulex.Adapters.Local`. 
- - defmodule MyApp.PartitionedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Partitioned, - primary_storage_adapter: Nebulex.Adapters.Local - end - - Also, you can provide a custom keyslot function: - - defmodule MyApp.PartitionedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Partitioned, - primary_storage_adapter: Nebulex.Adapters.Local - - @behaviour Nebulex.Adapter.Keyslot - - @impl true - def hash_slot(key, range) do - key - |> :erlang.phash2() - |> :jchash.compute(range) - end - end - - Where the configuration for the cache must be in your application environment, - usually defined in your `config/config.exs`: - - config :my_app, MyApp.PartitionedCache, - keyslot: MyApp.PartitionedCache, - primary: [ - gc_interval: 3_600_000, - backend: :shards - ] - - If your application was generated with a supervisor (by passing `--sup` - to `mix new`) you will have a `lib/my_app/application.ex` file containing - the application start callback that defines and starts your supervisor. - You just need to edit the `start/2` function to start the cache as a - supervisor on your application's supervisor: - - def start(_type, _args) do - children = [ - {MyApp.PartitionedCache, []}, - ... - ] - - See `Nebulex.Cache` for more information. - - ## Options - - This adapter supports the following options and all of them can be given via - the cache configuration: - - * `:primary` - The options that will be passed to the adapter associated - with the local primary storage. These options will depend on the local - adapter to use. - - * `:keyslot` - Defines the module implementing `Nebulex.Adapter.Keyslot` - behaviour. - - * `:task_supervisor_opts` - Start-time options passed to - `Task.Supervisor.start_link/1` when the adapter is initialized. - - * `:join_timeout` - Interval time in milliseconds for joining the - running partitioned cache to the cluster. This is to ensure it is - always joined. 
Defaults to `:timer.seconds(180)`. - - ## Shared options - - Almost all of the cache functions outlined in `Nebulex.Cache` module - accept the following options: - - * `:timeout` - The time-out value in milliseconds for the command that - will be executed. If the timeout is exceeded, then the current process - will exit. For executing a command on remote nodes, this adapter uses - `Task.await/2` internally for receiving the result, so this option tells - how much time the adapter should wait for it. If the timeout is exceeded, - the task is shut down but the current process doesn't exit, only the - result associated with that task is skipped in the reduce phase. - - ## Telemetry events - - This adapter emits all recommended Telemetry events, and documented - in `Nebulex.Cache` module (see **"Adapter-specific events"** section). - - Since the partitioned adapter depends on the configured primary storage - adapter (local cache adapter), this one may also emit Telemetry events. - Therefore, there will be events emitted by the partitioned adapter as well - as the primary storage adapter. For example, for the cache defined before - `MyApp.PartitionedCache`, these would be the emitted events: - - * `[:my_app, :partitioned_cache, :command, :start]` - * `[:my_app, :partitioned_cache, :primary, :command, :start]` - * `[:my_app, :partitioned_cache, :command, :stop]` - * `[:my_app, :partitioned_cache, :primary, :command, :stop]` - * `[:my_app, :partitioned_cache, :command, :exception]` - * `[:my_app, :partitioned_cache, :primary, :command, :exception]` - - As you may notice, the telemetry prefix by default for the partitioned cache - is `[:my_app, :partitioned_cache]`, and the prefix for its primary storage - `[:my_app, :partitioned_cache, :primary]`. - - See also the [Telemetry guide](http://hexdocs.pm/nebulex/telemetry.html) - for more information and examples. 
- - ## Adapter-specific telemetry events - - This adapter exposes following Telemetry events: - - * `telemetry_prefix ++ [:bootstrap, :started]` - Dispatched by the adapter - when the bootstrap process is started. - - * Measurements: `%{system_time: non_neg_integer}` - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - cluster_nodes: [node] - } - ``` - - * `telemetry_prefix ++ [:bootstrap, :stopped]` - Dispatched by the adapter - when the bootstrap process is stopped. - - * Measurements: `%{system_time: non_neg_integer}` - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - cluster_nodes: [node], - reason: term - } - ``` - - * `telemetry_prefix ++ [:bootstrap, :exit]` - Dispatched by the adapter - when the bootstrap has received an exit signal. - - * Measurements: `%{system_time: non_neg_integer}` - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - cluster_nodes: [node], - reason: term - } - ``` - - * `telemetry_prefix ++ [:bootstrap, :joined]` - Dispatched by the adapter - when the bootstrap has joined the cache to the cluster. - - * Measurements: `%{system_time: non_neg_integer}` - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - cluster_nodes: [node] - } - ``` - - ## Stats - - This adapter depends on the primary storage adapter for the stats support. - Therefore, it is important to ensure the underlying primary storage adapter - does support stats, otherwise, you may get unexpected errors. - - ## Extended API - - This adapter provides some additional convenience functions to the - `Nebulex.Cache` API. 
- - Retrieving the primary storage or local cache module: - - MyCache.__primary__() - - Retrieving the cluster nodes associated with the given cache `name`: - - MyCache.nodes() - - Get a cluster node based on the given `key`: - - MyCache.get_node("mykey") - - Joining the cache to the cluster: - - MyCache.join_cluster() - - Leaving the cluster (removes the cache from the cluster): - - MyCache.leave_cluster() - - ## Caveats of partitioned adapter - - For `c:Nebulex.Cache.get_and_update/3` and `c:Nebulex.Cache.update/4`, - they both have a parameter that is the anonymous function, and it is compiled - into the module where it is created, which means it necessarily doesn't exists - on remote nodes. To ensure they work as expected, you must provide functions - from modules existing in all nodes of the group. - """ - - # Provide Cache Implementation - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - @behaviour Nebulex.Adapter.Stats - - # Inherit default transaction implementation - use Nebulex.Adapter.Transaction - - # Inherit default persistence implementation - use Nebulex.Adapter.Persistence - - # Inherit default keyslot implementation - use Nebulex.Adapter.Keyslot - - import Nebulex.Adapter - import Nebulex.Helpers - - alias Nebulex.Cache.Cluster - alias Nebulex.RPC - - ## Nebulex.Adapter - - @impl true - defmacro __before_compile__(env) do - otp_app = Module.get_attribute(env.module, :otp_app) - opts = Module.get_attribute(env.module, :opts) - primary = Keyword.get(opts, :primary_storage_adapter, Nebulex.Adapters.Local) - - quote do - defmodule Primary do - @moduledoc """ - This is the cache for the primary storage. - """ - use Nebulex.Cache, - otp_app: unquote(otp_app), - adapter: unquote(primary) - end - - @doc """ - A convenience function for getting the primary storage cache. - """ - def __primary__, do: Primary - - @doc """ - A convenience function for getting the cluster nodes. 
- """ - def nodes do - Cluster.get_nodes(get_dynamic_cache()) - end - - @doc """ - A convenience function to get the node of the given `key`. - """ - def get_node(key) do - with_meta(get_dynamic_cache(), fn _adapter, %{name: name, keyslot: keyslot} -> - Cluster.get_node(name, key, keyslot) - end) - end - - @doc """ - A convenience function for joining the cache to the cluster. - """ - def join_cluster do - Cluster.join(get_dynamic_cache()) - end - - @doc """ - A convenience function for removing the cache from the cluster. - """ - def leave_cluster do - Cluster.leave(get_dynamic_cache()) - end - end - end - - @impl true - def init(opts) do - # Required options - telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) - telemetry = Keyword.fetch!(opts, :telemetry) - cache = Keyword.fetch!(opts, :cache) - name = opts[:name] || cache - - # Maybe use stats - stats = get_boolean_option(opts, :stats) - - # Primary cache options - primary_opts = - Keyword.merge( - [telemetry_prefix: telemetry_prefix ++ [:primary], telemetry: telemetry, stats: stats], - Keyword.get(opts, :primary, []) - ) - - # Maybe put a name to primary storage - primary_opts = - if opts[:name], - do: [name: normalize_module_name([name, Primary])] ++ primary_opts, - else: primary_opts - - # Keyslot module for selecting nodes - keyslot = - opts - |> get_option(:keyslot, "an atom", &is_atom/1, __MODULE__) - |> assert_behaviour(Nebulex.Adapter.Keyslot, "keyslot") - - # Maybe task supervisor for distributed tasks - {task_sup_name, children} = task_sup_child_spec(name, opts) - - # Prepare metadata - adapter_meta = %{ - telemetry_prefix: telemetry_prefix, - telemetry: telemetry, - name: name, - primary_name: primary_opts[:name], - task_sup: task_sup_name, - keyslot: keyslot, - stats: stats - } - - # Prepare child_spec - child_spec = - Nebulex.Adapters.Supervisor.child_spec( - name: normalize_module_name([name, Supervisor]), - strategy: :rest_for_one, - children: [ - {cache.__primary__, primary_opts}, - 
{__MODULE__.Bootstrap, {Map.put(adapter_meta, :cache, cache), opts}} - | children - ] - ) - - {:ok, child_spec, adapter_meta} - end - - if Code.ensure_loaded?(:erpc) do - defp task_sup_child_spec(_name, _opts) do - {nil, []} - end - else - defp task_sup_child_spec(name, opts) do - # task supervisor to execute parallel and/or remote commands - task_sup_name = normalize_module_name([name, TaskSupervisor]) - task_sup_opts = Keyword.get(opts, :task_supervisor_opts, []) - - children = [ - {Task.Supervisor, [name: task_sup_name] ++ task_sup_opts} - ] - - {task_sup_name, children} - end - end - - ## Nebulex.Adapter.Entry - - @impl true - defspan get(adapter_meta, key, opts) do - call(adapter_meta, key, :get, [key, opts], opts) - end - - @impl true - defspan get_all(adapter_meta, keys, opts) do - map_reduce( - keys, - adapter_meta, - :get_all, - [opts], - Keyword.get(opts, :timeout), - { - %{}, - fn - {:ok, res}, _, acc when is_map(res) -> - Map.merge(acc, res) - - _, _, acc -> - acc - end - } - ) - end - - @impl true - defspan put(adapter_meta, key, value, _ttl, on_write, opts) do - case on_write do - :put -> - :ok = call(adapter_meta, key, :put, [key, value, opts], opts) - true - - :put_new -> - call(adapter_meta, key, :put_new, [key, value, opts], opts) - - :replace -> - call(adapter_meta, key, :replace, [key, value, opts], opts) - end - end - - @impl true - defspan put_all(adapter_meta, entries, _ttl, on_write, opts) do - case on_write do - :put -> - do_put_all(:put_all, adapter_meta, entries, opts) - - :put_new -> - do_put_all(:put_new_all, adapter_meta, entries, opts) - end - end - - def do_put_all(action, adapter_meta, entries, opts) do - reducer = { - {true, []}, - fn - {:ok, :ok}, {_, {_, _, [_, _, [kv, _]]}}, {bool, acc} -> - {bool, Enum.reduce(kv, acc, &[elem(&1, 0) | &2])} - - {:ok, true}, {_, {_, _, [_, _, [kv, _]]}}, {bool, acc} -> - {bool, Enum.reduce(kv, acc, &[elem(&1, 0) | &2])} - - {:ok, false}, _, {_, acc} -> - {false, acc} - - {:error, _}, _, {_, acc} 
-> - {false, acc} - end - } - - entries - |> map_reduce( - adapter_meta, - action, - [opts], - Keyword.get(opts, :timeout), - reducer - ) - |> case do - {true, _} -> - true - - {false, keys} -> - :ok = Enum.each(keys, &delete(adapter_meta, &1, [])) - action == :put_all - end - end - - @impl true - defspan delete(adapter_meta, key, opts) do - call(adapter_meta, key, :delete, [key, opts], opts) - end - - @impl true - defspan take(adapter_meta, key, opts) do - call(adapter_meta, key, :take, [key, opts], opts) - end - - @impl true - defspan has_key?(adapter_meta, key) do - call(adapter_meta, key, :has_key?, [key]) - end - - @impl true - defspan update_counter(adapter_meta, key, amount, _ttl, _default, opts) do - call(adapter_meta, key, :incr, [key, amount, opts], opts) - end - - @impl true - defspan ttl(adapter_meta, key) do - call(adapter_meta, key, :ttl, [key]) - end - - @impl true - defspan expire(adapter_meta, key, ttl) do - call(adapter_meta, key, :expire, [key, ttl]) - end - - @impl true - defspan touch(adapter_meta, key) do - call(adapter_meta, key, :touch, [key]) - end - - ## Nebulex.Adapter.Queryable - - @impl true - defspan execute(adapter_meta, operation, query, opts) do - reducer = - case operation do - :all -> &List.flatten/1 - _ -> &Enum.sum/1 - end - - adapter_meta.task_sup - |> RPC.multi_call( - Cluster.get_nodes(adapter_meta.name), - __MODULE__, - :with_dynamic_cache, - [adapter_meta, operation, [query, opts]], - opts - ) - |> handle_rpc_multi_call(operation, reducer) - end - - @impl true - defspan stream(adapter_meta, query, opts) do - Stream.resource( - fn -> - Cluster.get_nodes(adapter_meta.name) - end, - fn - [] -> - {:halt, []} - - [node | nodes] -> - elements = - rpc_call( - adapter_meta.task_sup, - node, - __MODULE__, - :eval_stream, - [adapter_meta, query, opts], - opts - ) - - {elements, nodes} - end, - & &1 - ) - end - - ## Nebulex.Adapter.Persistence - - @impl true - defspan dump(adapter_meta, path, opts) do - super(adapter_meta, path, opts) 
- end - - @impl true - defspan load(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - ## Nebulex.Adapter.Transaction - - @impl true - defspan transaction(adapter_meta, opts, fun) do - super(adapter_meta, Keyword.put(opts, :nodes, Cluster.get_nodes(adapter_meta.name)), fun) - end - - @impl true - defspan in_transaction?(adapter_meta) do - super(adapter_meta) - end - - ## Nebulex.Adapter.Stats - - @impl true - defspan stats(adapter_meta) do - with_dynamic_cache(adapter_meta, :stats, []) - end - - ## Helpers - - @doc """ - Helper function to use dynamic cache for internal primary cache storage - when needed. - """ - def with_dynamic_cache(adapter_meta, action, args) - - def with_dynamic_cache(%{cache: cache, primary_name: nil}, action, args) do - apply(cache.__primary__, action, args) - end - - def with_dynamic_cache(%{cache: cache, primary_name: primary_name}, action, args) do - cache.__primary__.with_dynamic_cache(primary_name, fn -> - apply(cache.__primary__, action, args) - end) - end - - @doc """ - Helper to perform `stream/3` locally. 
- """ - def eval_stream(meta, query, opts) do - meta - |> with_dynamic_cache(:stream, [query, opts]) - |> Enum.to_list() - end - - ## Private Functions - - defp get_node(%{name: name, keyslot: keyslot}, key) do - Cluster.get_node(name, key, keyslot) - end - - defp call(adapter_meta, key, action, args, opts \\ []) do - adapter_meta - |> get_node(key) - |> rpc_call(adapter_meta, action, args, opts) - end - - defp rpc_call(node, %{task_sup: task_sup} = meta, fun, args, opts) do - rpc_call(task_sup, node, __MODULE__, :with_dynamic_cache, [meta, fun, args], opts) - end - - if Code.ensure_loaded?(:erpc) do - defp rpc_call(supervisor, node, mod, fun, args, opts) do - RPC.call(supervisor, node, mod, fun, args, opts[:timeout] || 5000) - end - else - defp rpc_call(supervisor, node, mod, fun, args, opts) do - case RPC.call(supervisor, node, mod, fun, args, opts[:timeout] || 5000) do - {:badrpc, remote_ex} -> - raise remote_ex - - response -> - response - end - end - end - - defp group_keys_by_node(enum, adapter_meta, :get_all) do - Enum.reduce(enum, %{}, fn - key, acc -> - node = get_node(adapter_meta, key) - Map.put(acc, node, [key | Map.get(acc, node, [])]) - end) - end - - @put_all_actions [:put_all, :put_new_all] - defp group_keys_by_node(enum, adapter_meta, put_all_action) - when put_all_action in @put_all_actions do - Enum.reduce(enum, %{}, fn - {key, _} = entry, acc -> - node = get_node(adapter_meta, key) - Map.put(acc, node, [entry | Map.get(acc, node, [])]) - end) - end - - defp map_reduce( - enum, - %{task_sup: task_sup} = meta, - action, - args, - timeout, - reducer - ) do - groups = - enum - |> group_keys_by_node(meta, action) - |> Enum.map(fn {node, group} -> - {node, {__MODULE__, :with_dynamic_cache, [meta, action, [group | args]]}} - end) - - RPC.multi_call(task_sup, groups, timeout: timeout, reducer: reducer) - end - - defp handle_rpc_multi_call({res, []}, _action, fun) do - fun.(res) - end - - defp handle_rpc_multi_call({responses, errors}, action, _) do - 
raise Nebulex.RPCMultiCallError, action: action, responses: responses, errors: errors - end -end - -defmodule Nebulex.Adapters.Partitioned.Bootstrap do - @moduledoc false - use GenServer - - import Nebulex.Helpers - - alias Nebulex.Cache.Cluster - alias Nebulex.Telemetry - - # Default join timeout - @join_timeout :timer.seconds(180) - - # State - defstruct [:adapter_meta, :join_timeout] - - ## API - - @doc false - def start_link({%{name: name}, _} = state) do - GenServer.start_link( - __MODULE__, - state, - name: normalize_module_name([name, Bootstrap]) - ) - end - - ## GenServer Callbacks - - @impl true - def init({adapter_meta, opts}) do - # Trap exit signals to run cleanup job - _ = Process.flag(:trap_exit, true) - - # Bootstrap started - :ok = dispatch_telemetry_event(:started, adapter_meta) - - # Ensure joining the cluster when the cache supervision tree is started - :ok = Cluster.join(adapter_meta.name) - - # Bootstrap joined the cache to the cluster - :ok = dispatch_telemetry_event(:joined, adapter_meta) - - # Build initial state - state = build_state(adapter_meta, opts) - - # Start bootstrap process - {:ok, state, state.join_timeout} - end - - @impl true - def handle_info(message, state) - - def handle_info(:timeout, %__MODULE__{adapter_meta: adapter_meta} = state) do - # Ensure it is always joined to the cluster - :ok = Cluster.join(adapter_meta.name) - - # Bootstrap joined the cache to the cluster - :ok = dispatch_telemetry_event(:joined, adapter_meta) - - {:noreply, state, state.join_timeout} - end - - def handle_info({:EXIT, _from, reason}, %__MODULE__{adapter_meta: adapter_meta} = state) do - # Bootstrap received exit signal - :ok = dispatch_telemetry_event(:exit, adapter_meta, %{reason: reason}) - - {:stop, reason, state} - end - - @impl true - def terminate(reason, %__MODULE__{adapter_meta: adapter_meta}) do - # Ensure leaving the cluster when the cache stops - :ok = Cluster.leave(adapter_meta.name) - - # Bootstrap stopped or terminated - :ok = 
dispatch_telemetry_event(:stopped, adapter_meta, %{reason: reason}) - end - - ## Private Functions - - defp build_state(adapter_meta, opts) do - # Join timeout to ensure it is always joined to the cluster - join_timeout = - get_option( - opts, - :join_timeout, - "an integer > 0", - &(is_integer(&1) and &1 > 0), - @join_timeout - ) - - %__MODULE__{adapter_meta: adapter_meta, join_timeout: join_timeout} - end - - defp dispatch_telemetry_event(event, adapter_meta, meta \\ %{}) do - Telemetry.execute( - adapter_meta.telemetry_prefix ++ [:bootstrap, event], - %{system_time: System.system_time()}, - Map.merge(meta, %{ - adapter_meta: adapter_meta, - cluster_nodes: Cluster.get_nodes(adapter_meta.name) - }) - ) - end -end diff --git a/lib/nebulex/adapters/replicated.ex b/lib/nebulex/adapters/replicated.ex deleted file mode 100644 index 7502be76..00000000 --- a/lib/nebulex/adapters/replicated.ex +++ /dev/null @@ -1,833 +0,0 @@ -defmodule Nebulex.Adapters.Replicated do - @moduledoc ~S""" - Built-in adapter for replicated cache topology. - - ## Overall features - - * Replicated cache topology. - * Configurable primary storage adapter. - * Cache-level locking when deleting all entries or adding new nodes. - * Key-level (or entry-level) locking for key-based write-like operations. - * Support for transactions via Erlang global name registration facility. - * Stats support rely on the primary storage adapter. - - ## Replicated Cache Topology - - A replicated cache is a clustered, fault tolerant cache where data is fully - replicated to every member in the cluster. This cache offers the fastest read - performance with linear performance scalability for reads but poor scalability - for writes (as writes must be processed by every member in the cluster). - Because data is replicated to all servers, adding servers does not increase - aggregate cache capacity. - - There are several challenges to building a reliably replicated cache. 
The - first is how to get it to scale and perform well. Updates to the cache have - to be sent to all cluster nodes, and all cluster nodes have to end up with - the same data, even if multiple updates to the same piece of data occur at - the same time. Also, if a cluster node requests a lock, ideally it should - not have to get all cluster nodes to agree on the lock or at least do it in - a very efficient way (`:global` is used here), otherwise it will scale - extremely poorly; yet in the case of a cluster node failure, all of the data - and lock information must be kept safely. - - The best part of a replicated cache is its access speed. Since the data is - replicated to each cluster node, it is available for use without any waiting. - This is referred to as "zero latency access," and is perfect for situations - in which an application requires the highest possible speed in its data - access. - - However, there are some limitations: - - * _**Cost Per Update**_ - Updating a replicated cache requires pushing - the new version of the data to all other cluster members, which will - limit scalability if there is a high frequency of updates per member. - - * _**Cost Per Entry**_ - The data is replicated to every cluster member, - so Memory Heap space is used on each member, which will impact - performance for large caches. - - > Based on **"Distributed Caching Essential Lessons"** by **Cameron Purdy**. - - ## Usage - - When used, the Cache expects the `:otp_app` and `:adapter` as options. - The `:otp_app` should point to an OTP application that has the cache - configuration. For example: - - defmodule MyApp.ReplicatedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Replicated - end - - Optionally, you can configure the desired primary storage adapter with the - option `:primary_storage_adapter`; defaults to `Nebulex.Adapters.Local`. 
- - defmodule MyApp.ReplicatedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Replicated, - primary_storage_adapter: Nebulex.Adapters.Local - end - - The configuration for the cache must be in your application environment, - usually defined in your `config/config.exs`: - - config :my_app, MyApp.ReplicatedCache, - primary: [ - gc_interval: 3_600_000, - backend: :shards - ] - - If your application was generated with a supervisor (by passing `--sup` - to `mix new`) you will have a `lib/my_app/application.ex` file containing - the application start callback that defines and starts your supervisor. - You just need to edit the `start/2` function to start the cache as a - supervisor on your application's supervisor: - - def start(_type, _args) do - children = [ - {MyApp.ReplicatedCache, []}, - ... - ] - - See `Nebulex.Cache` for more information. - - ## Options - - This adapter supports the following options and all of them can be given via - the cache configuration: - - * `:primary` - The options that will be passed to the adapter associated - with the local primary storage. These options will depend on the local - adapter to use. - - * `:task_supervisor_opts` - Start-time options passed to - `Task.Supervisor.start_link/1` when the adapter is initialized. - - ## Shared options - - Almost all of the cache functions outlined in `Nebulex.Cache` module - accept the following options: - - * `:timeout` - The time-out value in milliseconds for the command that - will be executed. If the timeout is exceeded, then the current process - will exit. For executing a command on remote nodes, this adapter uses - `Task.await/2` internally for receiving the result, so this option tells - how much time the adapter should wait for it. If the timeout is exceeded, - the task is shut down but the current process doesn't exit, only the - result associated with that task is skipped in the reduce phase. 
- - ## Telemetry events - - This adapter emits all recommended Telemetry events, and documented - in `Nebulex.Cache` module (see **"Adapter-specific events"** section). - - Since the replicated adapter depends on the configured primary storage - adapter (local cache adapter), this one may also emit Telemetry events. - Therefore, there will be events emitted by the replicated adapter as well - as the primary storage adapter. For example, for the cache defined before - `MyApp.ReplicatedCache`, these would be the emitted events: - - * `[:my_app, :replicated_cache, :command, :start]` - * `[:my_app, :replicated_cache, :primary, :command, :start]` - * `[:my_app, :replicated_cache, :command, :stop]` - * `[:my_app, :replicated_cache, :primary, :command, :stop]` - * `[:my_app, :replicated_cache, :command, :exception]` - * `[:my_app, :replicated_cache, :primary, :command, :exception]` - - As you may notice, the telemetry prefix by default for the replicated cache - is `[:my_app, :replicated_cache]`, and the prefix for its primary storage - `[:my_app, :replicated_cache, :primary]`. - - See also the [Telemetry guide](http://hexdocs.pm/nebulex/telemetry.html) - for more information and examples. - - ## Stats - - This adapter depends on the primary storage adapter for the stats support. - Therefore, it is important to ensure the underlying primary storage adapter - does support stats, otherwise, you may get unexpected errors. - - ## Extended API - - This adapter provides some additional convenience functions to the - `Nebulex.Cache` API. 
- - Retrieving the primary storage or local cache module: - - MyCache.__primary__() - - Retrieving the cluster nodes associated with the given cache name: - - MyCache.nodes() - - Joining the cache to the cluster: - - MyCache.join_cluster() - - Leaving the cluster (removes the cache from the cluster): - - MyCache.leave_cluster() - - ## Adapter-specific telemetry events - - This adapter exposes following Telemetry events: - - * `telemetry_prefix ++ [:replication]` - Dispatched by the adapter - when a replication error occurs due to a write-like operation - under-the-hood. - - * Measurements: `%{rpc_errors: non_neg_integer}` - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - rpc_errors: [{node, error :: term}] - } - ``` - - * `telemetry_prefix ++ [:bootstrap]` - Dispatched by the adapter at start - time when there are errors while syncing up with the cluster nodes. - - * Measurements: - - ``` - %{ - failed_nodes: non_neg_integer, - remote_errors: non_neg_integer - } - ``` - - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - failed_nodes: [node], - remote_errors: [term] - } - ``` - - ## Caveats of replicated adapter - - As it is explained in the beginning, a replicated topology not only brings - with advantages (mostly for reads) but also with some limitations and - challenges. - - This adapter uses global locks (via `:global`) for all operation that modify - or alter the cache somehow to ensure as much consistency as possible across - all members of the cluster. These locks may be per key or for the entire cache - depending on the operation taking place. For that reason, it is very important - to be aware about those operation that can potentially lead to performance and - scalability issues, so that you can do a better usage of the replicated - adapter. 
The following is with the operations and aspects you should pay - attention to: - - * Starting and joining a new replicated node to the cluster is the most - expensive action, because all write-like operations across all members of - the cluster are blocked until the new node completes the synchronization - process, which involves copying cached data from any of the existing - cluster nodes into the new node, and this could be very expensive - depending on the number of caches entries. For that reason, adding new - nodes is considered an expensive operation that should happen only from - time to time. - - * Deleting all entries. When `c:Nebulex.Cache.delete_all/2` action is - executed, like in the previous case, all write-like operations in all - members of the cluster are blocked until the deletion action is completed - (this implies deleting all cached data from all cluster nodes). Therefore, - deleting all entries from cache is also considered an expensive operation - that should happen only from time to time. - - * Write-like operations based on a key only block operations related to - that key across all members of the cluster. This is not as critical as - the previous two cases but it is something to keep in mind anyway because - if there is a highly demanded key in terms of writes, that could be also - a potential bottleneck. - - Summing up, the replicated cache topology along with this adapter should - be used mainly when the the reads clearly dominate over the writes (e.g.: - Reads 80% and Writes 20% or less). Besides, operations like deleting all - entries from cache or adding new nodes must be executed only once in a while - to avoid performance issues, since they are very expensive. 
- """ - - # Provide Cache Implementation - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - @behaviour Nebulex.Adapter.Stats - - # Inherit default transaction implementation - use Nebulex.Adapter.Transaction - - # Inherit default persistence implementation - use Nebulex.Adapter.Persistence - - import Bitwise, only: [<<<: 2] - - import Nebulex.Adapter - import Nebulex.Helpers - - alias Nebulex.Cache.Cluster - alias Nebulex.{RPC, Telemetry} - - ## Nebulex.Adapter - - @impl true - defmacro __before_compile__(env) do - otp_app = Module.get_attribute(env.module, :otp_app) - opts = Module.get_attribute(env.module, :opts) - primary = Keyword.get(opts, :primary_storage_adapter, Nebulex.Adapters.Local) - - quote do - defmodule Primary do - @moduledoc """ - This is the cache for the primary storage. - """ - use Nebulex.Cache, - otp_app: unquote(otp_app), - adapter: unquote(primary) - end - - @doc """ - A convenience function for getting the primary storage cache. - """ - def __primary__, do: Primary - - @doc """ - A convenience function for getting the cluster nodes. - """ - def nodes do - Cluster.get_nodes(get_dynamic_cache()) - end - - @doc """ - A convenience function for joining the cache to the cluster. - """ - def join_cluster do - Cluster.join(get_dynamic_cache()) - end - - @doc """ - A convenience function for removing the cache from the cluster. 
- """ - def leave_cluster do - Cluster.leave(get_dynamic_cache()) - end - end - end - - @impl true - def init(opts) do - # Required options - telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) - telemetry = Keyword.fetch!(opts, :telemetry) - cache = Keyword.fetch!(opts, :cache) - name = opts[:name] || cache - - # Maybe use stats - stats = get_boolean_option(opts, :stats) - - # Primary cache options - primary_opts = - Keyword.merge( - [telemetry_prefix: telemetry_prefix ++ [:primary], telemetry: telemetry, stats: stats], - Keyword.get(opts, :primary, []) - ) - - # Maybe put a name to primary storage - primary_opts = - if opts[:name], - do: [name: normalize_module_name([name, Primary])] ++ primary_opts, - else: primary_opts - - # Maybe task supervisor for distributed tasks - {task_sup_name, children} = sup_child_spec(name, opts) - - # Prepare metadata - adapter_meta = %{ - telemetry_prefix: telemetry_prefix, - telemetry: telemetry, - name: name, - primary_name: primary_opts[:name], - task_sup: task_sup_name, - stats: stats - } - - # Prepare child_spec - child_spec = - Nebulex.Adapters.Supervisor.child_spec( - name: normalize_module_name([name, Supervisor]), - strategy: :rest_for_one, - children: [ - {cache.__primary__, primary_opts}, - {__MODULE__.Bootstrap, Map.put(adapter_meta, :cache, cache)} - | children - ] - ) - - {:ok, child_spec, adapter_meta} - end - - if Code.ensure_loaded?(:erpc) do - defp sup_child_spec(_name, _opts) do - {nil, []} - end - else - defp sup_child_spec(name, opts) do - # Task supervisor to execute parallel and/or remote commands - task_sup_name = normalize_module_name([name, TaskSupervisor]) - task_sup_opts = Keyword.get(opts, :task_supervisor_opts, []) - - children = [ - {Task.Supervisor, [name: task_sup_name] ++ task_sup_opts} - ] - - {task_sup_name, children} - end - end - - ## Nebulex.Adapter.Entry - - @impl true - defspan get(adapter_meta, key, opts) do - with_dynamic_cache(adapter_meta, :get, [key, opts]) - end - - @impl true - 
defspan get_all(adapter_meta, keys, opts) do - with_dynamic_cache(adapter_meta, :get_all, [keys, opts]) - end - - @impl true - defspan put(adapter_meta, key, value, _ttl, on_write, opts) do - case with_transaction(adapter_meta, on_write, [key], [key, value, opts], opts) do - :ok -> true - bool -> bool - end - end - - @impl true - defspan put_all(adapter_meta, entries, _ttl, on_write, opts) do - action = if on_write == :put_new, do: :put_new_all, else: :put_all - keys = for {k, _} <- entries, do: k - - with_transaction(adapter_meta, action, keys, [entries, opts], opts) || action == :put_all - end - - @impl true - defspan delete(adapter_meta, key, opts) do - with_transaction(adapter_meta, :delete, [key], [key, opts], opts) - end - - @impl true - defspan take(adapter_meta, key, opts) do - with_transaction(adapter_meta, :take, [key], [key, opts], opts) - end - - @impl true - defspan update_counter(adapter_meta, key, amount, _ttl, _default, opts) do - with_transaction(adapter_meta, :incr, [key], [key, amount, opts], opts) - end - - @impl true - defspan has_key?(adapter_meta, key) do - with_dynamic_cache(adapter_meta, :has_key?, [key]) - end - - @impl true - defspan ttl(adapter_meta, key) do - with_dynamic_cache(adapter_meta, :ttl, [key]) - end - - @impl true - defspan expire(adapter_meta, key, ttl) do - with_transaction(adapter_meta, :expire, [key], [key, ttl]) - end - - @impl true - defspan touch(adapter_meta, key) do - with_transaction(adapter_meta, :touch, [key], [key]) - end - - ## Nebulex.Adapter.Queryable - - @impl true - defspan execute(adapter_meta, operation, query, opts) do - do_execute(adapter_meta, operation, query, opts) - end - - defp do_execute(%{name: name} = adapter_meta, :delete_all, query, opts) do - # It is blocked until ongoing write operations finish (if there is any). - # Similarly, while it is executed, all later write-like operations are - # blocked until it finishes. 
- :global.trans( - {name, self()}, - fn -> - multi_call(adapter_meta, :delete_all, [query, opts], opts) - end, - Cluster.get_nodes(name) - ) - end - - defp do_execute(adapter_meta, operation, query, opts) do - with_dynamic_cache(adapter_meta, operation, [query, opts]) - end - - @impl true - defspan stream(adapter_meta, query, opts) do - with_dynamic_cache(adapter_meta, :stream, [query, opts]) - end - - ## Nebulex.Adapter.Persistence - - @impl true - defspan dump(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - @impl true - defspan load(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - ## Nebulex.Adapter.Transaction - - @impl true - defspan transaction(adapter_meta, opts, fun) do - super(adapter_meta, Keyword.put(opts, :nodes, Cluster.get_nodes(adapter_meta.name)), fun) - end - - @impl true - defspan in_transaction?(adapter_meta) do - super(adapter_meta) - end - - ## Nebulex.Adapter.Stats - - @impl true - defspan stats(adapter_meta) do - with_dynamic_cache(adapter_meta, :stats, []) - end - - ## Helpers - - @doc """ - Helper function to use dynamic cache for internal primary cache storage - when needed. - """ - def with_dynamic_cache(%{cache: cache, primary_name: nil}, action, args) do - apply(cache.__primary__, action, args) - end - - def with_dynamic_cache(%{cache: cache, primary_name: primary_name}, action, args) do - cache.__primary__.with_dynamic_cache(primary_name, fn -> - apply(cache.__primary__, action, args) - end) - end - - ## Private Functions - - defp with_transaction(adapter_meta, action, keys, args, opts \\ []) do - do_with_transaction(adapter_meta, action, keys, args, opts, 1) - end - - defp do_with_transaction(%{name: name} = adapter_meta, action, keys, args, opts, times) do - # This is a bit hacky because the `:global_locks` table managed by - # `:global` is being accessed directly breaking the encapsulation. 
- # So far, this has been the simplest and fastest way to validate if - # the global sync lock `:"$sync_lock"` is set, so we block write-like - # operations until it finishes. The other option would be trying to - # lock the same key `:"$sync_lock"`, and then when the lock is acquired, - # delete it before processing the write operation. But this means another - # global lock across the cluster every time there is a write. So for the - # time being, we just read the global table to validate it which is much - # faster; since it is a local read with the global ETS, there is no global - # locks across the cluster. - case :ets.lookup(:global_locks, :"$sync_lock") do - [_] -> - :ok = random_sleep(times) - - do_with_transaction(adapter_meta, action, keys, args, opts, times + 1) - - [] -> - nodes = Cluster.get_nodes(name) - - # Write-like operation must be wrapped within a transaction - # to ensure proper replication - transaction(adapter_meta, [keys: keys, nodes: nodes], fn -> - multi_call(adapter_meta, action, args, opts) - end) - end - end - - defp multi_call(%{name: name, task_sup: task_sup} = meta, action, args, opts) do - # Run the command locally first - local = with_dynamic_cache(meta, action, args) - - # Run the command on the remote nodes - {ok_nodes, error_nodes} = - RPC.multi_call( - task_sup, - Cluster.get_nodes(name) -- [node()], - __MODULE__, - :with_dynamic_cache, - [meta, action, args], - opts - ) - - # Process the responses adding the local one as source of truth - handle_rpc_multi_call({[local | ok_nodes], error_nodes}, meta, action) - end - - defp handle_rpc_multi_call({res, []}, _meta, _action), do: hd(res) - - defp handle_rpc_multi_call({res, {:sanitized, {[], rpc_errors}}}, meta, action) do - _ = dispatch_replication_error(meta, action, rpc_errors) - hd(res) - end - - defp handle_rpc_multi_call({responses, {:sanitized, {errors, rpc_errors}}}, meta, action) do - _ = dispatch_replication_error(meta, action, rpc_errors) - - raise 
Nebulex.RPCMultiCallError, action: action, responses: responses, errors: errors - end - - defp handle_rpc_multi_call({responses, errors}, meta, action) do - handle_rpc_multi_call({responses, {:sanitized, sanitize_errors(errors)}}, meta, action) - end - - defp sanitize_errors(errors) do - Enum.reduce(errors, {[], []}, fn - {{:error, {:exception, %Nebulex.RegistryLookupError{} = error, _}}, node}, {acc1, acc2} -> - # The cache was not found in the node, maybe it was stopped and - # "Process Groups" is not updated yet, then ignore the error - {acc1, [{node, error} | acc2]} - - {{:error, {:erpc, :noconnection}}, node}, {acc1, acc2} -> - # Remote node is down and maybe the "Process Groups" is not updated yet - {acc1, [{node, :noconnection} | acc2]} - - error, {acc1, acc2} -> - {[error | acc1], acc2} - end) - end - - defp dispatch_replication_error(adapter_meta, action, rpc_errors) do - if adapter_meta.telemetry or Map.get(adapter_meta, :in_span?, false) do - Telemetry.execute( - adapter_meta.telemetry_prefix ++ [:replication], - %{rpc_errors: length(rpc_errors)}, - %{adapter_meta: adapter_meta, function_name: action, rpc_errors: rpc_errors} - ) - end - end - - # coveralls-ignore-start - - defp random_sleep(times) do - _ = - if rem(times, 10) == 0 do - _ = :rand.seed(:exsplus) - end - - # First time 1/4 seconds, then doubling each time up to 8 seconds max - tmax = - if times > 5 do - 8000 - else - div((1 <<< times) * 1000, 8) - end - - tmax - |> :rand.uniform() - |> Process.sleep() - end - - # coveralls-ignore-stop -end - -defmodule Nebulex.Adapters.Replicated.Bootstrap do - @moduledoc false - use GenServer - - import Nebulex.Helpers - - alias Nebulex.{Adapter, Entry, Telemetry} - alias Nebulex.Adapters.Replicated - alias Nebulex.Cache.Cluster - - # Max retries in intervals of 1 ms (5 seconds). - # If in 5 seconds the cache has not started, stop the server. 
- @max_retries 5000 - - ## API - - @doc false - def start_link(%{name: name} = adapter_meta) do - GenServer.start_link( - __MODULE__, - adapter_meta, - name: normalize_module_name([name, Bootstrap]) - ) - end - - ## GenServer Callbacks - - @impl true - def init(adapter_meta) do - # Trap exit signals to run cleanup job - _ = Process.flag(:trap_exit, true) - - # Ensure joining the cluster only when the cache supervision tree is started - :ok = Cluster.join(adapter_meta.name) - - # Set a global lock to stop any write operation - # until the synchronization process finishes - :ok = lock(adapter_meta.name) - - # Init retries - state = Map.put(adapter_meta, :retries, 0) - - # Start bootstrap process - {:ok, state, 1} - end - - @impl true - def handle_info(:timeout, %{pid: pid} = state) when is_pid(pid) do - # Start synchronization process - :ok = sync_data(state) - - # Delete global lock set when the server started - :ok = unlock(state.name) - - # Bootstrap process finished - {:noreply, state} - end - - def handle_info(:timeout, %{name: name, retries: retries} = state) - when retries < @max_retries do - Adapter.with_meta(name, fn _adapter, adapter_meta -> - handle_info(:timeout, adapter_meta) - end) - rescue - ArgumentError -> {:noreply, %{state | retries: retries + 1}, 1} - end - - def handle_info(:timeout, state) do - # coveralls-ignore-start - {:stop, :normal, state} - # coveralls-ignore-stop - end - - @impl true - def terminate(_reason, state) do - # Ensure leaving the cluster when the cache stops - :ok = Cluster.leave(state.name) - end - - ## Helpers - - defp lock(name) do - true = :global.set_lock({:"$sync_lock", self()}, Cluster.get_nodes(name)) - - :ok - end - - defp unlock(name) do - true = :global.del_lock({:"$sync_lock", self()}, Cluster.get_nodes(name)) - - :ok - end - - # FIXME: this is because coveralls does not mark this as covered - # coveralls-ignore-start - - defp sync_data(%{name: name} = adapter_meta) do - cluster_nodes = Cluster.get_nodes(name) - - 
case cluster_nodes -- [node()] do - [] -> - :ok - - nodes -> - # Sync process: - # 1. Push a new generation on all cluster nodes to make the newer one - # empty. - # 2. Copy cached data from one of the cluster nodes; entries will be - # stremed from the older generation since the newer one should be - # empty. - # 3. Push a new generation on the current/new node to make it a mirror - # of the other cluster nodes. - # 4. Reset GC timer for ell cluster nodes (making the generation timer - # gap among cluster nodes as small as possible). - with :ok <- maybe_run_on_nodes(adapter_meta, nodes, :new_generation), - :ok <- copy_entries_from_nodes(adapter_meta, nodes), - :ok <- maybe_run_on_nodes(adapter_meta, [node()], :new_generation) do - maybe_run_on_nodes(adapter_meta, nodes, :reset_generation_timer) - end - end - end - - defp maybe_run_on_nodes(%{cache: cache} = adapter_meta, nodes, fun) do - if cache.__primary__.__adapter__() == Nebulex.Adapters.Local do - nodes - |> :rpc.multicall(Replicated, :with_dynamic_cache, [adapter_meta, fun, []]) - |> handle_multicall(adapter_meta) - else - :ok - end - end - - defp handle_multicall({responses, failed_nodes}, adapter_meta) do - {_ok, errors} = Enum.split_with(responses, &(&1 == :ok)) - - dispatch_bootstrap_error( - adapter_meta, - %{failed_nodes: length(failed_nodes), remote_errors: length(errors)}, - %{failed_nodes: failed_nodes, remote_errors: errors} - ) - end - - defp copy_entries_from_nodes(adapter_meta, nodes) do - nodes - |> Enum.reduce_while([], &stream_entries(adapter_meta, &1, &2)) - |> Enum.each( - &Replicated.with_dynamic_cache( - adapter_meta, - :put, - [&1.key, &1.value, [ttl: Entry.ttl(&1)]] - ) - ) - end - - defp stream_entries(meta, node, acc) do - stream_fun = fn -> - meta - |> Replicated.stream(nil, return: :entry, page_size: 100) - |> Stream.filter(&(not Entry.expired?(&1))) - |> Stream.map(& &1) - |> Enum.to_list() - end - - case :rpc.call(node, Kernel, :apply, [stream_fun, []]) do - {:badrpc, _} -> 
{:cont, acc} - entries -> {:halt, entries} - end - end - - defp dispatch_bootstrap_error(adapter_meta, measurements, metadata) do - if adapter_meta.telemetry or Map.get(adapter_meta, :in_span?, false) do - Telemetry.execute( - adapter_meta.telemetry_prefix ++ [:bootstrap], - measurements, - Map.put(metadata, :adapter_meta, adapter_meta) - ) - end - end - - # coveralls-ignore-stop -end diff --git a/lib/nebulex/adapters/supervisor.ex b/lib/nebulex/adapters/supervisor.ex deleted file mode 100644 index e1a3670d..00000000 --- a/lib/nebulex/adapters/supervisor.ex +++ /dev/null @@ -1,19 +0,0 @@ -defmodule Nebulex.Adapters.Supervisor do - # Utility module for building a supervisor to wrap up the adapter's children. - @moduledoc false - - @doc """ - Builds a supervisor spec with the given `options` for wrapping up the - adapter's children. - """ - @spec child_spec(Keyword.t()) :: Supervisor.child_spec() - def child_spec(options) do - {children, options} = Keyword.pop(options, :children, []) - - %{ - id: Keyword.fetch!(options, :name), - start: {Supervisor, :start_link, [children, options]}, - type: :supervisor - } - end -end diff --git a/lib/nebulex/cache.ex b/lib/nebulex/cache.ex index 9cfca475..19cf62d8 100644 --- a/lib/nebulex/cache.ex +++ b/lib/nebulex/cache.ex @@ -1,15 +1,19 @@ defmodule Nebulex.Cache do @moduledoc """ - Cache's main interface; defines the cache abstraction layer which is - highly inspired by [Ecto](https://github.com/elixir-ecto/ecto). + Cache abstraction layer inspired by + [Ecto](https://github.com/elixir-ecto/ecto). - A Cache maps to an underlying implementation, controlled by the - adapter. For example, Nebulex ships with a default adapter that - implements a local generational cache. + A cache maps to an underlying in-memory storage controlled by the adapter. + For example, Nebulex ships with an adapter that implements a local + generational cache. - When used, the Cache expects the `:otp_app` and `:adapter` as options. 
- The `:otp_app` should point to an OTP application that has the cache - configuration. For example, the Cache: + The cache expects the `:otp_app` and `:adapter` as options when used. + The `:otp_app` should point to an OTP application with the cache + configuration. See the compile time options for more information: + + #{Nebulex.Cache.Options.compile_options_docs()} + + For example, the cache: defmodule MyApp.Cache do use Nebulex.Cache, @@ -20,7 +24,6 @@ defmodule Nebulex.Cache do Could be configured with: config :my_app, MyApp.Cache, - backend: :shards, gc_interval: :timer.hours(12), max_size: 1_000_000, allocated_memory: 2_000_000_000, @@ -29,55 +32,58 @@ defmodule Nebulex.Cache do Most of the configuration that goes into the `config` is specific to the adapter. For this particular example, you can check - [`Nebulex.Adapters.Local`](https://hexdocs.pm/nebulex/Nebulex.Adapters.Local.html) - for more information. In spite of this, the following configuration values - are shared across all adapters: - - * `:name` - The name of the Cache supervisor process. - - * `:telemetry_prefix` - It is recommend for adapters to publish events - using the `Telemetry` library. By default, the telemetry prefix is based - on the module name, so if your module is called `MyApp.Cache`, the prefix - will be `[:my_app, :cache]`. See the "Telemetry events" section to see - what events recommended for the adapters to publish.. Note that if you - have multiple caches, you should keep the `:telemetry_prefix` consistent - for each of them and use the `:cache` and/or `:name` (in case of a named - or dynamic cache) properties in the event metadata for distinguishing - between caches. - - * `:telemetry` - An optional flag to tell the adapters whether Telemetry - events should be emitted or not. Defaults to `true`. - - * `:stats` - Boolean to define whether or not the cache will provide stats. - Defaults to `false`. 
Each adapter is responsible for providing stats by
-    implementing `Nebulex.Adapter.Stats` behaviour. See the "Stats" section
-    below.
+  [`Nebulex.Adapters.Local`][local_adapter] for more information.
+  Despite this, the following configuration values are shared
+  across all adapters:
+
+  #{Nebulex.Cache.Options.start_link_options_docs()}
+
+  [local_adapter]: https://hexdocs.pm/nebulex/Nebulex.Adapters.Local.html
+
+  ## Shared options
+
+  All of the cache functions outlined in this module accept the following
+  options:
+
+  #{Nebulex.Cache.Options.runtime_shared_options_docs()}
+
+  > #### Adapter-specific options {: .info}
+  >
+  > In addition to the shared options, each adapter can define its
+  > specific options. Therefore, Nebulex recommends reviewing the
+  > adapter's documentation.
 
   ## Telemetry events
 
-  Similar to Ecto or Phoenix, Nebulex also provides built-in Telemetry events
-  applied to all caches, and cache adapter-specific events.
+  There are two types of telemetry events: those emitted by Nebulex and the ones
+  that are adapter specific. The ones emitted by Nebulex are divided into two
+  categories: cache lifecycle events and cache command events. Let us take a
+  closer look at each of them.
 
-  ### Nebulex built-in events
+  ### Cache lifecycle events
 
-  The following events are emitted by all Nebulex caches:
+  All Nebulex caches emit the following events:
 
-  * `[:nebulex, :cache, :init]` - it is dispatched whenever a cache starts.
-    The measurement is a single `system_time` entry in native unit. The
-    metadata is the `:cache` and all initialization options under `:opts`.
+  * `[:nebulex, :cache, :init]` - It is dispatched whenever a cache starts.
+    The only measurement is the current system time in native units from
+    calling: `System.system_time()`. The `:opts` key in the metadata
+    contains all initialization options.
 
- ### Adapter-specific events + * Measurement: `%{system_time: integer()}` + * Metadata: `%{cache: module(), name: atom(), opts: keyword()}` + + ### Cache command events - It is recommend the adapters to publish certain `Telemetry` events listed - below. Those events will use the `:telemetry_prefix` outlined above which - defaults to `[:my_app, :cache]`. + When the option `:telemetry` is set to `true` (the default), Nebulex will + emit Telemetry span events for each cache command, and those will use the + `:telemetry_prefix` outlined above, which defaults to `[:nebulex, :cache]`. - For instance, to receive all events published by a cache called `MyApp.Cache`, + For instance, to receive all events published for the cache `MyApp.Cache`, one could define a module: defmodule MyApp.Telemetry do def handle_event( - [:my_app, :cache, :command, event], + [:nebulex, :cache, :command, event], measurements, metadata, config @@ -95,45 +101,48 @@ defmodule Nebulex.Cache do end end - Then, in the `Application.start/2` callback, attach the handler to this event + Then, in the `Application.start/2` callback, attach the handler to the events using a unique handler id: - :telemetry.attach( + :telemetry.attach_many( "my-app-handler-id", - [:my_app, :cache, :command], + [ + [:nebulex, :cache, :command, :start], + [:nebulex, :cache, :command, :stop], + [:nebulex, :cache, :command, :exception] + ], &MyApp.Telemetry.handle_event/4, - %{} + :no_config ) - See [the telemetry documentation](https://hexdocs.pm/telemetry/) + See the [telemetry documentation](https://hexdocs.pm/telemetry/) for more information. The following are the events you should expect from Nebulex. All examples below consider a cache named `MyApp.Cache`: - #### `[:my_app, :cache, :command, :start]` + #### `[:nebulex, :cache, :command, :start]` - This event should be invoked on every cache call sent to the adapter before - the command logic is executed. + This event is emitted before a cache command is executed. 
The `:measurements` map will include the following:
 
     * `:system_time` - The current system time in native units from
       calling: `System.system_time()`.
 
-  A Telemetry `:metadata` map including the following fields. Each cache adapter
-  may emit different information here. For built-in adapters, it will contain:
+  A Telemetry `:metadata` map including the following fields:
 
     * `:adapter_meta` - The adapter metadata.
-    * `:function_name` - The name of the invoked adapter function.
-    * `:args` - The arguments of the invoked adapter function, omitting the
-      first argument, since it is the adapter metadata already included into
-      the event's metadata.
+    * `:command` - The name of the invoked adapter's command.
+    * `:args` - The arguments passed to the invoked adapter, except for the
+      first one, since the adapter's metadata is available in the event's
+      metadata.
+    * `:extra_metadata` - Additional metadata through the runtime option
+      `:telemetry_metadata`.
 
-  #### `[:my_app, :cache, :command, :stop]`
+  #### `[:nebulex, :cache, :command, :stop]`
 
-  This event should be invoked on every cache call sent to the adapter after
-  the command logic is executed.
+  This event is emitted after a cache command is executed.
 
   The `:measurements` map will include the following:
 
@@ -141,20 +150,21 @@ defmodule Nebulex.Cache do
     is given in the `:native` time unit. You can read more about it in the
     docs for `System.convert_time_unit/3`.
 
-  A Telemetry `:metadata` map including the following fields. Each cache adapter
-  may emit different information here. For built-in adapters, it will contain:
+  A Telemetry `:metadata` map including the following fields:
 
     * `:adapter_meta` - The adapter metadata.
-    * `:function_name` - The name of the invoked adapter function.
-    * `:args` - The arguments of the invoked adapter function, omitting the
-      first argument, since it is the adapter metadata already included into
-      the event's metadata.
-    * `:result` - The command result.
+    * `:command` - The name of the invoked adapter's command.
+    * `:args` - The arguments passed to the invoked adapter, except for the
+      first one, since the adapter's metadata is available in the event's
+      metadata.
+    * `:extra_metadata` - Additional metadata through the runtime option
+      `:telemetry_metadata`.
+    * `:result` - The command's result.
 
-  #### `[:my_app, :cache, :command, :exception]`
+  #### `[:nebulex, :cache, :command, :exception]`
 
-  This event should be invoked when an error or exception occurs while executing
-  the cache command.
+  This event is emitted when an error or exception occurs during the
+  cache command execution.
 
   The `:measurements` map will include the following:
 
@@ -162,156 +172,205 @@ defmodule Nebulex.Cache do
     is given in the `:native` time unit. You can read more about it in the
     docs for `System.convert_time_unit/3`.
 
-  A Telemetry `:metadata` map including the following fields. Each cache adapter
-  may emit different information here. For built-in adapters, it will contain:
+  A Telemetry `:metadata` map including the following fields:
 
     * `:adapter_meta` - The adapter metadata.
-    * `:function_name` - The name of the invoked adapter function.
-    * `:args` - The arguments of the invoked adapter function, omitting the
-      first argument, since it is the adapter metadata already included into
-      the event's metadata.
+    * `:command` - The name of the invoked adapter's command.
+    * `:args` - The arguments passed to the invoked adapter, except for the
+      first one, since the adapter's metadata is available in the event's
+      metadata.
+    * `:extra_metadata` - Additional metadata through the runtime option
+      `:telemetry_metadata`.
     * `:kind` - The type of the error: `:error`, `:exit`, or `:throw`.
     * `:reason` - The reason of the error.
-    * `:stacktrace` - The stacktrace.
+    * `:stacktrace` - Exception's stack trace.
 
-  **NOTE:** The events outlined above are the recommended for the adapters
-  to dispatch.
However, it is highly recommended to review the used adapter - documentation to ensure it is fully compatible with these events, perhaps - differences, or perhaps also additional events. + ### Adapter-specific events - ## Stats + Regardless of whether Nebulex emits the telemetry events outlined above or + not, the adapters can and are free to expose their own, but they will be + out of Nebulex's scope. Therefore, if you are interested in using specific + adapter events, you should review the adapters' documentation. - Stats are provided by the adapters by implementing the optional behaviour - `Nebulex.Adapter.Stats`. This behaviour exposes a callback to return the - current cache stats. Nevertheless, the behaviour brings with a default - implementation using [Erlang counters][counters], which is used by the - local built-in adapter (`Nebulex.Adapters.Local`). + ## Dynamic caches - [counters]: https://erlang.org/doc/man/counters.html + Nebulex allows you to start multiple processes from the same cache module. + This feature is typically useful when you want to have different cache + instances but access them through the same cache module. - One can enable the stats by setting the option `:stats` to `true`. - For example, in the configuration file: + When you list a cache in your supervision tree, such as `MyApp.Cache`, it will + start a supervision tree with a process named `MyApp.Cache` under the hood. + By default, the process has the same name as the cache module. Hence, whenever + you invoke a function in `MyApp.Cache`, such as `MyApp.Cache.put/3`, Nebulex + will execute the command in the cache process named `MyApp.Cache`. - config :my_app, MyApp.Cache, - stats: true, - ... + However, with Nebulex, you can start multiple processes from the same cache. + The only requirement is that they must have different process names, like + this: - > Remember to check if the underlying adapter implements the - `Nebulex.Adapter.Stats` behaviour. 
+ children = [ + MyApp.Cache, + {MyApp.Cache, name: MyApp.UsersCache} + ] - See `c:Nebulex.Cache.stats/0` for more information. + Now you have two cache instances running: one is named `MyApp.Cache`, and the + other one is named `MyApp.UsersCache`. You can tell Nebulex which process you + want to use in your cache operations by calling: - ## Dispatching stats via Telemetry + MyApp.Cache.put_dynamic_cache(MyApp.Cache) + MyApp.Cache.put_dynamic_cache(MyApp.UsersCache) - It is possible to emit Telemetry events for the current stats via - `c:Nebulex.Cache.dispatch_stats/1`, but it has to be invoked explicitly; - Nebulex does not emit this Telemetry event automatically. But it is very - easy to emit this event using [`:telemetry_poller`][telemetry_poller]. + Once you call `MyApp.Cache.put_dynamic_cache(name)`, all invocations made on + `MyApp.Cache` will use the cache instance denoted by `name`. - [telemetry_poller]: https://github.com/beam-telemetry/telemetry_poller + Nebulex also provides a handy function for invoking commands using dynamic + caches: `c:with_dynamic_cache/2`. - For example, one can define a custom pollable measurement: + MyApp.Cache.with_dynamic_cache(MyApp.UsersCache, fn -> + # all commands here will use MyApp.UsersCache + MyApp.Cache.put("u1", "joe") + ... + end) - :telemetry_poller.start_link( - measurements: [ - {MyApp.Cache, :dispatch_stats, []}, - ], - # configure sampling period - default is :timer.seconds(5) - period: :timer.seconds(30), - name: :my_cache_stats_poller - ) + While these functions are handy, you may want to have the ability to pass + the dynamic cache directly to the command, avoiding the boilerplate logic + of using `c:put_dynamic_cache/1` or `c:with_dynamic_cache/2`. From **v3.0**, + all Cache API commands expose an extended callback version that admits a + dynamic cache at the first argument, so you can directly interact with a + cache instance. 
- Or you can also start the `:telemetry_poller` process along with your - application supervision tree: - - def start(_type, _args) do - my_cache_stats_poller_opts = [ - measurements: [ - {MyApp.Cache, :dispatch_stats, []}, - ], - period: :timer.seconds(30), - name: :my_cache_stats_poller - ] - - children = [ - {MyApp.Cache, []}, - {:telemetry_poller, my_cache_stats_poller_opts} - ] - - opts = [strategy: :one_for_one, name: MyApp.Supervisor] - Supervisor.start_link(children, opts) - end + MyApp.Cache.put(MyApp.UsersCache, "u1", "joe", ttl: :timer.hours(1)) + MyApp.Cache.get(MyApp.UsersCache, "u1", nil, []) + MyApp.Cache.delete(MyApp.UsersCache, "u1", []) - See [Nebulex Telemetry Guide](http://hexdocs.pm/nebulex/telemetry.html) - for more information. + This is another handy way to work with multiple cache instances through + the same cache module. ## Distributed topologies - Nebulex provides the following adapters for distributed topologies: + One of the goals of Nebulex is also to provide the ability to set up + distributed cache topologies, but this feature will depend on the adapters. + However, there are available adapters already for this: * `Nebulex.Adapters.Partitioned` - Partitioned cache topology. * `Nebulex.Adapters.Replicated` - Replicated cache topology. * `Nebulex.Adapters.Multilevel` - Multi-level distributed cache topology. These adapters work more as wrappers for an existing local adapter and provide - the distributed topology on top of it. Optionally, you can set the adapter for + the distributed topology on top of it. You can optionally set the adapter for the primary cache storage with the option `:primary_storage_adapter`. Defaults - to `Nebulex.Adapters.Local`. + to `Nebulex.Adapters.Local`. See adapters documentation for information. 
""" - @type t :: module + @typedoc "Cache type" + @type t() :: module() @typedoc "Cache entry key" - @type key :: any + @type key() :: any() @typedoc "Cache entry value" - @type value :: any + @type value() :: any() + + @typedoc "Dynamic cache value" + @type dynamic_cache() :: atom() | pid() @typedoc "Cache entries" - @type entries :: map | [{key, value}] + @type entries() :: map() | [{key(), value()}] @typedoc "Cache action options" - @type opts :: Keyword.t() + @type opts() :: keyword() + + @typedoc "The data type for a query spec" + @type query_spec() :: keyword() + + @typedoc "Specification key for the item(s) to include in the returned info" + @type info_spec() :: :all | atom() | [atom()] + + @typedoc "The type for the info item's value" + @type info_item() :: any() + + @typedoc "Info map" + @type info_map() :: %{optional(atom()) => any()} + + @typedoc "The data type for the cache information" + @type info_data() :: info_map() | info_item() + + @typedoc "Proxy type for generic Nebulex error" + @type nbx_error_reason() :: Nebulex.Error.t() + + @typedoc "Fetch error reason" + @type fetch_error_reason() :: Nebulex.KeyError.t() | nbx_error_reason() + + @typedoc "Common error type" + @type error_tuple() :: error_tuple(nbx_error_reason()) + + @typedoc "Error type for the given reason" + @type error_tuple(reason) :: {:error, reason} + + @typedoc "Ok/Error tuple with default error reasons" + @type ok_error_tuple(ok) :: ok_error_tuple(ok, nbx_error_reason()) + + @typedoc "Ok/Error type" + @type ok_error_tuple(ok, error) :: {:ok, ok} | {:error, error} + + ## API + + import __MODULE__.Impl @doc false defmacro __using__(opts) do - quote bind_quoted: [opts: opts] do - @behaviour Nebulex.Cache + quote do + unquote(prelude(opts)) + unquote(base_defs()) + unquote(kv_defs()) - alias Nebulex.Cache.{ - Entry, - Persistence, - Queryable, - Stats, - Storage, - Transaction - } + if Nebulex.Adapter.Queryable in behaviours do + unquote(queryable_defs()) + end + + if 
 Nebulex.Adapter.Persistence in behaviours do + unquote(persistence_defs()) + end + + if Nebulex.Adapter.Transaction in behaviours do + unquote(transaction_defs()) + end + + if Nebulex.Adapter.Info in behaviours do + unquote(info_defs()) + end + end + end - alias Nebulex.Hook + defp prelude(opts) do + quote do + @behaviour Nebulex.Cache - {otp_app, adapter, behaviours} = Nebulex.Cache.Supervisor.compile_config(opts) + {otp_app, adapter, behaviours, opts} = Nebulex.Cache.Supervisor.compile_config(unquote(opts)) @otp_app otp_app @adapter adapter @opts opts - @default_dynamic_cache opts[:default_dynamic_cache] || __MODULE__ - @default_key_generator opts[:default_key_generator] || Nebulex.Caching.SimpleKeyGenerator + @default_dynamic_cache @opts[:default_dynamic_cache] || __MODULE__ + @before_compile adapter + end + end + defp base_defs do + quote do ## Config and metadata @impl true def config do {:ok, config} = Nebulex.Cache.Supervisor.runtime_config(__MODULE__, @otp_app, []) + config end @impl true def __adapter__, do: @adapter - @impl true - def __default_key_generator__, do: @default_key_generator - ## Process lifecycle @doc false @@ -329,10 +388,16 @@ defmodule Nebulex.Cache do end @impl true - def stop(timeout \\ 5000) do - Supervisor.stop(get_dynamic_cache(), :normal, timeout) + def stop(opts \\ []) do + stop(get_dynamic_cache(), opts) + end + + @impl true + def stop(name, opts) do + Supervisor.stop(name, :normal, Keyword.get(opts, :timeout, 5000)) end + # Inline common instructions @compile {:inline, get_dynamic_cache: 0} @impl true @@ -351,197 +416,140 @@ defmodule Nebulex.Cache do try do _ = put_dynamic_cache(name) + fun.() after _ = put_dynamic_cache(default_dynamic_cache) end end + end + end - @impl true - def with_dynamic_cache(name, module, fun, args) do - with_dynamic_cache(name, fn -> apply(module, fun, args) end) - end + defp kv_defs do + quote do + alias Nebulex.Cache.KV - ## Entry + defcacheapi fetch(key, opts \\ []), to: KV - @impl true - def 
get(key, opts \\ []) do - Entry.get(get_dynamic_cache(), key, opts) - end + defcacheapi fetch!(key, opts \\ []), to: KV - @impl true - def get!(key, opts \\ []) do - Entry.get!(get_dynamic_cache(), key, opts) - end + defcacheapi get(key, default \\ nil, opts \\ []), to: KV - @impl true - def get_all(keys, opts \\ []) do - Entry.get_all(get_dynamic_cache(), keys, opts) - end + defcacheapi get!(key, default \\ nil, opts \\ []), to: KV - @impl true - def put(key, value, opts \\ []) do - Entry.put(get_dynamic_cache(), key, value, opts) - end + defcacheapi put(key, value, opts \\ []), to: KV - @impl true - def put_new(key, value, opts \\ []) do - Entry.put_new(get_dynamic_cache(), key, value, opts) - end + defcacheapi put!(key, value, opts \\ []), to: KV - @impl true - def put_new!(key, value, opts \\ []) do - Entry.put_new!(get_dynamic_cache(), key, value, opts) - end + defcacheapi put_new(key, value, opts \\ []), to: KV - @impl true - def replace(key, value, opts \\ []) do - Entry.replace(get_dynamic_cache(), key, value, opts) - end + defcacheapi put_new!(key, value, opts \\ []), to: KV - @impl true - def replace!(key, value, opts \\ []) do - Entry.replace!(get_dynamic_cache(), key, value, opts) - end + defcacheapi replace(key, value, opts \\ []), to: KV - @impl true - def put_all(entries, opts \\ []) do - Entry.put_all(get_dynamic_cache(), entries, opts) - end + defcacheapi replace!(key, value, opts \\ []), to: KV - @impl true - def put_new_all(entries, opts \\ []) do - Entry.put_new_all(get_dynamic_cache(), entries, opts) - end + defcacheapi put_all(entries, opts \\ []), to: KV - @impl true - def delete(key, opts \\ []) do - Entry.delete(get_dynamic_cache(), key, opts) - end + defcacheapi put_all!(entries, opts \\ []), to: KV - @impl true - def take(key, opts \\ []) do - Entry.take(get_dynamic_cache(), key, opts) - end + defcacheapi put_new_all(entries, opts \\ []), to: KV - @impl true - def take!(key, opts \\ []) do - Entry.take!(get_dynamic_cache(), key, opts) - 
end + defcacheapi put_new_all!(entries, opts \\ []), to: KV - @impl true - def has_key?(key) do - Entry.has_key?(get_dynamic_cache(), key) - end + defcacheapi delete(key, opts \\ []), to: KV - @impl true - def get_and_update(key, fun, opts \\ []) do - Entry.get_and_update(get_dynamic_cache(), key, fun, opts) - end + defcacheapi delete!(key, opts \\ []), to: KV - @impl true - def update(key, initial, fun, opts \\ []) do - Entry.update(get_dynamic_cache(), key, initial, fun, opts) - end + defcacheapi take(key, opts \\ []), to: KV - @impl true - def incr(key, amount \\ 1, opts \\ []) do - Entry.incr(get_dynamic_cache(), key, amount, opts) - end + defcacheapi take!(key, opts \\ []), to: KV - @impl true - def decr(key, amount \\ 1, opts \\ []) do - Entry.decr(get_dynamic_cache(), key, amount, opts) - end + defcacheapi has_key?(key, opts \\ []), to: KV - @impl true - def ttl(key) do - Entry.ttl(get_dynamic_cache(), key) - end + defcacheapi get_and_update(key, fun, opts \\ []), to: KV - @impl true - def expire(key, ttl) do - Entry.expire(get_dynamic_cache(), key, ttl) - end + defcacheapi get_and_update!(key, fun, opts \\ []), to: KV - @impl true - def touch(key) do - Entry.touch(get_dynamic_cache(), key) - end + defcacheapi update(key, initial, fun, opts \\ []), to: KV - ## Queryable + defcacheapi update!(key, initial, fun, opts \\ []), to: KV - if Nebulex.Adapter.Queryable in behaviours do - @impl true - def all(query \\ nil, opts \\ []) do - Queryable.all(get_dynamic_cache(), query, opts) - end + defcacheapi incr(key, amount \\ 1, opts \\ []), to: KV - @impl true - def count_all(query \\ nil, opts \\ []) do - Queryable.count_all(get_dynamic_cache(), query, opts) - end + defcacheapi incr!(key, amount \\ 1, opts \\ []), to: KV - @impl true - def delete_all(query \\ nil, opts \\ []) do - Queryable.delete_all(get_dynamic_cache(), query, opts) - end + defcacheapi decr(key, amount \\ 1, opts \\ []), to: KV - @impl true - def stream(query \\ nil, opts \\ []) do - 
Queryable.stream(get_dynamic_cache(), query, opts) - end + defcacheapi decr!(key, amount \\ 1, opts \\ []), to: KV - ## Deprecated functions (for backwards compatibility) + defcacheapi ttl(key, opts \\ []), to: KV - @impl true - defdelegate size, to: __MODULE__, as: :count_all + defcacheapi ttl!(key, opts \\ []), to: KV - @impl true - defdelegate flush, to: __MODULE__, as: :delete_all - end + defcacheapi expire(key, ttl, opts \\ []), to: KV - ## Persistence + defcacheapi expire!(key, ttl, opts \\ []), to: KV - if Nebulex.Adapter.Persistence in behaviours do - @impl true - def dump(path, opts \\ []) do - Persistence.dump(get_dynamic_cache(), path, opts) - end + defcacheapi touch(key, opts \\ []), to: KV - @impl true - def load(path, opts \\ []) do - Persistence.load(get_dynamic_cache(), path, opts) - end - end + defcacheapi touch!(key, opts \\ []), to: KV + end + end - ## Transactions + defp queryable_defs do + quote do + alias Nebulex.Cache.Queryable - if Nebulex.Adapter.Transaction in behaviours do - @impl true - def transaction(opts \\ [], fun) do - Transaction.transaction(get_dynamic_cache(), opts, fun) - end + defcacheapi get_all(query_spec \\ [], opts \\ []), to: Queryable - @impl true - def in_transaction? 
do - Transaction.in_transaction?(get_dynamic_cache()) - end - end + defcacheapi get_all!(query_spec \\ [], opts \\ []), to: Queryable - ## Stats + defcacheapi count_all(query_spec \\ [], opts \\ []), to: Queryable - if Nebulex.Adapter.Stats in behaviours do - @impl true - def stats do - Stats.stats(get_dynamic_cache()) - end + defcacheapi count_all!(query_spec \\ [], opts \\ []), to: Queryable - @impl true - def dispatch_stats(opts \\ []) do - Stats.dispatch_stats(get_dynamic_cache(), opts) - end - end + defcacheapi delete_all(query_spec \\ [], opts \\ []), to: Queryable + + defcacheapi delete_all!(query_spec \\ [], opts \\ []), to: Queryable + + defcacheapi stream(query_spec \\ [], opts \\ []), to: Queryable + + defcacheapi stream!(query_spec \\ [], opts \\ []), to: Queryable + end + end + + defp persistence_defs do + quote do + alias Nebulex.Cache.Persistence + + defcacheapi dump(path, opts \\ []), to: Persistence + + defcacheapi dump!(path, opts \\ []), to: Persistence + + defcacheapi load(path, opts \\ []), to: Persistence + + defcacheapi load!(path, opts \\ []), to: Persistence + end + end + + defp transaction_defs do + quote do + alias Nebulex.Cache.Transaction + + defcacheapi transaction(fun, opts \\ []), to: Transaction + + defcacheapi in_transaction?(opts \\ []), to: Transaction + end + end + + defp info_defs do + quote do + alias Nebulex.Cache.Info + + defcacheapi info(spec \\ :all, opts \\ []), to: Info + + defcacheapi info!(spec \\ :all, opts \\ []), to: Info end end @@ -552,37 +560,24 @@ defmodule Nebulex.Cache do @doc """ A callback executed when the cache starts or when configuration is read. """ - @callback init(config :: Keyword.t()) :: {:ok, Keyword.t()} | :ignore + @doc group: "User callbacks" + @callback init(config :: keyword) :: {:ok, keyword} | :ignore ## Nebulex.Adapter @doc """ Returns the adapter tied to the cache. 
""" + @doc group: "Runtime API" @callback __adapter__ :: Nebulex.Adapter.t() - @doc """ - Returns the default key generator applied only when using - **"declarative annotation-based caching"** via `Nebulex.Caching.Decorators`. - - Sometimes you may want to set a different key generator when using - declarative caching. By default, the key generator is set to - `Nebulex.Caching.SimpleKeyGenerator`. You can change the default - key generator at compile time with: - - use Nebulex.Cache, default_key_generator: MyKeyGenerator - - See `Nebulex.Caching.Decorators` and `Nebulex.Caching.KeyGenerator` - for more information. - """ - @callback __default_key_generator__ :: Nebulex.Caching.KeyGenerator.t() - @doc """ Returns the adapter configuration stored in the `:otp_app` environment. If the `c:init/1` callback is implemented in the cache, it will be invoked. """ - @callback config() :: Keyword.t() + @doc group: "Runtime API" + @callback config() :: keyword() @doc """ Starts a supervision and return `{:ok, pid}` or just `:ok` if nothing @@ -596,15 +591,36 @@ defmodule Nebulex.Cache do See the configuration in the moduledoc for options shared between adapters, for adapter-specific configuration see the adapter's documentation. """ - @callback start_link(opts) :: - {:ok, pid} - | {:error, {:already_started, pid}} - | {:error, term} + @doc group: "Runtime API" + @callback start_link(opts()) :: + {:ok, pid()} + | {:error, {:already_started, pid()}} + | {:error, any()} @doc """ Shuts down the cache. + + ## Options + + `:timeout` - It is an integer that specifies how many milliseconds to wait + for the cache supervisor process to terminate, or the atom `:infinity` to + wait indefinitely. Defaults to `5000`. See `Supervisor.stop/3`. + + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. 
+ """ + @doc group: "Runtime API" + @callback stop(opts()) :: :ok + + @doc """ + Same as `c:stop/1` but stops the cache instance given in the first argument + `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. """ - @callback stop(timeout) :: :ok + @doc group: "Runtime API" + @callback stop(dynamic_cache(), opts()) :: :ok @doc """ Returns the atom name or pid of the current cache @@ -612,34 +628,33 @@ defmodule Nebulex.Cache do See also `c:put_dynamic_cache/1`. """ - @callback get_dynamic_cache() :: atom() | pid() + @doc group: "Runtime API" + @callback get_dynamic_cache() :: dynamic_cache() @doc """ Sets the dynamic cache to be used in further commands (based on Ecto dynamic repo). - There might be cases where we want to have different cache instances but - accessing them through the same cache module. By default, when you call + There are cases where you may want to have different cache instances but + access them through the same cache module. By default, when you call `MyApp.Cache.start_link/1`, it will start a cache with the name `MyApp.Cache`. But it is also possible to start multiple caches by using a different name for each of them: MyApp.Cache.start_link(name: :cache1) - MyApp.Cache.start_link(name: :cache2, backend: :shards) + MyApp.Cache.start_link(name: :cache2) You can also start caches without names by explicitly setting the name to `nil`: - MyApp.Cache.start_link(name: nil, backend: :shards) + MyApp.Cache.start_link(name: nil) > **NOTE:** There may be adapters requiring the `:name` option anyway, therefore, it is highly recommended to see the adapter's documentation you want to use. - However, once the cache is started, it is not possible to interact directly - with it, since all operations through `MyApp.Cache` are sent by default to - the cache named `MyApp.Cache`. 
But you can change the default cache at - compile-time: + All operations through `MyApp.Cache` are sent by default to the cache named + `MyApp.Cache`. But you can change the default cache at compile-time: use Nebulex.Cache, default_dynamic_cache: :cache_name @@ -649,11 +664,18 @@ defmodule Nebulex.Cache do From this moment on, all future commands performed by the current process will run on `:another_cache_name`. + + Additionally, all cache commands optionally support passing the wanted + dynamic cache (name or PID) as the first argument so you can directly + interact with a cache instance. See the + ["Dynamic caches"](#module-dynamic-caches) section in the module + documentation for more information. """ - @callback put_dynamic_cache(atom() | pid()) :: atom() | pid() + @doc group: "Runtime API" + @callback put_dynamic_cache(dynamic_cache()) :: dynamic_cache() @doc """ - Invokes the given function `fun` for the dynamic cache `name_or_pid`. + Invokes the function `fun` using the given dynamic cache. ## Example @@ -663,351 +685,978 @@ defmodule Nebulex.Cache do See `c:get_dynamic_cache/0` and `c:put_dynamic_cache/1`. """ - @callback with_dynamic_cache(name_or_pid :: atom() | pid(), fun) :: term + @doc group: "Runtime API" + @callback with_dynamic_cache(dynamic_cache(), fun()) :: any() + + ## Nebulex.Adapter.KV @doc """ - For the dynamic cache `name_or_pid`, invokes the given function name `fun` - from `module` with the list of arguments `args`. + Fetches the value for a specific `key` in the cache. - ## Example + If the cache contains the given `key`, then its value is returned + in the shape of `{:ok, value}`. - MyCache.with_dynamic_cache(:my_cache, Module, :some_fun, ["foo", "bar"]) + If there's an error with executing the command, `{:error, reason}` + is returned. `reason` is the cause of the error and can be + `Nebulex.KeyError` if the cache does not contain `key`, + `Nebulex.Error` otherwise. - See `c:get_dynamic_cache/0` and `c:put_dynamic_cache/1`. 
- """ - @callback with_dynamic_cache( - name_or_pid :: atom() | pid(), - module, - fun :: atom, - args :: [term] - ) :: term + ## Options - ## Nebulex.Adapter.Entry + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. - @doc """ - Gets a value from Cache where the key matches the given `key`. + ## Examples + + iex> MyCache.put("foo", "bar") + :ok + iex> MyCache.fetch("foo") + {:ok, "bar"} - Returns `nil` if no result was found. + iex> {:error, %Nebulex.KeyError{key: "bar"} = e} = MyCache.fetch("bar") + iex> e.reason + :not_found - See the configured adapter documentation for runtime options. + """ + @doc group: "KV API" + @callback fetch(key(), opts()) :: ok_error_tuple(value(), fetch_error_reason()) - ## Example + @doc """ + Same as `c:fetch/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. + + ## Examples iex> MyCache.put("foo", "bar") :ok + iex> MyCache.fetch(MyCache1, "foo", []) + {:ok, "bar"} - iex> MyCache.get("foo") + """ + @doc group: "KV API" + @callback fetch(dynamic_cache(), key(), opts()) :: ok_error_tuple(value(), fetch_error_reason()) + + @doc """ + Same as `c:fetch/2` but raises `Nebulex.KeyError` if the cache doesn't contain + `key` or `Nebulex.Error` if another error occurs while executing the command. + + ## Examples + + iex> MyCache.put("foo", "bar") + :ok + iex> MyCache.fetch!("foo") "bar" - iex> MyCache.get(:non_existent_key) - nil + """ + @doc group: "KV API" + @callback fetch!(key(), opts()) :: value() + @doc """ + Same as `c:fetch/3` but raises `Nebulex.KeyError` if the cache doesn't contain + `key` or `Nebulex.Error` if another error occurs while executing the command. 
""" - @callback get(key, opts) :: value + @doc group: "KV API" + @callback fetch!(dynamic_cache(), key(), opts()) :: value() @doc """ - Similar to `c:get/2` but raises `KeyError` if `key` is not found. + Gets a value from the cache where the key matches the given `key`. - See the configured adapter documentation for runtime options. + If the cache contains the given `key` its value is returned as + `{:ok, value}`. - ## Example + If the cache does not contain `key`, `{:ok, default}` is returned. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + ## Options - MyCache.get!(:a) + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. + + ## Examples + + iex> MyCache.put("foo", "bar") + :ok + iex> MyCache.get("foo") + {:ok, "bar"} + iex> MyCache.get(:inexistent) + {:ok, nil} + iex> MyCache.get(:inexistent, :default) + {:ok, :default} """ - @callback get!(key, opts) :: value + @doc group: "KV API" + @callback get(key(), default :: value(), opts()) :: ok_error_tuple(value()) @doc """ - Returns a `map` with all the key-value pairs in the Cache where the key - is in `keys`. + Same as `c:get/3`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. - If `keys` contains keys that are not in the Cache, they're simply ignored. + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. - See the configured adapter documentation for runtime options. + ## Examples - ## Example + iex> MyCache.get(MyCache1, "key", nil, []) + {:ok, nil} - iex> MyCache.put_all([a: 1, c: 3]) - :ok + """ + @doc group: "KV API" + @callback get(dynamic_cache(), key(), default :: value(), opts()) :: ok_error_tuple(value()) - iex> MyCache.get_all([:a, :b, :c]) - %{a: 1, c: 3} + @doc """ + Same as `c:get/3` but raises an exception if an error occurs. 
+ """ + @doc group: "KV API" + @callback get!(key(), default :: value(), opts()) :: value() + @doc """ + Same as `c:get!/4` but raises an exception if an error occurs. """ - @callback get_all(keys :: [key], opts) :: map + @doc group: "KV API" + @callback get!(dynamic_cache(), key(), default :: value(), opts()) :: value() @doc """ - Puts the given `value` under `key` into the Cache. + Puts the given `value` under `key` into the cache. - If `key` already holds an entry, it is overwritten. Any previous - time to live associated with the key is discarded on successful + If `key` already holds an entry, it is overwritten. Any previous TTL + (time to live) associated with the key is discarded on a successful `put` operation. - By default, `nil` values are skipped, which means they are not stored; - the call to the adapter is bypassed. + Returns `:ok` if successful; `{:error, reason}` otherwise. ## Options - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + #{Nebulex.Cache.Options.runtime_common_write_options_docs()} - See the configured adapter documentation for more runtime options. + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. 
- ## Example + ## Examples iex> MyCache.put("foo", "bar") :ok - If the value is nil, then it is not stored (operation is skipped): - - iex> MyCache.put("foo", nil) - :ok - - Put key with time-to-live: + Putting entries with specific time-to-live: iex> MyCache.put("foo", "bar", ttl: 10_000) :ok - - Using Nebulex.Time for TTL: - iex> MyCache.put("foo", "bar", ttl: :timer.hours(1)) :ok - iex> MyCache.put("foo", "bar", ttl: :timer.minutes(1)) :ok - - iex> MyCache.put("foo", "bar", ttl: :timer.seconds(1)) + iex> MyCache.put("foo", "bar", ttl: :timer.seconds(30)) :ok """ - @callback put(key, value, opts) :: :ok + @doc group: "KV API" + @callback put(key(), value(), opts()) :: :ok | error_tuple() @doc """ - Puts the given `entries` (key/value pairs) into the Cache. It replaces - existing values with new values (just as regular `put`). - - ## Options - - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + Same as `c:put/3`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. - See the configured adapter documentation for more runtime options. + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. - ## Example + ## Examples - iex> MyCache.put_all(apples: 3, bananas: 1) + iex> MyCache.put(MyCache1, "foo", "bar", []) :ok - - iex> MyCache.put_all(%{apples: 2, oranges: 1}, ttl: 10_000) + iex> MyCache.put(MyCache2, "foo", "bar", ttl: :timer.hours(1)) :ok - Ideally, this operation should be atomic, so all given keys are put at once. - But it depends purely on the adapter's implementation and the backend used - internally by the adapter. Hence, it is recommended to review the adapter's - documentation. 
""" - @callback put_all(entries, opts) :: :ok + @doc group: "KV API" + @callback put(dynamic_cache(), key(), value(), opts()) :: :ok | error_tuple() @doc """ - Puts the given `value` under `key` into the cache, only if it does not - already exist. - - Returns `true` if a value was set, otherwise, `false` is returned. - - By default, `nil` values are skipped, which means they are not stored; - the call to the adapter is bypassed. + Same as `c:put/3` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback put!(key(), value(), opts()) :: :ok - ## Options + @doc """ + Same as `c:put!/4` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback put!(dynamic_cache(), key(), value(), opts()) :: :ok - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + @doc """ + Puts the given `entries` (key/value pairs) into the cache. It replaces + existing values with new values (just as regular `put`). - See the configured adapter documentation for more runtime options. + Returns `:ok` if successful; `{:error, reason}` otherwise. - ## Example + ## Options - iex> MyCache.put_new("foo", "bar") - true + #{Nebulex.Cache.Options.runtime_common_write_options_docs()} - iex> MyCache.put_new("foo", "bar") - false + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. - If the value is nil, it is not stored (operation is skipped): + ## Examples - iex> MyCache.put_new("other", nil) - true + iex> MyCache.put_all(apples: 3, bananas: 1) + :ok + iex> MyCache.put_all(%{apples: 2, oranges: 1}, ttl: :timer.hours(1)) + :ok + > #### Atomic operation {: .warning} + > + > Ideally, this operation should be atomic, so all given keys are put at once. + > But it depends purely on the adapter's implementation and the backend used + > internally by the adapter. 
Hence, reviewing the adapter's documentation is + > highly recommended. """ - @callback put_new(key, value, opts) :: boolean + @doc group: "KV API" + @callback put_all(entries(), opts()) :: :ok | error_tuple() @doc """ - Similar to `c:put_new/3` but raises `Nebulex.KeyAlreadyExistsError` if the - key already exists. + Same as `c:put_all/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. - See `c:put_new/3` for general considerations and options. + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. - ## Example + ## Examples - iex> MyCache.put_new!("foo", "bar") - true + iex> MyCache.put_all(MyCache1, [apples: 3, bananas: 1], []) + :ok + iex> MyCache.put_all(MyCache1, %{oranges: 1}, ttl: :timer.hours(1)) + :ok """ - @callback put_new!(key, value, opts) :: true + @doc group: "KV API" + @callback put_all(dynamic_cache(), entries(), opts()) :: :ok | error_tuple() @doc """ - Puts the given `entries` (key/value pairs) into the `cache`. It will not - perform any operation at all even if just a single key already exists. + Same as `c:put_all/2` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback put_all!(entries(), opts()) :: :ok - Returns `true` if all entries were successfully set. It returns `false` - if no key was set (at least one key already existed). + @doc """ + Same as `c:put_all!/3` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback put_all!(dynamic_cache(), entries(), opts()) :: :ok - ## Options + @doc """ + Puts the given `value` under `key` into the cache only if it does not + already exist. - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + Returns `{:ok, true}` if the value is stored; otherwise, `{:ok, false}` + is returned. 
- See the configured adapter documentation for more runtime options. + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. - ## Example + ## Options - iex> MyCache.put_new("foo", "bar") + #{Nebulex.Cache.Options.runtime_common_write_options_docs()} + + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. + + ## Examples + + iex> MyCache.put_new("foo", "bar") + {:ok, true} + iex> MyCache.put_new("foo", "bar", ttl: :timer.hours(1)) + {:ok, false} + + """ + @doc group: "KV API" + @callback put_new(key(), value(), opts()) :: ok_error_tuple(boolean()) + + @doc """ + Same as `c:put_new/3`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. + + ## Examples + + iex> MyCache.put_new(MyCache1, "foo", "bar", []) + {:ok, true} + iex> MyCache.put_new(MyCache1, "foo", "bar", ttl: :timer.hours(1)) + {:ok, false} + + """ + @doc group: "KV API" + @callback put_new(dynamic_cache(), key(), value(), opts()) :: ok_error_tuple(boolean()) + + @doc """ + Same as `c:put_new/3` but raises an exception if an error occurs. + + ## Examples + + iex> MyCache.put_new!("foo", "bar") true + iex> MyCache.put_new!("foo", "bar", ttl: :timer.hours(1)) + false + + """ + @doc group: "KV API" + @callback put_new!(key(), value(), opts()) :: boolean() + + @doc """ + Same as `c:put_new!/4` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback put_new!(dynamic_cache(), key(), value(), opts()) :: boolean() + + @doc """ + Puts the given `entries` (key/value pairs) into the `cache`. It will not + perform any operation, even if a single key exists. + + Returns `{:ok, true}` if all entries are successfully stored, or + `{:ok, false}` if no key was set (at least one key already existed). 
- iex> MyCache.put_new_all(%{apples: 3, oranges: 1}, ttl: 10_000) + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + ## Options + + #{Nebulex.Cache.Options.runtime_common_write_options_docs()} + + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. + + ## Examples + + iex> MyCache.put_new_all(apples: 3, bananas: 1) + {:ok, true} + iex> MyCache.put_new_all(%{apples: 3, oranges: 1}, ttl: :timer.hours(1)) + {:ok, false} + + > #### Atomic operation {: .warning} + > + > Ideally, this operation should be atomic, so all given keys are put at once. + > But it depends purely on the adapter's implementation and the backend used + > internally by the adapter. Hence, reviewing the adapter's documentation is + > highly recommended. + """ + @doc group: "KV API" + @callback put_new_all(entries(), opts()) :: ok_error_tuple(boolean()) + + @doc """ + Same as `c:put_new_all/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. + + ## Examples + + iex> MyCache.put_new_all(MyCache1, [apples: 3, bananas: 1], []) + {:ok, true} + iex> MyCache.put_new_all(MyCache1, %{apples: 3, oranges: 1}, ttl: 10_000) + {:ok, false} + + """ + @doc group: "KV API" + @callback put_new_all(dynamic_cache(), entries(), opts()) :: ok_error_tuple(boolean()) + + @doc """ + Same as `c:put_new_all/2` but raises an exception if an error occurs. + + ## Examples + + iex> MyCache.put_new_all!(apples: 3, bananas: 1) + true + iex> MyCache.put_new_all!(%{apples: 3, oranges: 1}, ttl: :timer.hours(1)) false - Ideally, this operation should be atomic, so all given keys are put at once. - But it depends purely on the adapter's implementation and the backend used - internally by the adapter. 
Hence, it is recommended to review the adapter's - documentation. """ - @callback put_new_all(entries, opts) :: boolean + @doc group: "KV API" + @callback put_new_all!(entries(), opts()) :: boolean() + + @doc """ + Same as `c:put_new_all!/3` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback put_new_all!(dynamic_cache(), entries(), opts()) :: boolean() @doc """ Alters the entry stored under `key`, but only if the entry already exists - into the Cache. + in the cache. - Returns `true` if a value was set, otherwise, `false` is returned. + Returns `{:ok, true}` if the value is replaced. Otherwise, `{:ok, false}` + is returned. - By default, `nil` values are skipped, which means they are not stored; - the call to the adapter is bypassed. + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. ## Options - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + #{Nebulex.Cache.Options.runtime_common_write_options_docs()} - See the configured adapter documentation for more runtime options. + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. - ## Example + ## Examples iex> MyCache.replace("foo", "bar") - false - + {:ok, false} iex> MyCache.put_new("foo", "bar") - true - + {:ok, true} iex> MyCache.replace("foo", "bar2") - true + {:ok, true} Update current value and TTL: iex> MyCache.replace("foo", "bar3", ttl: 10_000) - true + {:ok, true} """ - @callback replace(key, value, opts) :: boolean + @doc group: "KV API" + @callback replace(key(), value(), opts()) :: ok_error_tuple(boolean()) @doc """ - Similar to `c:replace/3` but raises `KeyError` if `key` is not found. + Same as `c:replace/3`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. 
- See `c:replace/3` for general considerations and options. + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. - ## Example + ## Examples + + iex> MyCache.replace(MyCache1, "foo", "bar", []) + {:ok, false} + iex> MyCache.put_new("foo", "bar") + {:ok, true} + iex> MyCache.replace(MyCache1, "foo", "bar", ttl: :timer.hours(1)) + {:ok, true} + + """ + @doc group: "KV API" + @callback replace(dynamic_cache(), key(), value(), opts()) :: ok_error_tuple(boolean()) + + @doc """ + Same as `c:replace/3` but raises an exception if an error occurs. + + ## Examples iex> MyCache.replace!("foo", "bar") + false + iex> MyCache.put_new!("foo", "bar") + true + iex> MyCache.replace!("foo", "bar2") true """ - @callback replace!(key, value, opts) :: true + @doc group: "KV API" + @callback replace!(key(), value(), opts()) :: boolean() + + @doc """ + Same as `c:replace!/4` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback replace!(dynamic_cache(), key(), value(), opts()) :: boolean() @doc """ - Deletes the entry in Cache for a specific `key`. + Deletes the entry in the cache for a specific `key`. - See the configured adapter documentation for runtime options. + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. - ## Example + ## Options + + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. + + ## Examples iex> MyCache.put(:a, 1) :ok - iex> MyCache.delete(:a) :ok - - iex> MyCache.get(:a) + iex> MyCache.get!(:a) nil + iex> MyCache.delete(:inexistent) + :ok + + """ + @doc group: "KV API" + @callback delete(key(), opts()) :: :ok | error_tuple() + + @doc """ + Same as `c:delete/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. 
+ + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. + + ## Examples - iex> MyCache.delete(:non_existent_key) + iex> MyCache.delete(MyCache1, :a, []) :ok """ - @callback delete(key, opts) :: :ok + @doc group: "KV API" + @callback delete(dynamic_cache(), key(), opts()) :: :ok | error_tuple() @doc """ - Returns and removes the value associated with `key` in the Cache. - If the `key` does not exist, then `nil` is returned. + Same as `c:delete/2` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback delete!(key(), opts()) :: :ok + + @doc """ + Same as `c:delete!/3` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback delete!(dynamic_cache(), key(), opts()) :: :ok + + @doc """ + Removes and returns the value associated with `key` in the cache. - If `key` is `nil`, the call to the adapter is bypassed, and `nil` is returned. + If `key` is present in the cache, its value is removed and returned as + `{:ok, value}`. - See the configured adapter documentation for runtime options. + If there's an error with executing the command, `{:error, reason}` + is returned. `reason` is the cause of the error and can be + `Nebulex.KeyError` if the cache does not contain `key` or + `Nebulex.Error` otherwise. + + ## Options + + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. ## Examples iex> MyCache.put(:a, 1) :ok - iex> MyCache.take(:a) - 1 + {:ok, 1} - iex> MyCache.take(:a) - nil + iex> {:error, %Nebulex.KeyError{key: :a} = e} = MyCache.take(:a) + iex> e.reason + :not_found """ - @callback take(key, opts) :: value + @doc group: "KV API" + @callback take(key(), opts()) :: ok_error_tuple(value(), fetch_error_reason()) @doc """ - Similar to `c:take/2` but raises `KeyError` if `key` is not found. 
+ Same as `c:take/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. - See `c:take/2` for general considerations and options. + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. - ## Example + ## Examples - MyCache.take!(:a) + iex> MyCache.put(:a, 1) + :ok + iex> MyCache.take(MyCache1, :a, []) + {:ok, 1} """ - @callback take!(key, opts) :: value + @doc group: "KV API" + @callback take(dynamic_cache(), key(), opts()) :: ok_error_tuple(value(), fetch_error_reason()) @doc """ - Returns whether the given `key` exists in the Cache. + Same as `c:take/2` but raises an exception if an error occurs. ## Examples iex> MyCache.put(:a, 1) :ok + iex> MyCache.take!(:a) + 1 + + """ + @doc group: "KV API" + @callback take!(key(), opts()) :: value() + + @doc """ + Same as `c:take!/3` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback take!(dynamic_cache(), key(), opts()) :: value() + + @doc """ + Determines if the cache contains an entry for the specified `key`. + More formally, it returns `{:ok, true}` if the cache contains the given `key`. + If the cache doesn't contain `key`, `{:ok, false}` is returned. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + ## Options + + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. + + ## Examples + + iex> MyCache.put(:a, 1) + :ok iex> MyCache.has_key?(:a) + {:ok, true} + iex> MyCache.has_key?(:b) + {:ok, false} + + """ + @doc group: "KV API" + @callback has_key?(key(), opts()) :: ok_error_tuple(boolean()) + + @doc """ + Same as `c:has_key?/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. 
+ + ## Examples + + iex> MyCache.has_key?(MyCache1, :a, []) + {:ok, false} + + """ + @doc group: "KV API" + @callback has_key?(dynamic_cache(), key(), opts()) :: ok_error_tuple(boolean()) + + @doc """ + Increments the counter stored at `key` by the given `amount` and returns + the current count as `{:ok, count}`. + + If `amount < 0`, the value is decremented by that `amount` instead. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + ## Options + + #{Nebulex.Cache.Options.update_counter_options_docs()} + + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. + + ## Examples + + iex> MyCache.incr(:a) + {:ok, 1} + iex> MyCache.incr(:a, 2) + {:ok, 3} + iex> MyCache.incr(:a, -1) + {:ok, 2} + iex> MyCache.incr(:missing_key, 2, default: 10) + {:ok, 12} + + """ + @doc group: "KV API" + @callback incr(key(), amount :: integer(), opts()) :: ok_error_tuple(integer()) + + @doc """ + Same as `c:incr/3`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. + + ## Examples + + iex> MyCache.incr(MyCache1, :a, 1, []) + {:ok, 1} + + """ + @doc group: "KV API" + @callback incr(dynamic_cache(), key(), amount :: integer(), opts()) :: ok_error_tuple(integer()) + + @doc """ + Same as `c:incr/3` but raises an exception if an error occurs. + + ## Examples + + iex> MyCache.incr!(:a) + 1 + iex> MyCache.incr!(:a, 2) + 3 + + """ + @doc group: "KV API" + @callback incr!(key(), amount :: integer(), opts()) :: integer() + + @doc """ + Same as `c:incr!/4` but raises an exception if an error occurs. 
+ """ + @doc group: "KV API" + @callback incr!(dynamic_cache(), key(), amount :: integer(), opts()) :: integer() + + @doc """ + Decrements the counter stored at `key` by the given `amount` and returns + the current count as `{:ok, count}`. + + If `amount < 0`, the value is incremented by that `amount` instead + (opposite to `incr/3`). + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + ## Options + + #{Nebulex.Cache.Options.update_counter_options_docs()} + + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. + + ## Examples + + iex> MyCache.decr(:a) + {:ok, -1} + iex> MyCache.decr(:a, 2) + {:ok, -3} + iex> MyCache.decr(:a, -1) + {:ok, -2} + + iex> MyCache.decr(:missing_key, 2, default: 10) + {:ok, 8} + + """ + @doc group: "KV API" + @callback decr(key(), amount :: integer(), opts()) :: ok_error_tuple(integer()) + + @doc """ + Same as `c:decr/3`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. + + ## Examples + + iex> MyCache.decr(MyCache1, :a, 1, []) + {:ok, -1} + + """ + @doc group: "KV API" + @callback decr(dynamic_cache(), key(), amount :: integer(), opts()) :: ok_error_tuple(integer()) + + @doc """ + Same as `c:decr/3` but raises an exception if an error occurs. + + ## Examples + + iex> MyCache.decr!(:a) + -1 + + """ + @doc group: "KV API" + @callback decr!(key(), amount :: integer(), opts()) :: integer() + + @doc """ + Same as `c:decr!/4` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback decr!(dynamic_cache(), key(), amount :: integer(), opts()) :: integer() + + @doc """ + Returns the remaining time-to-live for the given `key`. + + If `key` is present in the cache, its remaining TTL is returned as + `{:ok, ttl}`. 
+ + If there's an error with executing the command, `{:error, reason}` + is returned. `reason` is the cause of the error and can be + `Nebulex.KeyError` if the cache does not contain `key`, + `Nebulex.Error` otherwise. + + ## Options + + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. + + ## Examples + + iex> MyCache.put(:a, 1, ttl: 5000) + :ok + iex> MyCache.put(:b, 2) + :ok + iex> MyCache.ttl(:a) + {:ok, _remaining_ttl} + iex> MyCache.ttl(:b) + {:ok, :infinity} + + iex> {:error, %Nebulex.KeyError{key: :c} = e} = MyCache.ttl(:c) + iex> e.reason + :not_found + + """ + @doc group: "KV API" + @callback ttl(key(), opts()) :: ok_error_tuple(timeout(), fetch_error_reason()) + + @doc """ + Same as `c:ttl/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. + + ## Examples + + iex> MyCache.put(:a, 1, ttl: 5000) + :ok + iex> MyCache.ttl(MyCache1, :a, []) + {:ok, _remaining_ttl} + + """ + @doc group: "KV API" + @callback ttl(dynamic_cache(), key(), opts()) :: ok_error_tuple(timeout(), fetch_error_reason()) + + @doc """ + Same as `c:ttl/2` but raises an exception if an error occurs. + + ## Examples + + iex> MyCache.put(:a, 1, ttl: 5000) + :ok + iex> MyCache.ttl!(:a) + _remaining_ttl + + """ + @doc group: "KV API" + @callback ttl!(key(), opts()) :: timeout() + + @doc """ + Same as `c:ttl!/3` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback ttl!(dynamic_cache(), key(), opts()) :: timeout() + + @doc """ + Returns `{:ok, true}` if the given `key` exists and the new `ttl` is + successfully updated; otherwise, `{:ok, false}` is returned. + + If there's an error with executing the command, `{:error, reason}` + is returned; where `reason` is the cause of the error. 
+ + ## Options + + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. + + ## Examples + + iex> MyCache.put(:a, 1) + :ok + iex> MyCache.expire(:a, :timer.hours(1)) + {:ok, true} + iex> MyCache.expire(:a, :infinity) + {:ok, true} + iex> MyCache.expire(:b, 5) + {:ok, false} + + """ + @doc group: "KV API" + @callback expire(key(), ttl :: timeout(), opts()) :: ok_error_tuple(boolean()) + + @doc """ + Same as `c:expire/3`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. + + ## Examples + + iex> MyCache.expire(MyCache1, :a, :timer.hours(1), []) + {:ok, false} + + """ + @doc group: "KV API" + @callback expire(dynamic_cache(), key(), ttl :: timeout(), opts()) :: ok_error_tuple(boolean()) + + @doc """ + Same as `c:expire/3` but raises an exception if an error occurs. + + ## Examples + + iex> MyCache.put(:a, 1) + :ok + iex> MyCache.expire!(:a, :timer.hours(1)) true - iex> MyCache.has_key?(:b) - false + """ + @doc group: "KV API" + @callback expire!(key(), ttl :: timeout(), opts()) :: boolean() + + @doc """ + Same as `c:expire!/4` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback expire!(dynamic_cache(), key(), ttl :: timeout(), opts()) :: boolean() + + @doc """ + Returns `{:ok, true}` if the given `key` exists and the last access time is + successfully updated; otherwise, `{:ok, false}` is returned. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + ## Options + + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. 
+
+  ## Examples
+
+      iex> MyCache.put(:a, 1)
+      :ok
+      iex> MyCache.touch(:a)
+      {:ok, true}
+      iex> MyCache.touch(:b)
+      {:ok, false}
+
+  """
+  @doc group: "KV API"
+  @callback touch(key(), opts()) :: ok_error_tuple(boolean())
+
+  @doc """
+  Same as `c:touch/2`, but the command is executed on the cache instance
+  given at the first argument `dynamic_cache`.
+
+  See the ["Dynamic caches"](#module-dynamic-caches) section in the
+  module documentation for more information.
+
+  ## Examples
+
+      iex> MyCache.touch(MyCache1, :a, [])
+      {:ok, false}
 
   """
-  @callback has_key?(key) :: boolean
+  @doc group: "KV API"
+  @callback touch(dynamic_cache(), key(), opts()) :: ok_error_tuple(boolean())
+
+  @doc """
+  Same as `c:touch/2` but raises an exception if an error occurs.
+
+  ## Examples
+
+      iex> MyCache.put(:a, 1)
+      :ok
+      iex> MyCache.touch!(:a)
+      true
+
+  """
+  @doc group: "KV API"
+  @callback touch!(key(), opts()) :: boolean()
+
+  @doc """
+  Same as `c:touch!/3` but raises an exception if an error occurs.
+  """
+  @doc group: "KV API"
+  @callback touch!(dynamic_cache(), key(), opts()) :: boolean()
## Options - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + #{Nebulex.Cache.Options.runtime_common_write_options_docs()} - See the configured adapter documentation for more runtime options. + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. ## Examples @@ -1036,548 +1689,598 @@ defmodule Nebulex.Cache do iex> MyCache.get_and_update(:a, fn current_value -> ...> {current_value, "value!"} ...> end) - {nil, "value!"} + {:ok, {nil, "value!"}} Update existing key: iex> MyCache.get_and_update(:a, fn current_value -> ...> {current_value, "new value!"} ...> end) - {"value!", "new value!"} + {:ok, {"value!", "new value!"}} Pop/remove value if exist: iex> MyCache.get_and_update(:a, fn _ -> :pop end) - {"new value!", nil} + {:ok, {"new value!", nil}} Pop/remove nonexistent key: iex> MyCache.get_and_update(:b, fn _ -> :pop end) - {nil, nil} + {:ok, {nil, nil}} + + """ + @doc group: "KV API" + @callback get_and_update(key(), (value() -> {current_value, new_value} | :pop), opts()) :: + ok_error_tuple({current_value, new_value}) + when current_value: value(), new_value: value() + + @doc """ + Same as `c:get_and_update/3`, but the command is executed on the cache + instance given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. + + ## Examples + + iex> MyCache.get_and_update(MyCache1, :a, &{&1, "value!"}, []) + {:ok, {nil, "value!"}} + + """ + @doc group: "KV API" + @callback get_and_update( + dynamic_cache(), + key(), + (value() -> {current_value, new_value} | :pop), + opts() + ) :: ok_error_tuple({current_value, new_value}) + when current_value: value(), new_value: value() + + @doc """ + Same as `c:get_and_update/3` but raises an exception if an error occurs. 
+ + ## Examples + + iex> MyCache.get_and_update!(:a, &{&1, "value!"}) + {nil, "value!"} """ - @callback get_and_update(key, (value -> {current_value, new_value} | :pop), opts) :: + @doc group: "KV API" + @callback get_and_update!(key(), (value() -> {current_value, new_value} | :pop), opts()) :: {current_value, new_value} - when current_value: value, new_value: value + when current_value: value(), new_value: value() @doc """ - Updates the cached `key` with the given function. + Same as `c:get_and_update!/4` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback get_and_update!( + dynamic_cache(), + key(), + (value() -> {current_value, new_value} | :pop), + opts() + ) :: {current_value, new_value} + when current_value: value(), new_value: value() + + @doc """ + Updates the `key` in the cache with the given function. + + If `key` is present in the cache, the existing value is passed to `fun` and + its result is used as the updated value of `key`. If `key` is not present in + the cache, `default` is inserted as the value of `key`. The default value + will not be passed through the update function. - If `key` is present in Cache with value `value`, `fun` is invoked with - argument `value` and its result is used as the new value of `key`. + This function returns: - If `key` is not present in Cache, `initial` is inserted as the value of `key`. - The initial value will not be passed through the update function. + * `{:ok, value}` - The value associated with the `key` is updated. + + * `{:error, reason}` - An error occurred executing the command. + `reason` is the cause. ## Options - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + #{Nebulex.Cache.Options.runtime_common_write_options_docs()} - See the configured adapter documentation for more runtime options. 
+ See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. ## Examples iex> MyCache.update(:a, 1, &(&1 * 2)) - 1 - + {:ok, 1} iex> MyCache.update(:a, 1, &(&1 * 2)) - 2 + {:ok, 2} """ - @callback update(key, initial :: value, (value -> value), opts) :: value + @doc group: "KV API" + @callback update(key(), initial :: value(), (value() -> value()), opts()) :: + ok_error_tuple(value()) @doc """ - Increments the counter stored at `key` by the given `amount`. + Same as `c:update/4`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. - If `amount < 0` (negative), the value is decremented by that `amount` - instead. + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. - ## Options + ## Examples - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + iex> MyCache.update(MyCache1, :a, 1, &(&1 * 2), []) + {:ok, 1} - * `:default` - If `key` is not present in Cache, the default value is - inserted as initial value of key before the it is incremented. - Defaults to `0`. + """ + @doc group: "KV API" + @callback update(dynamic_cache(), key(), initial :: value(), (value() -> value()), opts()) :: + ok_error_tuple(value()) - See the configured adapter documentation for more runtime options. + @doc """ + Same as `c:update/4` but raises an exception if an error occurs. ## Examples - iex> MyCache.incr(:a) + iex> MyCache.update!(:a, 1, &(&1 * 2)) 1 - iex> MyCache.incr(:a, 2) - 3 + """ + @doc group: "KV API" + @callback update!(key(), initial :: value(), (value() -> value()), opts()) :: value() - iex> MyCache.incr(:a, -1) - 2 + @doc """ + Same as `c:update!/5` but raises an exception if an error occurs. 
+ """ + @doc group: "KV API" + @callback update!(dynamic_cache(), key(), initial :: value(), (value() -> value()), opts()) :: + value() - iex> MyCache.incr(:missing_key, 2, default: 10) - 12 + ## Nebulex.Adapter.Queryable - """ - @callback incr(key, amount :: integer, opts) :: integer + @optional_callbacks get_all: 2, + get_all: 3, + get_all!: 2, + get_all!: 3, + count_all: 2, + count_all: 3, + count_all!: 2, + count_all!: 3, + delete_all: 2, + delete_all: 3, + delete_all!: 2, + delete_all!: 3, + stream: 2, + stream: 3, + stream!: 2, + stream!: 3 @doc """ - Decrements the counter stored at `key` by the given `amount`. + Fetches all entries from the cache matching the given query specified through + the ["query-spec"](#c:get_all/2-query-specification). - If `amount < 0` (negative), the value is incremented by that `amount` - instead (opposite to `incr/3`). + This function returns: - ## Options + * `{:ok, result}` - The cache executes the query successfully. The `result` + is a list with the matched entries. - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + * `{:error, reason}` - An error occurred executing the command. + `reason` is the cause. - * `:default` - If `key` is not present in Cache, the default value is - inserted as initial value of key before the it is incremented. - Defaults to `0`. + May raise `Nebulex.QueryError` if query validation fails. - See the configured adapter documentation for more runtime options. + ## Query specification - ## Examples + There are two ways to use the Query API: - iex> MyCache.decr(:a) - -1 + * Fetch multiple keys (all at once), like a bulk fetch. + * Fetch all entries from the cache matching a given query, more like a + search (this is the most generic option). - iex> MyCache.decr(:a, 2) - -3 + Here is where the `query_spec` argument comes in to specify the type of query + to run. 
- iex> MyCache.decr(:a, -1) - -2 + The `query_spec` argument is a `t:keyword/0` with options defining the desired + query. The `query_spec` fields or options are: - iex> MyCache.decr(:missing_key, 2, default: 10) - 8 + #{Nebulex.Cache.QuerySpec.options_docs()} - """ - @callback decr(key, amount :: integer, opts) :: integer + ### Fetching multiple keys - @doc """ - Returns the remaining time-to-live for the given `key`. If the `key` does not - exist, then `nil` is returned. + While you can perform any query using the `:query` option (even fetching + multiple keys), the option `:in` is preferable. For example: - ## Examples + MyCache.get_all(in: ["a", "list", "of", "keys"]) - iex> MyCache.put(:a, 1, ttl: 5000) - :ok + ### Fetching all entries matching a given query - iex> MyCache.put(:b, 2) - :ok + As mentioned above, the option `:query` is the most generic way to match + entries in a cache. This option allows users to write custom queries + to be executed by the underlying adapter. - iex> MyCache.ttl(:a) - _remaining_ttl + For matching all cached entries, you can skip the `:query` option or set it + to `nil` instead (the default). For example: - iex> MyCache.ttl(:b) - :infinity + MyCache.get_all() #=> Equivalent to MyCache.get_all(query: nil) - iex> MyCache.ttl(:c) - nil + Using a custom query: - """ - @callback ttl(key) :: timeout | nil + MyCache.get_all(query: query_supported_by_the_adapter) - @doc """ - Returns `true` if the given `key` exists and the new `ttl` was successfully - updated, otherwise, `false` is returned. + > _Nebulex recommends to see the adapter documentation when using this option._ + + ## Options + + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. 
## Examples - iex> MyCache.put(:a, 1) - :ok + Populate the cache with some entries: - iex> MyCache.expire(:a, 5) - true + iex> MyCache.put_all(a: 1, b: 2, c: 3) + :ok - iex> MyCache.expire(:a, :infinity) - true + Fetch all entries in the cache: - iex> MyCache.ttl(:b, 5) - false + iex> MyCache.get_all() + {:ok, [a: 1, b: 2, c: 3]} - """ - @callback expire(key, ttl :: timeout) :: boolean + Fetch all entries returning only the keys: - @doc """ - Returns `true` if the given `key` exists and the last access time was - successfully updated, otherwise, `false` is returned. + iex> MyCache.get_all(select: :key) + {:ok, [:a, :b, :c]} - ## Examples + Fetch all entries returning only the values: - iex> MyCache.put(:a, 1) - :ok + iex> MyCache.get_all(select: :value) + {:ok, [1, 2, 3]} - iex> MyCache.touch(:a) - true + Fetch only the requested keys (bulk fetch): - iex> MyCache.ttl(:b) - false + iex> MyCache.get_all(in: [:a, :b, :d]) + {:ok, [a: 1, b: 2]} - """ - @callback touch(key) :: boolean + Fetch the requested keys returning only the keys or values: - ## Deprecated Callbacks + iex> MyCache.get_all(in: [:a, :b, :d], select: :key) + {:ok, [:a, :b]} + iex> MyCache.get_all(in: [:a, :b, :d], select: :value) + {:ok, [1, 2]} - @doc """ - Returns the total number of cached entries. + ### Query examples for `Nebulex.Adapters.Local` adapter - ## Examples + The `Nebulex.Adapters.Local` adapter supports **"ETS Match Spec"** as query + values (in addition to `nil` or the option `:in`). - iex> :ok = Enum.each(1..10, &MyCache.put(&1, &1)) - iex> MyCache.size() - 10 + You must know the adapter's entry structure for match-spec queries, which is + `{:entry, key, value, touched, ttl}`. 
For example, one may write the following
+  query:
 
-      iex> :ok = Enum.each(1..5, &MyCache.delete(&1))
-      iex> MyCache.size()
-      5
+      iex> match_spec = [
+      ...>   {
+      ...>     {:entry, :"$1", :"$2", :_, :_},
+      ...>     [{:>, :"$2", 1}],
+      ...>     [{{:"$1", :"$2"}}]
+      ...>   }
+      ...> ]
+      iex> MyCache.get_all(query: match_spec)
+      {:ok, [b: 2, c: 3]}
 
   """
-  @doc deprecated: "Use count_all/2 instead"
-  @callback size() :: integer
+  @doc group: "Query API"
+  @callback get_all(query_spec(), opts()) :: ok_error_tuple([any()])
 
   @doc """
-  Flushes the cache and returns the number of evicted keys.
+  Same as `c:get_all/2`, but the command is executed on the cache instance
+  given at the first argument `dynamic_cache`.
 
-  ## Examples
+  See the ["Dynamic caches"](#module-dynamic-caches) section in the
+  module documentation for more information.
 
-      iex> :ok = Enum.each(1..5, &MyCache.put(&1, &1))
-      iex> MyCache.flush()
-      5
+  ## Examples
 
-      iex> MyCache.size()
-      0
+      iex> MyCache.get_all(MyCache1, [], [])
+      {:ok, _matched_entries}
 
   """
-  @doc deprecated: "Use delete_all/2 instead"
-  @callback flush() :: integer
-
-  ## Nebulex.Adapter.Queryable
-
-  @optional_callbacks all: 2, count_all: 2, delete_all: 2, stream: 2
+  @doc group: "Query API"
+  @callback get_all(dynamic_cache(), query_spec(), opts()) :: ok_error_tuple([any()])
 
   @doc """
-  Fetches all entries from cache matching the given `query`.
+  Same as `c:get_all/2` but raises an exception if an error occurs.
 
-  May raise `Nebulex.QueryError` if query validation fails.
-
-  ## Query values
-
-  There are two types of query values. The ones shared and implemented
-  by all adapters and the ones that are adapter specific.
-
-  ### Common queries
+  ## Examples
 
-  The following query values are shared and/or supported for all adapters:
+      iex> MyCache.put_all(a: 1, b: 2, c: 3)
+      :ok
+      iex> MyCache.get_all!()
+      [a: 1, b: 2, c: 3]
+      iex> MyCache.get_all!(in: [:a, :b])
+      [a: 1, b: 2]
 
-  * `nil` - Returns a list with all cached entries based on the `:return`
-    option.
+ """ + @doc group: "Query API" + @callback get_all!(query_spec(), opts()) :: [any()] - ### Adapter-specific queries + @doc """ + Same as `c:get_all!/3` but raises an exception if an error occurs. + """ + @doc group: "Query API" + @callback get_all!(dynamic_cache(), query_spec(), opts()) :: [any()] - The `query` value depends entirely on the adapter implementation; it could - any term. Therefore, it is highly recommended to see adapters' documentation - for more information about building queries. For example, the built-in - `Nebulex.Adapters.Local` adapter uses `:ets.match_spec()` for queries, - as well as other pre-defined ones like `:unexpired` and `:expired`. + @doc """ + Deletes all entries matching the query specified by the given `query_spec`. - ## Options + See `c:get_all/2` for more information about the `query_spec`. - * `:return` - Tells the query what to return from the matched entries. - See the possible values in the "Query return option" section below. - The default depends on the adapter, for example, the default for the - built-in adapters is `:key`. This option is supported by the built-in - adapters, but it is recommended to see the adapter's documentation - to confirm its compatibility with this option. + This function returns: - See the configured adapter documentation for more runtime options. + * `{:ok, deleted_count}` - The cache executes the query successfully and + returns the deleted entries count. - ## Query return option + * `{:error, reason}` - An error occurred executing the command. + `reason` is the cause. - The following are the possible values for the `:return` option: + May raise `Nebulex.QueryError` if query validation fails. - * `:key` - Returns a list only with the keys. - * `:value` - Returns a list only with the values. - * `:entry` - Returns a list of `t:Nebulex.Entry.t/0`. - * `{:key, :value}` - Returns a list of tuples in the form `{key, value}`. 
+ ## Options - See adapters documentation to confirm what of these options are supported - and what other added. + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. - ## Example + ## Examples Populate the cache with some entries: - iex> :ok = Enum.each(1..5, &MyCache.put(&1, &1 * 2)) - - Fetch all (with default params): - - iex> MyCache.all() - [1, 2, 3, 4, 5] + iex> Enum.each(1..5, &MyCache.put(&1, &1 * 2)) + :ok - Fetch all entries and return values: + Delete all (default args): - iex> MyCache.all(nil, return: :value) - [2, 4, 6, 8, 10] + iex> MyCache.delete_all() + {:ok, 5} - Fetch all entries and return them as key/value pairs: + Delete only the requested keys (bulk delete): - iex> MyCache.all(nil, return: {:key, :value}) - [{1, 2}, {2, 4}, {3, 6}, {4, 8}, {5, 10}] + iex> MyCache.delete_all(in: [1, 2, 10]) + {:ok, 2} - Fetch all entries that match with the given query assuming we are using + Delete all entries that match with the given query, assuming we are using `Nebulex.Adapters.Local` adapter: - iex> query = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 5}], [:"$1"]}] - iex> MyCache.all(query) - [3, 4, 5] - - ## Query - - Query spec is defined by the adapter, hence, it is recommended to review - adapters documentation. For instance, the built-in `Nebulex.Adapters.Local` - adapter supports `nil | :unexpired | :expired | :ets.match_spec()` as query - value. 
- - ## Examples - - Additional built-in queries for `Nebulex.Adapters.Local` adapter: - - iex> unexpired = MyCache.all(:unexpired) - iex> expired = MyCache.all(:expired) - - If we are using Nebulex.Adapters.Local adapter, the stored entry tuple - `{:entry, key, value, touched, ttl}`, then the match spec could be - something like: + iex> query = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 5}], [true]}] + iex> {:ok, deleted_count} = MyCache.delete_all(query: query) - iex> spec = [ - ...> {{:entry, :"$1", :"$2", :_, :_}, - ...> [{:>, :"$2", 5}], [{{:"$1", :"$2"}}]} - ...> ] - iex> MyCache.all(spec) - [{3, 6}, {4, 8}, {5, 10}] + See `c:get_all/2` for more query examples. + """ + @doc group: "Query API" + @callback delete_all(query_spec(), opts()) :: ok_error_tuple(non_neg_integer()) - The same previous query but using `Ex2ms`: + @doc """ + Same as `c:delete_all/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. - iex> import Ex2ms - Ex2ms + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. - iex> spec = - ...> fun do - ...> {_. key, value, _, _} when value > 5 -> {key, value} - ...> end + ## Examples - iex> MyCache.all(spec) - [{3, 6}, {4, 8}, {5, 10}] + iex> MyCache.delete_all(MyCache1, [], []) + {:ok, 0} """ - @callback all(query :: term, opts) :: [any] + @doc group: "Query API" + @callback delete_all(dynamic_cache(), query_spec(), opts()) :: ok_error_tuple(non_neg_integer()) @doc """ - Similar to `c:all/2` but returns a lazy enumerable that emits all entries - from the cache matching the given `query`. + Same as `c:delete_all/2` but raises an exception if an error occurs. - If `query` is `nil`, then all entries in cache match and are returned - when the stream is evaluated; based on the `:return` option. + ## Examples - May raise `Nebulex.QueryError` if query validation fails. 
+ iex> MyCache.delete_all!() + 0 - ## Query values + """ + @doc group: "Query API" + @callback delete_all!(query_spec(), opts()) :: integer() - See `c:all/2` callback for more information about the query values. + @doc """ + Same as `c:delete_all/3` but raises an exception if an error occurs. + """ + @doc group: "Query API" + @callback delete_all!(dynamic_cache(), query_spec(), opts()) :: integer() - ## Options + @doc """ + Counts all entries matching the query specified by the given `query_spec`. - * `:return` - Tells the query what to return from the matched entries. - See the possible values in the "Query return option" section below. - The default depends on the adapter, for example, the default for the - built-in adapters is `:key`. This option is supported by the built-in - adapters, but it is recommended to see the adapter's documentation - to confirm its compatibility with this option. + See `c:get_all/2` for more information about the `query_spec`. - * `:page_size` - Positive integer (>= 1) that defines the page size - internally used by the adapter for paginating the results coming - back from the cache's backend. Defaults to `20`; it's unlikely - this will ever need changing. + This function returns: - See the configured adapter documentation for more runtime options. + * `{:ok, count}` - The cache executes the query successfully and + returns the `count` of the matched entries. - ## Query return option + * `{:error, reason}` - An error occurred executing the command. + `reason` is the cause. - The following are the possible values for the `:return` option: + May raise `Nebulex.QueryError` if query validation fails. - * `:key` - Returns a list only with the keys. - * `:value` - Returns a list only with the values. - * `:entry` - Returns a list of `t:Nebulex.Entry.t/0`. - * `{:key, :value}` - Returns a list of tuples in the form `{key, value}`.
+ ## Options - See adapters documentation to confirm what of these options are supported - and what other added. + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. ## Examples Populate the cache with some entries: - iex> :ok = Enum.each(1..5, &MyCache.put(&1, &1 * 2)) - - Stream all (with default params): + iex> Enum.each(1..5, &MyCache.put(&1, &1 * 2)) + :ok - iex> MyCache.stream() |> Enum.to_list() - [1, 2, 3, 4, 5] + Count all entries in cache (cache size): - Stream all entries and return values: + iex> MyCache.count_all() + {:ok, 5} - iex> nil |> MyCache.stream(return: :value, page_size: 3) |> Enum.to_list() - [2, 4, 6, 8, 10] + Count all entries that match with the given query, assuming we are using + `Nebulex.Adapters.Local` adapter: - Stream all entries and return them as key/value pairs: + iex> query = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 5}], [true]}] + iex> {:ok, count} = MyCache.count_all(query: query) - iex> nil |> MyCache.stream(return: {:key, :value}) |> Enum.to_list() - [{1, 2}, {2, 4}, {3, 6}, {4, 8}, {5, 10}] + See `c:get_all/2` for more query examples. + """ + @doc group: "Query API" + @callback count_all(query_spec(), opts()) :: ok_error_tuple(non_neg_integer()) - Additional built-in queries for `Nebulex.Adapters.Local` adapter: + @doc """ + Same as `c:count_all/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. - iex> unexpired_stream = MyCache.stream(:unexpired) - iex> expired_stream = MyCache.stream(:expired) + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. 
- If we are using Nebulex.Adapters.Local adapter, the stored entry tuple - `{:entry, key, value, touched, ttl}`, then the match spec could be - something like: + ## Examples - iex> spec = [ - ...> {{:entry, :"$1", :"$2", :_, :_}, - ...> [{:>, :"$2", 5}], [{{:"$1", :"$2"}}]} - ...> ] - iex> MyCache.stream(spec, page_size: 100) |> Enum.to_list() - [{3, 6}, {4, 8}, {5, 10}] + iex> MyCache.count_all(MyCache1, [], []) + {:ok, 0} - The same previous query but using `Ex2ms`: + """ + @doc group: "Query API" + @callback count_all(dynamic_cache(), query_spec(), opts()) :: ok_error_tuple(non_neg_integer()) - iex> import Ex2ms - Ex2ms + @doc """ + Same as `c:count_all/2` but raises an exception if an error occurs. - iex> spec = - ...> fun do - ...> {_, key, value, _, _} when value > 5 -> {key, value} - ...> end + ## Examples - iex> spec |> MyCache.stream(page_size: 100) |> Enum.to_list() - [{3, 6}, {4, 8}, {5, 10}] + iex> MyCache.count_all!() + 0 """ - @callback stream(query :: term, opts) :: Enum.t() + @doc group: "Query API" + @callback count_all!(query_spec(), opts()) :: non_neg_integer() @doc """ - Deletes all entries matching the given `query`. If `query` is `nil`, - then all entries in the cache are deleted. - - It returns the number of deleted entries. - - May raise `Nebulex.QueryError` if query validation fails. + Same as `c:count_all/3` but raises an exception if an error occurs. + """ + @doc group: "Query API" + @callback count_all!(dynamic_cache(), query_spec(), opts()) :: non_neg_integer() - See the configured adapter documentation for runtime options. + @doc """ + Similar to `c:get_all/2`, but returns a lazy enumerable that emits all entries + matching the query specified by the given `query_spec`. - ## Query values + See `c:get_all/2` for more information about the `query_spec`. - See `c:all/2` callback for more information about the query values. + This function returns: - ## Example + * `{:ok, stream}` - It returns a `stream` of values.
- Populate the cache with some entries: + * `{:error, reason}` - An error occurred executing the command. + `reason` is the cause. - iex> :ok = Enum.each(1..5, &MyCache.put(&1, &1 * 2)) + May raise `Nebulex.QueryError` if query validation fails. - Delete all (with default params): + ## Options - iex> MyCache.delete_all() - 5 + * `:max_entries` - The number of entries to load from the cache + as we stream. Defaults to `100`. - Delete all entries that match with the given query assuming we are using - `Nebulex.Adapters.Local` adapter: + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. - iex> query = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 5}], [true]}] - iex> MyCache.delete_all(query) + ## Examples - > For the local adapter you can use [Ex2ms](https://github.com/ericmj/ex2ms) - to build the match specs much easier. + Populate the cache with some entries: - Additional built-in queries for `Nebulex.Adapters.Local` adapter: + iex> MyCache.put_all(a: 1, b: 2, c: 3) + :ok - iex> unexpired = MyCache.delete_all(:unexpired) - iex> expired = MyCache.delete_all(:expired) + Stream all (default args): - """ - @callback delete_all(query :: term, opts) :: integer + iex> {:ok, stream} = MyCache.stream() + iex> Enum.to_list(stream) + [a: 1, b: 2, c: 3] - @doc """ - Counts all entries in cache matching the given `query`. + Stream all entries returning only the keys (with :max_entries option): - It returns the count of the matched entries. + iex> {:ok, stream} = MyCache.stream([select: :key], max_entries: 2) + iex> Enum.to_list(stream) + [:a, :b, :c] - If `query` is `nil` (the default), then the total number of - cached entries is returned. + Stream all entries returning only the values: - May raise `Nebulex.QueryError` if query validation fails. 
+ iex> {:ok, stream} = MyCache.stream(select: :value) + iex> Enum.to_list(stream) + [1, 2, 3] - ## Query values + Stream only the requested keys (lazy bulk-fetch): - See `c:all/2` callback for more information about the query values. + iex> {:ok, stream} = MyCache.stream(in: [:a, :b, :d]) + iex> Enum.to_list(stream) + [a: 1, b: 2] + iex> {:ok, stream} = MyCache.stream(in: [:a, :b, :d], select: :key) + iex> Enum.to_list(stream) + [:a, :b] - ## Example + """ + @doc group: "Query API" + @callback stream(query_spec(), opts()) :: ok_error_tuple(Enum.t()) - Populate the cache with some entries: + @doc """ + Same as `c:stream/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. - iex> :ok = Enum.each(1..5, &MyCache.put(&1, &1 * 2)) + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. - Count all entries in cache: + ## Examples - iex> MyCache.count_all() - 5 + iex> MyCache.stream(MyCache1, nil, []) + {:ok, _stream} - Count all entries that match with the given query assuming we are using - `Nebulex.Adapters.Local` adapter: + """ + @doc group: "Query API" + @callback stream(dynamic_cache(), query_spec(), opts()) :: ok_error_tuple(Enum.t()) - iex> query = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 5}], [true]}] - iex> MyCache.count_all(query) + @doc """ + Same as `c:stream/2` but raises an exception if an error occurs. - > For the local adapter you can use [Ex2ms](https://github.com/ericmj/ex2ms) - to build the match specs much easier.
+ ## Examples - Additional built-in queries for `Nebulex.Adapters.Local` adapter: + iex> MyCache.put_all(a: 1, b: 2, c: 3) + :ok + iex> MyCache.stream!() |> Enum.to_list() + [a: 1, b: 2, c: 3] - iex> unexpired = MyCache.count_all(:unexpired) - iex> expired = MyCache.count_all(:expired) + """ + @doc group: "Query API" + @callback stream!(query_spec(), opts()) :: Enum.t() + @doc """ + Same as `c:stream/3` but raises an exception if an error occurs. """ - @callback count_all(query :: term, opts) :: integer + @doc group: "Query API" + @callback stream!(dynamic_cache(), query_spec(), opts()) :: Enum.t() ## Nebulex.Adapter.Persistence - @optional_callbacks dump: 2, load: 2 + @optional_callbacks dump: 2, dump: 3, dump!: 2, dump!: 3, load: 2, load: 3, load!: 2, load!: 3 @doc """ Dumps a cache to the given file `path`. - Returns `:ok` if successful, or `{:error, reason}` if an error occurs. + Returns `:ok` if successful, `{:error, reason}` otherwise. ## Options - This operation relies entirely on the adapter implementation, which means the - options depend on each of them. For that reason, it is recommended to review - the documentation of the adapter to be used. The built-in adapters inherit - the default implementation from `Nebulex.Adapter.Persistence`, hence, review - the available options there. + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. ## Examples Populate the cache with some entries: iex> entries = for x <- 1..10, into: %{}, do: {x, x} - iex> MyCache.set_many(entries) + iex> MyCache.put_all(entries) :ok Dump cache to a file: @@ -1586,27 +2289,52 @@ defmodule Nebulex.Cache do :ok """ - @callback dump(path :: Path.t(), opts) :: :ok | {:error, term} + @doc group: "Persistence API" + @callback dump(path :: Path.t(), opts()) :: :ok | error_tuple() + + @doc """ + Same as `c:dump/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`.
+ + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. + + ## Examples + + MyCache.dump(MyCache1, "my_cache", []) + + """ + @doc group: "Persistence API" + @callback dump(dynamic_cache(), path :: Path.t(), opts()) :: :ok | error_tuple() + + @doc """ + Same as `c:dump/2` but raises an exception if an error occurs. + """ + @doc group: "Persistence API" + @callback dump!(path :: Path.t(), opts()) :: :ok + + @doc """ + Same as `c:dump/3` but raises an exception if an error occurs. + """ + @doc group: "Persistence API" + @callback dump!(dynamic_cache(), path :: Path.t(), opts()) :: :ok @doc """ Loads a dumped cache from the given `path`. - Returns `:ok` if successful, or `{:error, reason}` if an error occurs. + Returns `:ok` if successful, `{:error, reason}` otherwise. ## Options - Similar to `c:dump/2`, this operation relies entirely on the adapter - implementation, therefore, it is recommended to review the documentation - of the adapter to be used. Similarly, the built-in adapters inherit the - default implementation from `Nebulex.Adapter.Persistence`, hence, review - the available options there. + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. ## Examples Populate the cache with some entries: iex> entries = for x <- 1..10, into: %{}, do: {x, x} - iex> MyCache.set_many(entries) + iex> MyCache.put_all(entries) :ok Dump cache to a file: @@ -1620,125 +2348,269 @@ defmodule Nebulex.Cache do :ok """ - @callback load(path :: Path.t(), opts) :: :ok | {:error, term} + @doc group: "Persistence API" + @callback load(path :: Path.t(), opts()) :: :ok | error_tuple() + + @doc """ + Same as `c:load/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`.
+ + ## Examples + + MyCache.load(MyCache1, "my_cache", []) + + """ + @doc group: "Persistence API" + @callback load(dynamic_cache(), path :: Path.t(), opts()) :: :ok | error_tuple() + + @doc """ + Same as `c:load/2` but raises an exception if an error occurs. + """ + @doc group: "Persistence API" + @callback load!(path :: Path.t(), opts()) :: :ok + + @doc """ + Same as `c:load/3` but raises an exception if an error occurs. + """ + @doc group: "Persistence API" + @callback load!(dynamic_cache(), path :: Path.t(), opts()) :: :ok ## Nebulex.Adapter.Transaction - @optional_callbacks transaction: 2, in_transaction?: 0 + @optional_callbacks transaction: 2, transaction: 3, in_transaction?: 1, in_transaction?: 2 @doc """ Runs the given function inside a transaction. - A successful transaction returns the value returned by the function. + If an Elixir exception occurs, the exception will bubble up from the + transaction function. If the cache aborts the transaction, it returns + `{:error, reason}`. + + A successful transaction returns the value returned by the function wrapped + in a tuple as `{:ok, value}`. + + ### Nested transactions - See the configured adapter documentation for runtime options. + If `transaction/2` is called inside another transaction, the cache executes + the function without wrapping the new transaction call in any way. + + ## Options + + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options.
## Examples - MyCache.transaction fn -> + MyCache.transaction(fn -> alice = MyCache.get(:alice) bob = MyCache.get(:bob) MyCache.put(:alice, %{alice | balance: alice.balance + 100}) MyCache.put(:bob, %{bob | balance: bob.balance + 100}) - end + end) - Locking only the involved key (recommended): + We can provide the keys to lock when using the `Nebulex.Adapters.Local` + adapter (recommended): + + MyCache.transaction( + fn -> + alice = MyCache.get(:alice) + bob = MyCache.get(:bob) + MyCache.put(:alice, %{alice | balance: alice.balance + 100}) + MyCache.put(:bob, %{bob | balance: bob.balance + 100}) + end, + [keys: [:alice, :bob]] + ) - MyCache.transaction [keys: [:alice, :bob]], fn -> - alice = MyCache.get(:alice) - bob = MyCache.get(:bob) - MyCache.put(:alice, %{alice | balance: alice.balance + 100}) - MyCache.put(:bob, %{bob | balance: bob.balance + 100}) - end + """ + @doc group: "Transaction API" + @callback transaction(fun(), opts()) :: ok_error_tuple(any()) + + @doc """ + Same as `c:transaction/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. + + ## Examples + + MyCache.transaction( + MyCache1, + fn -> + alice = MyCache.get(:alice) + bob = MyCache.get(:bob) + MyCache.put(:alice, %{alice | balance: alice.balance + 100}) + MyCache.put(:bob, %{bob | balance: bob.balance + 100}) + end, + [keys: [:alice, :bob]] + ) """ - @callback transaction(opts, function :: fun) :: term + @doc group: "Transaction API" + @callback transaction(dynamic_cache(), fun(), opts()) :: ok_error_tuple(any()) @doc """ - Returns `true` if the current process is inside a transaction. + Returns `{:ok, true}` if the current process is inside a transaction; + otherwise, `{:ok, false}` is returned. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error.
+ + ## Options + + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. ## Examples - MyCache.in_transaction? - #=> false + MyCache.in_transaction?() + #=> {:ok, false} MyCache.transaction(fn -> - MyCache.in_transaction? #=> true + MyCache.in_transaction? #=> {:ok, true} end) """ - @callback in_transaction?() :: boolean + @doc group: "Transaction API" + @callback in_transaction?(opts()) :: ok_error_tuple(boolean()) + + @doc """ + Same as `c:in_transaction?/1`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. + + ## Examples + + MyCache.in_transaction?(MyCache1, []) + + """ + @doc group: "Transaction API" + @callback in_transaction?(dynamic_cache(), opts()) :: ok_error_tuple(boolean()) - ## Nebulex.Adapter.Stats + ## Nebulex.Adapter.Info - @optional_callbacks stats: 0, dispatch_stats: 1 + @optional_callbacks info: 2, info: 3, info!: 2, info!: 3 @doc """ - Returns `Nebulex.Stats.t()` with the current stats values. + Returns `{:ok, info}` where `info` contains the requested cache information, + as specified by the `spec`. - If the stats are disabled for the cache, then `nil` is returned. + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. - ## Example + The `spec` (information specification key) can be: + + * **The atom `:all`** - returns a map with all information items. + * **An atom** - returns the value for the requested information item. + * **A list of atoms** - returns a map only with the requested information + items. + + If the argument `spec` is omitted, all information items are returned; + same as if the `spec` was the atom `:all`. + + The adapters are free to add the information specification keys they want. 
+ However, Nebulex suggests the adapters add the following keys: - iex> MyCache.stats() - %Nebulex.Stats{ - measurements: { + * `:server` - General information about the cache server (e.g., cache name, + adapter, PID, etc.). + * `:memory` - Memory consumption information (e.g., used memory, + allocated memory, etc.). + * `:stats` - Cache statistics (e.g., hits, misses, etc.). + + ## Examples + + The following examples assume the underlying adapter uses the implementation + provided by `Nebulex.Adapters.Common.Info`. + + iex> {:ok, info} = MyCache.info() + iex> info + %{ + server: %{ + nbx_version: "3.0.0", + cache_module: "MyCache", + cache_adapter: "Nebulex.Adapters.Local", + cache_name: "MyCache", + cache_pid: #PID<0.111.0> + }, + memory: %{ + total: 1_000_000, + used: 0 + }, + stats: %{ + deletions: 0, evictions: 0, expirations: 0, hits: 0, misses: 0, updates: 0, writes: 0 + } + } + + iex> {:ok, info} = MyCache.info(:server) + iex> info + %{ + nbx_version: "3.0.0", + cache_module: "MyCache", + cache_adapter: "Nebulex.Adapters.Local", + cache_name: "MyCache", + cache_pid: #PID<0.111.0> + } + + iex> {:ok, info} = MyCache.info([:server, :stats]) + iex> info + %{ + server: %{ + nbx_version: "3.0.0", + cache_module: "MyCache", + cache_adapter: "Nebulex.Adapters.Local", + cache_name: "MyCache", + cache_pid: #PID<0.111.0> }, - metadata: %{} + stats: %{ + deletions: 0, + evictions: 0, + expirations: 0, + hits: 0, + misses: 0, + updates: 0, + writes: 0 + } } """ - @callback stats() :: Nebulex.Stats.t() | nil + @doc group: "Info API" + @callback info(spec :: info_spec(), opts()) :: ok_error_tuple(info_data()) @doc """ - Emits a telemetry event when called with the current stats count. - - The telemetry `:measurements` map will include the same as - `Nebulex.Stats.t()`'s measurements. For example: - - * `:evictions` - Current **evictions** count. - * `:expirations` - Current **expirations** count. - * `:hits` - Current **hits** count. 
- * `:misses` - Current **misses** count. - * `:updates` - Current **updates** count. - * `:writes` - Current **writes** count. - - The telemetry `:metadata` map will include the same as `Nebulex.Stats.t()`'s - metadata by default. For example: - - * `:cache` - The cache module, or the name (if an explicit name has been - given to the cache). - - Additionally, you can add your own metadata fields by given the option - `:metadata`. - - ## Options + Same as `c:info/2`, but the command is executed on the cache + instance given at the first argument `dynamic_cache`. - * `:event_prefix` – The prefix of the telemetry event. - Defaults to `[:nebulex, :cache]`. - - * `:metadata` – A map with additional metadata fields. Defaults to `%{}`. + See the ["Dynamic caches"](#module-dynamic-caches) section in the + module documentation for more information. ## Examples - iex> MyCache.dispatch_stats() - :ok + MyCache.info(MyCache1, :all, []) - iex> MyCache.Stats.dispatch_stats( - ...> event_prefix: [:my_cache], - ...> metadata: %{tag: "tag1"} - ...> ) - :ok + """ + @doc group: "Info API" + @callback info(dynamic_cache(), spec :: info_spec(), opts()) :: ok_error_tuple(info_data()) - **NOTE:** Since `:telemetry` is an optional dependency, when it is not - defined, a default implementation is provided without any logic, just - returning `:ok`. + @doc """ + Same as `c:info/2` but raises an exception if an error occurs. + """ + @doc group: "Info API" + @callback info!(spec :: info_spec(), opts()) :: info_data() + + @doc """ + Same as `c:info/3` but raises an exception if an error occurs. 
""" - @callback dispatch_stats(opts) :: :ok + @doc group: "Info API" + @callback info!(dynamic_cache(), spec :: info_spec(), opts()) :: info_data() end diff --git a/lib/nebulex/cache/cluster.ex b/lib/nebulex/cache/cluster.ex deleted file mode 100644 index b7833472..00000000 --- a/lib/nebulex/cache/cluster.ex +++ /dev/null @@ -1,102 +0,0 @@ -defmodule Nebulex.Cache.Cluster do - # The module used by cache adapters for - # distributed caching functionality. - @moduledoc false - - @doc """ - Joins the node where the cache `name`'s supervisor process is running to the - `name`'s node group. - """ - @spec join(name :: atom) :: :ok - def join(name) do - pid = Process.whereis(name) || self() - - if pid in pg_members(name) do - :ok - else - :ok = pg_join(name, pid) - end - end - - @doc """ - Makes the node where the cache `name`'s supervisor process is running, leave - the `name`'s node group. - """ - @spec leave(name :: atom) :: :ok - def leave(name) do - pg_leave(name, Process.whereis(name) || self()) - end - - @doc """ - Returns the list of nodes joined to given `name`'s node group. - """ - @spec get_nodes(name :: atom) :: [node] - def get_nodes(name) do - name - |> pg_members() - |> Enum.map(&node/1) - |> :lists.usort() - end - - @doc """ - Selects one node based on the computation of the `key` slot. 
- """ - @spec get_node(name_or_nodes :: atom | [node], Nebulex.Cache.key(), keyslot :: module) :: node - def get_node(name_or_nodes, key, keyslot) - - def get_node(name, key, keyslot) when is_atom(name) do - name - |> get_nodes() - |> get_node(key, keyslot) - end - - def get_node(nodes, key, keyslot) when is_list(nodes) do - Enum.at(nodes, keyslot.hash_slot(key, length(nodes))) - end - - ## PG - - if Code.ensure_loaded?(:pg) do - defp pg_join(name, pid) do - :ok = :pg.join(__MODULE__, name, pid) - end - - defp pg_leave(name, pid) do - _ = :pg.leave(__MODULE__, name, pid) - :ok - end - - defp pg_members(name) do - :pg.get_members(__MODULE__, name) - end - else - # Inline common instructions - @compile {:inline, pg2_namespace: 1} - - defp pg_join(name, pid) do - name - |> ensure_namespace() - |> :pg2.join(pid) - end - - defp pg_leave(name, pid) do - name - |> ensure_namespace() - |> :pg2.leave(pid) - end - - defp pg_members(name) do - name - |> ensure_namespace() - |> :pg2.get_members() - end - - defp ensure_namespace(name) do - namespace = pg2_namespace(name) - :ok = :pg2.create(namespace) - namespace - end - - defp pg2_namespace(name), do: {:nbx, name} - end -end diff --git a/lib/nebulex/cache/entry.ex b/lib/nebulex/cache/entry.ex deleted file mode 100644 index 187baafc..00000000 --- a/lib/nebulex/cache/entry.ex +++ /dev/null @@ -1,246 +0,0 @@ -defmodule Nebulex.Cache.Entry do - @moduledoc false - - import Nebulex.Helpers - - alias Nebulex.{Adapter, Time} - - # Inline common instructions - @compile {:inline, get_ttl: 1} - - @doc """ - Implementation for `c:Nebulex.Cache.get/2`. - """ - def get(name, key, opts) do - Adapter.with_meta(name, & &1.get(&2, key, opts)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.get!/2`. - """ - def get!(name, key, opts) do - if result = get(name, key, opts) do - result - else - raise KeyError, key: key, term: name - end - end - - @doc """ - Implementation for `c:Nebulex.Cache.get_all/2`. 
- """ - def get_all(_name, [], _opts), do: %{} - - def get_all(name, keys, opts) do - Adapter.with_meta(name, & &1.get_all(&2, keys, opts)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.put/3`. - """ - def put(name, key, value, opts) do - true = do_put(name, key, value, :put, opts) - :ok - end - - @doc """ - Implementation for `c:Nebulex.Cache.put_new/3`. - """ - def put_new(name, key, value, opts) do - do_put(name, key, value, :put_new, opts) - end - - @doc """ - Implementation for `c:Nebulex.Cache.put_new!/3`. - """ - def put_new!(name, key, value, opts) do - with false <- put_new(name, key, value, opts) do - raise Nebulex.KeyAlreadyExistsError, cache: name, key: key - end - end - - @doc """ - Implementation for `c:Nebulex.Cache.replace/3`. - """ - def replace(name, key, value, opts) do - do_put(name, key, value, :replace, opts) - end - - @doc """ - Implementation for `c:Nebulex.Cache.replace!/3`. - """ - def replace!(name, key, value, opts) do - with false <- replace(name, key, value, opts) do - raise KeyError, key: key, term: name - end - end - - defp do_put(_name, _key, nil, _on_write, _opts), do: true - - defp do_put(name, key, value, on_write, opts) do - Adapter.with_meta(name, & &1.put(&2, key, value, get_ttl(opts), on_write, opts)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.put_all/2`. - """ - def put_all(name, entries, opts) do - _ = do_put_all(name, entries, :put, opts) - :ok - end - - @doc """ - Implementation for `c:Nebulex.Cache.put_new_all/2`. - """ - def put_new_all(name, entries, opts) do - do_put_all(name, entries, :put_new, opts) - end - - def do_put_all(_name, [], _on_write, _opts), do: true - def do_put_all(_name, entries, _on_write, _opts) when map_size(entries) == 0, do: true - - def do_put_all(name, entries, on_write, opts) do - Adapter.with_meta(name, & &1.put_all(&2, entries, get_ttl(opts), on_write, opts)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.delete/2`. 
- """ - def delete(name, key, opts) do - Adapter.with_meta(name, & &1.delete(&2, key, opts)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.take/2`. - """ - def take(_name, nil, _opts), do: nil - - def take(name, key, opts) do - Adapter.with_meta(name, & &1.take(&2, key, opts)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.take!/2`. - """ - def take!(name, key, opts) do - if result = take(name, key, opts) do - result - else - raise KeyError, key: key, term: name - end - end - - @doc """ - Implementation for `c:Nebulex.Cache.has_key?/1`. - """ - def has_key?(name, key) do - Adapter.with_meta(name, & &1.has_key?(&2, key)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.get_and_update/3`. - """ - def get_and_update(name, key, fun, opts) when is_function(fun, 1) do - Adapter.with_meta(name, fn adapter, adapter_meta -> - current = adapter.get(adapter_meta, key, opts) - - case fun.(current) do - {get, nil} -> - {get, get} - - {get, update} -> - true = adapter.put(adapter_meta, key, update, get_ttl(opts), :put, opts) - {get, update} - - :pop when is_nil(current) -> - {nil, nil} - - :pop -> - :ok = adapter.delete(adapter_meta, key, opts) - {current, nil} - - other -> - raise ArgumentError, - "the given function must return a two-element tuple or :pop," <> - " got: #{inspect(other)}" - end - end) - end - - @doc """ - Implementation for `c:Nebulex.Cache.update/4`. - """ - def update(name, key, initial, fun, opts) do - Adapter.with_meta(name, fn adapter, adapter_meta -> - adapter_meta - |> adapter.get(key, opts) - |> case do - nil -> {initial, nil} - val -> {fun.(val), val} - end - |> case do - {nil, old} -> - # avoid storing nil values - old - - {new, _} -> - true = adapter.put(adapter_meta, key, new, get_ttl(opts), :put, opts) - new - end - end) - end - - @doc """ - Implementation for `c:Nebulex.Cache.incr/3`. 
- """ - def incr(name, key, amount, opts) when is_integer(amount) do - default = get_option(opts, :default, "an integer", &is_integer/1, 0) - Adapter.with_meta(name, & &1.update_counter(&2, key, amount, get_ttl(opts), default, opts)) - end - - def incr(_cache, _key, amount, _opts) do - raise ArgumentError, "expected amount to be an integer, got: #{inspect(amount)}" - end - - @doc """ - Implementation for `c:Nebulex.Cache.decr/3`. - """ - def decr(name, key, amount, opts) when is_integer(amount) do - incr(name, key, amount * -1, opts) - end - - def decr(_cache, _key, amount, _opts) do - raise ArgumentError, "expected amount to be an integer, got: #{inspect(amount)}" - end - - @doc """ - Implementation for `c:Nebulex.Cache.ttl/1`. - """ - def ttl(name, key) do - Adapter.with_meta(name, & &1.ttl(&2, key)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.expire/2`. - """ - def expire(name, key, ttl) do - ttl = - (Time.timeout?(ttl) && ttl) || - raise ArgumentError, "expected ttl to be a valid timeout, got: #{inspect(ttl)}" - - Adapter.with_meta(name, & &1.expire(&2, key, ttl)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.touch/1`. - """ - def touch(name, key) do - Adapter.with_meta(name, & &1.touch(&2, key)) - end - - ## Helpers - - defp get_ttl(opts) do - get_option(opts, :ttl, "a valid timeout", &Time.timeout?/1, :infinity) - end -end diff --git a/lib/nebulex/cache/impl.ex b/lib/nebulex/cache/impl.ex new file mode 100644 index 00000000..869c7c20 --- /dev/null +++ b/lib/nebulex/cache/impl.ex @@ -0,0 +1,33 @@ +defmodule Nebulex.Cache.Impl do + @moduledoc false + + @doc """ + Helper macro for defining the functions implementing the Cache API. 
+ """ + defmacro defcacheapi(fun, to: target) do + {name, args} = Macro.decompose_call(fun) + all_args = defcacheapi_all_args(args) + + quote do + @impl true + def unquote(name)(unquote_splicing(args)) do + unquote(name)( + get_dynamic_cache(), + unquote_splicing(all_args) + ) + end + + @impl true + def unquote(name)(dynamic_cache, unquote_splicing(all_args)) do + unquote(target).unquote(name)(dynamic_cache, unquote_splicing(all_args)) + end + end + end + + defp defcacheapi_all_args(args) do + Enum.map(args, fn + {:\\, _, [arg, _]} -> arg + arg -> arg + end) + end +end diff --git a/lib/nebulex/cache/info.ex b/lib/nebulex/cache/info.ex new file mode 100644 index 00000000..fea74dee --- /dev/null +++ b/lib/nebulex/cache/info.ex @@ -0,0 +1,25 @@ +defmodule Nebulex.Cache.Info do + @moduledoc false + + import Nebulex.Adapter, only: [defcommandp: 2] + import Nebulex.Utils, only: [unwrap_or_raise: 1] + + ## API + + @doc """ + Implementation for `c:Nebulex.Cache.info/2`. + """ + def info(name, spec, opts) when is_atom(spec) or is_list(spec) do + do_info(name, spec, opts) + end + + @compile {:inline, do_info: 3} + defcommandp do_info(name, spec, opts), command: :info + + @doc """ + Implementation for `c:Nebulex.Cache.info!/2`. + """ + def info!(name, item, opts) do + unwrap_or_raise info(name, item, opts) + end +end diff --git a/lib/nebulex/cache/kv.ex b/lib/nebulex/cache/kv.ex new file mode 100644 index 00000000..00351d9b --- /dev/null +++ b/lib/nebulex/cache/kv.ex @@ -0,0 +1,341 @@ +defmodule Nebulex.Cache.KV do + @moduledoc false + + import Nebulex.Adapter + import Nebulex.Utils, only: [unwrap_or_raise: 1] + + alias Nebulex.Cache.Options + alias Nebulex.Time + + @doc """ + Implementation for `c:Nebulex.Cache.fetch/2`. + """ + defcommand fetch(name, key, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.fetch!/2`. + """ + def fetch!(name, key, opts) do + unwrap_or_raise fetch(name, key, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.get/3`. 
+ """ + def get(name, key, default, opts) do + with_meta(name, &do_get(&1, key, default, opts)) + end + + defp do_get(adapter_meta, key, default, opts) do + with {:error, %Nebulex.KeyError{key: ^key}} <- run_command(adapter_meta, :fetch, [key, opts]) do + {:ok, default} + end + end + + @doc """ + Implementation for `c:Nebulex.Cache.get!/3`. + """ + def get!(name, key, default, opts) do + unwrap_or_raise get(name, key, default, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.put/3`. + """ + def put(name, key, value, opts) do + with {:ok, _} <- do_put(name, key, value, :put, opts) do + :ok + end + end + + @doc """ + Implementation for `c:Nebulex.Cache.put!/3`. + """ + def put!(name, key, value, opts) do + _ = unwrap_or_raise do_put(name, key, value, :put, opts) + + :ok + end + + @doc """ + Implementation for `c:Nebulex.Cache.put_new/3`. + """ + def put_new(name, key, value, opts) do + do_put(name, key, value, :put_new, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.put_new!/3`. + """ + def put_new!(name, key, value, opts) do + unwrap_or_raise put_new(name, key, value, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.replace/3`. + """ + def replace(name, key, value, opts) do + do_put(name, key, value, :replace, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.replace!/3`. + """ + def replace!(name, key, value, opts) do + unwrap_or_raise replace(name, key, value, opts) + end + + defp do_put(name, key, value, on_write, opts) do + {ttl, opts} = Options.pop_and_validate_timeout(opts, :ttl) + + do_put(name, key, value, ttl, on_write, opts) + end + + @compile {:inline, do_put: 6} + defcommandp do_put(name, key, value, ttl, on_write, opts), command: :put + + @doc """ + Implementation for `c:Nebulex.Cache.put_all/2`. + """ + def put_all(name, entries, opts) do + with {:ok, _} <- do_put_all(name, entries, :put, opts) do + :ok + end + end + + @doc """ + Implementation for `c:Nebulex.Cache.put_all!/2`. 
+ """ + def put_all!(name, entries, opts) do + _ = unwrap_or_raise do_put_all(name, entries, :put, opts) + + :ok + end + + @doc """ + Implementation for `c:Nebulex.Cache.put_new_all/2`. + """ + def put_new_all(name, entries, opts) do + do_put_all(name, entries, :put_new, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.put_new_all!/2`. + """ + def put_new_all!(name, entries, opts) do + unwrap_or_raise put_new_all(name, entries, opts) + end + + def do_put_all(_name, [], _on_write, _opts) do + {:ok, true} + end + + def do_put_all(_name, %{} = entries, _on_write, _opts) when map_size(entries) == 0 do + {:ok, true} + end + + def do_put_all(name, entries, on_write, opts) do + {ttl, opts} = Options.pop_and_validate_timeout(opts, :ttl) + + do_put_all(name, entries, ttl, on_write, opts) + end + + @compile {:inline, do_put_all: 5} + defcommandp do_put_all(name, entries, ttl, on_write, opts), command: :put_all + + @doc """ + Implementation for `c:Nebulex.Cache.delete/2`. + """ + defcommand delete(name, key, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.delete!/2`. + """ + def delete!(name, key, opts) do + unwrap_or_raise delete(name, key, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.take/2`. + """ + defcommand take(name, key, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.take!/2`. + """ + def take!(name, key, opts) do + unwrap_or_raise take(name, key, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.has_key?/1`. + """ + defcommand has_key?(name, key, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.incr/3`. 
+ """ + def incr(name, key, amount, opts) + + def incr(name, key, amount, opts) when is_integer(amount) do + {ttl, opts} = Options.pop_and_validate_timeout(opts, :ttl) + {default, opts} = Options.pop_and_validate_integer(opts, :default) + + do_incr(name, key, amount, ttl, default, opts) + end + + def incr(_name, _key, amount, _opts) do + raise ArgumentError, + "invalid value for amount argument: expected integer, got: #{inspect(amount)}" + end + + @compile {:inline, do_incr: 6} + defcommandp do_incr(name, key, amount, ttl, default, opts), command: :update_counter + + @doc """ + Implementation for `c:Nebulex.Cache.incr!/3`. + """ + def incr!(name, key, amount, opts) do + unwrap_or_raise incr(name, key, amount, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.decr/3`. + """ + def decr(name, key, amount, opts) + + def decr(name, key, amount, opts) when is_integer(amount) do + incr(name, key, amount * -1, opts) + end + + def decr(_cache, _key, amount, _opts) do + raise ArgumentError, + "invalid value for amount argument: expected integer, got: #{inspect(amount)}" + end + + @doc """ + Implementation for `c:Nebulex.Cache.decr!/3`. + """ + def decr!(name, key, amount, opts) do + unwrap_or_raise decr(name, key, amount, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.ttl/1`. + """ + defcommand ttl(name, key, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.ttl!/1`. + """ + def ttl!(name, key, opts) do + unwrap_or_raise ttl(name, key, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.expire/2`. + """ + def expire(name, key, ttl, opts) do + ttl = + (Time.timeout?(ttl) && ttl) || + raise ArgumentError, "expected ttl to be a valid timeout, got: #{inspect(ttl)}" + + do_expire(name, key, ttl, opts) + end + + @compile {:inline, do_expire: 4} + defcommandp do_expire(name, key, ttl, opts), command: :expire + + @doc """ + Implementation for `c:Nebulex.Cache.expire!/2`. 
+ """ + def expire!(name, key, ttl, opts) do + unwrap_or_raise expire(name, key, ttl, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.touch/1`. + """ + defcommand touch(name, key, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.touch!/1`. + """ + def touch!(name, key, opts) do + unwrap_or_raise touch(name, key, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.get_and_update/3`. + """ + def get_and_update(name, key, fun, opts) when is_function(fun, 1) do + {ttl, opts} = Options.pop_and_validate_timeout(opts, :ttl) + + with_meta(name, fn adapter_meta -> + with {:ok, current} <- do_get(adapter_meta, key, nil, opts) do + {:ok, eval_get_and_update_function(current, adapter_meta, key, ttl, opts, fun)} + end + end) + end + + defp eval_get_and_update_function(current, adapter_meta, key, ttl, opts, fun) do + case fun.(current) do + {get, nil} -> + {get, get} + + {get, update} -> + {:ok, true} = run_command(adapter_meta, :put, [key, update, ttl, :put, opts]) + + {get, update} + + :pop when is_nil(current) -> + {nil, nil} + + :pop -> + :ok = run_command(adapter_meta, :delete, [key, opts]) + + {current, nil} + + other -> + raise ArgumentError, + "the given function must return a two-element tuple or :pop," <> + " got: #{inspect(other)}" + end + end + + @doc """ + Implementation for `c:Nebulex.Cache.get_and_update!/3`. + """ + def get_and_update!(name, key, fun, opts) do + unwrap_or_raise get_and_update(name, key, fun, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.update/4`. 
+ """ + def update(name, key, initial, fun, opts) when is_function(fun, 1) do + {ttl, opts} = Options.pop_and_validate_timeout(opts, :ttl) + + with_meta(name, fn adapter_meta -> + value = + case run_command(adapter_meta, :fetch, [key, opts]) do + {:ok, value} -> fun.(value) + {:error, %Nebulex.KeyError{key: ^key}} -> initial + {:error, _} = error -> throw({:return, error}) + end + + with {:ok, true} <- run_command(adapter_meta, :put, [key, value, ttl, :put, opts]) do + {:ok, value} + end + end) + catch + {:return, error} -> error + end + + @doc """ + Implementation for `c:Nebulex.Cache.update!/4`. + """ + def update!(name, key, initial, fun, opts) do + unwrap_or_raise update(name, key, initial, fun, opts) + end +end diff --git a/lib/nebulex/cache/options.ex b/lib/nebulex/cache/options.ex new file mode 100644 index 00000000..57c708fb --- /dev/null +++ b/lib/nebulex/cache/options.ex @@ -0,0 +1,292 @@ +defmodule Nebulex.Cache.Options do + @moduledoc false + + alias Nebulex.{Time, Utils} + + # Compilation time option definitions + compile_opts = [ + otp_app: [ + type: :atom, + required: true, + doc: """ + The OTP application the cache configuration is under. + """ + ], + adapter: [ + type: {:custom, __MODULE__, :__validate_behaviour__, [Nebulex.Adapter, "adapter"]}, + type_doc: "`t:module/0`", + required: true, + doc: """ + The cache adapter module. + """ + ], + default_dynamic_cache: [ + type: :atom, + required: false, + doc: """ + Default dynamic cache for executing cache commands. Set to the + defined cache module by default. For example, when you call + `MyApp.Cache.start_link/1`, it will start a cache with the name + `MyApp.Cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section + for more information. 
+ """ + ] + ] + + # Start option definitions (runtime) + start_link_opts = [ + name: [ + type: {:custom, __MODULE__, :__validate_name__, []}, + type_doc: "`t:atom/0` | `{:via, reg_mod :: module(), via_name :: any()}`", + required: false, + doc: """ + The name of the supervisor process the cache is started under. + Set to the defined cache module by default. For example, when + you call `MyApp.Cache.start_link/1`, a cache named `MyApp.Cache` + is started. + """ + ], + telemetry: [ + type: :boolean, + required: false, + default: true, + doc: """ + A flag to determine whether to emit the Telemetry cache command events. + """ + ], + telemetry_prefix: [ + type: {:list, :atom}, + required: false, + default: [:nebulex, :cache], + doc: """ + Nebulex emits cache events using the [Telemetry](`:telemetry`) library. + See the ["Telemetry events"](#module-telemetry-events) section to see + which events are emitted by Nebulex out-of-box. + + Note that if you have multiple caches (or dynamic caches), since the + `:adapter_meta` property is available within the event metadata, you can + use the `:cache` or `:name` properties (or both) to distinguish between + caches. Alternatively, you can use different `:telemetry_prefix` values. + """ + ], + bypass_mode: [ + type: :boolean, + required: false, + default: false, + doc: """ + If `true`, the cache calls are skipped by overwriting the configured + adapter with `Nebulex.Adapters.Nil` when the cache starts. This option + is handy for tests if you want to disable or bypass the cache while + running the tests. + """ + ] + ] + + # Shared option definitions (runtime) + runtime_shared_opts = [ + timeout: [ + type: :timeout, + required: false, + default: :infinity, + doc: """ + The time in **milliseconds** to wait for a command to finish + (`:infinity` to wait indefinitely). + + > #### Timeout option {: .warning} + > + > Despite being a shared option accepted by almost all cache functions, + > it is up to the adapter to support it. 
+ """ + ], + telemetry_event: [ + type: {:list, :atom}, + required: false, + doc: """ + The telemetry event name to dispatch the event under. Defaults to what + is configured in the `:telemetry_prefix` option. See the + ["Telemetry events"](#module-telemetry-events) section + for more information. + """ + ], + telemetry_metadata: [ + type: {:map, :any, :any}, + required: false, + default: %{}, + doc: """ + Extra metadata to add to the Telemetry cache command events. + These end up in the `:extra_metadata` metadata key of these events. + + See the ["Telemetry events"](#module-telemetry-events) section + for more information. + """ + ] + ] + + # Runtime common option definitions for write operations + runtime_common_write_opts = [ + ttl: [ + type: :timeout, + required: false, + default: :infinity, + doc: """ + The key's time-to-live (or expiry time) in **milliseconds**. + """ + ] + ] + + # Runtime option definitions for updating counter + update_counter_opts = [ + default: [ + type: :integer, + required: false, + default: 0, + doc: """ + If the key is not present in the cache, the default value is inserted as + the key's initial value before it is incremented. 
+ """ + ] + ] + + # Compilation time options schema + @compile_opts_schema NimbleOptions.new!(compile_opts) + + # Start options schema + @start_link_opts_schema NimbleOptions.new!(start_link_opts) + + # Shared options schema + @runtime_shared_opts_schema NimbleOptions.new!(runtime_shared_opts) + + # Runtime common write operations schema + @runtime_common_write_opts_schema NimbleOptions.new!(runtime_common_write_opts) + + # Update counter options schema + @update_counter_opts_schema NimbleOptions.new!(runtime_common_write_opts ++ update_counter_opts) + + ## Docs API + + # coveralls-ignore-start + + @spec compile_options_docs() :: binary() + def compile_options_docs do + NimbleOptions.docs(@compile_opts_schema) + end + + @spec start_link_options_docs() :: binary() + def start_link_options_docs do + NimbleOptions.docs(@start_link_opts_schema) + end + + @spec runtime_shared_options_docs() :: binary() + def runtime_shared_options_docs do + NimbleOptions.docs(@runtime_shared_opts_schema) + end + + @spec runtime_common_write_options_docs() :: binary() + def runtime_common_write_options_docs do + NimbleOptions.docs(@runtime_common_write_opts_schema) + end + + @spec update_counter_options_docs() :: binary() + def update_counter_options_docs do + NimbleOptions.docs(@update_counter_opts_schema) + end + + # coveralls-ignore-stop + + ## Validation API + + @spec validate_compile_opts!(keyword()) :: keyword() + def validate_compile_opts!(opts) do + NimbleOptions.validate!(opts, @compile_opts_schema) + end + + @spec validate_start_opts!(keyword()) :: keyword() + def validate_start_opts!(opts) do + start_link_opts = + opts + |> Keyword.take(Keyword.keys(@start_link_opts_schema.schema)) + |> NimbleOptions.validate!(@start_link_opts_schema) + + Keyword.merge(opts, start_link_opts) + end + + @spec validate_runtime_shared_opts!(keyword()) :: keyword() + def validate_runtime_shared_opts!(opts) do + NimbleOptions.validate!(opts, @runtime_shared_opts_schema) + end + + @doc false + def 
__validate_name__(name) + + def __validate_name__(name) when is_atom(name) do + {:ok, name} + end + + def __validate_name__({:via, _reg_mod, _reg_name}) do + {:ok, nil} + end + + @doc false + def __validate_behaviour__(value, behaviour, msg) when is_atom(value) do + with {:module, module} <- Code.ensure_compiled(value), + behaviours = Utils.module_behaviours(module), + true <- behaviour in behaviours do + {:ok, module} + else + {:error, _} -> + msg = + "#{msg} #{inspect(value)} was not compiled, " <> + "ensure it is correct and it is included as a project dependency" + + {:error, msg} + + false -> + msg = + "expected the #{msg} module given to Nebulex.Cache " <> + "to list #{inspect(behaviour)} as a behaviour" + + {:error, msg} + end + end + + def __validate_behaviour__(value, _behaviour, _msg) do + {:error, "expected a module, got: #{inspect(value)}"} + end + + ## Extras + + @spec pop_and_validate_timeout(keyword(), any()) :: {timeout(), keyword()} + def pop_and_validate_timeout(opts, key) do + case Keyword.pop(opts, key) do + {nil, opts} -> + {:infinity, opts} + + {ttl, opts} -> + if not Time.timeout?(ttl) do + raise ArgumentError, + "invalid value for #{inspect(key)} option: expected " <> + "non-negative integer or :infinity, got: #{inspect(ttl)}" + end + + {ttl, opts} + end + end + + @spec pop_and_validate_integer(keyword(), any()) :: {integer(), keyword()} + def pop_and_validate_integer(opts, key) do + case Keyword.pop(opts, key) do + {nil, opts} -> + {0, opts} + + {val, opts} when is_integer(val) -> + {val, opts} + + {val, _opts} -> + raise ArgumentError, + "invalid value for #{inspect(key)} option: expected integer, " <> + "got: #{inspect(val)}" + end + end +end diff --git a/lib/nebulex/cache/persistence.ex b/lib/nebulex/cache/persistence.ex index 133cf988..3b6dd543 100644 --- a/lib/nebulex/cache/persistence.ex +++ b/lib/nebulex/cache/persistence.ex @@ -1,19 +1,30 @@ defmodule Nebulex.Cache.Persistence do @moduledoc false - alias Nebulex.Adapter + import 
Nebulex.Adapter, only: [defcommand: 1] + import Nebulex.Utils, only: [unwrap_or_raise: 1] @doc """ Implementation for `c:Nebulex.Cache.dump/2`. """ - def dump(name, path, opts) do - Adapter.with_meta(name, & &1.dump(&2, path, opts)) + defcommand dump(name, path, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.dump!/2`. + """ + def dump!(name, path, opts) do + unwrap_or_raise dump(name, path, opts) end @doc """ Implementation for `c:Nebulex.Cache.load/2`. """ - def load(name, path, opts) do - Adapter.with_meta(name, & &1.load(&2, path, opts)) + defcommand load(name, path, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.load!/2`. + """ + def load!(name, path, opts) do + unwrap_or_raise load(name, path, opts) end end diff --git a/lib/nebulex/cache/query_spec.ex b/lib/nebulex/cache/query_spec.ex new file mode 100644 index 00000000..09224055 --- /dev/null +++ b/lib/nebulex/cache/query_spec.ex @@ -0,0 +1,81 @@ +defmodule Nebulex.Cache.QuerySpec do + # A query specification is a `t:keyword/0` with a set of options defining + # the desired query. + @moduledoc false + + # Query-spec option definitions + query_spec = [ + in: [ + type: {:list, :any}, + required: false, + doc: """ + The list of keys to fetch. The value to return depends on the `:select` + option. The `:in` option is a predefined query meant to fetch multiple + keys simultaneously. + + If present, it overrides the `:query` option and instructs the underlying + adapter to match the entries associated with the set of keys requested. + For every key that does not hold a value or does not exist, it is ignored + and not added to the returned list. + """ + ], + query: [ + type: :any, + required: false, + default: nil, + doc: """ + The query specification to match entries in the cache. + + If present and set to `nil`, it matches all entries in the cache. The + `nil` is a predefined value all adapters must support. Other than that, + the value depends entirely on the adapter. 
The adapter is responsible + for defining the query or matching specification. For example, the + `Nebulex.Adapters.Local` adapter supports the + [**"ETS Match Spec"**](https://www.erlang.org/doc/man/ets#match_spec). + """ + ], + select: [ + type: {:in, [:key, :value, {:key, :value}, :entry]}, + required: false, + default: {:key, :value}, + doc: """ + Selects which fields to choose from the entry. + + The possible values are: + + * `{:key, :value}` - (Default) Selects the key and the value from + the entry. They are returned as a tuple `{key, value}`. + * `:key` - Selects the key from the entry. + * `:value` - Selects the value from the entry. + * `:entry` - Selects the whole entry with its fields (use it carefully). + The adapter defines the entry, the structure, and its fields. + Therefore, Nebulex recommends seeing the adapter's documentation to + understand the entry's structure or type along with its fields and to + verify if the select option is supported. + + """ + ] + ] + + # Query options schema + @query_spec_schema NimbleOptions.new!(query_spec) + + ## Docs API + + # coveralls-ignore-start + + @spec options_docs() :: binary() + def options_docs do + NimbleOptions.docs(@query_spec_schema) + end + + # coveralls-ignore-stop + + ## Validation API + + @compile {:inline, validate!: 1} + @spec validate!(keyword()) :: keyword() + def validate!(opts) do + NimbleOptions.validate!(opts, @query_spec_schema) + end +end diff --git a/lib/nebulex/cache/queryable.ex b/lib/nebulex/cache/queryable.ex index 154a9ecc..8bf963aa 100644 --- a/lib/nebulex/cache/queryable.ex +++ b/lib/nebulex/cache/queryable.ex @@ -1,36 +1,101 @@ defmodule Nebulex.Cache.Queryable do @moduledoc false - alias Nebulex.Adapter + import Nebulex.Adapter, only: [defcommandp: 1, defcommandp: 2] + import Nebulex.Utils, only: [unwrap_or_raise: 1] - @default_page_size 20 + alias Nebulex.Cache.QuerySpec + + # Default max entries to load from the cache as we stream + @default_max_entries 100 + + @doc """ + 
Implementation for `c:Nebulex.Cache.get_all/2`. + """ + def get_all(name, query_spec, opts) do + execute(name, query_meta(query_spec, :get_all), opts) + end @doc """ - Implementation for `c:Nebulex.Cache.all/2`. + Implementation for `c:Nebulex.Cache.get_all!/2`. """ - def all(name, query, opts) do - Adapter.with_meta(name, & &1.execute(&2, :all, query, opts)) + def get_all!(name, query_spec, opts) do + unwrap_or_raise get_all(name, query_spec, opts) end @doc """ Implementation for `c:Nebulex.Cache.count_all/2`. """ - def count_all(name, query, opts) do - Adapter.with_meta(name, & &1.execute(&2, :count_all, query, opts)) + def count_all(name, query_spec, opts) do + execute(name, query_meta(query_spec, :count_all), opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.count_all!/2`. + """ + def count_all!(name, query_spec, opts) do + unwrap_or_raise count_all(name, query_spec, opts) end @doc """ Implementation for `c:Nebulex.Cache.delete_all/2`. """ - def delete_all(name, query, opts) do - Adapter.with_meta(name, & &1.execute(&2, :delete_all, query, opts)) + def delete_all(name, query_spec, opts) do + execute(name, query_meta(query_spec, :delete_all), opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.delete_all!/2`. + """ + def delete_all!(name, query_spec, opts) do + unwrap_or_raise delete_all(name, query_spec, opts) end @doc """ Implementation for `c:Nebulex.Cache.stream/2`. """ - def stream(name, query, opts) do - opts = Keyword.put_new(opts, :page_size, @default_page_size) - Adapter.with_meta(name, & &1.stream(&2, query, opts)) + def stream(name, query_spec, opts) do + opts = Keyword.put_new(opts, :max_entries, @default_max_entries) + + with {:ok, stream} <- do_stream(name, query_meta(query_spec, :stream), opts) do + {:ok, Stream.flat_map(stream, & &1)} + end + end + + @doc """ + Implementation for `c:Nebulex.Cache.stream!/2`. 
+ """ + def stream!(name, query, opts) do + unwrap_or_raise stream(name, query, opts) + end + + ## Private functions + + # Inline common instructions + @compile {:inline, execute: 3, do_stream: 3} + + # Execute wrapper + defcommandp execute(name, query_meta, opts) + + # Stream wrapper + defcommandp do_stream(name, query_spec, opts), command: :stream + + # Helper for building the query + defp query_meta(query_spec, op) when is_list(query_spec) do + query_spec = QuerySpec.validate!(query_spec) + + select = Keyword.fetch!(query_spec, :select) + + query = + case Keyword.fetch(query_spec, :in) do + {:ok, keys} -> {:in, keys} + :error -> {:q, Keyword.fetch!(query_spec, :query)} + end + + %{op: op, select: select, query: query} + end + + defp query_meta(query_spec, _op) do + raise ArgumentError, "invalid query spec: expected a keyword list, got: #{inspect(query_spec)}" end end diff --git a/lib/nebulex/cache/registry.ex b/lib/nebulex/cache/registry.ex index 32a7869a..e80a394a 100644 --- a/lib/nebulex/cache/registry.ex +++ b/lib/nebulex/cache/registry.ex @@ -3,49 +3,76 @@ defmodule Nebulex.Cache.Registry do use GenServer + import Nebulex.Utils, only: [wrap_error: 2] + ## API - @spec start_link(Keyword.t()) :: GenServer.on_start() + @spec start_link(keyword) :: GenServer.on_start() def start_link(_opts) do GenServer.start_link(__MODULE__, :ok, name: __MODULE__) end - @spec register(pid, term) :: :ok - def register(pid, value) when is_pid(pid) do - GenServer.call(__MODULE__, {:register, pid, value}) + @spec register(pid, atom, term) :: :ok + def register(pid, name, value) when is_pid(pid) and is_atom(name) do + GenServer.call(__MODULE__, {:register, pid, name, value}) end - @spec lookup(atom | pid) :: term + @spec lookup(atom | pid) :: {:ok, term} | {:error, Nebulex.Error.t()} + def lookup(name_or_pid) + def lookup(name) when is_atom(name) do - name - |> GenServer.whereis() - |> Kernel.||(raise Nebulex.RegistryLookupError, name: name) - |> lookup() + if pid = 
GenServer.whereis(name) do + lookup(pid) + else + wrap_error Nebulex.Error, reason: :registry_lookup_error, cache: name + end end def lookup(pid) when is_pid(pid) do - {_ref, value} = :persistent_term.get({__MODULE__, pid}) - value + case :persistent_term.get({__MODULE__, pid}, nil) do + {_ref, _name, value} -> + {:ok, value} + + nil -> + wrap_error Nebulex.Error, reason: :registry_lookup_error, cache: pid + end + end + + @spec all_running() :: [atom | pid] + def all_running do + for {{__MODULE__, pid}, {_ref, name, _value}} <- :persistent_term.get() do + name || pid + end end ## GenServer Callbacks @impl true def init(:ok) do - {:ok, :ok} + {:ok, nil} end @impl true - def handle_call({:register, pid, value}, _from, state) do + def handle_call({:register, pid, name, value}, _from, state) do + # Monitor the process so that when it is down it can be removed ref = Process.monitor(pid) - :ok = :persistent_term.put({__MODULE__, pid}, {ref, value}) + + # Store the process data + :ok = :persistent_term.put({__MODULE__, pid}, {ref, name, value}) + + # Reply with success {:reply, :ok, state} end @impl true def handle_info({:DOWN, ref, _type, pid, _reason}, state) do - {^ref, _} = :persistent_term.get({__MODULE__, pid}) + # Check the process reference + {^ref, _, _} = :persistent_term.get({__MODULE__, pid}) + + # Remove the process data _ = :persistent_term.erase({__MODULE__, pid}) + + # Continue {:noreply, state} end end diff --git a/lib/nebulex/cache/stats.ex b/lib/nebulex/cache/stats.ex deleted file mode 100644 index 1a2b9611..00000000 --- a/lib/nebulex/cache/stats.ex +++ /dev/null @@ -1,39 +0,0 @@ -defmodule Nebulex.Cache.Stats do - @moduledoc false - - alias Nebulex.Adapter - - ## API - - @doc """ - Implementation for `c:Nebulex.Cache.stats/0`. - """ - def stats(name) do - Adapter.with_meta(name, & &1.stats(&2)) - end - - if Code.ensure_loaded?(:telemetry) do - @doc """ - Implementation for `c:Nebulex.Cache.dispatch_stats/1`. 
- """ - def dispatch_stats(name, opts \\ []) do - Adapter.with_meta(name, fn adapter, meta -> - with true <- is_list(meta.telemetry_prefix), - %Nebulex.Stats{} = info <- adapter.stats(meta) do - :telemetry.execute( - meta.telemetry_prefix ++ [:stats], - info.measurements, - Map.merge(info.metadata, opts[:metadata] || %{}) - ) - else - _ -> :ok - end - end) - end - else - @doc """ - Implementation for `c:Nebulex.Cache.dispatch_stats/1`. - """ - def dispatch_stats(_name, _opts \\ []), do: :ok - end -end diff --git a/lib/nebulex/cache/supervisor.ex b/lib/nebulex/cache/supervisor.ex index 352729ec..3aea3e31 100644 --- a/lib/nebulex/cache/supervisor.ex +++ b/lib/nebulex/cache/supervisor.ex @@ -1,30 +1,35 @@ defmodule Nebulex.Cache.Supervisor do @moduledoc false + use Supervisor - import Nebulex.Helpers + import Nebulex.Cache.Options + import Nebulex.Utils alias Nebulex.Telemetry @doc """ Starts the cache manager supervisor. """ + @spec start_link(module(), atom(), module(), keyword()) :: Supervisor.on_start() def start_link(cache, otp_app, adapter, opts) do - sup_opts = if name = Keyword.get(opts, :name, cache), do: [name: name], else: [] - Supervisor.start_link(__MODULE__, {cache, otp_app, adapter, opts}, sup_opts) + name = Keyword.get(opts, :name, cache) + sup_opts = if name, do: [name: name], else: [] + + Supervisor.start_link(__MODULE__, {name, cache, otp_app, adapter, opts}, sup_opts) end @doc """ Retrieves the runtime configuration. 
""" + @spec runtime_config(module(), atom(), keyword()) :: {:ok, keyword()} | :ignore def runtime_config(cache, otp_app, opts) do config = otp_app |> Application.get_env(cache, []) |> Keyword.merge(opts) |> Keyword.put(:otp_app, otp_app) - |> Keyword.put_new_lazy(:telemetry_prefix, fn -> telemetry_prefix(cache) end) - |> Keyword.update(:telemetry, true, &(is_boolean(&1) && &1)) + |> validate_start_opts!() cache_init(cache, config) end @@ -40,54 +45,75 @@ defmodule Nebulex.Cache.Supervisor do @doc """ Retrieves the compile time configuration. """ + @spec compile_config(keyword()) :: {atom(), module(), [module()], keyword()} def compile_config(opts) do - otp_app = opts[:otp_app] || raise ArgumentError, "expected otp_app: to be given as argument" - adapter = opts[:adapter] || raise ArgumentError, "expected adapter: to be given as argument" + # Validate options + opts = validate_compile_opts!(opts) - behaviours = module_behaviours(adapter, "adapter") + otp_app = Keyword.fetch!(opts, :otp_app) + adapter = Keyword.fetch!(opts, :adapter) + behaviours = module_behaviours(adapter) - unless Nebulex.Adapter in behaviours do - raise ArgumentError, - "expected :adapter option given to Nebulex.Cache to list Nebulex.Adapter as a behaviour" - end - - {otp_app, adapter, behaviours} + {otp_app, adapter, behaviours, opts} end ## Supervisor Callbacks @impl true - def init({cache, otp_app, adapter, opts}) do + def init({name, cache, otp_app, adapter, opts}) do + # Normalize name to atom, ignore via/global names + name = if is_atom(name), do: name, else: nil + case runtime_config(cache, otp_app, opts) do {:ok, opts} -> - Telemetry.execute( - [:nebulex, :cache, :init], - %{system_time: System.system_time()}, - %{cache: cache, opts: opts} - ) - + # Dispatch Telemetry event notifying the cache is started + :ok = + Telemetry.execute( + [:nebulex, :cache, :init], + %{system_time: System.system_time()}, + %{name: name, cache: cache, opts: opts} + ) + + # Check if bypass_mode is enabled to set 
the bypass adapter + adapter = maybe_set_bypass_adapter(adapter, opts) + + # Init the adapter {:ok, child, meta} = adapter.init([cache: cache] ++ opts) - meta = Map.put(meta, :cache, cache) - child_spec = wrap_child_spec(child, [adapter, meta]) + + # Add required keys to the metadata + meta = + Map.merge(meta, %{ + name: name, + cache: cache, + adapter: adapter, + telemetry: Keyword.fetch!(opts, :telemetry), + telemetry_prefix: Keyword.fetch!(opts, :telemetry_prefix), + bypass_mode: Keyword.fetch!(opts, :bypass_mode) + }) + + # Build child spec + child_spec = wrap_child_spec(child, [name, meta]) + + # Init the cache supervisor Supervisor.init([child_spec], strategy: :one_for_one, max_restarts: 0) - other -> - other + :ignore -> + :ignore end end ## Helpers @doc false - def start_child({mod, fun, args}, adapter, meta) do - case apply(mod, fun, args) do - {:ok, pid} -> - meta = Map.put(meta, :pid, pid) - :ok = Nebulex.Cache.Registry.register(self(), {adapter, meta}) - {:ok, pid} - - other -> - other + def start_child({mod, fun, args}, name, meta) do + with {:ok, pid} <- apply(mod, fun, args) do + # Add the PID to the metadata + meta = Map.put(meta, :pid, pid) + + # Register the started cache's pid + :ok = Nebulex.Cache.Registry.register(self(), name, meta) + + {:ok, pid} end end @@ -99,10 +125,10 @@ defmodule Nebulex.Cache.Supervisor do %{spec | start: {__MODULE__, :start_child, [start | args]}} end - # sobelow_skip ["DOS.StringToAtom"] - defp telemetry_prefix(cache) do - cache - |> Module.split() - |> Enum.map(&(&1 |> Macro.underscore() |> String.to_atom())) + defp maybe_set_bypass_adapter(adapter, opts) do + case Keyword.fetch!(opts, :bypass_mode) do + true -> Nebulex.Adapters.Nil + false -> adapter + end end end diff --git a/lib/nebulex/cache/transaction.ex b/lib/nebulex/cache/transaction.ex index c23fbd7d..863c95ee 100644 --- a/lib/nebulex/cache/transaction.ex +++ b/lib/nebulex/cache/transaction.ex @@ -1,19 +1,20 @@ defmodule Nebulex.Cache.Transaction do 
@moduledoc false - alias Nebulex.Adapter + import Nebulex.Adapter, only: [defcommand: 1, defcommandp: 2] @doc """ Implementation for `c:Nebulex.Cache.transaction/2`. """ - def transaction(name, fun, opts) do - Adapter.with_meta(name, & &1.transaction(&2, fun, opts)) + def transaction(name, fun, opts) when is_function(fun, 0) do + do_transaction(name, fun, opts) end + @compile {:inline, do_transaction: 3} + defcommandp do_transaction(name, fun, opts), command: :transaction + @doc """ - Implementation for `c:Nebulex.Cache.in_transaction?/0`. + Implementation for `c:Nebulex.Cache.in_transaction?/1`. """ - def in_transaction?(name) do - Adapter.with_meta(name, & &1.in_transaction?(&2)) - end + defcommand in_transaction?(name, opts) end diff --git a/lib/nebulex/caching.ex b/lib/nebulex/caching.ex index bd8aa3ca..e311b92e 100644 --- a/lib/nebulex/caching.ex +++ b/lib/nebulex/caching.ex @@ -14,31 +14,103 @@ if Code.ensure_loaded?(Decorator.Define) do without having to actually execute the function again. The caching logic is applied transparently without any interference to the invoker. - See **`Nebulex.Caching.Decorators`** for more information about - **"Declarative annotation-based caching"**. + Continue checking **`Nebulex.Caching.Decorators`** to learn more about the + caching decorators and their usage. + + ## Compilation time options + + The following are the available compilation time options when defining + the caching usage via `use Nebulex.Caching`: + + #{Nebulex.Caching.Options.caching_options_docs()} + + > #### `use Nebulex.Caching, opts` {: .info} + > + > These options apply to all decorated functions in a module, but each + > decorator declaration can overwrite them. They act as a global or default + > configuration for the decorators. For example, if the cache is the same + > for all decorated functions in a module, one can configure it globally + > like this: `use Nebulex.Caching, cache: MyCache`. 
Therefore, the decorator + > declaration doesn't require the `:cache` option. """ + alias Nebulex.Caching.{Decorators, Options} + @doc false - defmacro __using__(_opts) do - quote do + defmacro __using__(opts \\ []) do + quote bind_quoted: [opts: opts] do + # Validate options + opts = + opts + |> Macro.escape() + |> Options.validate_caching_opts!() + + # Set the __using__ macro options so they can be used in the decorators + :ok = Module.put_attribute(__MODULE__, :__caching_opts__, opts) + use Nebulex.Caching.Decorators + import Nebulex.Caching end end - alias Nebulex.Caching.Decorators + @doc """ + Creates a dynamic cache tuple form to use in the decorated function + (wrapper macro for `Nebulex.Caching.Decorators.dynamic_cache_spec/2`). + + The first argument, `cache`, specifies the defined cache module, + and the second argument, `name`, is the actual name of the cache. + + > #### Using `dynamic_cache` {: .info} + > + > This macro is automatically imported and then available when using + > `use Nebulex.Caching`. + + ## Example + + defmodule MyApp.Users do + use Nebulex.Caching + + @decorate cacheable(cache: dynamic_cache(MyApp.Cache, :users)) + def get_user(id) do + # your logic ... + end + end + + See the **"`:cache` option"** section in the `Nebulex.Caching.Decorators` + module documentation for more information. + """ + defmacro dynamic_cache(cache, name) do + quote do + Decorators.dynamic_cache_spec(unquote(cache), unquote(name)) + end + end @doc """ - A wrapper macro for `Nebulex.Caching.Decorators.build_keyref/2`. + Creates a reference tuple form to use in the decorated function + (wrapper macro for `Nebulex.Caching.Decorators.keyref_spec/3`). + + > #### Using `keyref` {: .info} + > + > This macro is automatically imported and then available when using + > `use Nebulex.Caching`. - This macro is imported automatically with `use Nebulex.Caching`, - which means you don't need to do any additional `alias` or `import`. 
+ ## Options - See `cacheable/3` decorator for more information about its usage. + * `:cache` - The cache where the referenced key is stored. + + * `:ttl` - The TTL for the referenced key. If configured, it overrides + the TTL given in the decorator's option `:opts`. + + See `Nebulex.Caching.Decorators.cacheable/3` decorator + for more information. """ - defmacro keyref(cache \\ nil, key) do + defmacro keyref(key, opts \\ []) do + cache = Keyword.get(opts, :cache) + ttl = Keyword.get(opts, :ttl) + quote do - Decorators.build_keyref(unquote(cache), unquote(key)) + Decorators.keyref_spec(unquote(cache), unquote(key), unquote(ttl)) end end end diff --git a/lib/nebulex/caching/decorators.ex b/lib/nebulex/caching/decorators.ex index 9c45b084..fd336ae3 100644 --- a/lib/nebulex/caching/decorators.ex +++ b/lib/nebulex/caching/decorators.ex @@ -1,28 +1,32 @@ if Code.ensure_loaded?(Decorator.Define) do defmodule Nebulex.Caching.Decorators do @moduledoc """ - Declarative annotation-based caching via function - [decorators](https://github.com/arjan/decorator). + Declarative decorator-based caching, inspired by + [Spring Cache Abstraction][spring-cache]. + + > *[`decorator`][decorator-lib] library is used underneath.* + + [spring-cache]: https://docs.spring.io/spring/docs/3.2.x/spring-framework-reference/html/cache.html + [decorator-lib]: https://github.com/arjan/decorator For caching declaration, the abstraction provides three Elixir function decorators: `cacheable `, `cache_evict`, and `cache_put`, which allow functions to trigger cache population or cache eviction. - Let us take a closer look at each annotation. - - > Inspired by [Spring Cache Abstraction](https://docs.spring.io/spring/docs/3.2.x/spring-framework-reference/html/cache.html). + Let us take a closer look at each decorator. 
## `cacheable` decorator - As the name implies, `cacheable` is used to demarcate functions that are - cacheable - that is, functions for whom the result is stored into the cache - so, on subsequent invocations (with the same arguments), the value in the - cache is returned without having to actually execute the function. In its - simplest form, the decorator/annotation declaration requires the name of - the cache associated with the annotated function: + As the name implies, `cacheable` is used to delimit functions that are + cacheable - that is, functions for whom the result is stored in the cache + so that on subsequent invocations (with the same arguments), the value is + returned from the cache without having to execute the function. In its + simplest form, the decorator declaration requires the cache associated with + the decorated function if the [default cache](#module-default-cache) is not + configured (see ["Cache configuration"](#module-cache-configuration)): @decorate cacheable(cache: Cache) - def get_account(id) do - # the logic for retrieving the account ... + def find_book(isbn) do + # the logic for retrieving the book ... end In the snippet above, the function `get_account/1` is associated with the @@ -30,301 +34,280 @@ if Code.ensure_loaded?(Decorator.Define) do to see whether the invocation has been already executed and does not have to be repeated. - ### Default Key Generation + See `cacheable/3` for more information. - Since caches are essentially key-value stores, each invocation of a cached - function needs to be translated into a suitable key for cache access. - Out of the box, the caching abstraction uses a simple key-generator - based on the following algorithm: + ## `cache_put` decorator - * If no params are given, return `0`. - * If only one param is given, return that param as key. - * If more than one param is given, return a key computed from the hashes - of all parameters (`:erlang.phash2(args)`). 
+ For cases where the cache needs to be updated without interfering with the + function execution, one can use the `cache_put` decorator. That is, the + function will always be executed and its result placed into the cache + (according to the `cache_put` options). It supports the same options as + `cacheable` and should be used for cache population or update rather than + function flow optimization. - > **IMPORTANT:** Since Nebulex v2.1.0, the default key generation implements - the algorithm described above, breaking backward compatibility with older - versions. Therefore, you may need to change your code in case of using the - default key generation. + @decorate cache_put(cache: Cache) + def update_book(isbn) do + # the logic for retrieving the book and then updating it ... + end - The default key generator is provided by the cache via the callback - `c:Nebulex.Cache.__default_key_generator__/0` and it is applied only - if the option `key:` or `keys:` is not configured. Defaults to - `Nebulex.Caching.SimpleKeyGenerator`. You can change the default - key generator at compile time with the option `:default_key_generator`. - For example, one can define a cache with a default key generator as: + Note that using `cache_put` and `cacheable` decorators on the same function + is generally discouraged because they have different behaviors. While the + latter causes the function execution to be skipped by using the cache, the + former forces the execution in order to execute a cache update. This leads + to unexpected behavior and with the exception of specific corner-cases + (such as decorators having conditions that exclude them from each other), + such declarations should be avoided. - defmodule MyApp.Cache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Local, - default_key_generator: __MODULE__ + See `cache_put/3` for more information. 
- @behaviour Nebulex.Caching.KeyGenerator + ## `cache_evict` decorator - @impl true - def generate(mod, fun, args), do: :erlang.phash2({mod, fun, args}) + The cache abstraction allows not just the population of a cache store but + also eviction. This process is useful for removing stale or unused data from + the cache. Opposed to `cacheable`, the decorator `cache_evict` demarcates + functions that perform cache eviction, which are functions that act as + triggers for removing data from the cache. Just like its sibling, + `cache_evict` requires specifying the cache that will be affected by the + action, allows to provide a key or a list of keys to be evicted, but in + addition, features an extra option `:all_entries` which indicates whether + a cache-wide eviction needs to be performed rather than just one or a few + entries (based on `:key` or `:keys` option): + + @decorate cache_evict(cache: Cache, all_entries: true) + def load_books(file_stream) do + # the logic for loading books ... end - The key generator module must implement the `Nebulex.Caching.KeyGenerator` - behaviour. + The option `:all_entries` comes in handy when an entire cache region needs + to be cleared out - rather than evicting each entry (which would take a + long time since it is inefficient), all the entries are removed in one + operation as shown above. - > **IMPORTANT:** There are some caveats to keep in mind when using - the key generator, therefore, it is highly recommended to review - `Nebulex.Caching.KeyGenerator` behaviour documentation before. + One can also indicate whether the eviction should occur after (the default) + or before the function executes through the `:before_invocation` attribute. + The former provides the same semantics as the rest of the decorators; once + the method completes successfully, an action (in this case, eviction) on the + cache is executed. If the function does not execute (as it might be cached) + or an exception is raised, the eviction does not occur. 
The latter + (`before_invocation: true`) causes the eviction to occur always before the + method is invoked. This is useful in cases where the eviction does not need + to be tied to the function execution outcome. - Also, you can provide a different key generator at any time - (overriding the default one) when using any caching annotation - through the option `:key_generator`. For example: + See `cache_evict/3` for more information. - # With a module implementing the key-generator behaviour - @decorate cache_put(cache: Cache, key_generator: CustomKeyGenerator) - def update_account(account) do - # the logic for updating the given entity ... - end + ## Shared Options - # With the shorthand tuple {module, args} - @decorate cache_put( - cache: Cache, - key_generator: {CustomKeyGenerator, [account.name]} - ) - def update_account2(account) do - # the logic for updating the given entity ... - end + All three cache decorators explained previously accept the following + options: - # With a MFA tuple - @decorate cache_put( - cache: Cache, - key_generator: {AnotherModule, :genkey, [account.id]} - ) - def update_account3(account) do - # the logic for updating the given entity ... - end + #{Nebulex.Caching.Options.shared_options_docs()} - > The `:key_generator` option is available for all caching annotations. + ## Cache configuration - ### Custom Key Generation Declaration + As documented in the options above, the `:cache` option configures the cache + for the decorated function (in the decorator declaration). However, there + are three possible values, such as documented in the `t:cache/0` type. + Let's go over these cache value alternatives in detail. - Since caching is generic, it is quite likely the target functions have - various signatures that cannot be simply mapped on top of the cache - structure. 
This tends to become obvious when the target function has - multiple arguments out of which only some are suitable for caching - (while the rest are used only by the function logic). For example: + ### Cache module - @decorate cacheable(cache: Cache) - def get_account(email, include_users?) do - # the logic for retrieving the account ... - end + The first cache value option is an existing cache module; this is the most + common value. For example: - At first glance, while the boolean argument influences the way the account - is found, it is no use for the cache. + @decorate cacheable(cache: MyApp.Cache) + def find_book(isbn) do + # the logic for retrieving the book ... + end - For such cases, the `cacheable` decorator allows the user to specify the - key explicitly based on the function attributes. + ### Dynamic cache - @decorate cacheable(cache: Cache, key: {Account, email}) - def get_account(email, include_users?) do - # the logic for retrieving the account ... - end + In case one is using a dynamic cache: - @decorate cacheable(cache: Cache, key: {Account, user.account_id}) - def get_user_account(%User{} = user) do - # the logic for retrieving the account ... + @decorate cacheable(cache: dynamic_cache(MyApp.Cache, :books)) + def find_book(isbn) do + # the logic for retrieving the book ... end - It is also possible passing options to the cache, like so: + > See ["Dynamic caches"][dynamic-caches] for more information. - @decorate cacheable( - cache: Cache, - key: {Account, email}, - opts: [ttl: 300_000] - ) - def get_account(email, include_users?) do - # the logic for retrieving the account ... - end + [dynamic-caches]: https://hexdocs.pm/nebulex/Nebulex.Cache.html#module-dynamic-caches - See the **"Shared Options"** section below. + ### Anonymous function - ### Functions with multiple clauses + Finally, it is also possible to configure an anonymous function to resolve + the cache value in runtime. 
The function receives the + [decorator context](`t:context/0`) as an argument and must return either + a cache module or a dynamic cache. - Since [decorator lib](https://github.com/arjan/decorator#functions-with-multiple-clauses) - is used, it is important to be aware of its recommendations, warns, - limitations, and so on. In this case, for functions with multiple clauses - the general advice is to create an empty function head, and call the - decorator on that head, like so: + @decorate cacheable(cache: &MyApp.Resolver.resolve_cache/1) + def find_book(isbn) do + # the logic for retrieving the book ... + end - @decorate cacheable(cache: Cache, key: email) - def get_account(email \\\\ nil) + Where `resolve_cache` function may look like this: - def get_account(nil), do: nil + defmodule MyApp.Resolver do + alias Nebulex.Caching.Decorators.Context - def get_account(email) do - # the logic for retrieving the account ... + def resolve_cache(%Context{} = context) do + # the logic for generating the cache value + end end - ## `cache_put` decorator + ## Default cache - For cases where the cache needs to be updated without interfering with the - function execution, one can use the `cache_put` decorator. That is, the - method will always be executed and its result placed into the cache - (according to the `cache_put` options). It supports the same options as - `cacheable`. + While option `:cache` is handy for specifying the decorated function's + cache, it may be cumbersome when there is a module with several decorated + functions, and all use the same cache. In that case, we must set the + `:cache` option with the same value in all the decorated functions. + Fortunately, the `:cache` option can be configured globally for all + decorated functions in a module when defining the caching usage via + `use Nebulex.Caching`. 
For example: - @decorate cache_put(cache: Cache, key: {Account, acct.email}) - def update_account(%Account{} = acct, attrs) do - # the logic for updating the account ... - end + defmodule MyApp.Books do + use Nebulex.Caching, cache: MyApp.Cache - Note that using `cache_put` and `cacheable` annotations on the same function - is generally discouraged because they have different behaviors. While the - latter causes the method execution to be skipped by using the cache, the - former forces the execution in order to execute a cache update. This leads - to unexpected behavior and with the exception of specific corner-cases - (such as decorators having conditions that exclude them from each other), - such declarations should be avoided. + @decorate cacheable() + def get_book(isbn) do + # the logic for retrieving a book ... + end - ## `cache_evict` decorator + @decorate cacheable(cache: MyApp.BestSellersCache) + def best_sellers do + # the logic for retrieving best seller books ... + end - The cache abstraction allows not just the population of a cache store but - also eviction. This process is useful for removing stale or unused data from - the cache. Opposed to `cacheable`, the decorator `cache_evict` demarcates - functions that perform cache eviction, which are functions that act as - triggers for removing data from the cache. The `cache_evict` decorator not - only allows a key to be specified, but also a set of keys. Besides, extra - options like`all_entries` which indicates whether a cache-wide eviction - needs to be performed rather than just an entry one (based on the key or - keys): - - @decorate cache_evict(cache: Cache, key: {Account, email}) - def delete_account_by_email(email) do - # the logic for deleting the account ... + ... end - @decorate cacheable( - cache: Cache, - keys: [{Account, acct.id}, {Account, acct.email}] - ) - def delete_account(%Account{} = acct) do - # the logic for deleting the account ... 
- end + In the snippet above, the function `get_book/1` is associated with the + cache `MyApp.Cache` by default since option `:cache` is not provided in + the decorator. In other words, when the `:cache` option is configured + globally (when defining the caching usage via `use Nebulex.Caching`), + it is not required in the decorator declaration. However, one can always + override the global or default cache in the decorator declaration by + providing the option `:cache`, as is shown in the `best_sellers/0` + function, which is associated with a different cache. - @decorate cacheable(cache: Cache, all_entries: true) - def delete_all_accounts do - # the logic for deleting all the accounts ... - end + To conclude, it is crucial to know the decorator must be associated with + a cache, either a global or default cache defined at the caching usage + definition (e.g., `use Nebulex.Caching, cache: MyCache`) or a specific + one configured in the decorator declaration. - The option `all_entries:` comes in handy when an entire cache region needs - to be cleared out - rather than evicting each entry (which would take a - long time since it is inefficient), all the entries are removed in one - operation as shown above. + ## Key Generation - ## Shared Options + Since caches are essentially key-value stores, each invocation of a cached + function needs to be translated into a suitable key for cache access. The + key can be generated using a default key generator (which is configurable) + or through decorator options `:key` or `:keys`. Let us take a closer look + at each approach: - All three cache annotations explained previously accept the following - options: + ### Default Key Generation - * `:cache` - Defines what cache to use (required). Raises `ArgumentError` - if the option is not present. It can be also a MFA tuple to resolve the - cache dynamically in runtime by calling it. See "The :cache option" - section below for more information. 
+ Out of the box, the caching abstraction uses a simple key generator + strategy given by `Nebulex.Caching.SimpleKeyGenerator`, which is + based on the following algorithm: - * `:key` - Defines the cache access key (optional). It overrides the - `:key_generator` option. If this option is not present, a default - key is generated by the configured or default key generator. + * If no arguments are given, return `0`. + * If only one argument is given, return that argument as the key. + * If more than one argument is given, return a key computed + from the hash of all arguments (`:erlang.phash2(args)`). - * `:opts` - Defines the cache options that will be passed as argument - to the invoked cache function (optional). + To provide a different default key generator, one needs to implement the + `Nebulex.Caching.KeyGenerator` behaviour. Once configured via the + `:default_key_generator` option, the generator will be used for each + declaration that does not specify its own key generation strategy (see the + ["Custom Key Generation"](#module-custom-key-generation-declaration) + section down below). - * `:match` - Match function `t:match_fun/0`. This function is for matching - and deciding whether the code-block evaluation result (which is received - as an argument) is cached or not. The function should return: + The following example shows how to configure a custom default key generator: - * `true` - the code-block evaluation result is cached as it is - (the default). - * `{true, value}` - `value` is cached. This is useful to set what - exactly must be cached. - * `{true, value, opts}` - `value` is cached with the options given by - `opts`. This return allows us to set the value to be cached, as well - as the runtime options for storing it (e.g.: the `ttl`). - * `false` - Nothing is cached. + defmodule MyApp.Books do + use Nebulex.Caching, + cache: MyApp.Cache, + default_key_generator: MyApp.Keygen - The default match function looks like this: + ...
+ end - ```elixir - fn - {:error, _} -> false - :error -> false - nil -> false - _ -> true + defmodule MyApp.Keygen do + @behaviour Nebulex.Caching.KeyGenerator + + @impl true + def generate(context) do + # your key generation logic ... + end end - ``` - - By default, if the code-block evaluation returns any of the following - terms/values `nil`, `:error`, `{:error, term}`, the default match - function returns `false` (the returned result is not cached), - otherwise, `true` is returned (the returned result is cached). - - * `:key_generator` - The custom key-generator to be used (optional). - If present, this option overrides the default key generator provided - by the cache, and it is applied only if the option `key:` or `keys:` - is not present. In other words, the option `key:` or `keys:` overrides - the `:key_generator` option. See "The `:key_generator` option" section - below for more information about the possible values. - - * `:on_error` - It may be one of `:raise` (the default) or `:nothing`. - The decorators/annotations call the cache under the hood, hence, - by default, any error or exception at executing a cache command - is propagated. When this option is set to `:nothing`, any error - or exception executing a cache command is ignored and the annotated - function is executed normally. - - ### The `:cache` option - - The cache option can be the de defined cache module or an MFA tuple to - resolve the cache dynamically in runtime. When it is an MFA tuple, the - MFA is invoked passing the calling module, function name, and arguments - by default, and the MFA arguments are passed as extra arguments. - For example: - - @decorate cacheable(cache: {MyApp.Cache, :cache, []}, key: var) - def some_function(var) do - # Some logic ... + + ### Custom Key Generation Declaration + + Since caching is generic, it is quite likely the target functions have + various signatures that cannot be simply mapped on top of the cache + structure. 
This tends to become obvious when the target function has + multiple arguments out of which only some are suitable for caching + (while the rest are used only by the function logic). For example: + + @decorate cacheable(cache: Cache) + def find_book(isbn, check_warehouse?, include_used?) do + # the logic for retrieving the book ... end - The annotated function above will call `MyApp.Cache.cache(mod, fun, args)` - to resolve the cache in runtime, where `mod` is the calling module, `fun` - the calling function name, and `args` the calling arguments. + At first glance, while the two `boolean` arguments influence the way the + book is found, they are not used for the cache. Furthermore, what if only + one of the two is important while the other is not? + + For such cases, the `cacheable` decorator allows the user to specify how + the key is generated through its `:key` attribute (the same applies to all + decorators). The developer can pick the arguments of interest (or their + nested properties), perform operations or even invoke arbitrary functions + without having to write any code or implement any interface. This is the + recommended approach over the default generator since functions tend to be + quite different in signatures as the code base grows; while the default + strategy might work for some functions, it rarely does for all functions. + + The following are some examples of generating keys: + + @decorate cacheable(cache: Cache, key: isbn) + def find_book(isbn, check_warehouse?, include_used?) do + # the logic for retrieving the book ... + end - Also, we can define the function passing some extra arguments, like so: + @decorate cacheable(cache: Cache, key: isbn.raw_number) + def find_book(isbn, check_warehouse?, include_used?) do + # the logic for retrieving the book ... + end - @decorate cacheable(cache: {MyApp.Cache, :cache, ["extra"]}, key: var) - def some_function(var) do - # Some logic ... 
+ @decorate cacheable(cache: Cache, key: &{&1.function_name, hd(&1.args)}) + def find_book(isbn, check_warehouse?, include_used?) do + # the logic for retrieving the book ... end - In this case, the MFA will be invoked by adding the extra arguments, like: - `MyApp.Cache.cache(mod, fun, args, "extra")`. + In the last example, an anonymous function is used for generating the key. + It must be an anonymous function that expects the + [decorator's context](`t:context/0`) as an argument. - ### The `:key_generator` option + One can also provide options for the cache commands executed underneath, + like so: - The possible values for the `:key_generator` are: + @decorate cacheable(cache: Cache, key: isbn, opts: [ttl: 300_000]) + def find_book(isbn, check_warehouse?, include_used?) do + # the logic for retrieving the book ... + end - * A module implementing the `Nebulex.Caching.KeyGenerator` behaviour. + In that case, `opts: [ttl: 300_000]` specifies the TTL for the cached value. - * A MFA tuple `{module, function, args}` for a function to call to - generate the key before the cache is invoked. A shorthand value of - `{module, args}` is equivalent to - `{module, :generate, [calling_module, calling_function_name, args]}`. + See the ["Shared Options"](#module-shared-options) section + for more information. 
- ## Putting all together + ## Examples - Supposing we are using `Ecto` and we want to define some cacheable functions - within the context `MyApp.Accounts`: + Supposing an app uses Ecto, and there is a context for accessing books + `MyApp.Books`, we may decorate some functions as follows: - # The config + # The cache config config :my_app, MyApp.Cache, gc_interval: 86_400_000, #=> 1 day - backend: :shards + max_size: 1_000_000 #=> Max 1M books # The Cache defmodule MyApp.Cache do @@ -333,154 +316,336 @@ if Code.ensure_loaded?(Decorator.Define) do adapter: Nebulex.Adapters.Local end - # Some Ecto schema - defmodule MyApp.Accounts.User do + # Book schema + defmodule MyApp.Books.Book do use Ecto.Schema - schema "users" do - field(:username, :string) - field(:password, :string) - field(:role, :string) + schema "books" do + field(:isbn, :string) + field(:title, :string) + field(:author, :string) + # The rest of the fields omitted end - def changeset(user, attrs) do - user - |> cast(attrs, [:username, :password, :role]) - |> validate_required([:username, :password, :role]) + def changeset(book, attrs) do + book + |> cast(attrs, [:isbn, :title, :author]) + |> validate_required([:isbn, :title, :author]) end end - # Accounts context - defmodule MyApp.Accounts do - use Nebulex.Caching - - alias MyApp.Accounts.User - alias MyApp.{Cache, Repo} + # Books context + defmodule MyApp.Books do + use Nebulex.Caching, cache: MyApp.Cache - @ttl :timer.hours(1) + alias MyApp.Repo + alias MyApp.Books.Book - @decorate cacheable(cache: Cache, key: {User, id}, opts: [ttl: @ttl]) - def get_user!(id) do - Repo.get!(User, id) + @decorate cacheable(key: id) + def get_book(id) do + Repo.get(Book, id) end - @decorate cacheable( - cache: Cache, - key: {User, username}, - opts: [ttl: @ttl] - ) - def get_user_by_username(username) do - Repo.get_by(User, [username: username]) + @decorate cacheable(key: isbn) + def get_book_by_isbn(isbn) do + Repo.get_by(Book, [isbn: isbn]) end @decorate cache_put( - 
cache: Cache, - keys: [{User, usr.id}, {User, usr.username}], - match: &match_update/1 + keys: [book.id, book.isbn], + match: &__MODULE__.match_fun/1 ) - def update_user(%User{} = usr, attrs) do - usr - |> User.changeset(attrs) + def update_book(%Book{} = book, attrs) do + book + |> Book.changeset(attrs) |> Repo.update() end - defp match_update({:ok, usr}), do: {true, usr} - defp match_update({:error, _}), do: false + def match_fun({:ok, book}), do: {true, book} + def match_fun({:error, _}), do: false - @decorate cache_evict( - cache: Cache, - keys: [{User, usr.id}, {User, usr.username}] - ) - def delete_user(%User{} = usr) do - Repo.delete(usr) + @decorate cache_evict(keys: [book.id, book.isbn]) + def delete_book(%Book{} = book) do + Repo.delete(book) end - def create_user(attrs \\\\ %{}) do - %User{} - |> User.changeset(attrs) + def create_book(attrs \\\\ %{}) do + %Book{} + |> Book.changeset(attrs) |> Repo.insert() end end - See [Cache Usage Patters Guide](http://hexdocs.pm/nebulex/cache-usage-patterns.html). + ## Functions with multiple clauses + + Since [`decorator`](https://github.com/arjan/decorator#functions-with-multiple-clauses) + library is used, it is important to be aware of its recommendations, + caveats, limitations, and so on. For instance, for functions with multiple + clauses the general advice is to create an empty function head, and call + the decorator on that head, like so: + + @decorate cacheable(cache: Cache) + def get_user(id \\\\ nil) + + def get_user(nil), do: nil + + def get_user(id) do + # your logic ... + end + + However, the previous example works because we are not using the function + attributes for defining a custom key via the `:key` option. If we add + `key: id` for instance, we will get errors and/or warnings, since the + decorator is expecting the attribute `id` to be present, but it is not + in the first function clause.
+    * [Cache Usage Patterns Guide](http://hexdocs.pm/nebulex/cache-usage-patterns.html).
+ + For example, suppose you have a module with a decorated function: + + defmodule MyApp.SomeModule do + use Nebulex.Caching + + alias MyApp.Cache + + @decorate cacheable(cache: Cache, key: &__MODULE__.key_generator/1) + def get_something(x, _y, _, {_, _}, [_, _], %{a: a}, %{} = z) do + # Function's logic + end + + def key_generator(context) do + # Key generation logic + end + end + + The generator will be invoked like so: + + key_generator(%Nebulex.Caching.Decorators.Context{ + decorator: :cacheable, + module: MyApp.SomeModule, + function_name: :get_something, + arity: 7, + args: [x, z] + }) + + As you may notice, only the arguments `x` and `z` are included in the + context args when calling the `key_generator/1` function. + """ + @type t() :: %__MODULE__{ + decorator: :cacheable | :cache_evict | :cache_put, + module: module(), + function_name: atom(), + arity: non_neg_integer(), + args: [any()] + } + + # Context struct + defstruct decorator: nil, module: nil, function_name: nil, arity: 0, args: [] + end + + # Decorator definitions + use Decorator.Define, + cacheable: 0, + cacheable: 1, + cache_evict: 0, + cache_evict: 1, + cache_put: 0, + cache_put: 1 - import Nebulex.Helpers + import Nebulex.Utils, only: [get_option: 5] import Record - ## Types + ## Records + + # Dynamic cache spec + defrecordp(:dynamic_cache, :"$nbx_dynamic_cache_spec", cache: nil, name: nil) # Key reference spec - defrecordp(:keyref, :"$nbx_cache_keyref", cache: nil, key: nil) + defrecordp(:keyref, :"$nbx_keyref_spec", cache: nil, key: nil, ttl: nil) + + ## Types + + @typedoc "Proxy type to the decorator context" + @type context() :: Context.t() + + @typedoc "Type spec for a dynamic cache definition" + @type dynamic_cache() :: record(:dynamic_cache, cache: module(), name: atom() | pid()) + + @typedoc "The type for the cache value" + @type cache_value() :: module() | dynamic_cache() + + @typedoc """ + The type for the `:cache` option value. 
+    * An anonymous function to call to resolve the cache value at runtime.
+    * An anonymous function that expects the result of the decorated function
In other words, when it is present, this option tells - the `cacheable` decorator to store the function's block result under - the referenced key given by the option `:references`, and the referenced - key under the key given by the option `:key`. The value could be: - - * `nil` - (Default) It is ignored (no key references). - * `(term -> keyref | term)` - An anonymous function receiving the - result of the function's code block evaluation and must return the - referenced key. There is also a special type of return in case you - want to reference a key located in an external/different cache than - the one defined with the options `:key` or `:key_generator`. In this - scenario, you must return a special type `t:keyref/0`, which can be - build with the macro [`keyref/2`](`Nebulex.Caching.keyref/2`). - See the "External referenced keys" section below. - * `any` - It could be an explicit term or value, for example, a fixed - value or a function argument. - - See the "Referenced keys" section for more information. - - See the "Shared options" section at the module documentation. + #{Nebulex.Caching.Options.cacheable_options_docs()} + + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. 
## Examples defmodule MyApp.Example do - use Nebulex.Caching - - alias MyApp.Cache + use Nebulex.Caching, cache: MyApp.Cache @ttl :timer.hours(1) - @decorate cacheable(cache: Cache, key: id, opts: [ttl: @ttl]) + @decorate cacheable(key: id, opts: [ttl: @ttl]) def get_by_id(id) do # your logic (maybe the loader to retrieve the value from the SoR) end - @decorate cacheable(cache: Cache, key: email, references: & &1.id) + @decorate cacheable(key: email, references: & &1.id) def get_by_email(email) do # your logic (maybe the loader to retrieve the value from the SoR) end - @decorate cacheable(cache: Cache, key: clauses, match: &match_fun/1) + @decorate cacheable(key: clauses, match: &match_fun/1) def all(clauses) do # your logic (maybe the loader to retrieve the value from the SoR) end @@ -489,91 +654,84 @@ if Code.ensure_loaded?(Decorator.Define) do defp match_fun(_), do: true end - The **Read-through** pattern is supported by this decorator. The loader to - retrieve the value from the system-of-record (SoR) is your function's logic - and the rest is provided by the macro under-the-hood. + > #### **Read-through** pattern {: .info} + > + > This decorator supports the **Read-through** pattern. The loader to + > retrieve the value from the system of record (SoR) is your function's + > logic, and the macro under the hood provides the rest. ## Referenced keys - Referenced keys are particularly useful when you have multiple different - keys keeping the same value. For example, let's imagine we have an schema - `User` with more than one unique field, like `:id`, `:email`, and `:token`. - We may have a module with functions retrieving the user account by any of - those fields, like so: + Referenced keys are handy when multiple keys keep the same value. For + example, let's imagine we have a schema `User` with multiple unique fields, + like `:id`, `:email`, and `:token`. 
+  different key. This is not very efficient in terms of memory space. Besides,
+ The module will look like this: defmodule MyApp.UserAccounts do - use Nebulex.Caching - - alias MyApp.Cache + use Nebulex.Caching, cache: MyApp.Cache - @decorate cacheable(cache: Cache, key: id) + @decorate cacheable(key: id) def get_user_account(id) do # your logic ... end - @decorate cacheable(cache: Cache, key: email, references: & &1.id) + @decorate cacheable(key: email, references: & &1.id) def get_user_account_by_email(email) do # your logic ... end - @decorate cacheable(cache: Cache, key: token, references: & &1.id) + @decorate cacheable(key: token, references: & &1.id) def get_user_account_by_token(token) do # your logic ... end - @decorate cache_evict(cache: Cache, key: user.id) - def update_user_account(user) do + @decorate cache_evict(key: user.id) + def update_user_account(user, attrs) do # your logic ... end end - With the option `:references` we are indicating to the `cacheable` decorator - to store the user id (`& &1.id` - assuming the function returns an user - record) under the key `email` and the key `token`, and the user record + With the option `:references`, we are indicating to the `cacheable` + decorator to store the user id (`& &1.id` - assuming the function returns a + user record) under the key `email` and the key `token`, and the user record itself under the user id, which is the referenced key. This time, instead of - storing the same object three times, it will be stored only once under the - user id, and the other entries will just keep a reference to it. When the - functions `get_user_account_by_email/1` or `get_user_account_by_token/1` - are executed, the decorator will automatically handle it; under-the-hood, + storing the same object three times, the decorator will cache it only once + under the user ID, and the other entries will keep a reference to it. 
When + the functions `get_user_account_by_email/1` or `get_user_account_by_token/1` + are executed, the decorator will automatically handle it; under the hood, it will fetch the referenced key given by `email` or `token` first, and then get the user record under the referenced key. @@ -584,6 +742,70 @@ if Code.ensure_loaded?(Decorator.Define) do doesn't evict the references automatically"_. See the ["CAVEATS"](#cacheable/3-caveats) section below. + ### The `match` function on references + + The `cacheable` decorator also evaluates the `:match` option's function on + cache key references to ensure consistency and correctness. Let's give an + example to understand what this is about. + + Using the previous _"user accounts"_ example, here is the first call to + fetch a user by email: + + iex> user = MyApp.UserAccounts.get_user_account_by_email("me@test.com") + #=> %MyApp.UserAccounts.User{id: 1, email: "me@test.com", ...} + + The user is now available in the cache for subsequent calls. Now, let's + suppose we update the user's email by calling: + + iex> MyApp.UserAccounts.update_user_account(user, %{ + ...> email: "updated@test.com", ... + ...> }) + #=> %MyApp.UserAccounts.User{id: 1, email: "updated@test.com", ...} + + The `update_user_account` function should have removed the user schema + associated with the `user.id` key (decorated with `cache_evict`) but not + its references. Therefore, if we call `get_user_account_by_email` again: + + iex> user = MyApp.UserAccounts.get_user_account_by_email("me@test.com") + #=> %MyApp.UserAccounts.User{id: 1, email: "updated@test.com", ...} + + And here, we have an inconsistency because we are requesting a user with + the email `"me@test.com"` and we got a user with a different email + `"updated@test.com"` (the updated one). How can we avoid this? The answer + is to leverage the match function to ensure consistency and correctness. + Let's provide a match function that helps us with it. 
+ + @decorate cacheable( + key: email, + references: & &1.id, + match: &match(&1, email) + ) + def get_user_account_by_email(email) do + # your logic ... + end + + defp match(%{email: email}, email), do: true + defp match(_, _), do: false + + With the solution above, the `cacheable` decorator only caches the user's + value if the email matches the one in the arguments. Otherwise, nothing is + cached, and the decorator evaluates the function's block. Previously, the + decorator was caching the user regardless of the requested email value. + With this fix, if we try the previous call: + + iex> MyApp.UserAccounts.get_user_account_by_email("me@test.com") + #=> nil + + Since there is an email mismatch in the previous call, the decorator removes + the mismatch reference from the cache (eliminating the inconsistency) and + executes the function body, assuming it uses `MyApp.Repo.get_by/2`, `nil` + is returned because there is no such user in the database. + + > #### `:match` option {: .info} + > + > The `:match` option can and should be used when using references to allow + > the decorator to remove inconsistent cache key references automatically. + ### External referenced keys Previously, we saw how to work with referenced keys but on the same cache, @@ -647,11 +869,11 @@ if Code.ensure_loaded?(Decorator.Define) do The functions `get_user_account/1` and `update_user_account/2` use `RedisCache` to store the real value in Redis while `get_user_account_by_email/1` and `get_user_account_by_token/1` use - `LocalCache` to store the referenced keys. Then, with the option + `LocalCache` to store the cache key references. Then, with the option `references: &keyref(RedisCache, &1.id)` we are telling the `cacheable` decorator the referenced key given by `&1.id` is located in the cache `RedisCache`; underneath, the macro [`keyref/2`](`Nebulex.Caching.keyref/2`) - builds the special return type for the external cache reference. 
+ builds the particular return type for the external cache reference. ### CAVEATS @@ -684,49 +906,42 @@ if Code.ensure_loaded?(Decorator.Define) do the ID, but not the email. """ - def cacheable(attrs, block, context) do + @doc group: "Decorator API" + def cacheable(attrs \\ [], block, context) do caching_action(:cacheable, attrs, block, context) end @doc """ - Provides a way of annotating functions to be evicted; but updating the - cached key instead of deleting it. + Decorator indicating that a function triggers a + [cache put](`c:Nebulex.Cache.put/3`) operation. - The content of the cache is updated without interfering with the function - execution. That is, the method would always be executed and the result - cached. - - The difference between `cacheable/3` and `cache_put/3` is that `cacheable/3` - will skip running the function if the key exists in the cache, whereas - `cache_put/3` will actually run the function and then put the result in - the cache. + In contrast to the `cacheable` decorator, this decorator does not cause the + decorated function to be skipped. Instead, it always causes the function + to be invoked and its result to be stored in the associated cache if the + condition given by the `:match` option matches accordingly. ## Options - * `:keys` - The set of cached keys to be updated with the returned value - on function completion. It overrides `:key` and `:key_generator` - options. + #{Nebulex.Caching.Options.cache_put_options_docs()} - See the "Shared options" section at the module documentation. + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. 
## Examples defmodule MyApp.Example do - use Nebulex.Caching - - alias MyApp.Cache + use Nebulex.Caching, cache: MyApp.Cache @ttl :timer.hours(1) - @decorate cache_put(cache: Cache, key: id, opts: [ttl: @ttl]) + @decorate cache_put(key: id, opts: [ttl: @ttl]) def update!(id, attrs \\\\ %{}) do # your logic (maybe write data to the SoR) end @decorate cache_put( - cache: Cache, key: id, - match: &match_fun/1, + match: &__MODULE__.match_fun/1, opts: [ttl: @ttl] ) def update(id, attrs \\\\ %{}) do @@ -734,159 +949,223 @@ if Code.ensure_loaded?(Decorator.Define) do end @decorate cache_put( - cache: Cache, keys: [object.name, object.id], - match: &match_fun/1, + match: &__MODULE__.match_fun/1, opts: [ttl: @ttl] ) def update_object(object) do # your logic (maybe write data to the SoR) end - defp match_fun({:ok, updated}), do: {true, updated} - defp match_fun({:error, _}), do: false + def match_fun({:ok, updated}), do: {true, updated} + def match_fun({:error, _}), do: false end - The **Write-through** pattern is supported by this decorator. Your function - provides the logic to write data to the system-of-record (SoR) and the rest - is provided by the decorator under-the-hood. + > #### **Write-through** pattern {: .info} + > + > This decorator supports the **Write-through** pattern. Your function + > provides the logic to write data to the system of record (SoR), and the + > decorator under the hood provides the rest. """ - def cache_put(attrs, block, context) do + @doc group: "Decorator API" + def cache_put(attrs \\ [], block, context) do caching_action(:cache_put, attrs, block, context) end @doc """ - Provides a way of annotating functions to be evicted (eviction aspect). - - On function's completion, the given key or keys (depends on the `:key` and - `:keys` options) are deleted from the cache. + Decorator indicating that a function triggers a cache evict operation + (`delete` or `delete_all`). 
## Options - * `:keys` - Defines the set of keys to be evicted from cache on function - completion. It overrides `:key` and `:key_generator` options. - - * `:all_entries` - Defines if all entries must be removed on function - completion. Defaults to `false`. - - * `:before_invocation` - Boolean to indicate whether the eviction should - occur after (the default) or before the function executes. The former - provides the same semantics as the rest of the annotations; once the - function completes successfully, an action (in this case eviction) - on the cache is executed. If the function does not execute (as it might - be cached) or an exception is raised, the eviction does not occur. - The latter (`before_invocation: true`) causes the eviction to occur - always, before the function is invoked; this is useful in cases where - the eviction does not need to be tied to the function outcome. + #{Nebulex.Caching.Options.cache_evict_options_docs()} - See the "Shared options" section at the module documentation. + See the ["Shared options"](#module-shared-options) section in the module + documentation for more options. ## Examples defmodule MyApp.Example do - use Nebulex.Caching - - alias MyApp.Cache + use Nebulex.Caching, cache: MyApp.Cache - @decorate cache_evict(cache: Cache, key: id) + @decorate cache_evict(key: id) def delete(id) do # your logic (maybe write/delete data to the SoR) end - @decorate cache_evict(cache: Cache, keys: [object.name, object.id]) + @decorate cache_evict(keys: [object.name, object.id]) def delete_object(object) do # your logic (maybe write/delete data to the SoR) end - @decorate cache_evict(cache: Cache, all_entries: true) + @decorate cache_evict(all_entries: true) def delete_all do # your logic (maybe write/delete data to the SoR) end end - The **Write-through** pattern is supported by this decorator. Your function - provides the logic to write data to the system-of-record (SoR) and the rest - is provided by the decorator under-the-hood. 
But in contrast with `update` - decorator, when the data is written to the SoR, the key for that value is - deleted from cache instead of updated. + > #### **Write-through** pattern {: .info} + > + > This decorator supports the **Write-through** pattern. Your function + > provides the logic to write data to the system of record (SoR), and the + > decorator under the hood provides the rest. But in contrast with the + > `update` decorator, the data is deleted from the cache instead of updated. """ - def cache_evict(attrs, block, context) do + @doc group: "Decorator API" + def cache_evict(attrs \\ [], block, context) do caching_action(:cache_evict, attrs, block, context) end + ## Decorator helpers + @doc """ - A convenience function for building a cache key reference when using the - `cacheable` decorator. If you want to build an external reference, which is, - referencing a `key` stored in an external cache, you have to provide the - `cache` where the `key` is located to. The `cache` argument is optional, - and by default is `nil`, which means, the referenced `key` is in the same - cache provided via `:key` or `:key_generator` options (internal reference). - - **NOTE:** In case you need to build a reference, consider using the macro - `Nebulex.Caching.keyref/2` instead. + A helper function to create a reserved tuple for a dynamic cache. - See `cacheable/3` decorator for more information about external references. + The first argument, `cache`, specifies the defined cache module, + and the second argument, `name`, is the actual name of the cache. - ## Examples + When creating a dynamic cache tuple form, use the macro + `Nebulex.Caching.dynamic_cache/2` instead. + + ## Example + + defmodule MyApp.Books do + use Nebulex.Caching + + @decorate cacheable(cache: dynamic_cache(MyApp.Cache, :books)) + def find_book(isbn) do + # your logic ... 
+ end + end - iex> Nebulex.Caching.Decorators.build_keyref("my-key") - {:"$nbx_cache_keyref", nil, "my-key"} - iex> Nebulex.Caching.Decorators.build_keyref(MyCache, "my-key") - {:"$nbx_cache_keyref", MyCache, "my-key"} + """ + @doc group: "Decorator Helpers" + @spec dynamic_cache_spec(module(), atom() | pid()) :: dynamic_cache() + def dynamic_cache_spec(cache, name) do + dynamic_cache(cache: cache, name: name) + end + + @doc """ + A helper function to create a reserved tuple for a reference. + + ## Arguments + + * `cache` - The cache where the referenced key is stored. If it is `nil`, + the referenced key is looked up in the same cache provided via the + `:cache` option. + * `key` - The referenced key. + * `ttl` - The TTL for the referenced key. If configured, it overrides the + TTL given in the decorator's option `:opts`. + When creating a reference tuple form, use the macro + `Nebulex.Caching.keyref/2` instead. + + See the ["Referenced keys"](#cacheable/3-referenced-keys) section in the + `cacheable` decorator for more information. 
""" - @spec build_keyref(Nebulex.Cache.t(), term) :: keyref() - def build_keyref(cache \\ nil, key) do - keyref(cache: cache, key: key) + @doc group: "Decorator Helpers" + @spec keyref_spec(cache() | nil, any(), timeout() | nil) :: keyref_spec() + def keyref_spec(cache, key, ttl) do + keyref(cache: cache, key: key, ttl: ttl) end - ## Private Functions + ## Private functions for decorators - defp caching_action(action, attrs, block, context) do - cache = attrs[:cache] || raise ArgumentError, "expected cache: to be given as argument" - opts_var = attrs[:opts] || [] - on_error_var = on_error_opt(attrs) - match_var = attrs[:match] || default_match_fun() + defp caching_action(decorator, attrs, block, context) do + # Get options defined via the __using__ macro + caching_opts = Module.get_attribute(context.module, :__caching_opts__, []) - args = - context.args - |> Enum.reduce([], &walk/2) - |> Enum.reverse() + # Resolve the cache to use + cache_var = get_cache(attrs, caching_opts) - cache_block = cache_block(cache, args, context) - keygen_block = keygen_block(attrs, args, context) - action_block = action_block(action, block, attrs, keygen_block) + # Get the options to be given to the cache commands + opts_var = attrs[:opts] || [] + + # Build decorator context + context = decorator_context(decorator, context) + + # Build key generation block + keygen_block = keygen_block(attrs, caching_opts) + + # Build the action block + action_block = + action_block( + decorator, + block, + attrs, + keygen_block, + on_error_opt(attrs, Keyword.fetch!(caching_opts, :on_error)), + attrs[:match] || default_match_fun() + ) quote do - cache = unquote(cache_block) + # Set common vars + cache = unquote(cache_var) opts = unquote(opts_var) - match = unquote(match_var) - on_error = unquote(on_error_var) - unquote(action_block) + # Set the decorator context + :ok = unquote(__MODULE__).put_decorator_context(unquote(context)) + + try do + # Execute the decorated function's code block + 
unquote(action_block) + after + # Reset decorator context + unquote(__MODULE__).del_decorator_context() + end end end + defp get_cache(attrs, caching_opts) do + attrs[:cache] + |> Kernel.||(caching_opts[:cache]) + |> Kernel.||( + raise ArgumentError, + "expected :cache option to be found within the decorator options " <> + "if it is not configured globally in the caching definition " <> + "(e.g., `use Nebulex.Caching, cache: MyCache`)" + ) + end + defp default_match_fun do quote do fn {:error, _} -> false :error -> false - nil -> false _ -> true end end end - defp walk({:\\, _, [ast, _]}, acc) do - walk(ast, acc) + defp decorator_context(decorator, context) do + # Sanitize context args + args = + context.args + |> Enum.reduce([], &sanitize_arg/2) + |> Enum.reverse() + + quote do + var!(ctx_args, __MODULE__) = unquote(args) + + %Context{ + decorator: unquote(decorator), + module: unquote(context.module), + function_name: unquote(context.name), + arity: unquote(context.arity), + args: var!(ctx_args, __MODULE__) + } + end + end + + defp sanitize_arg({:\\, _, [ast, _]}, acc) do + sanitize_arg(ast, acc) end - defp walk({:=, _, [_, ast]}, acc) do - walk(ast, acc) + defp sanitize_arg({:=, _, [_, ast]}, acc) do + sanitize_arg(ast, acc) end - defp walk({var, _meta, context} = ast, acc) when is_atom(var) and is_atom(context) do + defp sanitize_arg({var, _meta, context} = ast, acc) when is_atom(var) and is_atom(context) do if match?("_" <> _, "#{var}") or Macro.special_form?(var, 0) do acc else @@ -894,69 +1173,23 @@ if Code.ensure_loaded?(Decorator.Define) do end end - defp walk(_ast, acc) do + defp sanitize_arg(_ast, acc) do acc end - # MFA cache: `{module, function, args}` - defp cache_block({:{}, _, [mod, fun, cache_args]}, args, ctx) do - quote do - unquote(mod).unquote(fun)( - unquote(ctx.module), - unquote(ctx.name), - unquote(args), - unquote_splicing(cache_args) - ) - end - end - - # Module implementing the cache behaviour (default) - defp cache_block({_, _, _} = 
cache, _args, _ctx) do - quote(do: unquote(cache)) - end - - defp keygen_block(attrs, args, ctx) do - cond do - key = Keyword.get(attrs, :key) -> + defp keygen_block(attrs, caching_opts) do + case Keyword.fetch(attrs, :key) do + {:ok, key} -> quote(do: unquote(key)) - keygen = Keyword.get(attrs, :key_generator) -> - keygen_call(keygen, ctx, args) - - true -> - quote do - cache.__default_key_generator__().generate( - unquote(ctx.module), - unquote(ctx.name), - unquote(args) - ) - end - end - end - - # MFA key-generator: `{module, function, args}` - defp keygen_call({:{}, _, [mod, fun, keygen_args]}, _ctx, _args) do - quote do - unquote(mod).unquote(fun)(unquote_splicing(keygen_args)) - end - end - - # Key-generator tuple `{module, args}`, where the `module` implements - # the key-generator behaviour - defp keygen_call({{_, _, _} = mod, keygen_args}, ctx, _args) when is_list(keygen_args) do - quote do - unquote(mod).generate(unquote(ctx.module), unquote(ctx.name), unquote(keygen_args)) - end - end + :error -> + generator = Keyword.fetch!(caching_opts, :default_key_generator) - # Key-generator module implementing the behaviour - defp keygen_call({_, _, _} = keygen, ctx, args) do - quote do - unquote(keygen).generate(unquote(ctx.module), unquote(ctx.name), unquote(args)) + quote(do: &unquote(generator).generate/1) end end - defp action_block(:cacheable, block, attrs, keygen) do + defp action_block(:cacheable, block, attrs, keygen, on_error, match) do references = Keyword.get(attrs, :references) quote do @@ -965,216 +1198,328 @@ if Code.ensure_loaded?(Decorator.Define) do unquote(keygen), unquote(references), opts, - on_error, - match, + unquote(match), + unquote(on_error), fn -> unquote(block) end ) end end - defp action_block(:cache_put, block, attrs, keygen) do - keys = get_keys(attrs) - - key = - if is_list(keys) and length(keys) > 0, - do: {:"$keys", keys}, - else: keygen + defp action_block(:cache_put, block, attrs, keygen, on_error, match) do + key = 
get_key(attrs, keygen) quote do result = unquote(block) - unquote(__MODULE__).run_cmd( - unquote(__MODULE__), - :eval_match, - [result, match, cache, unquote(key), opts], - on_error, - result + unquote(__MODULE__).eval_cache_put( + cache, + unquote(key), + result, + opts, + unquote(on_error), + unquote(match) ) result end end - defp action_block(:cache_evict, block, attrs, keygen) do - before_invocation? = attrs[:before_invocation] || false - - eviction = eviction_block(attrs, keygen) - - if is_boolean(before_invocation?) && before_invocation? do - quote do - unquote(eviction) - unquote(block) - end - else - quote do - result = unquote(block) - - unquote(eviction) - - result - end - end - end - - defp eviction_block(attrs, keygen) do - keys = get_keys(attrs) - all_entries? = attrs[:all_entries] || false - - cond do - is_boolean(all_entries?) && all_entries? -> - quote(do: unquote(__MODULE__).run_cmd(cache, :delete_all, [], on_error, 0)) - - is_list(keys) and length(keys) > 0 -> - delete_keys_block(keys) - - true -> - quote(do: unquote(__MODULE__).run_cmd(cache, :delete, [unquote(keygen)], on_error, :ok)) - end - end + defp action_block(:cache_evict, block, attrs, keygen, on_error, _match) do + before_invocation? = get_boolean(attrs, :before_invocation) + all_entries? 
= get_boolean(attrs, :all_entries) + key = get_key(attrs, keygen) - defp delete_keys_block(keys) do quote do - Enum.each(unquote(keys), &unquote(__MODULE__).run_cmd(cache, :delete, [&1], on_error, :ok)) + unquote(__MODULE__).eval_cache_evict( + cache, + unquote(key), + unquote(before_invocation?), + unquote(all_entries?), + unquote(on_error), + fn -> unquote(block) end + ) end end - defp get_keys(attrs) do - get_option( - attrs, - :keys, - "a list with at least one element", - &((is_list(&1) and length(&1) > 0) or is_nil(&1)) - ) + defp get_key(attrs, default) do + with keys when is_list(keys) and length(keys) > 0 <- + get_option( + attrs, + :keys, + "a list with at least one element", + &((is_list(&1) and length(&1) > 0) or is_nil(&1)), + default + ) do + {:"$keys", keys} + end end - defp on_error_opt(attrs) do + defp on_error_opt(attrs, default) do get_option( attrs, :on_error, ":raise or :nothing", &(&1 in [:raise, :nothing]), - :raise + default ) end - ## Helpers + defp get_boolean(attrs, key) do + get_option(attrs, key, "a boolean", &Kernel.is_boolean/1, false) + end + + ## Internal API + + # Inline common instructions + @compile {:inline, put_decorator_context: 1, get_decorator_context: 0, del_decorator_context: 0} @doc """ - Convenience function for evaluating the `cacheable` decorator in runtime. + Convenience function to set the decorator's context + for the current process. - **NOTE:** For internal purposes only. + **NOTE:** Internal purposes only. 
""" - @spec eval_cacheable( - module, - term, - references, - Keyword.t(), - on_error_opt, - match_fun, - (-> term) - ) :: term - def eval_cacheable(cache, key, references, opts, on_error, match, block) - - def eval_cacheable(cache, key, nil, opts, on_error, match, block) do - with nil <- run_cmd(cache, :get, [key, opts], on_error) do - result = block.() - - run_cmd( - __MODULE__, - :eval_match, - [result, match, cache, key, opts], - on_error, - result - ) + @doc group: "Internal API" + @spec put_decorator_context(context()) :: :ok + def put_decorator_context(context) do + _ = Process.put({__MODULE__, :decorator_context}, context) - result - end + :ok end - def eval_cacheable(cache, key, references, opts, on_error, match, block) do - case run_cmd(cache, :get, [key, opts], on_error) do - nil -> - result = block.() - ref_key = eval_cacheable_ref(references, result) - - with true <- - run_cmd( - __MODULE__, - :eval_match, - [result, match, cache, ref_key, opts], - on_error, - result - ) do - :ok = cache_put(cache, key, ref_key, opts) - end + @doc """ + Convenience function to get the decorator's context + from the current process. - result + **NOTE:** Internal purposes only. + """ + @doc group: "Internal API" + @spec get_decorator_context() :: context() | nil + def get_decorator_context do + Process.get({__MODULE__, :decorator_context}) + end - keyref(cache: ref_cache, key: ref_key) -> - cache = ref_cache || cache + @doc """ + Convenience function to delete the decorator's context + from the current process. - with nil <- run_cmd(cache, :get, [ref_key, opts], on_error) do - result = block.() + **NOTE:** Internal purposes only. 
+ """ + @doc group: "Internal API" + @spec del_decorator_context() :: :ok + def del_decorator_context do + _ = Process.delete({__MODULE__, :decorator_context}) - run_cmd( - __MODULE__, - :eval_match, - [result, match, cache, ref_key, opts], - on_error, - result - ) + :ok + end + + @doc """ + Convenience function for wrapping and/or encapsulating + the **cacheable** decorator logic. + + **NOTE:** Internal purposes only. + """ + @doc group: "Internal API" + @spec eval_cacheable(any(), any(), references(), keyword(), match(), on_error(), fun()) :: any() + def eval_cacheable(cache, key, references, opts, match, on_error, block_fun) do + context = get_decorator_context() + cache = eval_cache(cache, context) + key = eval_key(cache, key, context) + + do_eval_cacheable(cache, key, references, opts, match, on_error, block_fun) + end + + defp do_eval_cacheable(cache, key, nil, opts, match, on_error, block_fun) do + do_apply(cache, :fetch, [key, opts]) + |> handle_cacheable( + on_error, + block_fun, + &__MODULE__.eval_cache_put(cache, key, &1, opts, on_error, match) + ) + end + + defp do_eval_cacheable( + ref_cache, + ref_key, + {:"$nbx_parent_keyref", keyref(cache: cache, key: key)}, + opts, + match, + on_error, + block_fun + ) do + do_apply(ref_cache, :fetch, [ref_key, opts]) + |> handle_cacheable( + on_error, + block_fun, + fn value -> + with false <- do_eval_cache_put(ref_cache, ref_key, value, opts, on_error, match) do + # The match returned `false`, remove the reference's parent key + _ = do_apply(cache, :delete, [key]) + + false + end + end, + fn value -> + case eval_function(match, value) do + false -> + # Remove the reference's parent key + _ = do_apply(cache, :delete, [key]) - result + block_fun.() + + _else -> + value end + end + ) + end - val -> - val + defp do_eval_cacheable(cache, key, references, opts, match, on_error, block_fun) do + case do_apply(cache, :fetch, [key, opts]) do + {:ok, keyref(cache: ref_cache, key: ref_key)} -> + eval_cacheable( + ref_cache || 
cache, + ref_key, + {:"$nbx_parent_keyref", keyref(cache: cache, key: key)}, + opts, + match, + on_error, + block_fun + ) + + other -> + handle_cacheable(other, on_error, block_fun, fn result -> + reference = eval_cacheable_ref(references, result) + + with true <- eval_cache_put(cache, reference, result, opts, on_error, match) do + :ok = cache_put(cache, key, reference, opts) + end + end) end end defp eval_cacheable_ref(references, result) do - with ref_fun when is_function(ref_fun, 1) <- references do - ref_fun.(result) - end - |> case do + case eval_function(references, result) do keyref() = ref -> ref - ref_key -> keyref(key: ref_key) + referenced_key -> keyref(key: referenced_key) end end - @doc """ - Convenience function for evaluating the `:match` function in runtime. + # Handle fetch result + defp handle_cacheable(result, on_error, block_fn, key_err_fn, on_ok \\ nil) - **NOTE:** For internal purposes only. + defp handle_cacheable({:ok, value}, _on_error, _block_fn, _key_err_fn, nil) do + value + end + + defp handle_cacheable({:ok, value}, _on_error, _block_fn, _key_err_fn, on_ok) do + on_ok.(value) + end + + defp handle_cacheable({:error, %Nebulex.KeyError{}}, _on_error, block_fn, key_err_fn, _on_ok) do + block_fn.() + |> tap(key_err_fn) + end + + defp handle_cacheable({:error, _}, :nothing, block_fn, _key_err_fn, _on_ok) do + block_fn.() + end + + defp handle_cacheable({:error, reason}, :raise, _block_fn, _key_err_fn, _on_ok) do + raise reason + end - **NOTE:** Workaround to avoid dialyzer warnings when using declarative - annotation-based caching via decorators. + @doc """ + Convenience function for wrapping and/or encapsulating + the **cache_evict** decorator logic. + + **NOTE:** Internal purposes only. 
""" - @spec eval_match(term, match_fun, module, term, Keyword.t()) :: boolean - def eval_match(result, match, cache, key, opts) + @doc group: "Internal API" + @spec eval_cache_evict(any(), any(), boolean(), boolean(), on_error(), fun()) :: any() + def eval_cache_evict(cache, key, before_invocation?, all_entries?, on_error, block_fun) do + context = get_decorator_context() + cache = eval_cache(cache, context) + key = eval_key(cache, key, context) + + do_eval_cache_evict(cache, key, before_invocation?, all_entries?, on_error, block_fun) + end - def eval_match(result, match, cache, keyref(cache: nil, key: key), opts) do - eval_match(result, match, cache, key, opts) + defp do_eval_cache_evict(cache, key, true, all_entries?, on_error, block_fun) do + _ = do_evict(all_entries?, cache, key, on_error) + + block_fun.() + end + + defp do_eval_cache_evict(cache, key, false, all_entries?, on_error, block_fun) do + result = block_fun.() + + _ = do_evict(all_entries?, cache, key, on_error) + + result + end + + defp do_evict(true, cache, _key, on_error) do + run_cmd(cache, :delete_all, [], on_error) + end + + defp do_evict(false, cache, {:"$keys", keys}, on_error) do + run_cmd(cache, :delete_all, [[in: keys]], on_error) + end + + defp do_evict(false, cache, key, on_error) do + run_cmd(cache, :delete, [key, []], on_error) + end + + @doc """ + Convenience function for wrapping and/or encapsulating + the **cache_put** decorator logic. + + **NOTE:** Internal purposes only. 
+ """ + @doc group: "Internal API" + @spec eval_cache_put(any(), any(), any(), keyword(), on_error(), match()) :: any() + def eval_cache_put(cache, key, value, opts, on_error, match) do + context = get_decorator_context() + cache = eval_cache(cache, context) + key = eval_key(cache, key, context) + + do_eval_cache_put(cache, key, value, opts, on_error, match) end - def eval_match(result, match, _cache, keyref(cache: ref_cache, key: key), opts) do - eval_match(result, match, ref_cache, key, opts) + defp do_eval_cache_put( + cache, + keyref(cache: ref_cache, key: ref_key, ttl: ttl), + value, + opts, + on_error, + match + ) do + opts = if ttl, do: Keyword.put(opts, :ttl, ttl), else: opts + + eval_cache_put(ref_cache || cache, ref_key, value, opts, on_error, match) end - def eval_match(result, match, cache, key, opts) do - case match.(result) do - {true, value} -> - :ok = cache_put(cache, key, value, opts) + defp do_eval_cache_put(cache, key, value, opts, on_error, match) do + case eval_function(match, value) do + {true, cache_value} -> + _ = run_cmd(__MODULE__, :cache_put, [cache, key, cache_value, opts], on_error) true - {true, value, match_opts} -> - :ok = cache_put(cache, key, value, Keyword.merge(opts, match_opts)) + {true, cache_value, new_opts} -> + _ = + run_cmd( + __MODULE__, + :cache_put, + [cache, key, cache_value, Keyword.merge(opts, new_opts)], + on_error + ) true true -> - :ok = cache_put(cache, key, result, opts) + _ = run_cmd(__MODULE__, :cache_put, [cache, key, value, opts], on_error) true @@ -1184,40 +1529,99 @@ if Code.ensure_loaded?(Decorator.Define) do end @doc """ - Convenience function for cache_put annotation. + Convenience function for the `cache_put` decorator. - **NOTE:** For internal purposes only. + **NOTE:** Internal purposes only. 
""" - @spec cache_put(module, {:"$keys", term} | term, term, Keyword.t()) :: :ok + @doc group: "Internal API" + @spec cache_put(cache_value(), {:"$keys", any()} | any(), any(), keyword()) :: :ok def cache_put(cache, key, value, opts) def cache_put(cache, {:"$keys", keys}, value, opts) do - entries = for k <- keys, do: {k, value} - - cache.put_all(entries, opts) + do_apply(cache, :put_all, [Enum.map(keys, &{&1, value}), opts]) end def cache_put(cache, key, value, opts) do - cache.put(key, value, opts) + do_apply(cache, :put, [key, value, opts]) end @doc """ - Convenience function for ignoring cache errors when `:on_error` option - is set to `:nothing` + Convenience function for evaluating the `cache` argument. - **NOTE:** For internal purposes only. + **NOTE:** Internal purposes only. """ - @spec run_cmd(module, atom, [term], on_error_opt, term) :: term - def run_cmd(mod, fun, args, on_error, default \\ nil) + @doc group: "Internal API" + @spec eval_cache(any(), context()) :: cache_value() + def eval_cache(cache, ctx) - def run_cmd(mod, fun, args, :raise, _default) do - apply(mod, fun, args) + def eval_cache(cache, _ctx) when is_atom(cache), do: cache + def eval_cache(dynamic_cache() = cache, _ctx), do: cache + def eval_cache(cache, ctx) when is_function(cache, 1), do: cache.(ctx) + def eval_cache(cache, _ctx), do: raise_invalid_cache(cache) + + @doc """ + Convenience function for evaluating the `key` argument. + + **NOTE:** Internal purposes only. + """ + @doc group: "Internal API" + @spec eval_key(any(), any(), context()) :: any() + def eval_key(cache, key, ctx) + + def eval_key(_cache, key, ctx) when is_function(key, 1) do + key.(ctx) + end + + def eval_key(_cache, key, _ctx) do + key + end + + @doc """ + Convenience function for running a cache command. + + **NOTE:** Internal purposes only. 
+ """ + @spec run_cmd(module(), atom(), [any()], on_error()) :: any() + def run_cmd(cache, fun, args, on_error) + + def run_cmd(cache, fun, args, :nothing) do + do_apply(cache, fun, args) + end + + def run_cmd(cache, fun, args, :raise) do + with {:error, reason} <- do_apply(cache, fun, args) do + raise reason + end + end + + ## Private functions + + defp eval_function(fun, arg) when is_function(fun, 1) do + fun.(arg) + end + + defp eval_function(fun, arg) when is_function(fun, 2) do + fun.(arg, get_decorator_context()) end - def run_cmd(mod, fun, args, :nothing, default) do + defp eval_function(other, _arg) do + other + end + + defp do_apply(dynamic_cache(cache: cache, name: name), fun, args) do + apply(cache, fun, [name | args]) + end + + defp do_apply(mod, fun, args) do apply(mod, fun, args) - rescue - _e -> default + end + + @compile {:inline, raise_invalid_cache: 1} + @spec raise_invalid_cache(any()) :: no_return() + defp raise_invalid_cache(cache) do + raise ArgumentError, + "invalid value for :cache option: expected " <> + "t:Nebulex.Caching.Decorators.cache/0, got: #{inspect(cache)}" end end end diff --git a/lib/nebulex/caching/key_generator.ex b/lib/nebulex/caching/key_generator.ex index e2a7f556..8603d69d 100644 --- a/lib/nebulex/caching/key_generator.ex +++ b/lib/nebulex/caching/key_generator.ex @@ -1,52 +1,15 @@ defmodule Nebulex.Caching.KeyGenerator do @moduledoc """ - Cache key generator. Used for creating a key based on the given module, - function name and its arguments (the module and function name are used - as context). + Cache key generator. See the default implementation `Nebulex.Caching.SimpleKeyGenerator`. 
- - ## Caveats when using the key generator - - Since the callback `c:generate/3` is invoked passing the calling module where - the annotated function is defined, the name of the annotated function, and the - arguments given to that annotated function, there are some caveats to keep in - mind: - - * Only arguments explicitly assigned to a variable will be included when - calling the callback `c:generate/3`. - * Ignored or underscored arguments will be ignored. - * Pattern-matching expressions without a variable assignment will be - ignored. If there is a pattern-matching, it has to be explicitly - assigned to a variable so it can be included when calling the - callback `c:generate/3`. - - For example, suppose you have a module with an annotated function: - - defmodule MyApp.SomeModule do - use Nebulex.Caching - - alias MyApp.{Cache, CustomKeyGenerator} - - @decorate cacheable(cache: Cache, key_generator: CustomKeyGenerator) - def get_something(x, _ignored, _, {_, _}, [_, _], %{a: a}, %{} = y) do - # Function's logic - end - end - - The generator will be invoked like so: - - MyKeyGenerator.generate(MyApp.SomeModule, :get_something, [x, y]) - - Based on the caveats described above, only the arguments `x` and `y` are - included when calling the callback `c:generate/3`. """ @typedoc "Key generator type" - @type t :: module + @type t() :: module() @doc """ - Generates a key for the given `module`, `function_name`, and its `args`. + Receives the decorator `context` as an argument and returns the generated key. 
""" - @callback generate(module, function_name :: atom, args :: [term]) :: term + @callback generate(Nebulex.Caching.Decorators.Context.t()) :: any() end diff --git a/lib/nebulex/caching/options.ex b/lib/nebulex/caching/options.ex new file mode 100644 index 00000000..8358efd1 --- /dev/null +++ b/lib/nebulex/caching/options.ex @@ -0,0 +1,257 @@ +defmodule Nebulex.Caching.Options do + @moduledoc false + + # Options given to the __using__ macro + caching_opts = [ + cache: [ + type: :atom, + required: false, + doc: """ + Defines the cache all decorated functions in the module will use + by default. It can be overridden on each decorated function since + the `:cache` option is also available at the decorator level + (see ["Shared Options"](#module-shared-options)). + + See ["Default cache"](#module-default-cache) section + for more information. + """ + ], + on_error: [ + type: {:in, [:nothing, :raise]}, + type_doc: "`t:on_error/0`", + required: false, + default: :nothing, + doc: """ + Same as `:on_error` in the ["Shared Options"](#module-shared-options), + but applies to all decorated functions in a module as default. + """ + ], + default_key_generator: [ + type: + {:custom, Nebulex.Cache.Options, :__validate_behaviour__, + [Nebulex.Caching.KeyGenerator, "key-generator"]}, + type_doc: "`t:module/0`", + required: false, + default: Nebulex.Caching.SimpleKeyGenerator, + doc: """ + The default key-generator module the caching decorators will use. + """ + ] + ] + + # Shared decorator options + shared_opts = [ + cache: [ + type: :any, + type_doc: "`t:cache/0`", + required: true, + doc: """ + The cache to use (see `t:cache/0` for possible values). If configured, + it overrides the [default or global cache](#module-default-cache). + The decorator uses the given `cache`. If configured, it overrides the + [default or global cache](#module-default-cache). See `t:cache/0` for + possible values. 
+ + Raises an exception if the `:cache` option is not provided in the + decorator declaration and is not configured when defining the + caching usage via `use Nebulex.Caching` either. + + See ["Cache configuration"](#module-cache-configuration) section + for more information. + """ + ], + key: [ + type: :any, + type_doc: "`t:key/0`", + required: false, + doc: """ + The cache access key the decorator will use when running the decorated + function. The default key generator generates a default key when the + option is unavailable. + + See ["Key Generation"](#module-key-generation) section + for more information. + """ + ], + opts: [ + type: :keyword_list, + required: false, + default: [], + doc: """ + The options used by the decorator when invoking cache commands. + """ + ], + match: [ + type: {:or, [fun: 1, fun: 2]}, + type_doc: "`t:match/0`", + required: false, + doc: """ + Anonymous function to decide whether or not the result (provided as a + first argument) of evaluating the decorated function is cached. + Optionally, the match function can receive the decorator context as a + second argument. The match function can return: + + * `true` - The value returned by the decorated function invocation is + cached. (the default). + * `{true, value}` - `value` is cached. It is helpful to customize what + exactly must be cached. + * `{true, value, opts}` - The `value` is cached with the provided + options `opts`. It is helpful to customize what must be cached and the + runtime options for storing it. (e.g., `{true, value, [ttl: @ttl]}`). + * `false` - Cache nothing. + + The default match function looks like this: + + ```elixir + fn + {:error, _} -> false + :error -> false + _ -> true + end + ``` + + By default, if the evaluation of the decorated function returns any of the + following terms/values `:error` or `{:error, term}`, the default match + function returns `false` (cache nothing). Otherwise, `true` is returned + (the value is cached). 
Remember that the default match function may store + a `nil` value if the decorated function returns it. If you don't want to + cache `nil` values or, in general, desire a different behavior, you should + provide another match function to meet your requirements. + """ + ], + on_error: [ + type: {:in, [:nothing, :raise]}, + type_doc: "`t:on_error/0`", + required: false, + default: :nothing, + doc: """ + The decorators perform cache commands under the hood. With the option + `:on_error`, we can tell the decorator what to do in case of an error + or exception. The option supports the following values: + + * `:nothing` - ignores the error. + * `:raise` - raises if there is an error. + + If configured, it overrides the global or default value + (e.g., `use Nebulex.Caching, on_error: ...`). + """ + ] + ] + + # cacheable options + cacheable_opts = [ + references: [ + type: {:or, [{:fun, 1}, {:fun, 2}, nil, :any]}, + type_doc: "`t:references/0`", + required: false, + default: nil, + doc: """ + Indicates the key given by the option `:key` references another key + provided by the option `:references`. In other words, when present, + this option tells the `cacheable` decorator to store the decorated + function's block result under the referenced key given by the option + `:references` and the referenced key under the key provided by the + option `:key`. + + See the ["Referenced keys"](#cacheable/3-referenced-keys) section below + for more information. + """ + ] + ] + + # cache_put options + cache_put_opts = [ + keys: [ + type: {:list, :any}, + required: false, + doc: """ + The list of keys the decorator will use to cache the decorated function's + result; each key holds a copy of the result. When present, it overrides + the `:key` option. + """ + ] + ] + + # cache_evict options + cache_evict_opts = [ + keys: [ + type: {:list, :any}, + required: false, + doc: """ + The list of keys the decorator will remove after or before the decorated + function's execution. 
When present, it overrides the `:key` option. + """ + ], + all_entries: [ + type: :boolean, + required: false, + default: false, + doc: """ + Defines whether or not the decorator must remove all the entries inside + the cache. + """ + ], + before_invocation: [ + type: :boolean, + required: false, + default: false, + doc: """ + Defines whether or not the decorator should run before invoking the + decorated function. + """ + ] + ] + + # caching options schema + @caching_opts_schema NimbleOptions.new!(caching_opts) + + # shared options schema + @shared_opts_schema NimbleOptions.new!(shared_opts) + + # cacheable options schema + @cacheable_opts_schema NimbleOptions.new!(cacheable_opts) + + # cache_put options schema + @cache_put_opts_schema NimbleOptions.new!(cache_put_opts) + + # cache_evict options schema + @cache_evict_opts_schema NimbleOptions.new!(cache_evict_opts) + + ## Docs API + + # coveralls-ignore-start + + @spec caching_options_docs() :: binary() + def caching_options_docs do + NimbleOptions.docs(@caching_opts_schema) + end + + @spec shared_options_docs() :: binary() + def shared_options_docs do + NimbleOptions.docs(@shared_opts_schema) + end + + @spec cacheable_options_docs() :: binary() + def cacheable_options_docs do + NimbleOptions.docs(@cacheable_opts_schema) + end + + @spec cache_put_options_docs() :: binary() + def cache_put_options_docs do + NimbleOptions.docs(@cache_put_opts_schema) + end + + @spec cache_evict_options_docs() :: binary() + def cache_evict_options_docs do + NimbleOptions.docs(@cache_evict_opts_schema) + end + + # coveralls-ignore-stop + + ## Validation API + + @spec validate_caching_opts!(keyword()) :: keyword() + def validate_caching_opts!(opts) do + NimbleOptions.validate!(opts, @caching_opts_schema) + end +end diff --git a/lib/nebulex/caching/simple_key_generator.ex b/lib/nebulex/caching/simple_key_generator.ex index 3d4396bd..8ac75292 100644 --- a/lib/nebulex/caching/simple_key_generator.ex +++ 
b/lib/nebulex/caching/simple_key_generator.ex @@ -4,40 +4,38 @@ defmodule Nebulex.Caching.SimpleKeyGenerator do It implementats a simple algorithm: - * If no params are given, return `0`. - * If only one param is given, return that param as key. - * If more than one param is given, return a key computed from the hashes - of all parameters (`:erlang.phash2(args)`). + * If no arguments are given, return `0`. + * If only one argument is given, return that argument as key. + * If more than one argument is given, return a key computed + from the hash of all arguments (`:erlang.phash2(args)`). - > Based on the [default key generation in Spring Cache Abstraction](https://docs.spring.io/spring-framework/docs/3.2.x/spring-framework-reference/html/cache.html#cache-annotations-cacheable-default-key). - - This implementation aims to cover those simple/generic scenarios where the - key generated based on the arguments only, fulfill the needs. For example: + This approach works well for those cases where the decorated functions keep + the same arguments (same hash code). For example: defmodule MyApp.Users do - use Nebulex.Caching - - alias MayApp.Cache + use Nebulex.Caching, cache: MyApp.Cache - @decorate cacheable(cache: Cache) + @decorate cacheable() def get_user(id) do # logic for retrieving a user... end - @decorate cache_evict(cache: Cache) + @decorate cache_evict() def delete_user(id) do # logic for deleting a user... end end - The key generator will generate the same key for both, cacheable and - evict functions; since it is generated based on the arguments only. + The previous example works because the hash code of the arguments in both + decorated functions will be the same. 
""" @behaviour Nebulex.Caching.KeyGenerator @impl true - def generate(_mod, _fun, []), do: 0 - def generate(_mod, _fun, [arg]), do: arg - def generate(_mod, _fun, args), do: :erlang.phash2(args) + def generate(context) + + def generate(%{args: []}), do: 0 + def generate(%{args: [arg]}), do: arg + def generate(%{args: args}), do: :erlang.phash2(args) end diff --git a/lib/nebulex/entry.ex b/lib/nebulex/entry.ex deleted file mode 100644 index 8bc41b68..00000000 --- a/lib/nebulex/entry.ex +++ /dev/null @@ -1,111 +0,0 @@ -defmodule Nebulex.Entry do - @moduledoc """ - Defines a Cache Entry. - - This is the structure used by the caches for representing cache entries. - """ - - # Cache entry definition - defstruct key: nil, - value: nil, - touched: nil, - ttl: :infinity, - time_unit: :millisecond - - @typedoc """ - Defines a generic struct for a cache entry. - - The entry depends on the adapter completely, this struct/type aims to define - the common fields. - """ - @type t :: %__MODULE__{ - key: any, - value: any, - touched: integer, - ttl: timeout, - time_unit: System.time_unit() - } - - alias Nebulex.Time - - @doc """ - Encodes a cache entry. - - ## Example - - iex> "hello" - ...> |> Nebulex.Entry.encode() - ...> |> Nebulex.Entry.decode() - "hello" - - """ - @spec encode(term, [term]) :: binary - def encode(data, opts \\ []) do - data - |> :erlang.term_to_binary(opts) - |> Base.encode64() - end - - @doc """ - Decodes a previously encoded entry. - - ## Example - - iex> "hello" - ...> |> Nebulex.Entry.encode() - ...> |> Nebulex.Entry.decode() - "hello" - - """ - # sobelow_skip ["Misc.BinToTerm"] - @spec decode(binary, [term]) :: term - def decode(data, opts \\ []) when is_binary(data) do - data - |> Base.decode64!() - |> :erlang.binary_to_term(opts) - end - - @doc """ - Returns whether the given `entry` has expired or not. 
- - ## Example - - iex> Nebulex.Entry.expired?(%Nebulex.Entry{}) - false - - iex> Nebulex.Entry.expired?( - ...> %Nebulex.Entry{touched: Nebulex.Time.now() - 10, ttl: 1} - ...> ) - true - - """ - @spec expired?(t) :: boolean - def expired?(%__MODULE__{ttl: :infinity}), do: false - - def expired?(%__MODULE__{touched: touched, ttl: ttl, time_unit: unit}) do - Time.now(unit) - touched >= ttl - end - - @doc """ - Returns the remaining time-to-live. - - ## Example - - iex> Nebulex.Entry.ttl(%Nebulex.Entry{}) - :infinity - - iex> ttl = - ...> Nebulex.Entry.ttl( - ...> %Nebulex.Entry{touched: Nebulex.Time.now(), ttl: 100} - ...> ) - iex> ttl > 0 - true - - """ - @spec ttl(t) :: timeout - def ttl(%__MODULE__{ttl: :infinity}), do: :infinity - - def ttl(%__MODULE__{ttl: ttl, touched: touched, time_unit: unit}) do - ttl - (Time.now(unit) - touched) - end -end diff --git a/lib/nebulex/exceptions.ex b/lib/nebulex/exceptions.ex index 3e5f536e..d68f6093 100644 --- a/lib/nebulex/exceptions.ex +++ b/lib/nebulex/exceptions.ex @@ -1,116 +1,220 @@ -defmodule Nebulex.RegistryLookupError do +defmodule Nebulex.Error do @moduledoc """ - Raised at runtime when the cache was not started or it does not exist. + This exception represents command execution errors. For example, the cache + cannot perform a command because it has not started, it does not exist, or + the adapter failed to perform it for any reason. + + ## Exception fields + + See `t:t/0`. + + ## Error reasons + + The `:reason` field can assume the following values: + + * `:registry_lookup_error` - the cache cannot be retrieved from + the registry because it was not started or it does not exist. + + * `:timeout` - if there is a timeout when executing the cache command. + + * `:transaction_aborted` - if a transaction execution fails and aborts. + + * `t:Exception.t/0` - if the underlying adapter fails due to an exception. + + * `t:any/0` - the command fails with an adapter-specific error. 
+ """ - @type t :: %__MODULE__{message: binary, name: atom} + @typedoc "Error reason type" + @type reason() :: atom() | {atom(), any()} | Exception.t() - defexception [:message, :name] + @typedoc """ + The type for this exception struct. - @doc false - def exception(opts) do - name = Keyword.fetch!(opts, :name) + This exception has the following public fields: - msg = - "could not lookup Nebulex cache #{inspect(name)} because it was " <> - "not started or it does not exist" + * `:reason` - the error reason. It can be one of the Nebulex-specific + reasons described in the ["Error reasons"](#module-error-reasons) + section in the module documentation. - %__MODULE__{message: msg, name: name} - end -end + * `:module` - a custom error formatter module. When it is present, it + invokes `module.format_error(reason, opts)` to format the error reason. + The argument `opts` is a keyword with the options (or metadata) given + to the exception. See `format_error/2` for more information. -defmodule Nebulex.KeyAlreadyExistsError do - @moduledoc """ - Raised at runtime when a key already exists in cache. """ + @type t() :: %__MODULE__{reason: reason(), module: module(), opts: keyword()} - @type t :: %__MODULE__{key: term, cache: atom} + # Exception struct + defexception reason: nil, module: __MODULE__, opts: [] - defexception [:key, :cache] + ## Callbacks - @doc false - def message(%{key: key, cache: cache}) do - "key #{inspect(key)} already exists in cache #{inspect(cache)}" + @impl true + def exception(opts) do + {reason, opts} = Keyword.pop!(opts, :reason) + {module, opts} = Keyword.pop(opts, :module, __MODULE__) + + %__MODULE__{reason: reason, module: module, opts: opts} end -end -defmodule Nebulex.QueryError do - @moduledoc """ - Raised at runtime when the query is invalid. 
+ @impl true + def message(%__MODULE__{reason: reason, module: module, opts: opts}) do + module.format_error(reason, opts) + end + + ## Helpers + + @doc """ + A callback invoked when a custom formatter module is provided. + + ## Arguments + + * `reason` - the error reason. + * `opts` - a keyword with the options (or metadata) given to the exception. + + For example, if an adapter returns: + + wrap_error Nebulex.Error, + reason: :my_reason, + module: MyAdapter.Formatter, + foo: :bar + + the exception invokes: + + MyAdapter.Formatter.format_error(:my_reason, foo: :bar) + """ + @spec format_error(any(), keyword()) :: binary() + def format_error(reason, opts) - @type t :: %__MODULE__{message: binary} + def format_error(:registry_lookup_error, opts) do + cache = Keyword.get(opts, :cache) - defexception [:message] + "could not lookup Nebulex cache #{inspect(cache)} because it was " <> + "not started or it does not exist" + end - @doc false - def exception(opts) do - message = Keyword.fetch!(opts, :message) - query = Keyword.fetch!(opts, :query) + def format_error(:timeout, _opts) do + "command execution timed out" + end - message = """ - #{message} in query: + def format_error(:transaction_aborted, _opts) do + "transaction aborted" + end - #{inspect(query, pretty: true)} + def format_error(exception, _opts) when is_exception(exception) do """ + the following exception occurred when executing a command. + + #{Exception.format(:error, exception, []) |> String.replace("\n", "\n ")} - %__MODULE__{message: message} + """ + end + + def format_error(reason, _opts) do + "command failed with reason: #{inspect(reason)}" end end -defmodule Nebulex.RPCMultiCallError do +defmodule Nebulex.KeyError do @moduledoc """ - Raised at runtime when a RPC multi_call error occurs. + Raised at runtime when a key does not exist in the cache. + + This exception denotes the cache executed a command, but there was an issue + with the requested key; for example, it doesn't exist. 
+ + ## Exception fields + + See `t:t/0`. + + ## Error reasons + + The `:reason` field can assume a few Nebulex-specific values: + + * `:not_found` - the key doesn't exist in the cache. + + * `:expired` - The key doesn't exist in the cache because it is expired. + """ - @type t :: %__MODULE__{message: binary} + @typedoc """ + The type for this exception struct. - defexception [:message] + This exception has the following public fields: - @doc false - def exception(opts) do - action = Keyword.fetch!(opts, :action) - errors = Keyword.fetch!(opts, :errors) - responses = Keyword.fetch!(opts, :responses) + * `:key` - the requested key. - message = """ - RPC error while executing action #{inspect(action)} + * `:reason` - the error reason. The two possible reasons are `:not_found` + or `:expired`. Defaults to `:not_found`. - Successful responses: + """ + @type t() :: %__MODULE__{key: any(), reason: atom()} - #{inspect(responses, pretty: true)} + # Exception struct + defexception key: nil, reason: :not_found - Remote errors: + ## Callbacks - #{inspect(errors, pretty: true)} - """ + @impl true + def exception(opts) do + key = Keyword.fetch!(opts, :key) + reason = Keyword.get(opts, :reason, :not_found) + + %__MODULE__{key: key, reason: reason} + end - %__MODULE__{message: message} + @impl true + def message(%__MODULE__{key: key, reason: reason}) do + format_reason(reason, key) + end + + ## Helpers + + defp format_reason(:not_found, key) do + "key #{inspect(key)} not found" + end + + defp format_reason(:expired, key) do + "key #{inspect(key)} has expired" end end -defmodule Nebulex.RPCError do +defmodule Nebulex.QueryError do @moduledoc """ - Raised at runtime when a RPC error occurs. + Raised at runtime when the query is invalid. """ - @type t :: %__MODULE__{reason: atom, node: node} + @typedoc """ + The type for this exception struct. 
- defexception [:reason, :node] + This exception has the following public fields: - @doc false - def message(%__MODULE__{reason: reason, node: node}) do - format_reason(reason, node) - end + * `:message` - the error message. - # :erpc.call/5 doesn't format error messages. - defp format_reason({:erpc, _} = reason, node) do - """ - The RPC operation failed on node #{inspect(node)} with reason: + * `:query` - the query value. - #{inspect(reason)} + """ + @type t() :: %__MODULE__{message: binary(), query: any()} - See :erpc.call/5 for more information about the error reasons. - """ + # Exception struct + defexception query: nil, message: nil + + ## Callbacks + + @impl true + def exception(opts) do + query = Keyword.fetch!(opts, :query) + + message = + Keyword.get_lazy(opts, :message, fn -> + """ + invalid query: + + #{inspect(query)} + """ + end) + + %__MODULE__{query: query, message: message} end end diff --git a/lib/nebulex/helpers.ex b/lib/nebulex/helpers.ex deleted file mode 100644 index 4e4b51c5..00000000 --- a/lib/nebulex/helpers.ex +++ /dev/null @@ -1,60 +0,0 @@ -defmodule Nebulex.Helpers do - # Module for general purpose helpers. 
- @moduledoc false - - ## API - - @spec get_option(Keyword.t(), atom, binary, (any -> boolean), term) :: term - def get_option(opts, key, expected, valid?, default \\ nil) - when is_list(opts) and is_atom(key) do - value = Keyword.get(opts, key, default) - - if valid?.(value) do - value - else - raise ArgumentError, "expected #{key}: to be #{expected}, got: #{inspect(value)}" - end - end - - @spec get_boolean_option(Keyword.t(), atom, boolean) :: term - def get_boolean_option(opts, key, default \\ false) - when is_list(opts) and is_atom(key) and is_boolean(default) do - value = Keyword.get(opts, key, default) - - if is_boolean(value) do - value - else - raise ArgumentError, "expected #{key}: to be boolean, got: #{inspect(value)}" - end - end - - @spec assert_behaviour(module, module, binary) :: module - def assert_behaviour(module, behaviour, msg \\ "module") do - if behaviour in module_behaviours(module, msg) do - module - else - raise ArgumentError, - "expected #{inspect(module)} to implement the behaviour #{inspect(behaviour)}" - end - end - - @spec module_behaviours(module, binary) :: [module] - def module_behaviours(module, msg) do - if Code.ensure_compiled(module) != {:module, module} do - raise ArgumentError, - "#{msg} #{inspect(module)} was not compiled, " <> - "ensure it is correct and it is included as a project dependency" - end - - for {:behaviour, behaviours} <- module.__info__(:attributes), - behaviour <- behaviours, - do: behaviour - end - - @spec normalize_module_name([atom | binary | number]) :: module - def normalize_module_name(list) when is_list(list) do - list - |> Enum.map(&Macro.camelize("#{&1}")) - |> Module.concat() - end -end diff --git a/lib/nebulex/hook.ex b/lib/nebulex/hook.ex deleted file mode 100644 index 4601102b..00000000 --- a/lib/nebulex/hook.ex +++ /dev/null @@ -1,269 +0,0 @@ -if Code.ensure_loaded?(Decorator.Define) do - defmodule Nebulex.Hook do - @moduledoc """ - Pre/Post Hooks - - Since `v2.0.0`, pre/post hooks are not 
supported and/or handled by `Nebulex` - itself. Hooks feature is not a common use-case and also it is something that - can be be easily implemented on top of the Cache at the application level. - - Nevertheless, to keep backward compatibility somehow, `Nebulex` provides the - next decorators for implementing pre/post hooks very easily. - - ## `before` decorator - - The `before` decorator is declared for performing a hook action or callback - before the annotated function is executed. - - @decorate before(fn %Nebulex.Hook{} = hook -> inspect(hook) end) - def some_fun(var) do - # logic ... - end - - ## `after_return` decorator - - The `after_return` decorator is declared for performing a hook action or - callback after the annotated function is executed and its return is passed - through the `return:` attribute. - - @decorate after_return(&inspect(&1.return)) - def some_fun(var) do - # logic ... - end - - ## `around` decorator - - The final kind of hook is `around` decorator. The `around` decorator runs - "around" the annotated function execution. It has the opportunity to do - work both **before** and **after** the function executes. This means the - given hook function is invoked twice, before and after the code-block is - evaluated. - - @decorate around(&inspect(&1.step)) - def some_fun(var) do - # logic ... - end - - ## Putting all together - - Suppose we want to track all cache calls (before and after they are called) - by logging them (including the execution time). In this case, we need to - provide a pre/post hook to log these calls. 
- - First of all, we have to create a module implementing the hook function: - - defmodule MyApp.Tracker do - use GenServer - - alias Nebulex.Hook - - require Logger - - @actions [:get, :put] - - ## API - - def start_link(opts \\\\ []) do - GenServer.start_link(__MODULE__, opts, name: __MODULE__) - end - - def track(%Hook{step: :before, name: name}) when name in @actions do - System.system_time(:microsecond) - end - - def track(%Hook{step: :after_return, name: name} = event) when name in @actions do - GenServer.cast(__MODULE__, {:track, event}) - end - - def track(hook), do: hook - - ## GenServer Callbacks - - @impl true - def init(_opts) do - {:ok, %{}} - end - - @impl true - def handle_cast({:track, %Hook{acc: start} = hook}, state) do - diff = System.system_time(:microsecond) - start - Logger.info("#=> #\{hook.module}.#\{hook.name}/#\{hook.arity}, Duration: #\{diff}") - {:noreply, state} - end - end - - And then, in the Cache: - - defmodule MyApp.Cache do - use Nebulex.Hook - @decorate_all around(&MyApp.Tracker.track/1) - - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Local - end - - Try it out: - - iex> MyApp.Cache.put 1, 1 - 10:19:47.736 [info] Elixir.MyApp.Cache.put/3, Duration: 27 - iex> MyApp.Cache.get 1 - 10:20:14.941 [info] Elixir.MyApp.Cache.get/2, Duration: 11 - - """ - - use Decorator.Define, before: 1, after_return: 1, around: 1 - - @enforce_keys [:step, :module, :name, :arity] - defstruct [:step, :module, :name, :arity, :return, :acc] - - @type t :: %__MODULE__{ - step: :before | :after_return, - module: Nebulex.Cache.t(), - name: atom, - arity: non_neg_integer, - return: term, - acc: term - } - - @type hook_fun :: (t -> term) - - alias Nebulex.Hook - - @doc """ - Before decorator. - - Intercepts any call to the annotated function and calls the given `fun` - before the logic is executed. - - ## Example - - defmodule MyApp.Example do - use Nebulex.Hook - - @decorate before(&inspect(&1)) - def some_fun(var) do - # logic ... 
- end - end - - """ - @spec before(hook_fun, term, map) :: term - def before(fun, block, context) do - with_hook([:before], fun, block, context) - end - - @doc """ - After-return decorator. - - Intercepts any call to the annotated function and calls the given `fun` - after the logic is executed, and the returned result is passed through - the `return:` attribute. - - ## Example - - defmodule MyApp.Example do - use Nebulex.Hook - - @decorate after_return(&inspect(&1)) - def some_fun(var) do - # logic ... - end - end - - """ - @spec after_return(hook_fun, term, map) :: term - def after_return(fun, block, context) do - with_hook([:after_return], fun, block, context) - end - - @doc """ - Around decorator. - - Intercepts any call to the annotated function and calls the given `fun` - before and after the logic is executed. The result of the first call to - the hook function is passed through the `acc:` attribute, so it can be - used in the next call (after return). Finally, as the `after_return` - decorator, the returned code-block evaluation is passed through the - `return:` attribute. - - ## Example - - defmodule MyApp.Profiling do - alias Nebulex.Hook - - def prof(%Hook{step: :before}) do - System.system_time(:microsecond) - end - - def prof(%Hook{step: :after_return, acc: start} = hook) do - :telemetry.execute( - [:my_app, :profiling], - %{duration: System.system_time(:microsecond) - start}, - %{module: hook.module, name: hook.name} - ) - end - end - - defmodule MyApp.Example do - use Nebulex.Hook - - @decorate around(&MyApp.Profiling.prof/1) - def some_fun(var) do - # logic ... 
- end - end - - """ - @spec around(hook_fun, term, map) :: term - def around(fun, block, context) do - with_hook([:before, :after_return], fun, block, context) - end - - defp with_hook(hooks, fun, block, context) do - quote do - hooks = unquote(hooks) - fun = unquote(fun) - - hook = %Nebulex.Hook{ - step: :before, - module: unquote(context.module), - name: unquote(context.name), - arity: unquote(context.arity) - } - - # eval before - acc = - if :before in hooks do - Hook.eval_hook(:before, fun, hook) - end - - # eval code-block - return = unquote(block) - - # eval after_return - if :after_return in hooks do - Hook.eval_hook( - :after_return, - fun, - %{hook | step: :after_return, return: return, acc: acc} - ) - end - - return - end - end - - @doc """ - This function is for internal purposes. - """ - @spec eval_hook(:before | :after_return, hook_fun, t) :: term - def eval_hook(step, fun, hook) do - fun.(hook) - rescue - e -> - msg = "hook execution failed on step #{inspect(step)} with error #{inspect(e)}" - reraise RuntimeError, msg, __STACKTRACE__ - end - end -end diff --git a/lib/nebulex/rpc.ex b/lib/nebulex/rpc.ex deleted file mode 100644 index a6b5ac88..00000000 --- a/lib/nebulex/rpc.ex +++ /dev/null @@ -1,254 +0,0 @@ -defmodule Nebulex.RPC do - @moduledoc """ - RPC utilities for distributed task execution. - - This module uses supervised tasks underneath `Task.Supervisor`. - - > **NOTE:** The approach by using distributed tasks will be deprecated - in the future in favor of `:erpc`. 
- """ - - @typedoc "Task supervisor" - @type task_sup :: Supervisor.supervisor() - - @typedoc "Task callback" - @type callback :: {module, atom, [term]} - - @typedoc "Group entry: node -> callback" - @type node_callback :: {node, callback} - - @typedoc "Node group" - @type node_group :: %{optional(node) => callback} | [node_callback] - - @typedoc "Reducer function spec" - @type reducer_fun :: ({:ok, term} | {:error, term}, node_callback | node, term -> term) - - @typedoc "Reducer spec" - @type reducer :: {acc :: term, reducer_fun} - - ## API - - @doc """ - Evaluates `apply(mod, fun, args)` on node `node` and returns the corresponding - evaluation result, or `{:badrpc, reason}` if the call fails. - - A timeout, in milliseconds or `:infinity`, can be given with a default value - of `5000`. It uses `Task.await/2` internally. - - ## Example - - iex> Nebulex.RPC.call(:my_task_sup, :node1, Kernel, :to_string, [1]) - "1" - - """ - @spec call(task_sup, node, module, atom, [term], timeout) :: term | {:badrpc, term} - def call(supervisor, node, mod, fun, args, timeout \\ 5000) do - rpc_call(supervisor, node, mod, fun, args, timeout) - end - - @doc """ - In contrast to a regular single-node RPC, a multicall is an RPC that is sent - concurrently from one client to multiple servers. The function evaluates - `apply(mod, fun, args)` on each `node_group` entry and collects the answers. - Then, evaluates the `reducer` function (set in the `opts`) on each answer. - - This function is similar to `:rpc.multicall/5`. - - ## Options - - * `:timeout` - A timeout, in milliseconds or `:infinity`, can be given with - a default value of `5000`. It uses `Task.yield_many/2` internally. - - * `:reducer` - Reducer function to be executed on each collected result. - (check out `reducer` type). 
- - ## Example - - iex> Nebulex.RPC.multi_call( - ...> :my_task_sup, - ...> %{ - ...> node1: {Kernel, :to_string, [1]}, - ...> node2: {Kernel, :to_string, [2]} - ...> }, - ...> timeout: 10_000, - ...> reducer: { - ...> [], - ...> fn - ...> {:ok, res}, _node_callback, acc -> - ...> [res | acc] - ...> - ...> {:error, _}, _node_callback, acc -> - ...> acc - ...> end - ...> } - ...> ) - ["1", "2"] - - """ - @spec multi_call(task_sup, node_group, Keyword.t()) :: term - def multi_call(supervisor, node_group, opts \\ []) do - rpc_multi_call(supervisor, node_group, opts) - end - - @doc """ - Similar to `multi_call/3` but the same `node_callback` (given by `module`, - `fun`, `args`) is executed on all `nodes`; Internally it creates a - `node_group` with the same `node_callback` for each node. - - ## Options - - Same options as `multi_call/3`. - - ## Example - - iex> Nebulex.RPC.multi_call( - ...> :my_task_sup, - ...> [:node1, :node2], - ...> Kernel, - ...> :to_string, - ...> [1], - ...> timeout: 5000, - ...> reducer: { - ...> [], - ...> fn - ...> {:ok, res}, _node_callback, acc -> - ...> [res | acc] - ...> - ...> {:error, _}, _node_callback, acc -> - ...> acc - ...> end - ...> } - ...> ) - ["1", "1"] - - """ - @spec multi_call(task_sup, [node], module, atom, [term], Keyword.t()) :: term - def multi_call(supervisor, nodes, mod, fun, args, opts \\ []) do - rpc_multi_call(supervisor, nodes, mod, fun, args, opts) - end - - ## Helpers - - if Code.ensure_loaded?(:erpc) do - defp rpc_call(_supervisor, node, mod, fun, args, _timeout) when node == node() do - apply(mod, fun, args) - end - - defp rpc_call(_supervisor, node, mod, fun, args, timeout) do - :erpc.call(node, mod, fun, args, timeout) - rescue - e in ErlangError -> - case e.original do - {:exception, original, _} when is_struct(original) -> - reraise original, __STACKTRACE__ - - {:exception, original, _} -> - :erlang.raise(:error, original, __STACKTRACE__) - - other -> - reraise %Nebulex.RPCError{reason: other, node: node}, 
__STACKTRACE__ - end - end - - def rpc_multi_call(_supervisor, node_group, opts) do - {reducer_acc, reducer_fun} = opts[:reducer] || default_reducer() - timeout = opts[:timeout] || 5000 - - node_group - |> Enum.map(fn {node, {mod, fun, args}} = group -> - {:erpc.send_request(node, mod, fun, args), group} - end) - |> Enum.reduce(reducer_acc, fn {req_id, group}, acc -> - try do - res = :erpc.receive_response(req_id, timeout) - reducer_fun.({:ok, res}, group, acc) - rescue - exception -> - reducer_fun.({:error, exception}, group, acc) - catch - :exit, reason -> - reducer_fun.({:error, {:exit, reason}}, group, acc) - end - end) - end - - def rpc_multi_call(_supervisor, nodes, mod, fun, args, opts) do - {reducer_acc, reducer_fun} = opts[:reducer] || default_reducer() - - nodes - |> :erpc.multicall(mod, fun, args, opts[:timeout] || 5000) - |> :lists.zip(nodes) - |> Enum.reduce(reducer_acc, fn {res, node}, acc -> - reducer_fun.(res, node, acc) - end) - end - else - # TODO: This approach by using distributed tasks will be deprecated in the - # future in favor of `:erpc` which is proven to improve performance - # almost by 3x. 
- - defp rpc_call(_supervisor, node, mod, fun, args, _timeout) when node == node() do - apply(mod, fun, args) - rescue - # FIXME: this is because coveralls does not check this as covered - # coveralls-ignore-start - exception -> - {:badrpc, exception} - # coveralls-ignore-stop - end - - defp rpc_call(supervisor, node, mod, fun, args, timeout) do - {supervisor, node} - |> Task.Supervisor.async_nolink( - __MODULE__, - :call, - [supervisor, node, mod, fun, args, timeout] - ) - |> Task.await(timeout) - end - - defp rpc_multi_call(supervisor, node_group, opts) do - node_group - |> Enum.map(fn {node, {mod, fun, args}} -> - Task.Supervisor.async_nolink({supervisor, node}, mod, fun, args) - end) - |> handle_multi_call(node_group, opts) - end - - defp rpc_multi_call(supervisor, nodes, mod, fun, args, opts) do - rpc_multi_call(supervisor, Enum.map(nodes, &{&1, {mod, fun, args}}), opts) - end - - defp handle_multi_call(tasks, node_group, opts) do - {reducer_acc, reducer_fun} = Keyword.get(opts, :reducer, default_reducer()) - - tasks - |> Task.yield_many(opts[:timeout] || 5000) - |> :lists.zip(node_group) - |> Enum.reduce(reducer_acc, fn - {{_task, {:ok, res}}, group}, acc -> - reducer_fun.({:ok, res}, group, acc) - - {{_task, {:exit, reason}}, group}, acc -> - reducer_fun.({:error, {:exit, reason}}, group, acc) - - {{task, nil}, group}, acc -> - _ = Task.shutdown(task, :brutal_kill) - reducer_fun.({:error, :timeout}, group, acc) - end) - end - end - - defp default_reducer do - { - {[], []}, - fn - {:ok, res}, _node_callback, {ok, err} -> - {[res | ok], err} - - {kind, _} = error, node_callback, {ok, err} when kind in [:error, :exit, :throw] -> - {ok, [{error, node_callback} | err]} - end - } - end -end diff --git a/lib/nebulex/stats.ex b/lib/nebulex/stats.ex deleted file mode 100644 index 957af598..00000000 --- a/lib/nebulex/stats.ex +++ /dev/null @@ -1,46 +0,0 @@ -defmodule Nebulex.Stats do - @moduledoc """ - Stats data type. 
- - Stats struct defines two main keys: - - * `:measurements` - A map with the measurements provided by the underlying - adapter. - * `:metadata` - A map for including additional information; also provided - by the underlying adapter. - - ## Measurements - - The following measurements are expected to be present and fed by the - underlying adapter: - - * `:evictions` - When a cache entry is removed. - * `:expirations` - When a cache entry is expired. - * `:hits` - When a key is looked up in cache and found. - * `:misses` - When a key is looked up in cache but not found. - * `:updates` - When an existing cache entry is or updated. - * `:writes` - When a cache entry is inserted or overwritten. - - ## Metadata - - Despite the adapters can include any additional or custom metadata, It is - recommended they include the following keys: - - * `:cache` - The cache module, or the name (if an explicit name has been - given to the cache). - - **IMPORTANT:** Since the adapter may include any additional or custom - measurements, as well as metadata, it is recommended to check out the - adapter's documentation. - """ - - # Stats data type - defstruct measurements: %{}, - metadata: %{} - - @typedoc "Nebulex.Stats data type" - @type t :: %__MODULE__{ - measurements: %{optional(atom) => term}, - metadata: %{optional(atom) => term} - } -end diff --git a/lib/nebulex/telemetry.ex b/lib/nebulex/telemetry.ex index 3a48fe8f..2e93cce3 100644 --- a/lib/nebulex/telemetry.ex +++ b/lib/nebulex/telemetry.ex @@ -1,18 +1,21 @@ defmodule Nebulex.Telemetry do - @moduledoc """ - Telemetry wrapper. 
- """ + # Telemetry wrapper + @moduledoc false # Inline common instructions - @compile {:inline, execute: 3, span: 3, attach_many: 4, detach: 1} + @compile {:inline, execute: 3, span: 3, attach_many: 4, detach: 1, default_event_prefix: 0} if Code.ensure_loaded?(:telemetry) do + @doc false defdelegate execute(event, measurements, metadata), to: :telemetry + @doc false defdelegate span(event_prefix, start_meta, span_fn), to: :telemetry + @doc false defdelegate attach_many(handler_id, events, fun, config), to: :telemetry + @doc false defdelegate detach(handler_id), to: :telemetry else @doc false @@ -27,4 +30,7 @@ defmodule Nebulex.Telemetry do @doc false def detach(_handler_id), do: :ok end + + @doc false + def default_event_prefix, do: [:nebulex, :cache] end diff --git a/lib/nebulex/telemetry/stats_handler.ex b/lib/nebulex/telemetry/stats_handler.ex deleted file mode 100644 index 141c560c..00000000 --- a/lib/nebulex/telemetry/stats_handler.ex +++ /dev/null @@ -1,109 +0,0 @@ -defmodule Nebulex.Telemetry.StatsHandler do - @moduledoc """ - Telemetry handler for aggregating cache stats; it relies on the default stats - implementation based on Erlang counters. See `Nebulex.Adapter.Stats`. - - This handler is used by the built-in local adapter when the option `:stats` - is set to `true`. 
- """ - - alias Nebulex.Adapter.Stats - - ## Handler - - @doc false - def handle_event(_event, _measurements, %{adapter_meta: %{stats_counter: ref}} = metadata, ref) do - update_stats(metadata) - end - - # coveralls-ignore-start - - def handle_event(_event, _measurements, _metadata, _ref) do - :ok - end - - # coveralls-ignore-stop - - defp update_stats(%{ - function_name: action, - result: :"$expired", - adapter_meta: %{stats_counter: ref} - }) - when action in [:get, :take, :ttl] do - :ok = Stats.incr(ref, :misses) - :ok = Stats.incr(ref, :evictions) - :ok = Stats.incr(ref, :expirations) - end - - defp update_stats(%{function_name: action, result: nil, adapter_meta: %{stats_counter: ref}}) - when action in [:get, :take, :ttl] do - :ok = Stats.incr(ref, :misses) - end - - defp update_stats(%{function_name: action, result: _, adapter_meta: %{stats_counter: ref}}) - when action in [:get, :ttl] do - :ok = Stats.incr(ref, :hits) - end - - defp update_stats(%{function_name: :take, result: _, adapter_meta: %{stats_counter: ref}}) do - :ok = Stats.incr(ref, :hits) - :ok = Stats.incr(ref, :evictions) - end - - defp update_stats(%{ - function_name: :put, - args: [_, _, _, :replace, _], - result: true, - adapter_meta: %{stats_counter: ref} - }) do - :ok = Stats.incr(ref, :updates) - end - - defp update_stats(%{function_name: :put, result: true, adapter_meta: %{stats_counter: ref}}) do - :ok = Stats.incr(ref, :writes) - end - - defp update_stats(%{ - function_name: :put_all, - result: true, - args: [entries | _], - adapter_meta: %{stats_counter: ref} - }) do - :ok = Stats.incr(ref, :writes, Enum.count(entries)) - end - - defp update_stats(%{function_name: :delete, result: _, adapter_meta: %{stats_counter: ref}}) do - :ok = Stats.incr(ref, :evictions) - end - - defp update_stats(%{ - function_name: :execute, - args: [:delete_all | _], - result: result, - adapter_meta: %{stats_counter: ref} - }) do - :ok = Stats.incr(ref, :evictions, result) - end - - defp 
update_stats(%{function_name: action, result: true, adapter_meta: %{stats_counter: ref}}) - when action in [:expire, :touch] do - :ok = Stats.incr(ref, :updates) - end - - defp update_stats(%{ - function_name: :update_counter, - args: [_, amount, _, default, _], - result: result, - adapter_meta: %{stats_counter: ref} - }) do - offset = if amount >= 0, do: -1, else: 1 - - if result + amount * offset === default do - :ok = Stats.incr(ref, :writes) - else - :ok = Stats.incr(ref, :updates) - end - end - - defp update_stats(_), do: :ok -end diff --git a/lib/nebulex/time.ex b/lib/nebulex/time.ex index c1191992..e0e960e7 100644 --- a/lib/nebulex/time.ex +++ b/lib/nebulex/time.ex @@ -44,7 +44,7 @@ defmodule Nebulex.Time do false """ - @spec timeout?(term) :: boolean + @spec timeout?(any()) :: boolean() def timeout?(timeout) do (is_integer(timeout) and timeout >= 0) or timeout == :infinity end diff --git a/lib/nebulex/utils.ex b/lib/nebulex/utils.ex new file mode 100644 index 00000000..c3e3e85c --- /dev/null +++ b/lib/nebulex/utils.ex @@ -0,0 +1,212 @@ +defmodule Nebulex.Utils do + @moduledoc """ + General purpose utilities. + """ + + # Nebulex exceptions + @nbx_exception [ + Nebulex.Error, + Nebulex.KeyError, + Nebulex.QueryError + ] + + ## Guards + + @doc """ + Convenience guard to determine whether the given argument `e` is a Nebulex + exception or not. + + ## Example + + iex> import Nebulex.Utils + iex> is_nebulex_exception(%Nebulex.Error{reason: :error}) + true + iex> is_nebulex_exception(%{}) + false + + """ + defguard is_nebulex_exception(e) + when is_exception(e) and :erlang.map_get(:__struct__, e) in @nbx_exception + + ## Macros + + @doc """ + Convenience macro for unwrapping a function call result and deciding whether + to raise an exception or return the unwrapped value. 
+ + ## Example + + iex> import Nebulex.Utils + iex> unwrap_or_raise {:ok, "ok"} + "ok" + iex> unwrap_or_raise {:error, %Nebulex.Error{reason: :error}} + ** (Nebulex.Error) command failed with reason: :error + iex> unwrap_or_raise :other + :other + + """ + defmacro unwrap_or_raise(call) do + quote do + unquote(call) + |> unquote(__MODULE__).do_unwrap_or_raise() + end + end + + @doc """ + Convenience macro for wrapping the given `call` result into a tuple in the + shape of `{:ok, result}`. + + ## Example + + iex> import Nebulex.Utils + iex> wrap_ok "hello" + {:ok, "hello"} + + """ + defmacro wrap_ok(call) do + quote do + {:ok, unquote(call)} + end + end + + @doc """ + Convenience macro for wrapping the given `exception` into a tuple in the + shape of `{:error, exception}`. + + ## Example + + iex> import Nebulex.Utils + iex> wrap_error Nebulex.Error, reason: :error + {:error, %Nebulex.Error{reason: :error}} + + """ + defmacro wrap_error(exception, opts) do + quote do + {:error, unquote(exception).exception(unquote(opts))} + end + end + + ## Utility functions + + @doc """ + Helper function for unwrapping a function result and deciding whether + to raise an exception or return the unwrapped value. 
+ + ## Examples + + iex> Nebulex.Utils.do_unwrap_or_raise({:ok, "ok"}) + "ok" + + iex> Nebulex.Utils.do_unwrap_or_raise( + ...> {:error, %Nebulex.Error{reason: :error}} + ...> ) + ** (Nebulex.Error) command failed with reason: :error + + iex> Nebulex.Utils.do_unwrap_or_raise({:error, :error}) + ** (Nebulex.Error) command failed with reason: :error + + iex> Nebulex.Utils.do_unwrap_or_raise(:other) + :other + + """ + def do_unwrap_or_raise(result) + + def do_unwrap_or_raise({:ok, value}) do + value + end + + def do_unwrap_or_raise({:error, reason}) when is_nebulex_exception(reason) do + raise reason + end + + def do_unwrap_or_raise({:error, reason}) do + raise Nebulex.Error, reason: reason + end + + def do_unwrap_or_raise(other) do + other + end + + @doc """ + A wrapper for `Keyword.get/3` but validates the returned value invoking + the function `valid?`. + + Raises an `ArgumentError` in case the validation fails. + + ## Examples + + iex> Nebulex.Utils.get_option( + ...> [keys: [1, 2, 3]], + ...> :keys, + ...> "a list with at least one element", + ...> &((is_list(&1) and length(&1) > 0) or is_nil(&1)) + ...> ) + [1, 2, 3] + + iex> Nebulex.Utils.get_option( + ...> [], + ...> :keys, + ...> "a list with at least one element", + ...> &((is_list(&1) and length(&1) > 0) or is_nil(&1)) + ...> ) + nil + + iex> Nebulex.Utils.get_option( + ...> [keys: 123], + ...> :keys, + ...> "a list with at least one element", + ...> &((is_list(&1) and length(&1) > 0) or is_nil(&1)) + ...> ) + ** (ArgumentError) expected keys: to be a list with at least one element, got: 123 + + """ + @spec get_option(keyword(), atom(), binary(), (any() -> boolean()), any()) :: any() + def get_option(opts, key, expected, valid?, default \\ nil) + when is_list(opts) and is_atom(key) do + case Keyword.fetch(opts, key) do + {:ok, value} -> + if valid?.(value) do + value + else + raise ArgumentError, "expected #{key}: to be #{expected}, got: #{inspect(value)}" + end + + :error -> + default + end + end + + @doc 
""" + Returns the implemented behaviours for the given `module`. + """ + @spec module_behaviours(module()) :: [module()] + def module_behaviours(module) do + for {:behaviour, behaviours} <- module.__info__(:attributes), behaviour <- behaviours do + behaviour + end + end + + @doc """ + Concatenates a list of "camelized" aliases and returns a new alias. + + It handles binaries, atoms, and numbers. + + ## Examples + + iex> Nebulex.Utils.camelize_and_concat([Foo, :bar]) + Foo.Bar + + iex> Nebulex.Utils.camelize_and_concat([Foo, "bar"]) + Foo.Bar + + iex> Nebulex.Utils.camelize_and_concat([Foo, "Bar", 1]) + :"Elixir.Foo.Bar.1" + + """ + @spec camelize_and_concat([atom() | binary() | number()]) :: atom() + def camelize_and_concat(list) when is_list(list) do + list + |> Enum.map(&Macro.camelize("#{&1}")) + |> Module.concat() + end +end diff --git a/mix.exs b/mix.exs index 83e2f4d8..b2059da4 100644 --- a/mix.exs +++ b/mix.exs @@ -2,13 +2,13 @@ defmodule Nebulex.MixProject do use Mix.Project @source_url "https://github.com/cabol/nebulex" - @version "2.6.1" + @version "3.0.0-dev" def project do [ app: :nebulex, version: @version, - elixir: "~> 1.12", + elixir: "~> 1.11", elixirc_paths: elixirc_paths(Mix.env()), aliases: aliases(), deps: deps(), @@ -36,7 +36,7 @@ defmodule Nebulex.MixProject do ] end - defp elixirc_paths(:test), do: ["lib", "test/support", "test/dialyzer"] + defp elixirc_paths(:test), do: ["lib", "test/dialyzer"] defp elixirc_paths(_), do: ["lib"] def application do @@ -48,18 +48,20 @@ defmodule Nebulex.MixProject do defp deps do [ - {:shards, "~> 1.1", optional: true}, + # Required + {:nimble_options, "~> 0.5 or ~> 1.0"}, + + # Optional {:decorator, "~> 1.4", optional: true}, {:telemetry, "~> 0.4 or ~> 1.0", optional: true}, # Test & Code Analysis - {:ex2ms, "~> 1.6", only: :test}, - {:mock, "~> 0.3", only: :test}, {:excoveralls, "~> 0.18", only: :test}, {:credo, "~> 1.7", only: [:dev, :test], runtime: false}, {:dialyxir, "~> 1.4", only: [:dev, :test], 
runtime: false}, {:sobelow, "~> 0.13", only: [:dev, :test], runtime: false}, {:stream_data, "~> 0.6", only: [:dev, :test]}, + {:mimic, "~> 1.7", only: :test}, {:doctor, "~> 0.21", only: [:dev, :test]}, # Benchmark Test @@ -78,7 +80,7 @@ defmodule Nebulex.MixProject do "format --check-formatted", "credo --strict", "coveralls.html", - "sobelow --exit --skip", + "sobelow --skip --exit Low", "dialyzer --format short", "doctor" ] @@ -105,17 +107,65 @@ defmodule Nebulex.MixProject do source_url: @source_url, extras: [ "guides/getting-started.md", + "guides/migrating-to-v3.md", "guides/cache-usage-patterns.md", "guides/telemetry.md", - "guides/migrating-to-v2.md", - "guides/creating-new-adapter.md" + "guides/creating-new-adapter.md", + "guides/cache-info.md" + ], + groups_for_functions: [ + # Caching decorators + group_for_function("Decorator API"), + group_for_function("Decorator Helpers"), + group_for_function("Internal API"), + # Cache API + group_for_function("User callbacks"), + group_for_function("Runtime API"), + group_for_function("KV API"), + group_for_function("Query API"), + group_for_function("Persistence API"), + group_for_function("Transaction API"), + group_for_function("Info API") + ], + groups_for_modules: [ + # Nebulex, + # Nebulex.Cache, + + "Caching decorators": [ + Nebulex.Caching, + Nebulex.Caching.Decorators, + Nebulex.Caching.Decorators.Context, + Nebulex.Caching.KeyGenerator, + Nebulex.Caching.SimpleKeyGenerator + ], + "Adapter specification": [ + Nebulex.Adapter, + Nebulex.Adapter.KV, + Nebulex.Adapter.Queryable, + Nebulex.Adapter.Info, + Nebulex.Adapter.Transaction, + Nebulex.Adapter.Persistence + ], + "Built-in adapters": [ + Nebulex.Adapters.Nil + ], + "Built-in info implementation": [ + Nebulex.Adapters.Common.Info, + Nebulex.Adapters.Common.Info.Stats + ], + Utilities: [ + Nebulex.Time, + Nebulex.Utils + ] ] ] end + defp group_for_function(group), do: {String.to_atom(group), &(&1[:group] == group)} + defp dialyzer do [ - plt_add_apps: 
[:shards, :mix, :telemetry], + plt_add_apps: [:mix, :telemetry, :ex_unit], plt_file: {:no_warn, "priv/plts/" <> plt_file_name()}, flags: [ :unmatched_returns, diff --git a/mix.lock b/mix.lock index 0d6cce33..e2dac025 100644 --- a/mix.lock +++ b/mix.lock @@ -3,7 +3,7 @@ "benchee_html": {:hex, :benchee_html, "1.0.1", "1e247c0886c3fdb0d3f4b184b653a8d6fb96e4ad0d0389267fe4f36968772e24", [:mix], [{:benchee, ">= 0.99.0 and < 2.0.0", [hex: :benchee, repo: "hexpm", optional: false]}, {:benchee_json, "~> 1.0", [hex: :benchee_json, repo: "hexpm", optional: false]}], "hexpm", "b00a181af7152431901e08f3fc9f7197ed43ff50421a8347b0c80bf45d5b3fef"}, "benchee_json": {:hex, :benchee_json, "1.0.0", "cc661f4454d5995c08fe10dd1f2f72f229c8f0fb1c96f6b327a8c8fc96a91fe5", [:mix], [{:benchee, ">= 0.99.0 and < 2.0.0", [hex: :benchee, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "da05d813f9123505f870344d68fb7c86a4f0f9074df7d7b7e2bb011a63ec231c"}, "bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"}, - "credo": {:hex, :credo, "1.7.3", "05bb11eaf2f2b8db370ecaa6a6bda2ec49b2acd5e0418bc106b73b07128c0436", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "35ea675a094c934c22fb1dca3696f3c31f2728ae6ef5a53b5d648c11180a4535"}, + "credo": {:hex, :credo, "1.7.4", "68ca5cf89071511c12fd9919eb84e388d231121988f6932756596195ccf7fd35", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", 
"9cf776d062c78bbe0f0de1ecaee183f18f2c3ec591326107989b054b7dddefc2"}, "decimal": {:hex, :decimal, "2.1.1", "5611dca5d4b2c3dd497dec8f68751f1f1a54755e8ed2a966c2633cf885973ad6", [:mix], [], "hexpm", "53cfe5f497ed0e7771ae1a475575603d77425099ba5faef9394932b35020ffcc"}, "decorator": {:hex, :decorator, "1.4.0", "a57ac32c823ea7e4e67f5af56412d12b33274661bb7640ec7fc882f8d23ac419", [:mix], [], "hexpm", "0a07cedd9083da875c7418dea95b78361197cf2bf3211d743f6f7ce39656597f"}, "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, @@ -11,18 +11,16 @@ "doctor": {:hex, :doctor, "0.21.0", "20ef89355c67778e206225fe74913e96141c4d001cb04efdeba1a2a9704f1ab5", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}], "hexpm", "a227831daa79784eb24cdeedfa403c46a4cb7d0eab0e31232ec654314447e4e0"}, "earmark_parser": {:hex, :earmark_parser, "1.4.39", "424642f8335b05bb9eb611aa1564c148a8ee35c9c8a8bba6e129d51a3e3c6769", [:mix], [], "hexpm", "06553a88d1f1846da9ef066b87b57c6f605552cfbe40d20bd8d59cc6bde41944"}, "erlex": {:hex, :erlex, "0.2.6", "c7987d15e899c7a2f34f5420d2a2ea0d659682c06ac607572df55a43753aa12e", [:mix], [], "hexpm", "2ed2e25711feb44d52b17d2780eabf998452f6efda104877a3881c2f8c0c0c75"}, - "ex2ms": {:hex, :ex2ms, "1.6.1", "66d472eb14da43087c156e0396bac3cc7176b4f24590a251db53f84e9a0f5f72", [:mix], [], "hexpm", "a7192899d84af03823a8ec2f306fa858cbcce2c2e7fd0f1c49e05168fb9c740e"}, "ex_doc": {:hex, :ex_doc, "0.31.1", "8a2355ac42b1cc7b2379da9e40243f2670143721dd50748bf6c3b1184dae2089", [:mix], [{:earmark_parser, "~> 1.4.39", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.1", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1", [hex: :makeup_erlang, repo: "hexpm", optional: false]}], "hexpm", 
"3178c3a407c557d8343479e1ff117a96fd31bafe52a039079593fb0524ef61b0"}, "excoveralls": {:hex, :excoveralls, "0.18.0", "b92497e69465dc51bc37a6422226ee690ab437e4c06877e836f1c18daeb35da9", [:mix], [{:castore, "~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "1109bb911f3cb583401760be49c02cbbd16aed66ea9509fc5479335d284da60b"}, "file_system": {:hex, :file_system, "1.0.0", "b689cc7dcee665f774de94b5a832e578bd7963c8e637ef940cd44327db7de2cd", [:mix], [], "hexpm", "6752092d66aec5a10e662aefeed8ddb9531d79db0bc145bb8c40325ca1d8536d"}, "jason": {:hex, :jason, "1.4.1", "af1504e35f629ddcdd6addb3513c3853991f694921b1b9368b0bd32beb9f1b63", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "fbb01ecdfd565b56261302f7e1fcc27c4fb8f32d56eab74db621fc154604a7a1"}, "makeup": {:hex, :makeup, "1.1.1", "fa0bc768698053b2b3869fa8a62616501ff9d11a562f3ce39580d60860c3a55e", [:mix], [{:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "5dc62fbdd0de44de194898b6710692490be74baa02d9d108bc29f007783b0b48"}, "makeup_elixir": {:hex, :makeup_elixir, "0.16.1", "cc9e3ca312f1cfeccc572b37a09980287e243648108384b97ff2b76e505c3555", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "e127a341ad1b209bd80f7bd1620a15693a9908ed780c3b763bccf7d200c767c6"}, - "makeup_erlang": {:hex, :makeup_erlang, "0.1.3", "d684f4bac8690e70b06eb52dad65d26de2eefa44cd19d64a8095e1417df7c8fd", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "b78dc853d2e670ff6390b605d807263bf606da3c82be37f9d7f68635bd886fc9"}, - "meck": {:hex, :meck, "0.9.2", "85ccbab053f1db86c7ca240e9fc718170ee5bda03810a6292b5306bf31bae5f5", [:rebar3], [], "hexpm", 
"81344f561357dc40a8344afa53767c32669153355b626ea9fcbc8da6b3045826"}, - "mock": {:hex, :mock, "0.3.8", "7046a306b71db2488ef54395eeb74df0a7f335a7caca4a3d3875d1fc81c884dd", [:mix], [{:meck, "~> 0.9.2", [hex: :meck, repo: "hexpm", optional: false]}], "hexpm", "7fa82364c97617d79bb7d15571193fc0c4fe5afd0c932cef09426b3ee6fe2022"}, + "makeup_erlang": {:hex, :makeup_erlang, "0.1.4", "29563475afa9b8a2add1b7a9c8fb68d06ca7737648f28398e04461f008b69521", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "f4ed47ecda66de70dd817698a703f8816daa91272e7e45812469498614ae8b29"}, + "mimic": {:hex, :mimic, "1.7.4", "cd2772ffbc9edefe964bc668bfd4059487fa639a5b7f1cbdf4fd22946505aa4f", [:mix], [], "hexpm", "437c61041ecf8a7fae35763ce89859e4973bb0666e6ce76d75efc789204447c3"}, + "nimble_options": {:hex, :nimble_options, "1.1.0", "3b31a57ede9cb1502071fade751ab0c7b8dbe75a9a4c2b5bbb0943a690b63172", [:mix], [], "hexpm", "8bbbb3941af3ca9acc7835f5655ea062111c9c27bcac53e004460dfd19008a99"}, "nimble_parsec": {:hex, :nimble_parsec, "1.4.0", "51f9b613ea62cfa97b25ccc2c1b4216e81df970acd8e16e8d1bdc58fef21370d", [:mix], [], "hexpm", "9c565862810fb383e9838c1dd2d7d2c437b3d13b267414ba6af33e50d2d1cf28"}, - "shards": {:hex, :shards, "1.1.0", "ed3032e63ae99f0eaa6d012b8b9f9cead48b9a810b3f91aeac266cfc4118eff6", [:make, :rebar3], [], "hexpm", "1d188e565a54a458a7a601c2fd1e74f5cfeba755c5a534239266d28b7ff124c7"}, "sobelow": {:hex, :sobelow, "0.13.0", "218afe9075904793f5c64b8837cc356e493d88fddde126a463839351870b8d1e", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "cd6e9026b85fc35d7529da14f95e85a078d9dd1907a9097b3ba6ac7ebbe34a0d"}, "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"}, "stream_data": {:hex, :stream_data, "0.6.0", "e87a9a79d7ec23d10ff83eb025141ef4915eeb09d4491f79e52f2562b73e5f47", [:mix], 
[], "hexpm", "b92b5031b650ca480ced047578f1d57ea6dd563f5b57464ad274718c9c29501c"}, diff --git a/test/dialyzer/caching_decorators.ex b/test/dialyzer/caching_decorators.ex index 14ae56ef..0d14c2bd 100644 --- a/test/dialyzer/caching_decorators.ex +++ b/test/dialyzer/caching_decorators.ex @@ -4,29 +4,47 @@ defmodule Nebulex.Dialyzer.CachingDecorators do defmodule Account do @moduledoc false - defstruct [:id, :username, :password] - @type t :: %__MODULE__{} + defstruct [:id, :username, :password, :email] + + @type t() :: %__MODULE__{} end + @cache Cache @ttl :timer.seconds(3600) ## Annotated Functions - @spec get_account(integer) :: Account.t() - @decorate cacheable(cache: Cache, key: {Account, id}) + @spec get_account(integer()) :: Account.t() + @decorate cacheable(cache: @cache, key: {Account, id}) def get_account(id) do %Account{id: id} end - @spec get_account_by_username(binary) :: Account.t() - @decorate cacheable(cache: Cache, key: {Account, username}, opts: [ttl: @ttl]) + @spec get_account_by_username(binary()) :: Account.t() + @decorate cacheable( + cache: dynamic_cache(@cache, Cache), + key: {Account, username}, + references: & &1.id, + opts: [ttl: @ttl] + ) def get_account_by_username(username) do %Account{username: username} end + @spec get_account_by_email(Account.t()) :: Account.t() + @decorate cacheable( + cache: YetAnotherCache, + key: email, + references: &keyref(&1.id, cache: Cache), + opts: [ttl: @ttl] + ) + def get_account_by_email(%Account{email: email} = acct) do + %{acct | email: email} + end + @spec update_account(Account.t()) :: {:ok, Account.t()} @decorate cache_put( - cache: Cache, + cache: @cache, keys: [{Account, acct.id}, {Account, acct.username}], match: &match/1, opts: [ttl: @ttl] @@ -35,33 +53,33 @@ defmodule Nebulex.Dialyzer.CachingDecorators do {:ok, acct} end - @spec update_account_by_id(binary, %{optional(atom) => term}) :: {:ok, Account.t()} + @spec update_account_by_id(binary(), %{optional(atom()) => any()}) :: {:ok, Account.t()} 
@decorate cache_put(cache: Cache, key: {Account, id}, match: &match/1, opts: [ttl: @ttl]) def update_account_by_id(id, attrs) do {:ok, struct(Account, Map.put(attrs, :id, id))} end @spec delete_account(Account.t()) :: Account.t() - @decorate cache_evict(cache: Cache, keys: [{Account, acct.id}, {Account, acct.username}]) + @decorate cache_evict( + cache: &cache_fun/1, + keys: [{Account, acct.id}, {Account, acct.username}] + ) def delete_account(%Account{} = acct) do acct end - @spec delete_all_accounts(term) :: :ok - @decorate cache_evict(cache: Cache, all_entries: true) + @spec delete_all_accounts(any()) :: any() + @decorate cache_evict(cache: &cache_fun/1, all_entries: true) def delete_all_accounts(filter) do filter end - @spec get_user_key(integer) :: binary - @decorate cacheable( - cache: {__MODULE__, :dynamic_cache, [:dynamic]}, - key_generator: {__MODULE__, [id]} - ) + @spec get_user_key(binary()) :: binary() + @decorate cacheable(cache: &cache_fun/1, key: &generate_key/1) def get_user_key(id), do: id - @spec update_user_key(integer) :: binary - @decorate cacheable(cache: Cache, key_generator: {__MODULE__, :generate_key, [id]}) + @spec update_user_key(binary()) :: binary() + @decorate cacheable(cache: Cache, key: &generate_key({"custom", &1.args})) def update_user_key(id), do: id ## Helpers @@ -69,9 +87,11 @@ defmodule Nebulex.Dialyzer.CachingDecorators do defp match({:ok, _} = ok), do: {true, ok} defp match({:error, _}), do: false - def generate(mod, fun, args), do: :erlang.phash2({mod, fun, args}) + def generate_key(ctx), do: :erlang.phash2(ctx) - def generate_key(args), do: :erlang.phash2(args) + def cache_fun(ctx) do + _ = send(self(), ctx) - def dynamic_cache(_, _, _, _), do: Cache + Cache + end end diff --git a/test/mix/nebulex_test.exs b/test/mix/nebulex_test.exs index 505aa21c..b319798d 100644 --- a/test/mix/nebulex_test.exs +++ b/test/mix/nebulex_test.exs @@ -1,14 +1,15 @@ defmodule Mix.NebulexTest do use ExUnit.Case, async: true + use Mimic import 
Mix.Nebulex - import Mock test "fail because umbrella project" do - with_mock Mix.Project, umbrella?: fn -> true end do - assert_raise Mix.Error, ~r"Cannot run task", fn -> - no_umbrella!("nebulex.gen.cache") - end + Mix.Project + |> expect(:umbrella?, fn -> true end) + + assert_raise Mix.Error, ~r"Cannot run task", fn -> + no_umbrella!("nebulex.gen.cache") end end end diff --git a/test/nebulex/adapter_test.exs b/test/nebulex/adapter_test.exs new file mode 100644 index 00000000..d594f21a --- /dev/null +++ b/test/nebulex/adapter_test.exs @@ -0,0 +1,31 @@ +defmodule Nebulex.AdapterTest do + use ExUnit.Case, async: true + + describe "defcommand/2" do + test "ok: function is created" do + defmodule Test1 do + import Nebulex.Adapter, only: [defcommand: 1, defcommand: 2] + + defcommand c1(a1) + + defcommand c2(a1), command: :c11 + + defcommand c3(a1), command: :c11, largs: [:l], rargs: [:r] + end + end + end + + describe "defcommandp/2" do + test "ok: function is created" do + defmodule Test2 do + import Nebulex.Adapter, only: [defcommandp: 1, defcommandp: 2] + + defcommandp c1(a1) + + defcommandp c2(a1), command: :c11 + + defcommandp c3(a1), command: :c11, largs: [:l], rargs: [:r] + end + end + end +end diff --git a/test/nebulex/adapters/local/generation_test.exs b/test/nebulex/adapters/local/generation_test.exs deleted file mode 100644 index a9661f2f..00000000 --- a/test/nebulex/adapters/local/generation_test.exs +++ /dev/null @@ -1,382 +0,0 @@ -defmodule Nebulex.Adapters.Local.GenerationTest do - use ExUnit.Case, async: true - - defmodule LocalWithSizeLimit do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local, - gc_interval: :timer.hours(1) - end - - import Nebulex.CacheCase - - alias Nebulex.Adapters.Local.Generation - alias Nebulex.Adapters.Local.GenerationTest.LocalWithSizeLimit - alias Nebulex.TestCache.Cache - - describe "init" do - test "ok: with default options" do - assert {:ok, _pid} = 
LocalWithSizeLimit.start_link() - - assert %Nebulex.Adapters.Local.Generation{ - allocated_memory: nil, - backend: :ets, - backend_opts: [ - :set, - :public, - {:keypos, 2}, - {:read_concurrency, true}, - {:write_concurrency, true} - ], - gc_cleanup_max_timeout: 600_000, - gc_cleanup_min_timeout: 10_000, - gc_cleanup_ref: nil, - gc_heartbeat_ref: nil, - gc_interval: nil, - max_size: nil, - meta_tab: meta_tab, - stats_counter: nil - } = Generation.get_state(LocalWithSizeLimit) - - assert is_reference(meta_tab) - - :ok = LocalWithSizeLimit.stop() - end - - test "ok: with custom options" do - assert {:ok, _pid} = - LocalWithSizeLimit.start_link( - gc_interval: 10, - max_size: 10, - allocated_memory: 1000 - ) - - assert %Nebulex.Adapters.Local.Generation{ - allocated_memory: 1000, - backend: :ets, - backend_opts: [ - :set, - :public, - {:keypos, 2}, - {:read_concurrency, true}, - {:write_concurrency, true} - ], - gc_cleanup_max_timeout: 600_000, - gc_cleanup_min_timeout: 10_000, - gc_cleanup_ref: gc_cleanup_ref, - gc_heartbeat_ref: gc_heartbeat_ref, - gc_interval: 10, - max_size: 10, - meta_tab: meta_tab, - stats_counter: nil - } = Generation.get_state(LocalWithSizeLimit) - - assert is_reference(gc_cleanup_ref) - assert is_reference(gc_heartbeat_ref) - assert is_reference(meta_tab) - - :ok = LocalWithSizeLimit.stop() - end - - test "error: invalid gc_cleanup_min_timeout" do - _ = Process.flag(:trap_exit, true) - - assert {:error, {:shutdown, {_, _, {:shutdown, {_, _, {%ArgumentError{message: err}, _}}}}}} = - LocalWithSizeLimit.start_link( - gc_interval: 3600, - gc_cleanup_min_timeout: -1, - gc_cleanup_max_timeout: -1 - ) - - assert err == "expected gc_cleanup_min_timeout: to be an integer > 0, got: -1" - end - end - - describe "gc" do - setup_with_dynamic_cache(Cache, :gc_test, - backend: :shards, - gc_interval: 1000, - compressed: true - ) - - test "create generations", %{cache: cache, name: name} do - assert generations_len(name) == 1 - - :ok = Process.sleep(1020) - 
assert generations_len(name) == 2 - - assert cache.delete_all() == 0 - - :ok = Process.sleep(1020) - assert generations_len(name) == 2 - end - - test "create new generation and reset timeout", %{cache: cache, name: name} do - assert generations_len(name) == 1 - - :ok = Process.sleep(800) - - cache.with_dynamic_cache(name, fn -> - cache.new_generation() - end) - - assert generations_len(name) == 2 - - :ok = Process.sleep(500) - assert generations_len(name) == 2 - - :ok = Process.sleep(520) - assert generations_len(name) == 2 - end - - test "create new generation without reset timeout", %{cache: cache, name: name} do - assert generations_len(name) == 1 - - :ok = Process.sleep(800) - - cache.with_dynamic_cache(name, fn -> - cache.new_generation(reset_timer: false) - end) - - assert generations_len(name) == 2 - - :ok = Process.sleep(500) - assert generations_len(name) == 2 - end - - test "reset timer", %{cache: cache, name: name} do - assert generations_len(name) == 1 - - :ok = Process.sleep(800) - - cache.with_dynamic_cache(name, fn -> - cache.reset_generation_timer() - end) - - :ok = Process.sleep(220) - assert generations_len(name) == 1 - - :ok = Process.sleep(1000) - assert generations_len(name) == 2 - end - end - - describe "allocated memory" do - test "cleanup is triggered when max generation size is reached" do - {:ok, _pid} = - LocalWithSizeLimit.start_link( - gc_interval: 3_600_000, - allocated_memory: 100_000, - gc_cleanup_min_timeout: 1000, - gc_cleanup_max_timeout: 3000 - ) - - assert generations_len(LocalWithSizeLimit) == 1 - - {mem_size, _} = Generation.memory_info(LocalWithSizeLimit) - :ok = Generation.realloc(LocalWithSizeLimit, mem_size * 2) - - # Trigger the cleanup event - :ok = check_cache_size(LocalWithSizeLimit) - - assert generations_len(LocalWithSizeLimit) == 1 - - :ok = flood_cache(mem_size, mem_size * 2) - - assert_mem_size(:>) - - # Wait until the cleanup event is triggered - :ok = Process.sleep(3100) - - wait_until(fn -> - assert 
generations_len(LocalWithSizeLimit) == 2 - assert_mem_size(:<=) - end) - - :ok = flood_cache(mem_size, mem_size * 2) - - wait_until(fn -> - assert generations_len(LocalWithSizeLimit) == 2 - assert_mem_size(:>) - end) - - :ok = flood_cache(mem_size, mem_size * 2) - - wait_until(fn -> - assert generations_len(LocalWithSizeLimit) == 2 - assert_mem_size(:>) - end) - - # triggers the cleanup event - :ok = check_cache_size(LocalWithSizeLimit) - - assert generations_len(LocalWithSizeLimit) == 2 - - :ok = LocalWithSizeLimit.stop() - end - - test "cleanup while cache is being used" do - {:ok, _pid} = - LocalWithSizeLimit.start_link( - gc_interval: 3_600_000, - allocated_memory: 100, - gc_cleanup_min_timeout: 1000, - gc_cleanup_max_timeout: 3000 - ) - - assert generations_len(LocalWithSizeLimit) == 1 - - tasks = for i <- 1..3, do: Task.async(fn -> task_fun(LocalWithSizeLimit, i) end) - - for _ <- 1..100 do - :ok = Process.sleep(10) - - LocalWithSizeLimit - |> Generation.server() - |> send(:cleanup) - end - - for task <- tasks, do: Task.shutdown(task) - - :ok = LocalWithSizeLimit.stop() - end - end - - describe "max size" do - test "cleanup is triggered when size limit is reached" do - {:ok, _pid} = - LocalWithSizeLimit.start_link( - gc_interval: 3_600_000, - max_size: 3, - gc_cleanup_min_timeout: 1000, - gc_cleanup_max_timeout: 1500 - ) - - # Initially there should be only 1 generation and no entries - assert generations_len(LocalWithSizeLimit) == 1 - assert LocalWithSizeLimit.count_all() == 0 - - # Put some entries to exceed the max size - _ = cache_put(LocalWithSizeLimit, 1..4) - - # Validate current size - assert LocalWithSizeLimit.count_all() == 4 - - # Wait the max cleanup timeout - :ok = Process.sleep(1600) - - # There should be 2 generation now - assert generations_len(LocalWithSizeLimit) == 2 - - # The entries should be now in the older generation - assert LocalWithSizeLimit.count_all() == 4 - - # Wait the min cleanup timeout since max size is exceeded - :ok = 
Process.sleep(1100) - - # Cache should be empty now - assert LocalWithSizeLimit.count_all() == 0 - - # Put some entries without exceeding the max size - _ = cache_put(LocalWithSizeLimit, 5..6) - - # Validate current size - assert LocalWithSizeLimit.count_all() == 2 - - # Wait the max cleanup timeout (timeout should be relative to the size) - :ok = Process.sleep(1600) - - # The entries should be in the newer generation yet - assert LocalWithSizeLimit.count_all() == 2 - - # Put some entries to exceed the max size - _ = cache_put(LocalWithSizeLimit, 7..8) - - # Wait the max cleanup timeout - :ok = Process.sleep(1600) - - # The entries should be in the newer generation yet - assert LocalWithSizeLimit.count_all() == 4 - - # Wait the min cleanup timeout since max size is exceeded - :ok = Process.sleep(1100) - - # Cache should be empty now - assert LocalWithSizeLimit.count_all() == 0 - - # Stop the cache - :ok = LocalWithSizeLimit.stop() - end - end - - describe "cleanup cover" do - test "cleanup when gc_interval not set" do - {:ok, _pid} = - LocalWithSizeLimit.start_link( - max_size: 3, - gc_cleanup_min_timeout: 1000, - gc_cleanup_max_timeout: 1500 - ) - - # Put some entries to exceed the max size - _ = cache_put(LocalWithSizeLimit, 1..4) - - # Wait the max cleanup timeout - :ok = Process.sleep(1600) - - # assert not crashed - assert LocalWithSizeLimit.count_all() == 4 - - # Stop the cache - :ok = LocalWithSizeLimit.stop() - end - end - - ## Private Functions - - defp check_cache_size(cache) do - :cleanup = - cache - |> Generation.server() - |> send(:cleanup) - - :ok = Process.sleep(1000) - end - - defp flood_cache(mem_size, max_size) when mem_size > max_size do - :ok - end - - defp flood_cache(mem_size, max_size) when mem_size <= max_size do - :ok = - 100_000 - |> :rand.uniform() - |> LocalWithSizeLimit.put(generate_value(1000)) - - :ok = Process.sleep(500) - {mem_size, _} = Generation.memory_info(LocalWithSizeLimit) - flood_cache(mem_size, max_size) - end - - defp 
assert_mem_size(greater_or_less) do - {mem_size, max_size} = Generation.memory_info(LocalWithSizeLimit) - assert apply(Kernel, greater_or_less, [mem_size, max_size]) - end - - defp generate_value(n) do - for(_ <- 1..n, do: "a") - end - - defp generations_len(name) do - name - |> Generation.list() - |> length() - end - - defp task_fun(cache, i) do - :ok = cache.put("#{inspect(self())}.#{i}", i) - :ok = Process.sleep(1) - task_fun(cache, i + 1) - end -end diff --git a/test/nebulex/adapters/local_boolean_keys_test.exs b/test/nebulex/adapters/local_boolean_keys_test.exs deleted file mode 100644 index c66b99e3..00000000 --- a/test/nebulex/adapters/local_boolean_keys_test.exs +++ /dev/null @@ -1,101 +0,0 @@ -defmodule Nebulex.Adapters.LocalBooleanKeysTest do - use ExUnit.Case, async: true - - defmodule ETS do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule Shards do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - alias Nebulex.Adapters.LocalBooleanKeysTest.{ETS, Shards} - - setup do - {:ok, ets} = ETS.start_link() - {:ok, shards} = Shards.start_link(backend: :shards) - - on_exit(fn -> - :ok = Process.sleep(100) - if Process.alive?(ets), do: ETS.stop() - if Process.alive?(shards), do: Shards.stop() - end) - - {:ok, caches: [ETS, Shards]} - end - - describe "boolean keys" do - test "get and get_all", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: true, b: false) - - assert cache.get(:a) == true - assert cache.get(:b) == false - - assert cache.get_all([:a, :b]) == %{a: true, b: false} - end) - end - - test "take", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: true, b: false) - - assert cache.take(:a) == true - assert cache.take(:b) == false - - assert cache.get_all([:a, :b]) == %{} - end) - end - - test "delete true value", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, true) - - 
assert cache.get(:a) == true - assert cache.delete(:a) - assert cache.get(:a) == nil - end) - end - - test "delete false value", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, false) - - assert cache.get(:a) == false - assert cache.delete(:a) - assert cache.get(:a) == nil - end) - end - - test "put_new", %{caches: caches} do - for_all_caches(caches, fn cache -> - assert cache.put_new(:a, true) - :ok = cache.put(:a, false) - refute cache.put_new(:a, false) - - assert cache.get(:a) == false - end) - end - - test "has_key?", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, true) - - assert cache.has_key?(:a) - refute cache.has_key?(:b) - end) - end - end - - ## Helpers - - defp for_all_caches(caches, fun) do - Enum.each(caches, fn cache -> - fun.(cache) - end) - end -end diff --git a/test/nebulex/adapters/local_duplicate_keys_test.exs b/test/nebulex/adapters/local_duplicate_keys_test.exs deleted file mode 100644 index d3927b70..00000000 --- a/test/nebulex/adapters/local_duplicate_keys_test.exs +++ /dev/null @@ -1,180 +0,0 @@ -defmodule Nebulex.Adapters.LocalDuplicateKeysTest do - use ExUnit.Case, async: true - - defmodule ETS do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule Shards do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - import Ex2ms - - alias Nebulex.Adapters.LocalDuplicateKeysTest.{ETS, Shards} - - setup do - {:ok, ets} = ETS.start_link(backend_type: :duplicate_bag) - {:ok, shards} = Shards.start_link(backend: :shards, backend_type: :duplicate_bag) - - on_exit(fn -> - :ok = Process.sleep(100) - if Process.alive?(ets), do: ETS.stop() - if Process.alive?(shards), do: Shards.stop() - end) - - {:ok, caches: [ETS, Shards]} - end - - describe "duplicate keys" do - test "get and get_all", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: 1, a: 2, a: 2, b: 1, b: 2, c: 1) - - assert 
cache.get(:a) == [1, 2, 2] - assert cache.get(:b) == [1, 2] - assert cache.get(:c) == 1 - - assert cache.get_all([:a, :b, :c]) == %{a: [1, 2, 2], b: [1, 2], c: 1} - end) - end - - test "take", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: 1, a: 2, a: 2, b: 1, b: 2, c: 1) - - assert cache.take(:a) == [1, 2, 2] - assert cache.take(:b) == [1, 2] - assert cache.take(:c) == 1 - - assert cache.get_all([:a, :b, :c]) == %{} - end) - end - - test "delete", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, 1) - :ok = cache.put(:a, 2) - :ok = cache.put(:a, 2) - - assert cache.get(:a) == [1, 2, 2] - assert cache.delete(:a) - refute cache.get(:a) - end) - end - - test "put_new", %{caches: caches} do - for_all_caches(caches, fn cache -> - assert cache.put_new(:a, 1) - :ok = cache.put(:a, 2) - refute cache.put_new(:a, 3) - - assert cache.get(:a) == [1, 2] - end) - end - - test "has_key?", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, 1) - :ok = cache.put(:a, 2) - - assert cache.has_key?(:a) - refute cache.has_key?(:b) - end) - end - - test "ttl", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, 1, ttl: 5000) - :ok = cache.put(:a, 2, ttl: 10_000) - :ok = cache.put(:a, 3) - - [ttl1, ttl2, ttl3] = cache.ttl(:a) - assert ttl1 > 1000 - assert ttl2 > 6000 - assert ttl3 == :infinity - - refute cache.ttl(:b) - end) - end - - test "count_all and delete_all", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: 1, a: 2, a: 2, b: 1, b: 2, c: 1) - - assert cache.count_all() == 6 - assert cache.delete_all() == 6 - assert cache.count_all() == 0 - end) - end - - test "all and stream using match_spec queries", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: 1, a: 2, a: 2, b: 1, b: 2, c: 1) - - test_ms = - fun do - {_, key, value, _, _} when value == 2 -> key - end - - res_stream = test_ms |> 
cache.stream() |> Enum.to_list() |> Enum.sort() - res_query = test_ms |> cache.all() |> Enum.sort() - - assert res_stream == [:a, :a, :b] - assert res_query == res_stream - end) - end - end - - describe "unsupported commands" do - test "replace", %{caches: caches} do - for_all_caches(caches, fn cache -> - assert_raise ArgumentError, fn -> - cache.replace(:a, 1) - end - end) - end - - test "incr", %{caches: caches} do - for_all_caches(caches, fn cache -> - assert_raise ArgumentError, fn -> - cache.incr(:a) - end - end) - end - - test "expire", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, 1) - :ok = cache.put(:a, 2) - - assert_raise ArgumentError, fn -> - cache.expire(:a, 5000) - end - end) - end - - test "touch", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, 1) - :ok = cache.put(:a, 2) - - assert_raise ArgumentError, fn -> - cache.touch(:a) - end - end) - end - end - - ## Helpers - - defp for_all_caches(caches, fun) do - Enum.each(caches, fn cache -> - fun.(cache) - end) - end -end diff --git a/test/nebulex/adapters/local_ets_test.exs b/test/nebulex/adapters/local_ets_test.exs deleted file mode 100644 index 4619b5bc..00000000 --- a/test/nebulex/adapters/local_ets_test.exs +++ /dev/null @@ -1,20 +0,0 @@ -defmodule Nebulex.Adapters.LocalEtsTest do - use ExUnit.Case, async: true - use Nebulex.LocalTest - use Nebulex.CacheTest - - import Nebulex.CacheCase - - alias Nebulex.Adapter - alias Nebulex.TestCache.Cache - - setup_with_dynamic_cache(Cache, :local_with_ets, purge_batch_size: 10) - - describe "ets" do - test "backend", %{name: name} do - Adapter.with_meta(name, fn _, meta -> - assert meta.backend == :ets - end) - end - end -end diff --git a/test/nebulex/adapters/local_shards_test.exs b/test/nebulex/adapters/local_shards_test.exs deleted file mode 100644 index 53c7366a..00000000 --- a/test/nebulex/adapters/local_shards_test.exs +++ /dev/null @@ -1,37 +0,0 @@ -defmodule 
Nebulex.Adapters.LocalWithShardsTest do - use ExUnit.Case, async: true - use Nebulex.LocalTest - use Nebulex.CacheTest - - import Nebulex.CacheCase - - alias Nebulex.Adapter - alias Nebulex.TestCache.Cache - - setup_with_dynamic_cache(Cache, :local_with_shards, backend: :shards) - - describe "shards" do - test "backend", %{name: name} do - Adapter.with_meta(name, fn _, meta -> - assert meta.backend == :shards - end) - end - - test "custom partitions" do - defmodule CustomPartitions do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - :ok = Application.put_env(:nebulex, CustomPartitions, backend: :shards, partitions: 2) - {:ok, _pid} = CustomPartitions.start_link() - - assert CustomPartitions.newer_generation() - |> :shards.table_meta() - |> :shards_meta.partitions() == 2 - - :ok = CustomPartitions.stop() - end - end -end diff --git a/test/nebulex/adapters/multilevel_concurrency_test.exs b/test/nebulex/adapters/multilevel_concurrency_test.exs deleted file mode 100644 index eefcf01c..00000000 --- a/test/nebulex/adapters/multilevel_concurrency_test.exs +++ /dev/null @@ -1,159 +0,0 @@ -defmodule Nebulex.Adapters.MultilevelConcurrencyTest do - use ExUnit.Case, async: true - - import Nebulex.CacheCase - - alias Nebulex.TestCache.Multilevel.L2 - - defmodule SleeperMock do - @moduledoc false - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - - alias Nebulex.Adapters.Local - - @impl true - defmacro __before_compile__(_), do: :ok - - @impl true - defdelegate init(opts), to: Local - - def post(opts) do - with f when is_function(f) <- opts[:post] do - f.() - end - end - - @impl true - defdelegate get(meta, key, opts), to: Local - - @impl true - defdelegate put(meta, key, value, ttl, on_write, opts), to: Local - - @impl true - def delete(meta, key, opts) do - result = Local.delete(meta, key, opts) - post(opts) - result - end - - @impl true - defdelegate take(meta, key, opts), to: Local - - 
@impl true - defdelegate has_key?(meta, key), to: Local - - @impl true - defdelegate ttl(meta, key), to: Local - - @impl true - defdelegate expire(meta, key, ttl), to: Local - - @impl true - defdelegate touch(meta, key), to: Local - - @impl true - defdelegate update_counter(meta, key, amount, ttl, default, opts), to: Local - - @impl true - defdelegate get_all(meta, keys, opts), to: Local - - @impl true - defdelegate put_all(meta, entries, ttl, on_write, opts), to: Local - - @impl true - def execute(meta, operation, query, opts) do - result = Local.execute(meta, operation, query, opts) - post(opts) - result - end - - @impl true - defdelegate stream(meta, query, opts), to: Local - end - - defmodule L1 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: SleeperMock - end - - defmodule Multilevel do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Multilevel - end - - @levels [ - {L1, name: :multilevel_concurrency_l1}, - {L2, name: :multilevel_concurrency_l2} - ] - - setup_with_cache(Multilevel, - model: :inclusive, - levels: @levels - ) - - describe "delete" do - test "deletes in reverse order", %{cache: cache} do - test_pid = self() - - assert :ok = cache.put("foo", "stale") - - task = - Task.async(fn -> - cache.delete("foo", - post: fn -> - send(test_pid, :deleted_in_l1) - - receive do - :continue -> :ok - after - 5000 -> - raise "Did not receive continue message" - end - end - ) - end) - - assert_receive :deleted_in_l1 - refute cache.get("foo") - send(task.pid, :continue) - assert Task.await(task) == :ok - assert cache.get("foo", level: 1) == nil - assert cache.get("foo", level: 2) == nil - end - end - - describe "delete_all" do - test "deletes in reverse order", %{cache: cache} do - test_pid = self() - - assert :ok = cache.put_all(%{a: "stale", b: "stale"}) - - task = - Task.async(fn -> - cache.delete_all(nil, - post: fn -> - send(test_pid, :deleted_in_l1) - - receive do - :continue -> :ok - after - 5000 -> - raise "Did not receive 
continue message" - end - end - ) - end) - - assert_receive :deleted_in_l1 - refute cache.get(:a) - refute cache.get(:b) - send(task.pid, :continue) - assert Task.await(task) == 4 - assert cache.get_all([:a, :b]) == %{} - end - end -end diff --git a/test/nebulex/adapters/multilevel_exclusive_test.exs b/test/nebulex/adapters/multilevel_exclusive_test.exs deleted file mode 100644 index 287242fb..00000000 --- a/test/nebulex/adapters/multilevel_exclusive_test.exs +++ /dev/null @@ -1,94 +0,0 @@ -defmodule Nebulex.Adapters.MultilevelExclusiveTest do - use ExUnit.Case, async: true - use Nebulex.NodeCase - use Nebulex.MultilevelTest - use Nebulex.Cache.QueryableTest - use Nebulex.Cache.TransactionTest - - import Nebulex.CacheCase - - alias Nebulex.Adapters.Local.Generation - alias Nebulex.Cache.Cluster - alias Nebulex.TestCache.Multilevel - alias Nebulex.TestCache.Multilevel.{L1, L2, L3} - - @gc_interval :timer.hours(1) - - @levels [ - { - L1, - name: :multilevel_exclusive_l1, gc_interval: @gc_interval, backend: :shards, partitions: 2 - }, - { - L2, - name: :multilevel_exclusive_l2, primary: [gc_interval: @gc_interval] - }, - { - L3, - name: :multilevel_exclusive_l3, - primary: [gc_interval: @gc_interval, backend: :shards, partitions: 2] - } - ] - - setup_with_dynamic_cache(Multilevel, :multilevel_exclusive, - model: :exclusive, - levels: @levels - ) - - describe "multilevel exclusive" do - test "returns partitions for L1 with shards backend", %{name: name} do - assert :"#{name}_l1" - |> Generation.newer() - |> :shards.table_meta() - |> :shards_meta.partitions() == 2 - end - - test "get" do - :ok = Multilevel.put(1, 1, level: 1) - :ok = Multilevel.put(2, 2, level: 2) - :ok = Multilevel.put(3, 3, level: 3) - - assert Multilevel.get(1) == 1 - assert Multilevel.get(2, return: :key) == 2 - assert Multilevel.get(3) == 3 - refute Multilevel.get(2, level: 1) - refute Multilevel.get(3, level: 1) - refute Multilevel.get(1, level: 2) - refute Multilevel.get(3, level: 2) - refute 
Multilevel.get(1, level: 3) - refute Multilevel.get(2, level: 3) - end - end - - describe "partitioned level" do - test "returns cluster nodes" do - assert Cluster.get_nodes(:multilevel_exclusive_l3) == [node()] - end - - test "joining new node" do - node = :"node1@127.0.0.1" - - {:ok, pid} = - start_cache(node, Multilevel, - name: :multilevel_exclusive, - model: :exclusive, - levels: @levels - ) - - # check cluster nodes - assert Cluster.get_nodes(:multilevel_exclusive_l3) == [node, node()] - - kv_pairs = for k <- 1..100, do: {k, k} - - Multilevel.transaction(fn -> - assert Multilevel.put_all(kv_pairs) == :ok - - for k <- 1..100 do - assert Multilevel.get(k) == k - end - end) - - :ok = stop_cache(:"node1@127.0.0.1", pid) - end - end -end diff --git a/test/nebulex/adapters/multilevel_inclusive_test.exs b/test/nebulex/adapters/multilevel_inclusive_test.exs deleted file mode 100644 index 2ce57930..00000000 --- a/test/nebulex/adapters/multilevel_inclusive_test.exs +++ /dev/null @@ -1,190 +0,0 @@ -defmodule Nebulex.Adapters.MultilevelInclusiveTest do - use ExUnit.Case, async: true - use Nebulex.NodeCase - use Nebulex.MultilevelTest - use Nebulex.Cache.QueryableTest - use Nebulex.Cache.TransactionTest - - import Nebulex.CacheCase - - alias Nebulex.Adapters.Local.Generation - alias Nebulex.Cache.Cluster - alias Nebulex.TestCache.Multilevel - alias Nebulex.TestCache.Multilevel.{L1, L2, L3} - - @gc_interval :timer.hours(1) - - @levels [ - { - L1, - name: :multilevel_inclusive_l1, gc_interval: @gc_interval, backend: :shards, partitions: 2 - }, - { - L2, - name: :multilevel_inclusive_l2, primary: [gc_interval: @gc_interval] - }, - { - L3, - name: :multilevel_inclusive_l3, - primary: [gc_interval: @gc_interval, backend: :shards, partitions: 2] - } - ] - - setup_with_dynamic_cache(Multilevel, :multilevel_inclusive, - model: :inclusive, - levels: @levels - ) - - describe "multilevel inclusive" do - test "returns partitions for L1 with shards backend", %{name: name} do - assert 
:"#{name}_l1" - |> Generation.newer() - |> :shards.table_meta() - |> :shards_meta.partitions() == 2 - end - - test "get" do - :ok = Process.sleep(2000) - :ok = Multilevel.put(1, 1, level: 1) - :ok = Multilevel.put(2, 2, level: 2) - :ok = Multilevel.put(3, 3, level: 3) - - assert Multilevel.get(1) == 1 - refute Multilevel.get(1, level: 2) - refute Multilevel.get(1, level: 3) - - assert Multilevel.get(2) == 2 - assert Multilevel.get(2, level: 1) == 2 - assert Multilevel.get(2, level: 2) == 2 - refute Multilevel.get(2, level: 3) - - assert Multilevel.get(3, level: 3) == 3 - refute Multilevel.get(3, level: 1) - refute Multilevel.get(3, level: 2) - - assert Multilevel.get(3) == 3 - assert Multilevel.get(3, level: 1) == 3 - assert Multilevel.get(3, level: 2) == 3 - assert Multilevel.get(3, level: 3) == 3 - end - - test "get_all [replicate: true]" do - :ok = Process.sleep(2000) - :ok = Multilevel.put(1, 1, level: 1) - :ok = Multilevel.put(2, 2, level: 2) - :ok = Multilevel.put(3, 3, level: 3) - - assert Multilevel.get_all([1]) == %{1 => 1} - refute Multilevel.get(1, level: 2) - refute Multilevel.get(1, level: 3) - - assert Multilevel.get_all([1, 2]) == %{1 => 1, 2 => 2} - assert Multilevel.get(2, level: 1) == 2 - assert Multilevel.get(2, level: 2) == 2 - refute Multilevel.get(2, level: 3) - - assert Multilevel.get(3, level: 3) == 3 - refute Multilevel.get(3, level: 1) - refute Multilevel.get(3, level: 2) - - assert Multilevel.get_all([1, 2, 3]) == %{1 => 1, 2 => 2, 3 => 3} - assert Multilevel.get(3, level: 1) == 3 - assert Multilevel.get(3, level: 2) == 3 - assert Multilevel.get(3, level: 3) == 3 - end - - test "get_all [replicate: false]" do - :ok = Process.sleep(2000) - :ok = Multilevel.put(1, 1, level: 1) - :ok = Multilevel.put(2, 2, level: 2) - :ok = Multilevel.put(3, 3, level: 3) - - assert Multilevel.get_all([1], replicate: false) == %{1 => 1} - refute Multilevel.get(1, level: 2) - refute Multilevel.get(1, level: 3) - - assert Multilevel.get_all([1, 2], replicate: 
false) == %{1 => 1, 2 => 2} - refute Multilevel.get(2, level: 1) - assert Multilevel.get(2, level: 2) == 2 - refute Multilevel.get(2, level: 3) - - assert Multilevel.get(3, level: 3) == 3 - refute Multilevel.get(3, level: 1) - refute Multilevel.get(3, level: 2) - - assert Multilevel.get_all([1, 2, 3], replicate: false) == %{1 => 1, 2 => 2, 3 => 3} - refute Multilevel.get(3, level: 1) - refute Multilevel.get(3, level: 2) - assert Multilevel.get(3, level: 3) == 3 - end - - test "get boolean" do - :ok = Multilevel.put(1, true, level: 1) - :ok = Multilevel.put(2, false, level: 1) - - assert Multilevel.get(1) == true - assert Multilevel.get(2) == false - end - - test "fetched value is replicated with TTL on previous levels" do - assert Multilevel.put(:a, 1, ttl: 1000) == :ok - assert Multilevel.ttl(:a) > 0 - - :ok = Process.sleep(1100) - refute Multilevel.get(:a, level: 1) - refute Multilevel.get(:a, level: 2) - refute Multilevel.get(:a, level: 3) - - assert Multilevel.put(:b, 1, level: 3) == :ok - assert Multilevel.ttl(:b) == :infinity - assert Multilevel.expire(:b, 1000) - assert Multilevel.ttl(:b) > 0 - refute Multilevel.get(:b, level: 1) - refute Multilevel.get(:b, level: 2) - assert Multilevel.get(:b, level: 3) == 1 - - assert Multilevel.get(:b) == 1 - assert Multilevel.get(:b, level: 1) == 1 - assert Multilevel.get(:b, level: 2) == 1 - assert Multilevel.get(:b, level: 3) == 1 - - :ok = Process.sleep(1100) - refute Multilevel.get(:b, level: 1) - refute Multilevel.get(:b, level: 2) - refute Multilevel.get(:b, level: 3) - end - end - - describe "distributed levels" do - test "return cluster nodes" do - assert Cluster.get_nodes(:multilevel_inclusive_l2) == [node()] - assert Cluster.get_nodes(:multilevel_inclusive_l3) == [node()] - end - - test "joining new node" do - node = :"node1@127.0.0.1" - - {:ok, pid} = - start_cache(node, Multilevel, - name: :multilevel_inclusive, - model: :inclusive, - levels: @levels - ) - - # check cluster nodes - assert 
Cluster.get_nodes(:multilevel_inclusive_l3) == [node, node()] - - kv_pairs = for k <- 1..100, do: {k, k} - - Multilevel.transaction(fn -> - assert Multilevel.put_all(kv_pairs) == :ok - - for k <- 1..100 do - assert Multilevel.get(k) == k - end - end) - - :ok = stop_cache(:"node1@127.0.0.1", pid) - end - end -end diff --git a/test/nebulex/adapters/nil_test.exs b/test/nebulex/adapters/nil_test.exs index bbacb002..3115c627 100644 --- a/test/nebulex/adapters/nil_test.exs +++ b/test/nebulex/adapters/nil_test.exs @@ -15,117 +15,104 @@ defmodule Nebulex.Adapters.NilTest do describe "entry" do property "put", %{cache: cache} do check all term <- term() do - refute cache.get(term) + assert cache.has_key?(term) == {:ok, false} - assert cache.replace(term, term) + assert cache.replace(term, term) == {:ok, true} assert cache.put(term, term) == :ok - assert cache.put_new(term, term) - refute cache.get(term) + assert cache.put_new(term, term) == {:ok, true} + assert cache.has_key?(term) == {:ok, false} end end test "put_all", %{cache: cache} do assert cache.put_all(a: 1, b: 2, c: 3) == :ok - refute cache.get(:a) - refute cache.get(:b) - refute cache.get(:c) + assert cache.has_key?(:a) == {:ok, false} + assert cache.has_key?(:b) == {:ok, false} + assert cache.has_key?(:c) == {:ok, false} end - test "get", %{cache: cache} do - assert cache.put("foo", "bar") == :ok - refute cache.get("foo") - end - - test "get_all", %{cache: cache} do - assert cache.put("foo", "bar") == :ok - assert cache.get_all("foo") == %{} + test "fetch", %{cache: cache} do + assert {:error, %Nebulex.KeyError{key: "foo"}} = cache.fetch("foo") end test "delete", %{cache: cache} do - assert cache.put("foo", "bar") == :ok assert cache.delete("foo") == :ok end test "take", %{cache: cache} do - assert cache.put("foo", "bar") == :ok - refute cache.take("foo") + assert {:error, %Nebulex.KeyError{key: "foo"}} = cache.take("foo") end test "has_key?", %{cache: cache} do - assert cache.put("foo", "bar") == :ok - refute 
cache.has_key?("foo") + assert cache.has_key?("foo") == {:ok, false} end test "ttl", %{cache: cache} do - assert cache.put("foo", "bar") == :ok - refute cache.ttl("foo") + assert {:error, %Nebulex.KeyError{key: "foo"}} = cache.ttl("foo") end test "expire", %{cache: cache} do - assert cache.put("foo", "bar") == :ok - assert cache.expire("foo", 1000) - refute cache.get("foo") + assert cache.expire("foo", 1000) == {:ok, false} end test "touch", %{cache: cache} do - assert cache.put("foo", "bar") == :ok - assert cache.touch("foo") - refute cache.get("foo") + assert cache.touch("foo") == {:ok, false} end test "incr", %{cache: cache} do - assert cache.incr(:counter) == 1 - assert cache.incr(:counter, 10) == 10 - assert cache.incr(:counter, -10) == -10 - assert cache.incr(:counter, 5, default: 10) == 15 - assert cache.incr(:counter, -5, default: 10) == 5 + assert cache.incr!(:counter) == 1 + assert cache.incr!(:counter, 10) == 10 + assert cache.incr!(:counter, -10) == -10 + assert cache.incr!(:counter, 5, default: 10) == 15 + assert cache.incr!(:counter, -5, default: 10) == 5 end test "decr", %{cache: cache} do - assert cache.decr(:counter) == -1 - assert cache.decr(:counter, 10) == -10 - assert cache.decr(:counter, -10) == 10 - assert cache.decr(:counter, 5, default: 10) == 5 - assert cache.decr(:counter, -5, default: 10) == 15 + assert cache.decr!(:counter) == -1 + assert cache.decr!(:counter, 10) == -10 + assert cache.decr!(:counter, -10) == 10 + assert cache.decr!(:counter, 5, default: 10) == 5 + assert cache.decr!(:counter, -5, default: 10) == 15 end end describe "queryable" do - test "all", %{cache: cache} do + test "get_all", %{cache: cache} do assert cache.put("foo", "bar") == :ok - assert cache.all() == [] + assert cache.get_all!() == [] end test "stream", %{cache: cache} do assert cache.put("foo", "bar") == :ok - assert cache.stream() |> Enum.to_list() == [] + assert cache.stream!() |> Enum.to_list() == [] end test "count_all", %{cache: cache} do assert 
cache.put("foo", "bar") == :ok - assert cache.count_all() == 0 + assert cache.count_all!() == 0 end test "delete_all", %{cache: cache} do assert cache.put("foo", "bar") == :ok - assert cache.delete_all() == 0 + assert cache.delete_all!() == 0 end end describe "transaction" do test "single transaction", %{cache: cache} do - refute cache.transaction(fn -> + assert cache.transaction(fn -> :ok = cache.put("foo", "bar") - cache.get("foo") - end) + cache.get!("foo") + end) == {:ok, nil} end test "in_transaction?", %{cache: cache} do - refute cache.in_transaction?() + assert cache.in_transaction?() == {:ok, false} cache.transaction(fn -> - :ok = cache.put(1, 11, return: :key) - true = cache.in_transaction?() + :ok = cache.put(1, 11) + + assert cache.in_transaction?() == {:ok, true} end) end end @@ -139,28 +126,42 @@ defmodule Nebulex.Adapters.NilTest do assert cache.dump(path) == :ok assert cache.load(path) == :ok - assert cache.count_all() == 0 + assert cache.count_all!() == 0 end end - describe "stats" do - test "stats/0", %{cache: cache} do + describe "info" do + test "ok: returns stats info", %{cache: cache} do assert cache.put("foo", "bar") == :ok - refute cache.get("foo") - assert cache.stats() == %Nebulex.Stats{} + refute cache.get!("foo") + + assert cache.info!(:stats) == %{ + deletions: 0, + evictions: 0, + expirations: 0, + hits: 0, + misses: 0, + updates: 0, + writes: 0 + } end end ## Private Functions defp setup_cache(_config) do - {:ok, pid} = NilCache.start_link() + {:ok, _pid} = NilCache.start_link(telemetry: false) - on_exit(fn -> - :ok = Process.sleep(100) - if Process.alive?(pid), do: NilCache.stop() - end) + on_exit(fn -> safe_stop() end) {:ok, cache: NilCache} end + + defp safe_stop do + NilCache.stop() + catch + # Perhaps the `pid` has terminated already (race-condition), + # so we don't want to crash the test + :exit, _ -> :ok + end end diff --git a/test/nebulex/adapters/partitioned_test.exs b/test/nebulex/adapters/partitioned_test.exs deleted file 
mode 100644 index cc446c6c..00000000 --- a/test/nebulex/adapters/partitioned_test.exs +++ /dev/null @@ -1,293 +0,0 @@ -defmodule Nebulex.Adapters.PartitionedTest do - use Nebulex.NodeCase - use Nebulex.CacheTest - - import Nebulex.CacheCase - import Nebulex.Helpers - - alias Nebulex.Adapter - alias Nebulex.TestCache.{Partitioned, PartitionedMock} - - @primary :"primary@127.0.0.1" - @cache_name :partitioned_cache - - # Set config - :ok = Application.put_env(:nebulex, Partitioned, primary: [backend: :shards]) - - setup do - cluster = :lists.usort([@primary | Application.get_env(:nebulex, :nodes, [])]) - - node_pid_list = - start_caches( - [node() | Node.list()], - [ - {Partitioned, [name: @cache_name, join_timeout: 2000]}, - {PartitionedMock, []} - ] - ) - - default_dynamic_cache = Partitioned.get_dynamic_cache() - _ = Partitioned.put_dynamic_cache(@cache_name) - - on_exit(fn -> - _ = Partitioned.put_dynamic_cache(default_dynamic_cache) - :ok = Process.sleep(100) - stop_caches(node_pid_list) - end) - - {:ok, cache: Partitioned, name: @cache_name, cluster: cluster} - end - - describe "c:init/1" do - test "initializes the primary store metadata" do - Adapter.with_meta(PartitionedCache.Primary, fn adapter, meta -> - assert adapter == Nebulex.Adapters.Local - assert meta.backend == :shards - end) - end - - test "raises an exception because invalid primary store" do - assert_raise ArgumentError, ~r"adapter Invalid was not compiled", fn -> - defmodule Demo do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned, - primary_storage_adapter: Invalid - end - end - end - - test "fails because unloaded keyslot module" do - _ = Process.flag(:trap_exit, true) - - assert {:error, {%ArgumentError{message: msg}, _}} = - Partitioned.start_link( - name: :unloaded_keyslot, - keyslot: UnloadedKeyslot - ) - - assert Regex.match?(~r"keyslot UnloadedKeyslot was not compiled", msg) - end - - test "fails because keyslot module does not implement expected 
behaviour" do - _ = Process.flag(:trap_exit, true) - - assert {:error, {%ArgumentError{message: msg}, _}} = - Partitioned.start_link( - name: :invalid_keyslot, - keyslot: __MODULE__ - ) - - mod = inspect(__MODULE__) - behaviour = "Nebulex.Adapter.Keyslot" - assert Regex.match?(~r"expected #{mod} to implement the behaviour #{behaviour}", msg) - end - - test "fails because invalid keyslot option" do - _ = Process.flag(:trap_exit, true) - - assert {:error, {%ArgumentError{message: msg}, _}} = - Partitioned.start_link( - name: :invalid_keyslot, - keyslot: "invalid" - ) - - assert Regex.match?(~r"expected keyslot: to be an atom, got: \"invalid\"", msg) - end - end - - describe "partitioned cache" do - test "custom keyslot" do - defmodule Keyslot do - @behaviour Nebulex.Adapter.Keyslot - - @impl true - def hash_slot(key, range) do - key - |> :erlang.phash2() - |> rem(range) - end - end - - test_with_dynamic_cache(Partitioned, [name: :custom_keyslot, keyslot: Keyslot], fn -> - refute Partitioned.get("foo") - assert Partitioned.put("foo", "bar") == :ok - assert Partitioned.get("foo") == "bar" - end) - end - - test "custom keyslot supports two item tuple keys for get_all" do - defmodule TupleKeyslot do - @behaviour Nebulex.Adapter.Keyslot - - @impl true - def hash_slot({_, _} = key, range) do - key - |> :erlang.phash2() - |> rem(range) - end - end - - test_with_dynamic_cache( - Partitioned, - [name: :custom_keyslot_with_tuple_keys, keyslot: TupleKeyslot], - fn -> - assert Partitioned.put_all([{{"foo", 1}, "bar"}]) == :ok - assert Partitioned.get_all([{"foo", 1}]) == %{{"foo", 1} => "bar"} - end - ) - end - - test "get_and_update" do - assert Partitioned.get_and_update(1, &Partitioned.get_and_update_fun/1) == {nil, 1} - assert Partitioned.get_and_update(1, &Partitioned.get_and_update_fun/1) == {1, 2} - assert Partitioned.get_and_update(1, &Partitioned.get_and_update_fun/1) == {2, 4} - - assert_raise ArgumentError, fn -> - Partitioned.get_and_update(1, 
&Partitioned.get_and_update_bad_fun/1) - end - end - - test "incr raises when the counter is not an integer" do - :ok = Partitioned.put(:counter, "string") - - assert_raise ArgumentError, fn -> - Partitioned.incr(:counter, 10) - end - end - end - - describe "cluster scenario:" do - test "node leaves and then rejoins", %{name: name, cluster: cluster} do - assert node() == @primary - assert :lists.usort(Node.list()) == cluster -- [node()] - assert Partitioned.nodes() == cluster - - Partitioned.with_dynamic_cache(name, fn -> - :ok = Partitioned.leave_cluster() - assert Partitioned.nodes() == cluster -- [node()] - end) - - Partitioned.with_dynamic_cache(name, fn -> - :ok = Partitioned.join_cluster() - assert Partitioned.nodes() == cluster - end) - end - - test "teardown cache node", %{cluster: cluster} do - assert Partitioned.nodes() == cluster - - assert Partitioned.put(1, 1) == :ok - assert Partitioned.get(1) == 1 - - node = teardown_cache(1) - - wait_until(fn -> - assert Partitioned.nodes() == cluster -- [node] - end) - - refute Partitioned.get(1) - - assert :ok == Partitioned.put_all([{4, 44}, {2, 2}, {1, 1}]) - - assert Partitioned.get(4) == 44 - assert Partitioned.get(2) == 2 - assert Partitioned.get(1) == 1 - end - - test "bootstrap leaves cache from the cluster when terminated and then rejoins when restarted", - %{name: name} do - prefix = [:nebulex, :test_cache, :partitioned, :bootstrap] - started = prefix ++ [:started] - stopped = prefix ++ [:stopped] - joined = prefix ++ [:joined] - exit_sig = prefix ++ [:exit] - - with_telemetry_handler(__MODULE__, [started, stopped, joined, exit_sig], fn -> - assert node() in Partitioned.nodes() - - true = - [name, Bootstrap] - |> normalize_module_name() - |> Process.whereis() - |> Process.exit(:stop) - - assert_receive {^exit_sig, %{system_time: _}, %{reason: :stop}}, 5000 - assert_receive {^stopped, %{system_time: _}, %{reason: :stop, cluster_nodes: nodes}}, 5000 - - refute node() in nodes - - assert_receive {^started, 
%{system_time: _}, %{}}, 5000 - assert_receive {^joined, %{system_time: _}, %{cluster_nodes: nodes}}, 5000 - - assert node() in nodes - assert nodes -- Partitioned.nodes() == [] - - :ok = Process.sleep(2100) - - assert_receive {^joined, %{system_time: _}, %{cluster_nodes: nodes}}, 5000 - assert node() in nodes - end) - end - end - - describe "rpc" do - test "timeout error" do - assert Partitioned.put_all(for(x <- 1..100_000, do: {x, x}), timeout: 60_000) == :ok - assert Partitioned.get(1, timeout: 1000) == 1 - - msg = ~r"RPC error while executing action :all\n\nSuccessful responses:" - - assert_raise Nebulex.RPCMultiCallError, msg, fn -> - Partitioned.all(nil, timeout: 1) - end - end - - test "runtime error" do - _ = Process.flag(:trap_exit, true) - - assert [1, 2] |> PartitionedMock.get_all(timeout: 10) |> map_size() == 0 - - assert PartitionedMock.put_all(a: 1, b: 2) == :ok - - assert [1, 2] |> PartitionedMock.get_all() |> map_size() == 0 - - assert_raise ArgumentError, fn -> - PartitionedMock.get(1) - end - - msg = ~r"RPC error while executing action :count_all\n\nSuccessful responses:" - - assert_raise Nebulex.RPCMultiCallError, msg, fn -> - PartitionedMock.count_all() - end - end - end - - if Code.ensure_loaded?(:erpc) do - describe ":erpc" do - test "timeout error" do - assert Partitioned.put(1, 1) == :ok - assert Partitioned.get(1, timeout: 1000) == 1 - - node = "#{inspect(Partitioned.get_node(1))}" - reason = "#{inspect({:erpc, :timeout})}" - - msg = ~r"The RPC operation failed on node #{node} with reason:\n\n#{reason}" - - assert_raise Nebulex.RPCError, msg, fn -> - Partitioned.get(1, timeout: 0) - end - end - end - end - - ## Private Functions - - defp teardown_cache(key) do - node = Partitioned.get_node(key) - remote_pid = :rpc.call(node, Process, :whereis, [@cache_name]) - :ok = :rpc.call(node, Supervisor, :stop, [remote_pid]) - node - end -end diff --git a/test/nebulex/adapters/replicated_test.exs b/test/nebulex/adapters/replicated_test.exs deleted 
file mode 100644 index c0629c52..00000000 --- a/test/nebulex/adapters/replicated_test.exs +++ /dev/null @@ -1,347 +0,0 @@ -defmodule Nebulex.Adapters.ReplicatedTest do - use Nebulex.NodeCase - use Nebulex.CacheTest - - import Mock - import Nebulex.CacheCase - - alias Nebulex.TestCache.{Replicated, ReplicatedMock} - - @cache_name :replicated_cache - - setup_all do - node_pid_list = start_caches(cluster_nodes(), [{Replicated, [name: @cache_name]}]) - - on_exit(fn -> - :ok = Process.sleep(100) - stop_caches(node_pid_list) - end) - - {:ok, cache: Replicated, name: @cache_name} - end - - setup do - default_dynamic_cache = Replicated.get_dynamic_cache() - _ = Replicated.put_dynamic_cache(@cache_name) - - _ = Replicated.delete_all() - - on_exit(fn -> - Replicated.put_dynamic_cache(default_dynamic_cache) - end) - - :ok - end - - describe "c:init/1" do - test "raises an exception because invalid primary store" do - assert_raise ArgumentError, ~r"adapter Invalid was not compiled", fn -> - defmodule Demo do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated, - primary_storage_adapter: Invalid - end - end - end - end - - describe "replicated cache:" do - test "put/3" do - assert Replicated.put(1, 1) == :ok - assert Replicated.get(1) == 1 - - assert_for_all_replicas(Replicated, :get, [1], 1) - - assert Replicated.put_all(a: 1, b: 2, c: 3) == :ok - - assert_for_all_replicas(Replicated, :get_all, [[:a, :b, :c]], %{a: 1, b: 2, c: 3}) - end - - test "delete/2" do - assert Replicated.put("foo", "bar") == :ok - assert Replicated.get("foo") == "bar" - - assert_for_all_replicas(Replicated, :get, ["foo"], "bar") - - assert Replicated.delete("foo") == :ok - refute Replicated.get("foo") - - assert_for_all_replicas(Replicated, :get, ["foo"], nil) - end - - test "take/2" do - assert Replicated.put("foo", "bar") == :ok - assert Replicated.get("foo") == "bar" - - assert_for_all_replicas(Replicated, :get, ["foo"], "bar") - - assert Replicated.take("foo") == "bar" 
- refute Replicated.get("foo") - - assert_for_all_replicas(Replicated, :take, ["foo"], nil) - end - - test "incr/3" do - assert Replicated.incr(:counter, 3) == 3 - assert Replicated.incr(:counter) == 4 - - assert_for_all_replicas(Replicated, :get, [:counter], 4) - end - - test "incr/3 raises when the counter is not an integer" do - :ok = Replicated.put(:counter, "string") - - assert_raise ArgumentError, fn -> - Replicated.incr(:counter, 10) - end - end - - test "delete_all/2" do - assert Replicated.put_all(a: 1, b: 2, c: 3) == :ok - - assert_for_all_replicas(Replicated, :get_all, [[:a, :b, :c]], %{a: 1, b: 2, c: 3}) - - assert Replicated.delete_all() == 3 - assert Replicated.count_all() == 0 - - assert_for_all_replicas(Replicated, :get_all, [[:a, :b, :c]], %{}) - end - end - - describe "cluster" do - test "node leaves and then rejoins", %{name: name} do - cluster = :lists.usort(cluster_nodes()) - - wait_until(fn -> - assert Replicated.nodes() == cluster - end) - - Replicated.with_dynamic_cache(name, fn -> - :ok = Replicated.leave_cluster() - end) - - wait_until(fn -> - assert Replicated.nodes() == cluster -- [node()] - end) - - Replicated.with_dynamic_cache(name, fn -> - :ok = Replicated.join_cluster() - end) - - wait_until(fn -> - assert Replicated.nodes() == cluster - end) - end - - test "error: rpc error" do - node_pid_list = start_caches(cluster_nodes(), [{ReplicatedMock, []}]) - - try do - _ = Process.flag(:trap_exit, true) - - msg = ~r"RPC error while executing action :put_all\n\nSuccessful responses:" - - assert_raise Nebulex.RPCMultiCallError, msg, fn -> - ReplicatedMock.put_all(a: 1, b: 2) - end - after - stop_caches(node_pid_list) - end - end - - test "ok: start/stop cache nodes" do - event = [:nebulex, :test_cache, :replicated, :replication] - - with_telemetry_handler(__MODULE__, [event], fn -> - assert Replicated.nodes() |> :lists.usort() == :lists.usort(cluster_nodes()) - - assert Replicated.put_all(a: 1, b: 2) == :ok - assert Replicated.put(:c, 3, 
ttl: 5000) == :ok - - assert_for_all_replicas( - Replicated, - :get_all, - [[:a, :b, :c]], - %{a: 1, b: 2, c: 3} - ) - - # start new cache nodes - nodes = [:"node3@127.0.0.1", :"node4@127.0.0.1"] - node_pid_list = start_caches(nodes, [{Replicated, [name: @cache_name]}]) - - wait_until(fn -> - assert Replicated.nodes() |> :lists.usort() == :lists.usort(nodes ++ cluster_nodes()) - end) - - wait_until(10, 1000, fn -> - assert_for_all_replicas( - Replicated, - :get_all, - [[:a, :b, :c]], - %{a: 1, b: 2, c: 3} - ) - end) - - # stop cache node - :ok = node_pid_list |> hd() |> List.wrap() |> stop_caches() - - if Code.ensure_loaded?(:pg) do - # errors on failed nodes should be ignored - with_mock Nebulex.Cache.Cluster, [:passthrough], - get_nodes: fn _ -> [:"node5@127.0.0.1"] ++ nodes end do - assert Replicated.put(:foo, :bar) == :ok - - assert_receive {^event, %{rpc_errors: 2}, meta} - assert meta[:adapter_meta][:cache] == Replicated - assert meta[:adapter_meta][:name] == :replicated_cache - assert meta[:function_name] == :put - - assert [ - "node5@127.0.0.1": :noconnection, - "node3@127.0.0.1": %Nebulex.RegistryLookupError{} - ] = meta[:rpc_errors] - end - end - - wait_until(10, 1000, fn -> - assert Replicated.nodes() |> :lists.usort() == - :lists.usort(cluster_nodes() ++ [:"node4@127.0.0.1"]) - end) - - assert_for_all_replicas( - Replicated, - :get_all, - [[:a, :b, :c]], - %{a: 1, b: 2, c: 3} - ) - - :ok = stop_caches(node_pid_list) - end) - end - end - - describe "write-like operations locked" do - test "when a delete_all command is ongoing" do - test_with_dynamic_cache(ReplicatedMock, [name: :replicated_global_mock], fn -> - true = Process.register(self(), __MODULE__) - _ = Process.flag(:trap_exit, true) - - task1 = - Task.async(fn -> - _ = ReplicatedMock.put_dynamic_cache(:replicated_global_mock) - _ = ReplicatedMock.delete_all() - send(__MODULE__, :delete_all) - end) - - task2 = - Task.async(fn -> - :ok = Process.sleep(1000) - _ = 
ReplicatedMock.put_dynamic_cache(:replicated_global_mock) - :ok = ReplicatedMock.put("foo", "bar") - :ok = Process.sleep(100) - send(__MODULE__, :put) - end) - - assert_receive :delete_all, 5000 - assert_receive :put, 5000 - - [_, _] = Task.yield_many([task1, task2]) - end) - end - end - - describe "doesn't leave behind EXIT messages after calling, with exits trapped:" do - test "all/0" do - put_all_and_trap_exits(a: 1, b: 2, c: 3) - Replicated.all() - refute_receive {:EXIT, _, :normal} - end - - test "delete/1" do - put_all_and_trap_exits(a: 1) - Replicated.delete(:a) - refute_receive {:EXIT, _, :normal} - end - - test "delete_all/2" do - put_all_and_trap_exits(a: 1, b: 2, c: 3) - Replicated.delete_all() - refute_receive {:EXIT, _, :normal} - end - - test "get/1" do - put_all_and_trap_exits(a: 1) - Replicated.get(:a) - refute_receive {:EXIT, _, :normal} - end - - test "incr/1" do - put_all_and_trap_exits(a: 1) - Replicated.incr(:a) - refute_receive {:EXIT, _, :normal} - end - - test "nodes/0" do - put_all_and_trap_exits([]) - Replicated.nodes() - refute_receive {:EXIT, _, :normal} - end - - test "put/2" do - put_all_and_trap_exits([]) - Replicated.put(:a, 1) - refute_receive {:EXIT, _, :normal} - end - - test "put_all/1" do - put_all_and_trap_exits([]) - Replicated.put_all(a: 1, b: 2, c: 3) - refute_receive {:EXIT, _, :normal} - end - - test "count_all/2" do - put_all_and_trap_exits([]) - Replicated.count_all() - refute_receive {:EXIT, _, :normal} - end - - test "stream/0" do - put_all_and_trap_exits(a: 1, b: 2, c: 3) - Replicated.stream() |> Enum.take(10) - refute_receive {:EXIT, _, :normal} - end - - test "take/1" do - put_all_and_trap_exits(a: 1) - Replicated.take(:a) - refute_receive {:EXIT, _, :normal} - end - - # Put the values, ensure we didn't generate a message before trapping exits, - # then trap exits. 
- defp put_all_and_trap_exits(kv_pairs) do - Replicated.put_all(kv_pairs, ttl: :infinity) - refute_receive {:EXIT, _, :normal} - Process.flag(:trap_exit, true) - end - end - - ## Helpers - - defp assert_for_all_replicas(cache, action, args, expected) do - assert {res_lst, []} = - :rpc.multicall( - cache.nodes(), - cache, - :with_dynamic_cache, - [@cache_name, cache, action, args] - ) - - Enum.each(res_lst, fn res -> assert res == expected end) - end - - defp cluster_nodes do - [node() | Node.list()] -- [:"node3@127.0.0.1", :"node4@127.0.0.1"] - end -end diff --git a/test/nebulex/adapters/stats_test.exs b/test/nebulex/adapters/stats_test.exs deleted file mode 100644 index 3e5b67ca..00000000 --- a/test/nebulex/adapters/stats_test.exs +++ /dev/null @@ -1,367 +0,0 @@ -defmodule Nebulex.Adapters.StatsTest do - use ExUnit.Case - - import Nebulex.CacheCase - - alias Nebulex.Cache.Stats - - ## Shared cache - - defmodule Cache do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Multilevel - - defmodule L1 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule L2 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated - end - - defmodule L3 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - end - - defmodule L4 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - end - - ## Shared constants - - @config [ - model: :inclusive, - levels: [ - {Cache.L1, gc_interval: :timer.hours(1), backend: :shards}, - {Cache.L2, primary: [gc_interval: :timer.hours(1)]}, - {Cache.L3, primary: [gc_interval: :timer.hours(1)]} - ] - ] - - @event [:nebulex, :adapters, :stats_test, :cache, :stats] - - ## Tests - - describe "(multilevel) stats/0" do - setup_with_cache(Cache, [stats: true] ++ @config) - - test "hits and misses" do - :ok = Cache.put_all(a: 1, b: 2) - - assert Cache.get(:a) == 1 - assert Cache.has_key?(:a) - assert 
Cache.ttl(:b) == :infinity - refute Cache.get(:c) - refute Cache.get(:d) - - assert Cache.get_all([:a, :b, :c, :d]) == %{a: 1, b: 2} - - assert_stats_measurements(Cache, - l1: [hits: 5, misses: 4, writes: 2], - l2: [hits: 0, misses: 4, writes: 2], - l3: [hits: 0, misses: 4, writes: 2] - ) - end - - test "writes and updates" do - assert Cache.put_all(a: 1, b: 2) == :ok - assert Cache.put_all(%{a: 1, b: 2}) == :ok - refute Cache.put_new_all(a: 1, b: 2) - assert Cache.put_new_all(c: 3, d: 4, e: 3) - assert Cache.put(1, 1) == :ok - refute Cache.put_new(1, 2) - refute Cache.replace(2, 2) - assert Cache.put_new(2, 2) - assert Cache.replace(2, 22) - assert Cache.incr(:counter) == 1 - assert Cache.incr(:counter) == 2 - refute Cache.expire(:f, 1000) - assert Cache.expire(:a, 1000) - refute Cache.touch(:f) - assert Cache.touch(:b) - - :ok = Process.sleep(1100) - refute Cache.get(:a) - - wait_until(fn -> - assert_stats_measurements(Cache, - l1: [expirations: 1, misses: 1, writes: 10, updates: 4], - l2: [expirations: 1, misses: 1, writes: 10, updates: 4], - l3: [expirations: 1, misses: 1, writes: 10, updates: 4] - ) - end) - end - - test "evictions" do - entries = for x <- 1..10, do: {x, x} - :ok = Cache.put_all(entries) - - assert Cache.delete(1) == :ok - assert Cache.take(2) == 2 - refute Cache.take(20) - - assert_stats_measurements(Cache, - l1: [evictions: 2, misses: 1, writes: 10], - l2: [evictions: 2, misses: 1, writes: 10], - l3: [evictions: 2, misses: 1, writes: 10] - ) - - assert Cache.delete_all() == 24 - - assert_stats_measurements(Cache, - l1: [evictions: 10, misses: 1, writes: 10], - l2: [evictions: 10, misses: 1, writes: 10], - l3: [evictions: 10, misses: 1, writes: 10] - ) - end - - test "expirations" do - :ok = Cache.put_all(a: 1, b: 2) - :ok = Cache.put_all([c: 3, d: 4], ttl: 1000) - - assert Cache.get_all([:a, :b, :c, :d]) == %{a: 1, b: 2, c: 3, d: 4} - - :ok = Process.sleep(1100) - assert Cache.get_all([:a, :b, :c, :d]) == %{a: 1, b: 2} - - wait_until(fn -> - 
assert_stats_measurements(Cache, - l1: [evictions: 2, expirations: 2, hits: 6, misses: 2, writes: 4], - l2: [evictions: 2, expirations: 2, hits: 0, misses: 2, writes: 4], - l3: [evictions: 2, expirations: 2, hits: 0, misses: 2, writes: 4] - ) - end) - end - end - - describe "(replicated) stats/0" do - alias Cache.L2, as: Replicated - - setup_with_cache(Replicated, [stats: true] ++ @config) - - test "hits and misses" do - :ok = Replicated.put_all(a: 1, b: 2) - - assert Replicated.get(:a) == 1 - assert Replicated.get_all([:a, :b, :c, :d]) == %{a: 1, b: 2} - - assert %Nebulex.Stats{measurements: measurements} = Replicated.stats() - assert measurements.hits == 3 - assert measurements.misses == 2 - end - end - - describe "(partitioned) stats/0" do - alias Cache.L3, as: Partitioned - - setup_with_cache(Partitioned, [stats: true] ++ @config) - - test "hits and misses" do - :ok = Partitioned.put_all(a: 1, b: 2) - - assert Partitioned.get(:a) == 1 - assert Partitioned.get_all([:a, :b, :c, :d]) == %{a: 1, b: 2} - - assert %Nebulex.Stats{measurements: measurements} = Partitioned.stats() - assert measurements.hits == 3 - assert measurements.misses == 2 - end - end - - describe "disabled stats in a cache level" do - setup_with_cache( - Cache, - [stats: true] ++ - Keyword.update!( - @config, - :levels, - &(&1 ++ [{Cache.L4, gc_interval: :timer.hours(1), stats: false}]) - ) - ) - - test "ignored when returning stats" do - measurements = Cache.stats().measurements - assert Map.get(measurements, :l1) - assert Map.get(measurements, :l2) - assert Map.get(measurements, :l3) - refute Map.get(measurements, :l4) - end - end - - describe "cache init error" do - test "because invalid stats option" do - _ = Process.flag(:trap_exit, true) - - {:error, {%ArgumentError{message: msg}, _}} = - Cache.start_link(stats: 123, levels: [{Cache.L1, []}]) - - assert msg == "expected stats: to be boolean, got: 123" - end - - test "L1: invalid stats option" do - _ = Process.flag(:trap_exit, true) - - 
{:error, {:shutdown, {_, _, {:shutdown, {_, Cache.L1, {error, _}}}}}} = - Cache.start_link(stats: true, levels: [{Cache.L1, [stats: 123]}]) - - assert error == %ArgumentError{message: "expected stats: to be boolean, got: 123"} - end - - test "L2: invalid stats option" do - _ = Process.flag(:trap_exit, true) - - {:error, {:shutdown, {_, _, {:shutdown, {_, Cache.L2, {error, _}}}}}} = - Cache.start_link(stats: true, levels: [{Cache.L1, []}, {Cache.L2, [stats: 123]}]) - - assert error == %ArgumentError{message: "expected stats: to be boolean, got: 123"} - end - - test "L3: invalid stats option" do - _ = Process.flag(:trap_exit, true) - - {:error, {:shutdown, {_, _, {:shutdown, {_, Cache.L3, {error, _}}}}}} = - Cache.start_link( - stats: true, - levels: [{Cache.L1, []}, {Cache.L2, []}, {Cache.L3, [stats: 123]}] - ) - - assert error == %ArgumentError{message: "expected stats: to be boolean, got: 123"} - end - end - - describe "new generation" do - alias Cache.L1 - alias Cache.L2.Primary, as: L2Primary - alias Cache.L3.Primary, as: L3Primary - - setup_with_cache(Cache, [stats: true] ++ @config) - - test "updates evictions" do - :ok = Cache.put_all(a: 1, b: 2, c: 3) - assert Cache.count_all() == 9 - - assert_stats_measurements(Cache, - l1: [evictions: 0, writes: 3], - l2: [evictions: 0, writes: 3], - l3: [evictions: 0, writes: 3] - ) - - _ = L1.new_generation() - assert Cache.count_all() == 9 - - assert_stats_measurements(Cache, - l1: [evictions: 0, writes: 3], - l2: [evictions: 0, writes: 3], - l3: [evictions: 0, writes: 3] - ) - - _ = L1.new_generation() - assert Cache.count_all() == 6 - - assert_stats_measurements(Cache, - l1: [evictions: 3, writes: 3], - l2: [evictions: 0, writes: 3], - l3: [evictions: 0, writes: 3] - ) - - _ = L2Primary.new_generation() - _ = L2Primary.new_generation() - assert Cache.count_all() == 3 - - assert_stats_measurements(Cache, - l1: [evictions: 3, writes: 3], - l2: [evictions: 3, writes: 3], - l3: [evictions: 0, writes: 3] - ) - - _ = 
L3Primary.new_generation() - _ = L3Primary.new_generation() - assert Cache.count_all() == 0 - - assert_stats_measurements(Cache, - l1: [evictions: 3, writes: 3], - l2: [evictions: 3, writes: 3], - l3: [evictions: 3, writes: 3] - ) - end - end - - describe "disabled stats:" do - setup_with_cache(Cache, @config) - - test "stats/0 returns nil" do - refute Cache.stats() - end - - test "dispatch_stats/1 is skipped" do - with_telemetry_handler(__MODULE__, [@event], fn -> - :ok = Cache.dispatch_stats() - - refute_receive {@event, _, %{cache: Nebulex.Cache.StatsTest.Cache}} - end) - end - end - - describe "dispatch_stats/1" do - setup_with_cache(Cache, [stats: true] ++ @config) - - test "emits a telemetry event when called" do - with_telemetry_handler(__MODULE__, [@event], fn -> - :ok = Cache.dispatch_stats(metadata: %{node: node()}) - node = node() - - assert_receive {@event, measurements, - %{cache: Nebulex.Adapters.StatsTest.Cache, node: ^node}} - - assert measurements == %{ - l1: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 0}, - l2: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 0}, - l3: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 0} - } - end) - end - end - - describe "dispatch_stats/1 with dynamic cache" do - setup_with_dynamic_cache( - Cache, - :stats_with_dispatch, - [telemetry_prefix: [:my_event], stats: true] ++ @config - ) - - test "emits a telemetry event with custom telemetry_prefix when called" do - with_telemetry_handler(__MODULE__, [[:my_event, :stats]], fn -> - :ok = Cache.dispatch_stats(metadata: %{foo: :bar}) - - assert_receive {[:my_event, :stats], measurements, - %{cache: :stats_with_dispatch, foo: :bar}} - - assert measurements == %{ - l1: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 0}, - l2: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 0}, - l3: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 
0} - } - end) - end - end - - ## Helpers - - defp assert_stats_measurements(cache, levels) do - measurements = cache.stats().measurements - - for {level, stats} <- levels, {stat, expected} <- stats do - assert get_in(measurements, [level, stat]) == expected - end - end -end diff --git a/test/nebulex/cache/info_stats_test.exs b/test/nebulex/cache/info_stats_test.exs new file mode 100644 index 00000000..d43d109b --- /dev/null +++ b/test/nebulex/cache/info_stats_test.exs @@ -0,0 +1,128 @@ +defmodule Nebulex.Cache.InfoStatsTest do + use ExUnit.Case, asyc: true + use Mimic + + import Nebulex.CacheCase + + alias Nebulex.Adapters.Common.Info.Stats + + ## Internals + + defmodule Cache do + @moduledoc false + use Nebulex.Cache, + otp_app: :nebulex, + adapter: Nebulex.TestAdapter + end + + ## Tests + + describe "c:Nebulex.Cache.stats/1" do + setup_with_cache Cache, stats: true + + test "returns an error" do + Nebulex.Cache.Registry + |> expect(:lookup, fn _ -> {:ok, %{adapter: Nebulex.FakeAdapter}} end) + + assert Cache.info() == {:error, %Nebulex.Error{module: Nebulex.Error, reason: :error}} + end + + test "hits and misses" do + :ok = Cache.put_all!(a: 1, b: 2) + + assert Cache.get!(:a) == 1 + assert Cache.has_key?(:a) + assert Cache.ttl!(:b) == :infinity + refute Cache.get!(:c) + refute Cache.get!(:d) + + assert Cache.get_all!(in: [:a, :b, :c, :d]) |> Map.new() == %{a: 1, b: 2} + + assert Cache.info!(:stats) == %{ + hits: 5, + misses: 4, + writes: 2, + evictions: 0, + expirations: 0, + deletions: 0, + updates: 0 + } + end + + test "writes and updates" do + assert Cache.put_all!(a: 1, b: 2) == :ok + assert Cache.put_all(%{a: 1, b: 2}) == :ok + refute Cache.put_new_all!(a: 1, b: 2) + assert Cache.put_new_all!(c: 3, d: 4, e: 3) + assert Cache.put!(1, 1) == :ok + refute Cache.put_new!(1, 2) + refute Cache.replace!(2, 2) + assert Cache.put_new!(2, 2) + assert Cache.replace!(2, 22) + assert Cache.incr!(:counter) == 1 + assert Cache.incr!(:counter) == 2 + refute Cache.expire!(:f, 
1000) + assert Cache.expire!(:a, 1000) + refute Cache.touch!(:f) + assert Cache.touch!(:b) + + :ok = Process.sleep(1100) + + refute Cache.get!(:a) + + wait_until(fn -> + assert Cache.info!(:stats) == %{ + hits: 0, + misses: 1, + writes: 10, + evictions: 1, + expirations: 1, + deletions: 1, + updates: 4 + } + end) + end + + test "deletions" do + entries = for x <- 1..10, do: {x, x} + :ok = Cache.put_all!(entries) + + assert Cache.delete!(1) == :ok + assert Cache.take!(2) == 2 + + assert_raise Nebulex.KeyError, fn -> + Cache.take!(20) + end + + assert Cache.info!(:stats) == %{ + hits: 1, + misses: 1, + writes: 10, + evictions: 0, + expirations: 0, + deletions: 2, + updates: 0 + } + + assert Cache.delete_all!() == 8 + + assert Cache.info!(:stats) == %{ + hits: 1, + misses: 1, + writes: 10, + evictions: 0, + expirations: 0, + deletions: 10, + updates: 0 + } + end + end + + describe "disabled stats:" do + setup_with_cache Cache, stats: false + + test "c:Nebulex.Cache.stats/1 returns empty stats when counter is not set" do + assert Cache.info!(:stats) == Stats.new() + end + end +end diff --git a/test/nebulex/cache/info_test.exs b/test/nebulex/cache/info_test.exs new file mode 100644 index 00000000..7ee401b0 --- /dev/null +++ b/test/nebulex/cache/info_test.exs @@ -0,0 +1,85 @@ +defmodule Nebulex.Cache.InfoTest do + use ExUnit.Case, asyc: true + + import Nebulex.CacheCase + + alias Nebulex.Adapter + alias Nebulex.Adapters.Common.Info.Stats + + ## Internals + + defmodule Cache do + @moduledoc false + use Nebulex.Cache, + otp_app: :nebulex, + adapter: Nebulex.TestAdapter + end + + @empty_stats Stats.new() + + @empty_mem %{ + total: 1_000_000, + used: 0 + } + + ## Tests + + describe "c:Nebulex.Cache.info/1" do + setup_with_cache Cache, stats: true + + test "ok: returns all info" do + assert info = Cache.info!() + assert Cache.info!(:all) == info + + assert info[:server] == server_info() + assert info[:memory] == @empty_mem + assert info[:stats] == @empty_stats + end + + test 
"ok: returns item's info" do + assert Cache.info!(:server) == server_info() + assert Cache.info!(:memory) == @empty_mem + assert Cache.info!(:stats) == @empty_stats + end + + test "ok: returns multiple items info" do + assert Cache.info!([:server]) == %{server: server_info()} + assert Cache.info!([:memory]) == %{memory: @empty_mem} + assert Cache.info!([:server, :memory]) == %{server: server_info(), memory: @empty_mem} + end + + test "error: raises an exception because the requested item doesn't exist" do + for spec <- [:unknown, [:memory, :unknown], [:unknown, :unknown]] do + assert_raise ArgumentError, ~r"invalid information specification key :unknown", fn -> + Cache.info!(spec) + end + end + end + end + + describe "c:Nebulex.Cache.info/1 (stats disabled)" do + setup_with_cache Cache, stats: false + + test "ok: returns all info" do + assert info = Cache.info!() + + assert info[:server] == server_info() + assert info[:memory] == @empty_mem + assert info[:stats] == @empty_stats + end + end + + ## Provate functions + + defp server_info do + {:ok, adapter_meta} = Adapter.lookup_meta(Cache) + + %{ + nbx_version: Nebulex.vsn(), + cache_module: adapter_meta[:cache], + cache_adapter: adapter_meta[:adapter], + cache_name: adapter_meta[:name], + cache_pid: adapter_meta[:pid] + } + end +end diff --git a/test/nebulex/cache/registry_test.exs b/test/nebulex/cache/registry_test.exs new file mode 100644 index 00000000..f681a469 --- /dev/null +++ b/test/nebulex/cache/registry_test.exs @@ -0,0 +1,33 @@ +defmodule Nebulex.Cache.RegistryTest do + use ExUnit.Case, async: true + + import Nebulex.CacheCase, only: [test_with_dynamic_cache: 3] + + alias Nebulex.TestCache.Cache + + describe "lookup/1" do + test "error: returns an error with reason ':registry_lookup_error'" do + assert Nebulex.Cache.Registry.lookup(self()) == + {:error, + %Nebulex.Error{ + module: Nebulex.Error, + reason: :registry_lookup_error, + opts: [cache: self()] + }} + end + end + + describe "all_running/0" do + 
test "ok: returns all running cache names" do + test_with_dynamic_cache(Cache, [name: :registry_test_cache], fn -> + assert :registry_test_cache in Nebulex.Cache.Registry.all_running() + end) + end + + test "ok: returns all running cache pids" do + test_with_dynamic_cache(Cache, [name: nil], fn -> + assert Nebulex.Cache.Registry.all_running() |> Enum.any?(&is_pid/1) + end) + end + end +end diff --git a/test/nebulex/cache/supervisor_test.exs b/test/nebulex/cache/supervisor_test.exs index 6e2e9a6f..062774fb 100644 --- a/test/nebulex/cache/supervisor_test.exs +++ b/test/nebulex/cache/supervisor_test.exs @@ -4,7 +4,7 @@ defmodule Nebulex.Cache.SupervisorTest do defmodule MyCache do use Nebulex.Cache, otp_app: :nebulex, - adapter: Nebulex.Adapters.Local + adapter: Nebulex.TestAdapter @impl true def init(opts) do @@ -17,74 +17,122 @@ defmodule Nebulex.Cache.SupervisorTest do import Nebulex.CacheCase + alias Nebulex.{Adapter, Telemetry} alias Nebulex.TestCache.Cache - test "fails on init because :ignore is returned" do - assert MyCache.start_link(ignore: true) == :ignore - end + describe "compile_config/1" do + test "error: missing :otp_app option" do + assert_raise NimbleOptions.ValidationError, ~r"required :otp_app option not found", fn -> + Nebulex.Cache.Supervisor.compile_config(adapter: TestAdapter) + end + end - test "fails on compile_config because missing otp_app" do - assert_raise ArgumentError, "expected otp_app: to be given as argument", fn -> - Nebulex.Cache.Supervisor.compile_config(adapter: TestAdapter) + test "error: missing :adapter option" do + assert_raise NimbleOptions.ValidationError, ~r"required :adapter option not found", fn -> + Nebulex.Cache.Supervisor.compile_config(otp_app: :nebulex) + end end - end - test "fails on compile_config because missing adapter" do - assert_raise ArgumentError, "expected adapter: to be given as argument", fn -> - Nebulex.Cache.Supervisor.compile_config(otp_app: :nebulex) + test "error: adapter was not compiled" do + msg 
= ~r"invalid value for :adapter option: adapter TestAdapter was not compiled" + + assert_raise NimbleOptions.ValidationError, msg, fn -> + Nebulex.Cache.Supervisor.compile_config(otp_app: :nebulex, adapter: TestAdapter) + end end - end - test "fails on compile_config because adapter was not compiled" do - msg = ~r"adapter TestAdapter was not compiled, ensure" + test "error: adapter doesn't implement the required behaviour" do + msg = + "invalid value for :adapter option: expected the adapter module " <> + "given to Nebulex.Cache to list Nebulex.Adapter as a behaviour" - assert_raise ArgumentError, msg, fn -> - Nebulex.Cache.Supervisor.compile_config(otp_app: :nebulex, adapter: TestAdapter) + assert_raise NimbleOptions.ValidationError, msg, fn -> + defmodule MyAdapter do + end + + defmodule MyCache2 do + use Nebulex.Cache, + otp_app: :nebulex, + adapter: MyAdapter + end + end end - end - test "fails on compile_config because adapter error" do - msg = "expected :adapter option given to Nebulex.Cache to list Nebulex.Adapter as a behaviour" + test "error: invalid value for :adapter option" do + msg = ~r"invalid value for :adapter option: expected a module" - assert_raise ArgumentError, msg, fn -> - defmodule MyAdapter do + assert_raise NimbleOptions.ValidationError, msg, fn -> + Nebulex.Cache.Supervisor.compile_config(otp_app: :nebulex, adapter: 123) end + end + end + + describe "start_link/1" do + test "starts anonymous cache" do + assert {:ok, pid} = Cache.start_link(name: nil) + assert Process.alive?(pid) + + assert Cache.stop(pid, []) == :ok + refute Process.alive?(pid) + end + + test "starts cache with via" do + {:ok, _} = Registry.start_link(keys: :unique, name: Registry.ViaTest) + name = {:via, Registry, {Registry.ViaTest, "test"}} + + assert {:ok, pid} = Cache.start_link(name: name) + assert Process.alive?(pid) + + assert [{^pid, _}] = Registry.lookup(Registry.ViaTest, "test") - defmodule MyCache2 do + assert Cache.stop(pid, []) == :ok + refute 
Process.alive?(pid) + end + + test "starts cache with custom adapter" do + defmodule CustomCache do use Nebulex.Cache, otp_app: :nebulex, - adapter: MyAdapter + adapter: Nebulex.TestCache.AdapterMock end - Nebulex.Cache.Supervisor.compile_config(otp_app: :nebulex) - end - end + assert {:ok, _pid} = CustomCache.start_link(child_name: :custom_cache) - test "start cache with custom adapter" do - defmodule CustomCache do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.TestCache.AdapterMock + _ = Process.flag(:trap_exit, true) + + assert {:error, _error} = + CustomCache.start_link(name: :another_custom_cache, child_name: :custom_cache) + + assert CustomCache.stop() == :ok end - assert {:ok, _pid} = CustomCache.start_link(child_name: :custom_cache) + test "starts cache with [bypass_mode: true]" do + assert {:ok, pid} = Cache.start_link(bypass_mode: true) + assert Process.alive?(pid) - _ = Process.flag(:trap_exit, true) + assert {:ok, adapter_meta} = Adapter.lookup_meta(Cache) + assert adapter_meta.adapter == Nebulex.Adapters.Nil - assert {:error, _reason} = - CustomCache.start_link(name: :another_custom_cache, child_name: :custom_cache) + assert Cache.put("foo", "bar") == :ok + assert Cache.get!("foo") == nil - assert CustomCache.stop() == :ok - end + assert Cache.stop(pid, []) == :ok + refute Process.alive?(pid) + end - test "emits telemetry event upon cache start" do - with_telemetry_handler([[:nebulex, :cache, :init]], fn -> - {:ok, _} = Cache.start_link(name: :telemetry_test) + test "emits telemetry event upon cache start" do + with_telemetry_handler([[:nebulex, :cache, :init]], fn -> + {:ok, _} = Cache.start_link(name: :telemetry_test) - assert_receive {[:nebulex, :cache, :init], _, %{cache: Cache, opts: opts}} - assert opts[:telemetry_prefix] == [:nebulex, :test_cache, :cache] - assert opts[:name] == :telemetry_test - end) + assert_receive {[:nebulex, :cache, :init], _, %{cache: Cache, opts: opts}} + assert opts[:telemetry_prefix] == 
Telemetry.default_event_prefix() + assert opts[:name] == :telemetry_test + end) + end + + test "error: fails on init because :ignore is returned" do + assert MyCache.start_link(ignore: true) == :ignore + end end ## Helpers diff --git a/test/nebulex/cache_error_test.exs b/test/nebulex/cache_error_test.exs new file mode 100644 index 00000000..ef1de2d2 --- /dev/null +++ b/test/nebulex/cache_error_test.exs @@ -0,0 +1,35 @@ +defmodule Nebulex.CacheErrorTest do + use ExUnit.Case, async: true + use Mimic + + # Inherit error tests + use Nebulex.Cache.KVErrorTest + use Nebulex.Cache.KVExpirationErrorTest + + setup do + Nebulex.Cache.Registry + |> stub(:lookup, fn _ -> {:ok, %{adapter: Nebulex.FakeAdapter}} end) + + {:ok, cache: Nebulex.TestCache.Cache, name: :test_cache_local_error} + end + + describe "put!/3" do + test "raises an error due to a timeout", %{cache: cache} do + assert_raise Nebulex.Error, ~r|command execution timed out|, fn -> + cache.put!(:error, :timeout) + end + end + + test "raises an error due to RuntimeError", %{cache: cache} do + msg = + Regex.escape( + "the following exception occurred when executing a command.\n\n" <> + " ** (RuntimeError) runtime error" + ) + + assert_raise Nebulex.Error, ~r|#{msg}|, fn -> + cache.put!(:error, %RuntimeError{}) + end + end + end +end diff --git a/test/nebulex/cache_test.exs b/test/nebulex/cache_test.exs new file mode 100644 index 00000000..bcd35ace --- /dev/null +++ b/test/nebulex/cache_test.exs @@ -0,0 +1,115 @@ +defmodule Nebulex.Adapters.CacheTest do + use ExUnit.Case, async: true + + # Cache API test cases + use Nebulex.CacheTestCase + + import Nebulex.CacheCase, only: [setup_with_dynamic_cache: 2] + + setup_with_dynamic_cache Nebulex.TestCache.Cache, :test_cache_local + + describe "KV:" do + test "get_and_update", %{cache: cache} do + fun = fn + nil -> {nil, 1} + val -> {val, val * 2} + end + + assert cache.get_and_update!(1, fun) == {nil, 1} + assert cache.get_and_update!(1, &{&1, &1 * 2}) == {1, 2} + assert 
cache.get_and_update!(1, &{&1, &1 * 3}) == {2, 6} + assert cache.get_and_update!(1, &{&1, nil}) == {6, 6} + assert cache.get!(1) == 6 + assert cache.get_and_update!(1, fn _ -> :pop end) == {6, nil} + assert cache.get_and_update!(1, fn _ -> :pop end) == {nil, nil} + assert cache.get_and_update!(3, &{&1, 3}) == {nil, 3} + end + + test "get_and_update fails because function returns invalid value", %{cache: cache} do + assert_raise ArgumentError, fn -> + cache.get_and_update(1, fn _ -> :other end) + end + end + + test "get_and_update fails because cache is not started", %{cache: cache} do + :ok = cache.stop() + + assert_raise Nebulex.Error, fn -> + assert cache.get_and_update!(1, fn _ -> :pop end) + end + end + + test "incr and update", %{cache: cache} do + assert cache.incr!(:counter) == 1 + assert cache.incr!(:counter) == 2 + + assert cache.get_and_update!(:counter, &{&1, &1 * 2}) == {2, 4} + assert cache.incr!(:counter) == 5 + + assert cache.update!(:counter, 1, &(&1 * 2)) == 10 + assert cache.incr!(:counter, -10) == 0 + + assert cache.put("foo", "bar") == :ok + + assert_raise Nebulex.Error, fn -> + cache.incr!("foo") + end + end + + test "incr with ttl", %{cache: cache} do + assert cache.incr!(:counter_with_ttl, 1, ttl: 1000) == 1 + assert cache.incr!(:counter_with_ttl) == 2 + assert cache.fetch!(:counter_with_ttl) == 2 + + :ok = Process.sleep(1010) + + assert {:error, %Nebulex.KeyError{key: :counter_with_ttl}} = cache.fetch(:counter_with_ttl) + + assert cache.incr!(:counter_with_ttl, 1, ttl: 5000) == 1 + assert {:ok, ttl} = cache.ttl(:counter_with_ttl) + assert ttl > 1000 + + assert cache.expire(:counter_with_ttl, 500) == {:ok, true} + + :ok = Process.sleep(600) + + assert {:error, %Nebulex.KeyError{key: :counter_with_ttl}} = cache.fetch(:counter_with_ttl) + end + + test "incr existing entry", %{cache: cache} do + assert cache.put(:counter, 0) == :ok + assert cache.incr!(:counter) == 1 + assert cache.incr!(:counter, 2) == 3 + end + end + + describe "queryable:" do 
+ test "raises an exception because of an invalid query", %{cache: cache} do + for action <- [:get_all, :stream] do + assert_raise Nebulex.QueryError, fn -> + apply(cache, action, [[query: :invalid]]) + end + end + end + end + + describe "error" do + test "because cache is stopped", %{cache: cache, name: name} do + :ok = cache.stop() + + assert cache.put(1, 13) == + {:error, + %Nebulex.Error{ + module: Nebulex.Error, + reason: :registry_lookup_error, + opts: [cache: name] + }} + + msg = ~r"could not lookup Nebulex cache" + + assert_raise Nebulex.Error, msg, fn -> cache.put!(1, 13) end + assert_raise Nebulex.Error, msg, fn -> cache.get!(1) end + assert_raise Nebulex.Error, msg, fn -> cache.delete!(1) end + end + end +end diff --git a/test/nebulex/caching_test.exs b/test/nebulex/caching_test.exs index 45c11bcf..95b2805e 100644 --- a/test/nebulex/caching_test.exs +++ b/test/nebulex/caching_test.exs @@ -1,61 +1,58 @@ defmodule Nebulex.CachingTest do use ExUnit.Case, async: true - use Nebulex.Caching - - @behaviour Nebulex.Caching.KeyGenerator defmodule Cache do + @moduledoc false use Nebulex.Cache, otp_app: :nebulex, - adapter: Nebulex.Adapters.Local + adapter: Nebulex.TestAdapter end - defmodule CacheWithDefaultKeyGenerator do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local, - default_key_generator: __MODULE__ - - @behaviour Nebulex.Caching.KeyGenerator + use Nebulex.Caching, cache: Cache - @impl true - def generate(mod, fun, args), do: :erlang.phash2({mod, fun, args}) - end + import Nebulex.CacheCase defmodule YetAnotherCache do + @moduledoc false use Nebulex.Cache, otp_app: :nebulex, - adapter: Nebulex.Adapters.Local + adapter: Nebulex.TestAdapter end defmodule Meta do - defstruct [:id, :count] + @moduledoc false + @type t :: %__MODULE__{} + + defstruct [:id, :count] end - defmodule TestKeyGenerator do - @behaviour Nebulex.Caching.KeyGenerator + ## Tests - @impl true - def generate(_, :put_with_keygen, [arg1, _arg2]) do - arg1 - end + 
setup_with_cache Cache - def generate(mod, fun, args) do - :erlang.phash2({mod, fun, args}) + describe "caching definition" do + test "ok: valid :default_key_generator option" do + defmodule ValidCompileOptsTest do + use Nebulex.Caching, default_key_generator: Nebulex.Caching.SimpleKeyGenerator + end end - end - - import Nebulex.CacheCase - alias Nebulex.CachingTest.{Cache, Meta} + test "error: invalid :default_key_generator option" do + msg = ~r|invalid value for :default_key_generator option: key-generator InvalidKeyGenerator| - setup_with_cache(Cache) + assert_raise NimbleOptions.ValidationError, msg, fn -> + defmodule InvalidCompileOptsTest do + use Nebulex.Caching, default_key_generator: InvalidKeyGenerator + end + end + end + end describe "decorator" do test "cacheable fails because missing cache" do - assert_raise ArgumentError, "expected cache: to be given as argument", fn -> - defmodule Test do + assert_raise ArgumentError, ~r|expected :cache option to be found within the decorator|, fn -> + defmodule MissingCacheTest do use Nebulex.Caching @decorate cacheable(a: 1) @@ -63,10 +60,12 @@ defmodule Nebulex.CachingTest do {a, b} end end + + MissingCacheTest.t(1, 2) end end - test "cacheable fails invalid option :on_error" do + test "cacheable fails because invalid :on_error option value" do msg = "expected on_error: to be :raise or :nothing, got: :invalid" assert_raise ArgumentError, msg, fn -> @@ -81,7 +80,7 @@ defmodule Nebulex.CachingTest do end end - test "cache_evict fails invalid option :keys" do + test "cache_evict fails because invalid :keys option value" do msg = "expected keys: to be a list with at least one element, got: []" assert_raise ArgumentError, msg, fn -> @@ -99,294 +98,383 @@ defmodule Nebulex.CachingTest do describe "cacheable" do test "with default opts" do - refute Cache.get("x") - assert get_by_x("x") == nil - refute Cache.get("x") - - assert get_by_x(1, 11) == 11 - assert Cache.get(1) == 11 + refute Cache.get!("x") + assert 
get_by_xy("x") == nil + assert Cache.fetch!("x") == nil - assert get_by_x(2, {:ok, 22}) == {:ok, 22} - assert Cache.get(2) == {:ok, 22} + assert get_by_xy(1, 11) == 11 + assert Cache.get!(1) == 11 - assert get_by_x(3, :error) == :error - refute Cache.get(3) + assert get_by_xy(2, {:ok, 22}) == {:ok, 22} + assert Cache.get!(2) == {:ok, 22} - assert get_by_x(4, {:error, 4}) == {:error, 4} - refute Cache.get(4) + assert get_by_xy(3, :error) == :error + refute Cache.get!(3) - refute Cache.get({:xy, 2}) - assert get_by_xy(:xy, 2) == {:xy, 4} - assert Cache.get({:xy, 2}) == {:xy, 4} + assert get_by_xy(4, {:error, 4}) == {:error, 4} + refute Cache.get!(4) - :ok = Process.sleep(1100) + refute Cache.get!({:xy, 2}) + assert multiply_xy(:xy, 2) == {:xy, 4} + assert Cache.get!({:xy, 2}) == {:xy, 4} - refute Cache.get("x") - assert Cache.get(1) == 11 - assert Cache.get(2) == {:ok, 22} - refute Cache.get(3) - refute Cache.get(4) - assert Cache.get({:xy, 2}) == {:xy, 4} + assert Cache.fetch!("x") == nil + assert Cache.get!(1) == 11 + assert Cache.get!(2) == {:ok, 22} + refute Cache.get!(3) + refute Cache.get!(4) + assert Cache.get!({:xy, 2}) == {:xy, 4} end test "with opts" do - refute Cache.get("x") + refute Cache.get!("x") assert get_with_opts(1) == 1 - assert Cache.get(1) == 1 + assert Cache.get!(1) == 1 :ok = Process.sleep(1100) - refute Cache.get(1) + + refute Cache.get!(1) end test "with match function" do - refute Cache.get(:x) + refute Cache.get!(:x) assert get_with_match(:x) == :x - refute Cache.get(:x) + refute Cache.get!(:x) - refute Cache.get(:y) + refute Cache.get!(:y) assert get_with_match(:y) == :y - assert Cache.get(:y) + assert Cache.get!(:y) - refute Cache.get("true") - assert get_with_match_fun("true") == {:ok, "true"} - assert Cache.get("true") == {:ok, "true"} + refute Cache.get!(true) + assert get_with_match_fun(true) == {:ok, "true"} + assert Cache.get!(true) == {:ok, "true"} - refute Cache.get(1) + refute Cache.get!(1) assert get_with_match_fun(1) == {:ok, 
"1"} - assert Cache.get(1) == "1" + assert Cache.get!(1) == "1" - refute Cache.get({:ok, "hello"}) + refute Cache.get!({:ok, "hello"}) assert get_with_match_fun({:ok, "hello"}) == :error - refute Cache.get({:ok, "hello"}) + refute Cache.get!({:ok, "hello"}) + end + + test "with match function and context" do + refute Cache.get!(:x) + assert get_with_match_fun_and_ctx(:x) == {:ok, "x"} + assert_receive %{module: __MODULE__, function_name: :get_with_match_fun_and_ctx, args: [:x]} + assert Cache.get!(:x) == "x" + + refute Cache.get!(true) + assert get_with_match_fun_and_ctx(true) == {:ok, "true"} + assert_receive %{module: __MODULE__, function_name: :get_with_match_fun_and_ctx, args: [true]} + assert Cache.get!(true) == {:ok, "true"} end test "with match function and custom opts" do - refute Cache.get(300) + refute Cache.get!(300) assert get_with_custom_ttl(300) == {:ok, %{ttl: 300}} - assert Cache.get(300) == {:ok, %{ttl: 300}} + assert Cache.get!(300) == {:ok, %{ttl: 300}} :ok = Process.sleep(400) - refute Cache.get(300) + refute Cache.get!(300) end test "with default key" do assert get_with_default_key(123, {:foo, "bar"}) == :ok - assert [123, {:foo, "bar"}] |> :erlang.phash2() |> Cache.get() == :ok + assert [123, {:foo, "bar"}] |> :erlang.phash2() |> Cache.get!() == :ok + assert get_with_default_key(:foo, "bar") == :ok - assert [:foo, "bar"] |> :erlang.phash2() |> Cache.get() == :ok + assert [:foo, "bar"] |> :erlang.phash2() |> Cache.get!() == :ok end test "defining keys using structs and maps" do - refute Cache.get("x") + refute Cache.get!("x") + assert get_meta(%Meta{id: 1, count: 1}) == %Meta{id: 1, count: 1} - assert Cache.get({Meta, 1}) == %Meta{id: 1, count: 1} + assert Cache.get!({Meta, 1}) == %Meta{id: 1, count: 1} + + refute Cache.get!("y") - refute Cache.get("y") assert get_map(%{id: 1}) == %{id: 1} - assert Cache.get(1) == %{id: 1} + assert Cache.get!(1) == %{id: 1} end test "with multiple clauses" do - refute Cache.get(2) + refute Cache.get!(2) + 
assert multiple_clauses(2, 2) == 4 - assert Cache.get(2) == 4 + assert Cache.get!(2) == 4 + + refute Cache.get!("foo") - refute Cache.get("foo") assert multiple_clauses("foo", "bar") == {"foo", "bar"} - assert Cache.get("foo") == {"foo", "bar"} + assert Cache.get!("foo") == {"foo", "bar"} end test "without args" do - refute Cache.get(0) + refute Cache.get!(0) assert get_without_args() == "hello" - assert Cache.get(0) == "hello" + assert Cache.get!(0) == "hello" end test "with side effects and returning false (issue #111)" do - refute Cache.get("side-effect") + refute Cache.get!("side-effect") assert get_false_with_side_effect(false) == false - assert Cache.get("side-effect") == 1 + assert Cache.get!("side-effect") == 1 assert get_false_with_side_effect(false) == false - assert Cache.get("side-effect") == 1 + assert Cache.get!("side-effect") == 1 end end describe "cachable with references" do - setup_with_cache(YetAnotherCache) + setup_with_cache YetAnotherCache - test "with referenced key" do + test "returns referenced key" do # Expected values referenced_key = keyref "referenced_id" result = %{id: "referenced_id", name: "referenced_name"} + assert_common_references_flow("referenced_id", referenced_key, result, &get_with_keyref/1) + end + + test "returns referenced key by calling function with context" do + # Expected values + key = :erlang.phash2({"referenced_id", ["referenced_name"]}) + referenced_key = keyref key + result = %{id: "referenced_id", name: "referenced_name"} + + assert_common_references_flow(key, referenced_key, result, &get_with_keyref_fn_ctx/1) + end + + test "returns referenced key by calling referenced cache" do + # Expected values + referenced_key = keyref "referenced_id", cache: YetAnotherCache, ttl: 5000 + result = %{id: "referenced_id", name: "referenced_name"} + + assert_common_references_flow( + YetAnotherCache, + "referenced_id", + referenced_key, + result, + &get_with_keyref_cache/1 + ) + end + + test "returns referenced key from the 
args" do + # Expected values + referenced_key = keyref "id" + result = %{attrs: %{id: "id"}, name: "name"} + # Nothing is cached yet - refute Cache.get("referenced_id") - refute Cache.get("referenced_name") + refute Cache.get!("id") + refute Cache.get!("name") # First run: the function block is executed and its result is cached under # the referenced key, and the referenced key is cached under the given key - assert get_with_referenced_key("referenced_name") == result + assert get_with_keyref_from_args("name", %{id: "id"}) == result # Assert the key points to the referenced key - assert Cache.get("referenced_name") == referenced_key + assert Cache.get!("name") == referenced_key # Assert the referenced key points to the cached value - assert Cache.get("referenced_id") == result + assert Cache.get!("id") == result # Next run: the value should come from the cache - assert get_with_referenced_key("referenced_name") == result - - # Simulate a cache eviction for the referenced key - :ok = Cache.delete("referenced_id") + assert get_with_keyref_from_args("name", %{id: "id"}) == result + end - # The value under the referenced key should not longer exist - refute Cache.get("referenced_id") + test "returns fixed referenced key" do + # Expected values + referenced_key = keyref "fixed_id" + result = %{id: "fixed_id", name: "name"} - # Assert the key still points to the referenced key - assert Cache.get("referenced_name") == referenced_key + # Nothing is cached yet + refute Cache.get!("fixed_id") + refute Cache.get!("name") - # Next run: the key does exist but the referenced key doesn't, then the - # function block is executed and the result is cached under the referenced - # key back again - assert get_with_referenced_key("referenced_name") == result + # First run: the function block is executed and its result is cached under + # the referenced key, and the referenced key is cached under the given key + assert get_with_fixed_keyref("name") == result # Assert the key points to 
the referenced key - assert Cache.get("referenced_name") == referenced_key + assert Cache.get!("name") == referenced_key # Assert the referenced key points to the cached value - assert Cache.get("referenced_id") == result - - # Similate the referenced key is overridden - :ok = Cache.put("referenced_name", "overridden") + assert Cache.get!("fixed_id") == result - # The referenced key is overridden - assert get_with_referenced_key("referenced_name") == "overridden" + # Next run: the value should come from the cache + assert get_with_fixed_keyref("name") == result end - test "with referenced key from args" do + test "removes the reference's parent key due to the value was updated, causing a mismatch" do # Expected values - referenced_key = keyref "id" - result = %{attrs: %{id: "id"}, name: "name"} + referenced_key = keyref "referenced_id" + result = %{id: "referenced_id", name: "referenced_name"} # Nothing is cached yet - refute Cache.get("id") - refute Cache.get("name") + refute Cache.get!("referenced_id") + refute Cache.get!("referenced_name") - # First run: the function block is executed and its result is cached under - # the referenced key, and the referenced key is cached under the given key - assert get_with_referenced_key_from_args("name", %{id: "id"}) == result + # First time: everything works as usual + assert get_with_keyref_and_match("referenced_name", result) == result # Assert the key points to the referenced key - assert Cache.get("name") == referenced_key + assert Cache.get!("referenced_name") == referenced_key # Assert the referenced key points to the cached value - assert Cache.get("id") == result + assert Cache.get!("referenced_id") == result - # Next run: the value should come from the cache - assert get_with_referenced_key_from_args("name", %{id: "id"}) == result + # Update the cached value + another_result = %{result | name: "another_referenced_name"} + + # Replace cached value with the updated result + :ok = Cache.put("referenced_id", 
another_result) + + # Assert the cached value + assert Cache.get!("referenced_id") == another_result + + # Next run: Since the cached value was intentionally modified, there will + # be a mismatch with the given key, hence, the reference is removed from + # the cache and the function block is executed + assert get_with_keyref_and_match("referenced_name", another_result) == another_result + + # Refute the key does exist in the cache + refute Cache.get!("referenced_name") + + # The referenced key points to the updated value + assert Cache.get!("referenced_id") == another_result + + # Next run: works as usual since there isn't a mismatch this time + assert get_with_keyref_and_match("another_referenced_name", another_result) == another_result + + # Assert the key points to the referenced key + assert Cache.get!("another_referenced_name") == referenced_key + + # Assert the referenced key points to the cached value + assert Cache.get!("referenced_id") == another_result end - test "returns fixed referenced" do + test "removes the reference's parent key due to the value was deleted, causing a mismatch" do # Expected values - referenced_key = keyref "fixed_id" - result = %{id: "fixed_id", name: "name"} + referenced_key = keyref "referenced_id" + result = %{id: "referenced_id", name: "referenced_name"} # Nothing is cached yet - refute Cache.get("fixed_id") - refute Cache.get("name") + refute Cache.get!("referenced_id") + refute Cache.get!("referenced_name") - # First run: the function block is executed and its result is cached under - # the referenced key, and the referenced key is cached under the given key - assert get_with_fixed_referenced_key("name") == result + # First time: everything works as usual + assert get_with_keyref_and_match("referenced_name", result) == result # Assert the key points to the referenced key - assert Cache.get("name") == referenced_key + assert Cache.get!("referenced_name") == referenced_key # Assert the referenced key points to the cached value 
- assert Cache.get("fixed_id") == result + assert Cache.get!("referenced_id") == result - # Next run: the value should come from the cache - assert get_with_fixed_referenced_key("name") == result + # Delete the referenced key + :ok = Cache.delete("referenced_id") + + # Assert the cached value + refute Cache.get!("referenced_id") + + # Update the cached value + another_result = %{result | name: "another_referenced_name"} + + # Next run: Since the cached value was intentionally deleted, there will + # be a mismatch with the given new result, hence, the reference is removed + # from the cache and the function block is executed + assert get_with_keyref_and_match("referenced_name", another_result) == another_result + + # Refute the key does exist in the cache + refute Cache.get!("referenced_name") + + # Refute the referenced key does exist in the cache + refute Cache.get!("referenced_id") end - test "returns referenced key by calling referenced cache" do - # Expected values - referenced_key = keyref YetAnotherCache, "referenced_id" - result = %{id: "referenced_id", name: "referenced_name"} + ## Private functions + + defp assert_common_references_flow(ref_cache \\ nil, key, referenced_key, result, fun) do + # Resolve ref cache if any + ref_cache = ref_cache || Cache # Nothing is cached yet - refute Cache.get("referenced_id") - refute Cache.get("referenced_name") + refute Cache.get!("referenced_id") + refute Cache.get!("referenced_name") # First run: the function block is executed and its result is cached under # the referenced key, and the referenced key is cached under the given key - assert get_with_ref_key_with_cache("referenced_name") == result + assert fun.("referenced_name") == result # Assert the key points to the referenced key - assert Cache.get("referenced_name") == referenced_key + assert Cache.get!("referenced_name") == referenced_key # Assert the referenced key points to the cached value - assert YetAnotherCache.get("referenced_id") == result + assert 
ref_cache.get!(key) == result # Next run: the value should come from the cache - assert get_with_ref_key_with_cache("referenced_name") == result + assert fun.("referenced_name") == result # Simulate a cache eviction for the referenced key - :ok = YetAnotherCache.delete("referenced_id") + :ok = ref_cache.delete!(key) # The value under the referenced key should not longer exist - refute YetAnotherCache.get("referenced_id") + refute ref_cache.get!(key) # Assert the key still points to the referenced key - assert Cache.get("referenced_name") == referenced_key + assert Cache.get!("referenced_name") == referenced_key # Next run: the key does exist but the referenced key doesn't, then the # function block is executed and the result is cached under the referenced # key back again - assert get_with_ref_key_with_cache("referenced_name") == result + assert fun.("referenced_name") == result # Assert the key points to the referenced key - assert Cache.get("referenced_name") == referenced_key + assert Cache.get!("referenced_name") == referenced_key # Assert the referenced key points to the cached value - assert YetAnotherCache.get("referenced_id") == result + assert ref_cache.get!(key) == result - # Similate the referenced key is overridden - :ok = Cache.put("referenced_name", "overridden") + # Simulate the referenced key is overridden + :ok = Cache.put!("referenced_name", "overridden") # The referenced key is overridden - assert get_with_ref_key_with_cache("referenced_name") == "overridden" + assert fun.("referenced_name") == "overridden" + + # Assert the previously referenced key remains the same + assert ref_cache.get!(key) == result end end describe "cache_put" do test "with default opts" do assert update_fun(1) == nil - refute Cache.get(1) + refute Cache.get!(1) assert update_fun(1, :error) == :error - refute Cache.get(1) + refute Cache.get!(1) assert update_fun(1, {:error, :error}) == {:error, :error} - refute Cache.get(1) + refute Cache.get!(1) assert set_keys(x: 1, y: 2, 
z: 3) == :ok assert update_fun(:x, 2) == 2 assert update_fun(:y, {:ok, 4}) == {:ok, 4} - assert Cache.get(:x) == 2 - assert Cache.get(:y) == {:ok, 4} - assert Cache.get(:z) == 3 + assert Cache.get!(:x) == 2 + assert Cache.get!(:y) == {:ok, 4} + assert Cache.get!(:z) == 3 :ok = Process.sleep(1100) - assert Cache.get(:x) == 2 - assert Cache.get(:y) == {:ok, 4} - assert Cache.get(:z) == 3 + assert Cache.get!(:x) == 2 + assert Cache.get!(:y) == {:ok, 4} + assert Cache.get!(:z) == 3 end test "with opts" do @@ -395,37 +483,41 @@ defmodule Nebulex.CachingTest do assert update_with_opts(:y) == :y :ok = Process.sleep(1100) - refute Cache.get(:x) - refute Cache.get(:y) + + refute Cache.get!(:x) + refute Cache.get!(:y) end test "with match function" do assert update_with_match(:x) == {:ok, "x"} + assert Cache.get!(:x) == "x" + assert update_with_match(true) == {:ok, "true"} + assert Cache.get!(true) == {:ok, "true"} + assert update_with_match({:z, 1}) == :error - assert Cache.get(:x) == "x" - assert Cache.get(true) == {:ok, "true"} - refute Cache.get({:z, 1}) + refute Cache.get!({:z, 1}) end test "without args" do - refute Cache.get(0) + refute Cache.get!(0) assert update_without_args() == "hello" - assert Cache.get(0) == "hello" + assert Cache.get!(0) == "hello" end test "with multiple keys and ttl" do assert set_keys(x: 1, y: 2, z: 3) == :ok assert update_with_multiple_keys(:x, :y) == {:ok, {"x", "y"}} - assert Cache.get(:x) == {"x", "y"} - assert Cache.get(:y) == {"x", "y"} - assert Cache.get(:z) == 3 + assert Cache.get!(:x) == {"x", "y"} + assert Cache.get!(:y) == {"x", "y"} + assert Cache.get!(:z) == 3 :ok = Process.sleep(1100) - refute Cache.get(:x) - refute Cache.get(:y) - assert Cache.get(:z) == 3 + + refute Cache.get!(:x) + refute Cache.get!(:y) + assert Cache.get!(:z) == 3 end end @@ -434,269 +526,222 @@ defmodule Nebulex.CachingTest do assert set_keys(x: 1, y: 2, z: 3) == :ok assert evict_fun(:x) == :x - refute Cache.get(:x) - assert Cache.get(:y) == 2 - assert 
Cache.get(:z) == 3 + refute Cache.get!(:x) + assert Cache.get!(:y) == 2 + assert Cache.get!(:z) == 3 assert evict_fun(:y) == :y - refute Cache.get(:x) - refute Cache.get(:y) - assert Cache.get(:z) == 3 + refute Cache.get!(:x) + refute Cache.get!(:y) + assert Cache.get!(:z) == 3 end test "with multiple keys" do assert set_keys(x: 1, y: 2, z: 3) == :ok + assert evict_keys_fun(:x, :y) == {:x, :y} - refute Cache.get(:x) - refute Cache.get(:y) - assert Cache.get(:z) == 3 + + refute Cache.get!(:x) + refute Cache.get!(:y) + assert Cache.get!(:z) == 3 end test "all entries" do assert set_keys(x: 1, y: 2, z: 3) == :ok + assert evict_all_fun("hello") == "hello" - refute Cache.get(:x) - refute Cache.get(:y) - refute Cache.get(:z) + + refute Cache.get!(:x) + refute Cache.get!(:y) + refute Cache.get!(:z) end test "without args" do - refute Cache.get(0) + refute Cache.get!(0) assert get_without_args() == "hello" - assert Cache.get(0) == "hello" + assert Cache.get!(0) == "hello" assert evict_without_args() == "hello" - refute Cache.get(0) + refute Cache.get!(0) end end - describe "option :key_generator on" do + describe "option :key with custom key generator on" do test "cacheable annotation" do - key = TestKeyGenerator.generate(__MODULE__, :get_with_keygen, [1, 2]) + key = default_hash(:cacheable, :get_with_keygen, 2, [1, 2]) - refute Cache.get(key) + refute Cache.get!(key) assert get_with_keygen(1, 2) == {1, 2} - assert Cache.get(key) == {1, 2} - end - - test "cache_evict annotation" do - key = TestKeyGenerator.generate(__MODULE__, :evict_with_keygen, ["foo", "bar"]) - - :ok = Cache.put(key, {"foo", "bar"}) - assert Cache.get(key) == {"foo", "bar"} - - assert evict_with_keygen("foo", "bar") == {"foo", "bar"} - refute Cache.get(key) - end - - test "cache_put annotation" do - assert multiple_clauses(2, 2) == 4 - assert Cache.get(2) == 4 - - assert put_with_keygen(2, 4) == 8 - assert multiple_clauses(2, 2) == 8 - assert Cache.get(2) == 8 - - assert put_with_keygen(2, 8) == 16 - 
assert multiple_clauses(2, 2) == 16 - assert Cache.get(2) == 16 + assert Cache.get!(key) == {1, 2} end test "cacheable annotation with multiple function clauses and pattern-matching " do - key = TestKeyGenerator.generate(__MODULE__, :get_with_keygen2, [1, 2]) + key = default_hash(:cacheable, :get_with_keygen2, 3, [1, 2]) - refute Cache.get(key) + refute Cache.get!(key) assert get_with_keygen2(1, 2, %{a: {1, 2}}) == {1, 2} - assert Cache.get(key) == {1, 2} + assert Cache.get!(key) == {1, 2} - key = TestKeyGenerator.generate(__MODULE__, :get_with_keygen2, [1, 2, %{b: 3}]) + key = default_hash(:cacheable, :get_with_keygen2, 3, [1, 2, %{b: 3}]) - refute Cache.get(key) + refute Cache.get!(key) assert get_with_keygen2(1, 2, %{b: 3}) == {1, 2, %{b: 3}} - assert Cache.get(key) == {1, 2, %{b: 3}} + assert Cache.get!(key) == {1, 2, %{b: 3}} end test "cacheable annotation with ignored arguments" do - key = TestKeyGenerator.generate(__MODULE__, :get_with_keygen3, [1, %{b: 2}]) + key = default_hash(:cacheable, :get_with_keygen3, 7, [1, %{b: 2}]) - refute Cache.get(key) + refute Cache.get!(key) assert get_with_keygen3(1, 2, 3, {1, 2}, [1], %{a: 1}, %{b: 2}) == {1, %{b: 2}} - assert Cache.get(key) == {1, %{b: 2}} + assert Cache.get!(key) == {1, %{b: 2}} end - end - describe "default key generator on" do - setup_with_cache(CacheWithDefaultKeyGenerator) + test "cacheable annotation with custom key" do + key = {:a, :b, 1, 2} - test "cacheable annotation" do - key = CacheWithDefaultKeyGenerator.generate(__MODULE__, :get_with_default_key_generator, [1]) - - refute CacheWithDefaultKeyGenerator.get(key) - assert get_with_default_key_generator(1) == 1 - assert CacheWithDefaultKeyGenerator.get(key) == 1 + refute Cache.get!(key) + assert get_with_keygen4(1, 2) == {1, 2} + assert Cache.get!(key) == {1, 2} end test "cache_evict annotation" do - key = CacheWithDefaultKeyGenerator.generate(__MODULE__, :del_with_default_key_generator, [1]) - - :ok = CacheWithDefaultKeyGenerator.put(key, 1) - 
assert CacheWithDefaultKeyGenerator.get(key) == 1 - - assert del_with_default_key_generator(1) == 1 - refute CacheWithDefaultKeyGenerator.get(key) - end - end - - describe "key-generator tuple on" do - test "cacheable annotation" do - key = generate_key({1, 2}) + key = default_hash(:cache_evict, :evict_with_keygen, 2, ["foo", "bar"]) - refute Cache.get(key) - assert get_with_tuple_keygen(1, 2) == {1, 2} - assert Cache.get(key) == {1, 2} - end - - test "cacheable annotation (with key-generator: TestKeyGenerator)" do - key = TestKeyGenerator.generate(:a, :b, [1]) + :ok = Cache.put(key, {"foo", "bar"}) + assert Cache.get!(key) == {"foo", "bar"} - refute Cache.get(key) - assert get_with_tuple_keygen2(1, 2) == {1, 2} - assert Cache.get(key) == {1, 2} + assert evict_with_keygen("foo", "bar") == {"foo", "bar"} + refute Cache.get!(key) end - test "cache_evict annotation" do - key = generate_key({"foo", "bar"}) + test "cache_evict annotation with custom key" do + key = {"foo", "bar"} :ok = Cache.put(key, {"foo", "bar"}) - assert Cache.get(key) == {"foo", "bar"} + assert Cache.get!(key) == {"foo", "bar"} - assert evict_with_tuple_keygen("foo", "bar") == {"foo", "bar"} - refute Cache.get(key) + assert evict_with_keygen2("foo", "bar") == {"foo", "bar"} + refute Cache.get!(key) end test "cache_put annotation" do assert multiple_clauses(2, 2) == 4 - assert Cache.get(2) == 4 + assert Cache.get!(2) == 4 - assert put_with_tuple_keygen(2, 4) == 8 + assert put_with_keygen(2, 4) == 8 assert multiple_clauses(2, 2) == 8 - assert Cache.get(2) == 8 + assert Cache.get!(2) == 8 - assert put_with_tuple_keygen(2, 8) == 16 + assert put_with_keygen(2, 8) == 16 assert multiple_clauses(2, 2) == 16 - assert Cache.get(2) == 16 + assert Cache.get!(2) == 16 end - end - describe "key-generator with shorthand tuple on" do - test "cacheable annotation" do - key = TestKeyGenerator.generate(__MODULE__, :get_with_shorthand_tuple_keygen, [1]) + test "cache_put annotation with custom key" do + key = {:tuple, 
2} - refute Cache.get(key) - assert get_with_shorthand_tuple_keygen(1, 2, 3) == {1, 2} - assert Cache.get(key) == {1, 2} - end + assert Cache.put(key, 2) == :ok + assert Cache.get!(key) == 2 - test "cacheable annotation (with key-generator: __MODULE__)" do - key = generate(__MODULE__, :get_with_shorthand_tuple_keygen2, [1]) + assert put_with_keygen2(2, 4) == 8 + assert Cache.get!(key) == 8 - refute Cache.get(key) - assert get_with_shorthand_tuple_keygen2(1, 2) == {1, 2} - assert Cache.get(key) == {1, 2} + assert put_with_keygen2(2, 8) == 16 + assert Cache.get!(key) == 16 end + end - test "cache_evict annotation" do - key = TestKeyGenerator.generate(__MODULE__, :evict_with_shorthand_tuple_keygen, ["foo"]) - - :ok = Cache.put(key, {"foo", "bar"}) - assert Cache.get(key) == {"foo", "bar"} - - assert evict_with_shorthand_tuple_keygen("foo", "bar") == {"foo", "bar"} - refute Cache.get(key) + describe "option :on_error on" do + test "cacheable annotation raises a cache error" do + assert_raise Nebulex.Error, ~r"could not lookup", fn -> + get_and_raise_exception(:raise) + end end - test "cache_put annotation" do - key = TestKeyGenerator.generate(__MODULE__, :put_with_shorthand_tuple_keygen, ["foo"]) + test "cacheable annotation ignores the exception" do + assert get_ignoring_exception("foo") == "foo" + end - refute Cache.get(key) - assert put_with_shorthand_tuple_keygen("foo", "bar") == {"foo", "bar"} - assert Cache.get(key) == {"foo", "bar"} + test "cache_put annotation raises a cache error" do + assert_raise Nebulex.Error, ~r"could not lookup", fn -> + update_and_raise_exception(:raise) + end end - end - describe "option :on_error on" do - test "cacheable annotation" do - assert get_with_exception("foo") == "foo" + test "cache_put annotation ignores the exception" do + assert update_ignoring_exception("foo") == "foo" end - test "cache_put annotation" do - assert update_with_exception("foo") == "foo" + test "cache_evict annotation raises a cache error" do + assert_raise 
Nebulex.Error, ~r"could not lookup", fn -> + evict_and_raise_exception(:raise) + end end - test "cache_evict annotation" do - assert evict_with_exception("foo") == "foo" + test "cache_evict annotation ignores the exception" do + assert evict_ignoring_exception("foo") == "foo" end end - describe "option :cache with MFA" do + describe "option :cache with anonymous function on" do test "cacheable annotation" do - refute Cache.get("foo") - assert get_mfa_cache_without_extra_args("foo") == "foo" - assert Cache.get("foo") == "foo" + refute Cache.get!("foo") + + assert get_fn_cache("foo") == "foo" + assert_receive %{module: __MODULE__, function_name: :get_fn_cache, args: ["foo"]} + assert Cache.get!("foo") == "foo" end test "cache_put annotation" do :ok = Cache.put("foo", "bar") - assert update_mfa_cache_without_extra_args("bar bar") == "bar bar" - assert Cache.get("foo") == "bar bar" + assert update_fn_cache("bar bar") == "bar bar" + assert_receive %{module: __MODULE__, function_name: :update_fn_cache, args: ["bar bar"]} + assert Cache.get!("foo") == "bar bar" end test "cache_evict annotation" do :ok = Cache.put("foo", "bar") - assert delete_mfa_cache_without_extra_args("bar bar") == "bar bar" - refute Cache.get("foo") + assert delete_fn_cache("bar bar") == "bar bar" + assert_receive %{module: __MODULE__, function_name: :delete_fn_cache, args: ["bar bar"]} + refute Cache.get!("foo") end end - describe "option :cache with MFA and extra args" do - test "cacheable annotation" do - refute Cache.get("foo") - assert get_mfa_cache_with_extra_args("foo") == "foo" - assert Cache.get("foo") == "foo" - end - - test "cache_put annotation" do - :ok = Cache.put("foo", "bar") - - assert update_mfa_cache_with_extra_args("bar bar") == "bar bar" - assert Cache.get("foo") == "bar bar" - end + describe "option :cache raises an exception" do + test "due to invalid cache value" do + assert_raise ArgumentError, ~r|invalid value for :cache option|, fn -> + defmodule RuntimeCacheTest do + use 
Nebulex.Caching - test "cache_evict annotation" do - :ok = Cache.put("foo", "bar") + @decorate cacheable(cache: 123, key: {a, b}) + def t(a, b), do: {a, b} + end - assert delete_mfa_cache_with_extra_args("bar bar") == "bar bar" - refute Cache.get("foo") + RuntimeCacheTest.t(1, 2) + end end end ## Annotated Functions - @decorate cacheable(cache: Cache) + @cache Cache + + @decorate cacheable(cache: @cache) def get_without_args, do: "hello" - @decorate cacheable(cache: Cache, key: x) - def get_by_x(x, y \\ nil) do + @decorate cacheable(cache: @cache, key: x) + def get_by_xy(x, y \\ nil) do with _ when not is_nil(x) <- x, _ when not is_nil(y) <- y do y end end - @decorate cacheable(cache: Cache, key: {x, y}) - def get_by_xy(x, y) do + @decorate cacheable(key: {x, y}) + def multiply_xy(x, y) do {x, y * 2} end @@ -705,41 +750,55 @@ defmodule Nebulex.CachingTest do x end - @decorate cacheable(cache: Cache) + @decorate cacheable() def get_false_with_side_effect(v) do - Cache.update("side-effect", 1, &(&1 + 1)) + _ = Cache.update!("side-effect", 1, &(&1 + 1)) + v end - @decorate cacheable(cache: Cache, match: fn x -> x != :x end) + @decorate cacheable(match: &(&1 != :x)) def get_with_match(x) do x end - @decorate cacheable(cache: Cache, match: &match_fun/1) + @decorate cacheable(cache: dynamic_cache(Cache, Cache), match: &match_fun/1) def get_with_match_fun(x) do {:ok, to_string(x)} rescue _ -> :error end - @decorate cacheable(cache: Cache) + @decorate cacheable(cache: dynamic_cache(Cache, Cache), match: &match_fun_with_ctx/2) + def get_with_match_fun_and_ctx(x) do + {:ok, to_string(x)} + rescue + _ -> :error + end + + @decorate cacheable(key: ttl, match: &match_fun/1) + def get_with_custom_ttl(ttl) do + {:ok, %{ttl: ttl}} + end + + @decorate cacheable() def get_with_default_key(x, y) do _ = {x, y} + :ok end - @decorate cacheable(cache: Cache, key: {Meta, meta.id}) + @decorate cacheable(key: {Meta, meta.id}) def get_meta(%Meta{} = meta) do meta end - @decorate 
cacheable(cache: Cache, key: map[:id]) + @decorate cacheable(key: map[:id]) def get_map(map) do map end - @decorate cache_put(cache: Cache) + @decorate cache_put() def update_without_args, do: "hello" @decorate cache_put(cache: Cache, key: x) @@ -750,26 +809,26 @@ defmodule Nebulex.CachingTest do end end - @decorate cache_put(cache: Cache, key: x, opts: [ttl: 1000]) + @decorate cache_put(cache: dynamic_cache(Cache, Cache), keys: [x], opts: [ttl: 1000]) def update_with_opts(x) do x end - @decorate cache_put(cache: Cache, key: x, match: &match_fun/1) + @decorate cache_put(cache: dynamic_cache(Cache, Cache), key: x, match: &match_fun/1) def update_with_match(x) do {:ok, to_string(x)} rescue _ -> :error end - @decorate cache_put(cache: Cache, keys: [x, y], match: &match_fun/1, opts: [ttl: 1000]) + @decorate cache_put(keys: [x, y], match: &match_fun/1, opts: [ttl: 1000]) def update_with_multiple_keys(x, y) do {:ok, {to_string(x), to_string(y)}} rescue _ -> :error end - @decorate cache_evict(cache: Cache) + @decorate cache_evict(cache: dynamic_cache(Cache, Cache)) def evict_without_args, do: "hello" @decorate cache_evict(cache: Cache, key: x) @@ -777,17 +836,17 @@ defmodule Nebulex.CachingTest do x end - @decorate cache_evict(cache: Cache, keys: [x, y]) + @decorate cache_evict(keys: [x, y]) def evict_keys_fun(x, y) do {x, y} end - @decorate cache_evict(cache: Cache, all_entries: true, before_invocation: true) + @decorate cache_evict(all_entries: true, before_invocation: true) def evict_all_fun(x) do x end - @decorate cacheable(cache: Cache, key: x) + @decorate cacheable(key: x) def multiple_clauses(x, y \\ 0) def multiple_clauses(x, y) when is_integer(x) and is_integer(y) do @@ -798,15 +857,17 @@ defmodule Nebulex.CachingTest do {x, y} end - @decorate cacheable(cache: Cache, key_generator: TestKeyGenerator) + ## Custom key generation + + @decorate cacheable(key: &:erlang.phash2/1) def get_with_keygen(x, y) do {x, y} end - @decorate cacheable(cache: Cache, key_generator: 
TestKeyGenerator) - def get_with_keygen2(x, y, z) + @decorate cacheable(key: &:erlang.phash2/1) + def get_with_keygen2(x, y, z \\ %{}) - def get_with_keygen2(x, y, %{a: {_x, _y}}) do + def get_with_keygen2(x, y, %{a: {_x1, _y1}}) do {x, y} end @@ -814,135 +875,124 @@ defmodule Nebulex.CachingTest do {x, y, z} end - @decorate cacheable(cache: Cache, key_generator: TestKeyGenerator) + @decorate cacheable(key: &:erlang.phash2/1) def get_with_keygen3(x, _y, _, {_, _}, [_], %{}, %{} = z) do {x, z} end - @decorate cache_evict(cache: Cache, key_generator: TestKeyGenerator) - def evict_with_keygen(x, y) do + @decorate cacheable(key: &:erlang.list_to_tuple([:a, :b | &1.args])) + def get_with_keygen4(x, y) do {x, y} end - @decorate cache_put(cache: Cache, key_generator: TestKeyGenerator) - def put_with_keygen(x, y) do - x * y - end - - @decorate cacheable(cache: CacheWithDefaultKeyGenerator) - def get_with_default_key_generator(id), do: id - - @decorate cache_evict(cache: CacheWithDefaultKeyGenerator) - def del_with_default_key_generator(id), do: id - - @decorate cacheable(cache: Cache, key_generator: {TestKeyGenerator, [x]}) - def get_with_shorthand_tuple_keygen(x, y, _z) do + @decorate cache_evict(key: &:erlang.phash2/1) + def evict_with_keygen(x, y) do {x, y} end - @decorate cacheable(cache: Cache, key_generator: {__MODULE__, [x]}) - def get_with_shorthand_tuple_keygen2(x, y) do + @decorate cache_evict(key: &:erlang.list_to_tuple(&1.args)) + def evict_with_keygen2(x, y) do {x, y} end - @decorate cache_evict(cache: Cache, key_generator: {TestKeyGenerator, [x]}) - def evict_with_shorthand_tuple_keygen(x, y) do - {x, y} + @decorate cache_put(key: &hd(&1.args)) + def put_with_keygen(x, y) do + x * y end - @decorate cache_put(cache: Cache, key_generator: {TestKeyGenerator, [x]}) - def put_with_shorthand_tuple_keygen(x, y) do - {x, y} + @decorate cache_put(key: &{:tuple, hd(&1.args)}) + def put_with_keygen2(x, y) do + x * y end - @decorate cacheable(cache: Cache, key_generator: 
{__MODULE__, :generate_key, [{x, y}]}) - def get_with_tuple_keygen(x, y) do - {x, y} - end + ## on_error - @decorate cacheable(cache: Cache, key_generator: {TestKeyGenerator, :generate, [:a, :b, [x]]}) - def get_with_tuple_keygen2(x, y) do - {x, y} + @decorate cacheable(cache: YetAnotherCache, key: x, on_error: :raise) + def get_and_raise_exception(x) do + x end - @decorate cache_evict(cache: Cache, key_generator: {__MODULE__, :generate_key, [{x, y}]}) - def evict_with_tuple_keygen(x, y) do - {x, y} + @decorate cache_put(cache: YetAnotherCache, key: x, on_error: :raise) + def update_and_raise_exception(x) do + x end - @decorate cache_put(cache: Cache, key_generator: {__MODULE__, :generate_key, [x]}) - def put_with_tuple_keygen(x, y) do - x * y + @decorate cache_evict(cache: YetAnotherCache, key: x, on_error: :raise) + def evict_and_raise_exception(x) do + x end - @decorate cacheable(cache: YetAnotherCache, key: x, on_error: :nothing) - def get_with_exception(x) do + @decorate cacheable(cache: YetAnotherCache, key: x) + def get_ignoring_exception(x) do x end - @decorate cache_put(cache: YetAnotherCache, key: x, on_error: :nothing) - def update_with_exception(x) do + @decorate cache_put(cache: YetAnotherCache, key: x) + def update_ignoring_exception(x) do x end - @decorate cache_evict(cache: YetAnotherCache, key: x, on_error: :nothing) - def evict_with_exception(x) do + @decorate cache_evict(cache: YetAnotherCache, key: x) + def evict_ignoring_exception(x) do x end - @decorate cacheable(cache: {__MODULE__, :cache_with_extra_args, ["extra_arg"]}, key: var) - def get_mfa_cache_with_extra_args(var) do - var - end + ## Runtime target cache - @decorate cacheable(cache: {__MODULE__, :cache_without_extra_args, []}, key: var) - def get_mfa_cache_without_extra_args(var) do + @decorate cacheable(cache: &target_cache/1, key: var) + def get_fn_cache(var) do var end - @decorate cache_put(cache: {__MODULE__, :cache_with_extra_args, ["extra_arg"]}, key: "foo") - def 
update_mfa_cache_with_extra_args(var) do + @decorate cache_put(cache: &target_cache/1, key: "foo") + def update_fn_cache(var) do var end - @decorate cache_put(cache: {__MODULE__, :cache_without_extra_args, []}, key: "foo") - def update_mfa_cache_without_extra_args(var) do + @decorate cache_evict(cache: &target_cache/1, key: "foo") + def delete_fn_cache(var) do var end - @decorate cache_evict(cache: {__MODULE__, :cache_with_extra_args, ["extra_arg"]}, key: "foo") - def delete_mfa_cache_with_extra_args(var) do - var + ## Key references + + @decorate cacheable(key: name, references: & &1.id) + def get_with_keyref(name) do + %{id: "referenced_id", name: name} end - @decorate cache_evict(cache: {__MODULE__, :cache_without_extra_args, []}, key: "foo") - def delete_mfa_cache_without_extra_args(var) do - var + @decorate cacheable(key: name, references: &:erlang.phash2({&1.id, &2.args})) + def get_with_keyref_fn_ctx(name) do + %{id: "referenced_id", name: name} end - @decorate cacheable(cache: Cache, key: name, references: & &1.id) - def get_with_referenced_key(name) do + @decorate cacheable( + key: name, + references: + &keyref(&1.id, + cache: YetAnotherCache, + ttl: __MODULE__.default_ttl() + ) + ) + def get_with_keyref_cache(name) do %{id: "referenced_id", name: name} end - @decorate cacheable(cache: Cache, key: name, references: attrs.id) - def get_with_referenced_key_from_args(name, attrs) do + @decorate cacheable(cache: dynamic_cache(Cache, Cache), key: name, references: attrs.id) + def get_with_keyref_from_args(name, attrs) do %{attrs: attrs, name: name} end - @decorate cacheable(cache: Cache, key: name, references: "fixed_id") - def get_with_fixed_referenced_key(name) do + @decorate cacheable(key: name, references: "fixed_id") + def get_with_fixed_keyref(name) do %{id: "fixed_id", name: name} end - @decorate cacheable(cache: Cache, key: name, references: &keyref(YetAnotherCache, &1.id)) - def get_with_ref_key_with_cache(name) do - %{id: "referenced_id", name: name} 
- end + @decorate cacheable(key: name, references: & &1.id, match: &(&1[:name] == name)) + def get_with_keyref_and_match(name, value) do + _ = name - @decorate cacheable(cache: Cache, key: ttl, match: &match_fun/1) - def get_with_custom_ttl(ttl) do - {:ok, %{ttl: ttl}} + value end ## Helpers @@ -950,14 +1000,13 @@ defmodule Nebulex.CachingTest do # Custom key-generator function def generate_key(arg), do: arg - @impl Nebulex.Caching.KeyGenerator - def generate(module, function_name, args) do - :erlang.phash2({module, function_name, args}) - end + def target_cache(arg) do + _ = send(self(), arg) - def cache_with_extra_args(_mod, _fun, _args, _extra_arg), do: Cache + Cache + end - def cache_without_extra_args(_mod, _fun, _args), do: Cache + def default_ttl, do: 5000 ## Private Functions @@ -966,11 +1015,28 @@ defmodule Nebulex.CachingTest do defp match_fun({:ok, val}), do: {true, val} defp match_fun(_), do: false + defp match_fun_with_ctx(result, ctx) do + _ = send(self(), ctx) + + match_fun(result) + end + defp set_keys(entries) do assert :ok == Cache.put_all(entries) Enum.each(entries, fn {k, v} -> - assert v == Cache.get(k) + assert v == Cache.get!(k) end) end + + defp default_hash(decorator, fun, arity, args) do + %Nebulex.Caching.Decorators.Context{ + decorator: decorator, + module: __MODULE__, + function_name: fun, + arity: arity, + args: args + } + |> :erlang.phash2() + end end diff --git a/test/nebulex/entry_test.exs b/test/nebulex/entry_test.exs deleted file mode 100644 index 4adcb660..00000000 --- a/test/nebulex/entry_test.exs +++ /dev/null @@ -1,4 +0,0 @@ -defmodule Nebulex.EntryTest do - use ExUnit.Case, async: true - doctest Nebulex.Entry -end diff --git a/test/nebulex/hook_test.exs b/test/nebulex/hook_test.exs deleted file mode 100644 index 64081d3e..00000000 --- a/test/nebulex/hook_test.exs +++ /dev/null @@ -1,159 +0,0 @@ -defmodule Nebulex.HookTest do - use ExUnit.Case, async: true - - alias Nebulex.Hook - - describe "before" do - defmodule 
BeforeHookCache do - @moduledoc false - use Nebulex.Hook - @decorate_all before(&Nebulex.HookTest.hook_fun/1) - - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - test "hook" do - {:ok, _pid} = BeforeHookCache.start_link() - true = Process.register(self(), :hooked_cache) - _ = BeforeHookCache.new_generation() - - refute BeforeHookCache.get("foo") - assert_receive %Hook{} = hook, 200 - assert hook.step == :before - assert hook.module == BeforeHookCache - assert hook.name == :get - assert hook.arity == 2 - refute hook.return - - assert :ok == BeforeHookCache.put("foo", "bar") - assert_receive %Hook{} = hook, 200 - assert hook.step == :before - assert hook.module == BeforeHookCache - assert hook.name == :put - assert hook.arity == 3 - refute hook.return - - :ok = BeforeHookCache.stop() - end - end - - describe "after_return" do - defmodule AfterReturnHookCache do - @moduledoc false - use Nebulex.Hook - @decorate_all after_return(&Nebulex.HookTest.hook_fun/1) - - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - test "hook" do - {:ok, _pid} = AfterReturnHookCache.start_link() - true = Process.register(self(), :hooked_cache) - _ = AfterReturnHookCache.new_generation() - - refute AfterReturnHookCache.get("foo") - assert_receive %Hook{} = hook, 200 - assert hook.module == AfterReturnHookCache - assert hook.name == :get - assert hook.arity == 2 - assert hook.step == :after_return - refute hook.return - - assert :ok == AfterReturnHookCache.put("foo", "bar") - assert_receive %Hook{} = hook, 200 - assert hook.module == AfterReturnHookCache - assert hook.name == :put - assert hook.arity == 3 - assert hook.step == :after_return - assert hook.return == :ok - - :ok = AfterReturnHookCache.stop() - end - end - - describe "around" do - defmodule AroundHookCache do - @moduledoc false - use Nebulex.Hook - @decorate_all around(&Nebulex.TestCache.TestHook.track/1) - - use Nebulex.Cache, - otp_app: :nebulex, - adapter: 
Nebulex.Adapters.Local - - alias Nebulex.TestCache.TestHook - - def init(opts) do - {:ok, pid} = TestHook.start_link() - {:ok, Keyword.put(opts, :hook_pid, pid)} - end - end - - test "hook" do - {:ok, _pid} = AroundHookCache.start_link() - true = Process.register(self(), :hooked_cache) - _ = AroundHookCache.new_generation() - - refute AroundHookCache.get("foo") - assert_receive %Hook{module: AroundHookCache, name: :get, arity: 2} = hook, 200 - refute hook.return - assert hook.acc >= 0 - - assert :ok == AroundHookCache.put("foo", "bar") - assert_receive %Hook{module: AroundHookCache, name: :put, arity: 3} = hook, 200 - assert hook.acc >= 0 - assert hook.return == :ok - - assert :ok == AroundHookCache.put("hello", "world") - assert_receive %Hook{module: AroundHookCache, name: :put, arity: 3} = hook, 200 - assert hook.acc >= 0 - assert hook.return == :ok - - assert "bar" == AroundHookCache.get("foo") - assert_receive %Hook{module: AroundHookCache, name: :get, arity: 2} = hook, 200 - assert hook.return == "bar" - assert hook.acc >= 0 - - assert "world" == AroundHookCache.get("hello") - assert_receive %Hook{module: AroundHookCache, name: :get, arity: 2} = hook, 200 - assert hook.return == "world" - assert hook.acc >= 0 - - :ok = AroundHookCache.stop() - end - end - - describe "exception" do - defmodule ErrorCache do - @moduledoc false - use Nebulex.Hook - @decorate_all around(&Nebulex.TestCache.TestHook.hook_error/1) - - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - test "hook" do - {:ok, _pid} = ErrorCache.start_link() - - assert_raise RuntimeError, ~r"hook execution failed on step :before with error", fn -> - ErrorCache.get("foo") - end - - :ok = ErrorCache.stop() - end - end - - ## Helpers - - def hook_fun(%Hook{name: name} = hook) when name in [:get, :put] do - send(self(), hook) - end - - def hook_fun(hook), do: hook -end diff --git a/test/nebulex/telemetry_test.exs b/test/nebulex/telemetry_test.exs index bd4c4fb5..623376ae 
100644 --- a/test/nebulex/telemetry_test.exs +++ b/test/nebulex/telemetry_test.exs @@ -10,134 +10,70 @@ defmodule Nebulex.TelemetryTest do defmodule Cache do use Nebulex.Cache, otp_app: :nebulex, - adapter: Nebulex.Adapters.Multilevel - - defmodule L1 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule L2 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated - end - - defmodule L3 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - end + adapter: Nebulex.TestAdapter end ## Shared constants - @prefix [:nebulex, :telemetry_test, :cache] - + @prefix Telemetry.default_event_prefix() @start @prefix ++ [:command, :start] @stop @prefix ++ [:command, :stop] - - @start_events [ - @prefix ++ [:command, :start], - @prefix ++ [:l1, :command, :start], - @prefix ++ [:l2, :command, :start], - @prefix ++ [:l2, :primary, :command, :start], - @prefix ++ [:l3, :command, :start], - @prefix ++ [:l3, :primary, :command, :start] - ] - - @stop_events [ - @prefix ++ [:command, :stop], - @prefix ++ [:l1, :command, :stop], - @prefix ++ [:l2, :command, :stop], - @prefix ++ [:l2, :primary, :command, :stop], - @prefix ++ [:l3, :command, :stop], - @prefix ++ [:l3, :primary, :command, :stop] - ] - - @exception_events [ - @prefix ++ [:command, :exception], - @prefix ++ [:l1, :command, :exception], - @prefix ++ [:l2, :command, :exception], - @prefix ++ [:l2, :primary, :command, :exception], - @prefix ++ [:l3, :command, :stop], - @prefix ++ [:l3, :primary, :command, :exception] - ] - - @caches [Cache, Cache.L1, Cache.L2, Cache.L2.Primary, Cache.L3, Cache.L3.Primary] - - @events Enum.zip([@caches, @start_events, @stop_events]) - - @config [ - model: :inclusive, - levels: [ - {Cache.L1, gc_interval: :timer.hours(1)}, - {Cache.L2, primary: [gc_interval: :timer.hours(1)]}, - {Cache.L3, primary: [gc_interval: :timer.hours(1)]} - ] - ] + @exception @prefix ++ [:command, :exception] + 
@test_adapter_start [:nebulex, :test_adapter, :start] + @events [@start, @stop, @exception, @test_adapter_start] ## Tests describe "span/3" do - setup_with_cache(Cache, @config) + setup_with_cache Cache test "ok: emits start and stop events" do - with_telemetry_handler(__MODULE__, @start_events ++ @stop_events, fn -> + with_telemetry_handler(__MODULE__, @events, fn -> assert Cache.put("foo", "bar") == :ok - for {cache, start, stop} <- @events do - assert_receive {^start, measurements, %{function_name: :put} = metadata} - assert measurements[:system_time] |> DateTime.from_unix!(:native) - assert metadata[:adapter_meta][:cache] == cache - assert metadata[:args] == ["foo", "bar", :infinity, :put, []] - assert metadata[:telemetry_span_context] |> is_reference() - - assert_receive {^stop, measurements, %{function_name: :put} = metadata} - assert measurements[:duration] > 0 - assert metadata[:adapter_meta][:cache] == cache - assert metadata[:args] == ["foo", "bar", :infinity, :put, []] - assert metadata[:result] == true - assert metadata[:telemetry_span_context] |> is_reference() - end + assert_receive {@start, measurements, %{command: :put} = metadata} + assert measurements[:system_time] |> DateTime.from_unix!(:native) + assert metadata[:adapter_meta][:cache] == Cache + assert metadata[:adapter_meta][:name] == Cache + assert metadata[:args] == ["foo", "bar", :infinity, :put, []] + assert metadata[:telemetry_span_context] |> is_reference() + assert metadata[:extra_metadata] == %{} + + assert_receive {@stop, measurements, %{command: :put} = metadata} + assert measurements[:duration] > 0 + assert metadata[:adapter_meta][:cache] == Cache + assert metadata[:adapter_meta][:name] == Cache + assert metadata[:args] == ["foo", "bar", :infinity, :put, []] + assert metadata[:result] == {:ok, true} + assert metadata[:telemetry_span_context] |> is_reference() + assert metadata[:extra_metadata] == %{} end) end test "raise: emits start and exception events" do - 
with_telemetry_handler(__MODULE__, @exception_events, fn -> - Adapter.with_meta(Cache.L3.Primary, fn _, meta -> - true = :ets.delete(meta.meta_tab) - end) + with_telemetry_handler(__MODULE__, @events, fn -> + key = {:eval, fn -> raise ArgumentError, "error" end} assert_raise ArgumentError, fn -> - Cache.get("foo") + Cache.fetch(key) end - ex_events = [ - @prefix ++ [:command, :exception], - @prefix ++ [:l3, :command, :exception], - @prefix ++ [:l3, :primary, :command, :exception] - ] - - for {cache, exception} <- ex_events do - assert_receive {^exception, measurements, %{function_name: :get} = metadata} - assert measurements[:duration] > 0 - assert metadata[:adapter_meta][:cache] == cache - assert metadata[:args] == ["foo", []] - assert metadata[:kind] == :error - assert metadata[:reason] == :badarg - assert metadata[:stacktrace] - assert metadata[:telemetry_span_context] |> is_reference() - end + assert_receive {@exception, measurements, %{command: :fetch} = metadata} + assert measurements[:duration] > 0 + assert metadata[:adapter_meta][:cache] == Cache + assert metadata[:adapter_meta][:name] == Cache + assert metadata[:args] == [key, []] + assert metadata[:kind] == :error + assert metadata[:reason] == %ArgumentError{message: "error"} + assert metadata[:stacktrace] + assert metadata[:telemetry_span_context] |> is_reference() + assert metadata[:extra_metadata] == %{} end) end test "ok: emits start and stop events with custom telemetry_span_context" do with_telemetry_handler(__MODULE__, [@start, @stop], fn -> - event_prefix = [:nebulex, :telemetry_test, :cache, :command] - - Telemetry.span(event_prefix, %{telemetry_span_context: 1}, fn -> + Telemetry.span(@prefix ++ [:command], %{telemetry_span_context: 1}, fn -> {"test", %{telemetry_span_context: 1}} end) @@ -153,23 +89,20 @@ defmodule Nebulex.TelemetryTest do end describe "span/3 bypassed" do - setup_with_cache(Cache, Keyword.put(@config, :telemetry, false)) + setup_with_cache Cache, telemetry: false test 
"telemetry set to false" do - for cache <- @caches do - Adapter.with_meta(cache, fn _, meta -> - assert meta.telemetry == false - end) - end + Adapter.with_meta(Cache, fn meta -> + assert meta.telemetry == false + end) end test "ok: does not emit start and stop events" do - with_telemetry_handler(__MODULE__, @start_events ++ @stop_events, fn -> + with_telemetry_handler(__MODULE__, @events, fn -> commands = [ put: ["foo", "bar"], put_all: [%{"foo foo" => "bar bar"}], get: ["foo"], - get_all: [["foo", "foo foo"]], delete: ["unknown"], take: ["foo foo"], has_key?: ["foo foo"], @@ -177,35 +110,101 @@ defmodule Nebulex.TelemetryTest do ttl: ["foo"], expire: ["foo", 60_000], touch: ["foo"], - all: [], + get_all: [[in: ["foo", "foo foo"]]], + get_all: [], stream: [], transaction: [fn -> :ok end], in_transaction?: [], dump: ["/invalid/path"], load: ["wrong_file"], - stats: [] + info: [] ] for {command, args} <- commands do - _ = apply(Cache.L1, command, args) - _ = apply(Cache.L2, command, args) - _ = apply(Cache.L3, command, args) - - for {_cache, start, stop} <- @events do - refute_received {^start, _, %{function_name: :command}} - refute_received {^stop, _, %{function_name: :command}} - end + _ = apply(Cache, command, args) + + refute_received {@start, _, %{command: :command}} + refute_received {@stop, _, %{command: :command}} end for {command, args} <- Keyword.drop(commands, [:dump, :load]) do _ = apply(Cache, command, args) - for {_cache, start, stop} <- @events do - refute_received {^start, _, %{function_name: :command}} - refute_received {^stop, _, %{function_name: :command}} - end + refute_received {@start, _, %{command: :command}} + refute_received {@stop, _, %{command: :command}} + end + end) + end + end + + describe "span/3 with custom event and metadata" do + @custom_prefix [:my, :custom, :event] + @custom_start @custom_prefix ++ [:start] + @custom_stop @custom_prefix ++ [:stop] + @custom_exception @custom_prefix ++ [:exception] + @custom_events [@custom_start, 
@custom_stop, @custom_exception] + + @custom_opts [ + telemetry_event: @custom_prefix, + telemetry_metadata: %{foo: "bar"} + ] + + setup_with_cache Cache + + test "ok: emits start and stop events" do + with_telemetry_handler(__MODULE__, @custom_events, fn -> + :ok = Cache.put("foo", "bar", @custom_opts) + + assert_receive {@custom_start, measurements, %{command: :put} = metadata} + assert measurements[:system_time] |> DateTime.from_unix!(:native) + assert metadata[:adapter_meta][:cache] == Cache + assert metadata[:adapter_meta][:name] == Cache + assert metadata[:args] == ["foo", "bar", :infinity, :put, @custom_opts] + assert metadata[:telemetry_span_context] |> is_reference() + assert metadata[:extra_metadata] == %{foo: "bar"} + + assert_receive {@custom_stop, measurements, %{command: :put} = metadata} + assert measurements[:duration] > 0 + assert metadata[:adapter_meta][:cache] == Cache + assert metadata[:adapter_meta][:name] == Cache + assert metadata[:args] == ["foo", "bar", :infinity, :put, @custom_opts] + assert metadata[:result] == {:ok, true} + assert metadata[:telemetry_span_context] |> is_reference() + assert metadata[:extra_metadata] == %{foo: "bar"} + end) + end + + test "raise: emits start and exception events" do + with_telemetry_handler(__MODULE__, @custom_events, fn -> + key = {:eval, fn -> raise ArgumentError, "error" end} + + assert_raise ArgumentError, fn -> + Cache.fetch(key, @custom_opts) end + + assert_receive {@custom_exception, measurements, %{command: :fetch} = metadata} + assert measurements[:duration] > 0 + assert metadata[:adapter_meta][:cache] == Cache + assert metadata[:adapter_meta][:name] == Cache + assert metadata[:args] == [key, @custom_opts] + assert metadata[:kind] == :error + assert metadata[:reason] == %ArgumentError{message: "error"} + assert metadata[:stacktrace] + assert metadata[:telemetry_span_context] |> is_reference() + assert metadata[:extra_metadata] == %{foo: "bar"} end) end + + test "error: invalid telemetry_event" do 
+ assert_raise NimbleOptions.ValidationError, ~r"invalid value for :telemetry_event", fn -> + Cache.fetch(:invalid, telemetry_event: :invalid) + end + end + + test "error: invalid telemetry_metadata" do + assert_raise NimbleOptions.ValidationError, ~r"invalid value for :telemetry_metadata", fn -> + Cache.fetch(:invalid, telemetry_metadata: :invalid) + end + end end end diff --git a/test/nebulex/time_test.exs b/test/nebulex/time_test.exs index 5d15d86b..33087c53 100644 --- a/test/nebulex/time_test.exs +++ b/test/nebulex/time_test.exs @@ -1,4 +1,4 @@ defmodule Nebulex.TimeTest do use ExUnit.Case, async: true - doctest Nebulex.Entry + doctest Nebulex.Time end diff --git a/test/nebulex/utils_test.exs b/test/nebulex/utils_test.exs new file mode 100644 index 00000000..d2acfd6e --- /dev/null +++ b/test/nebulex/utils_test.exs @@ -0,0 +1,4 @@ +defmodule Nebulex.UtilsTest do + use ExUnit.Case, async: true + doctest Nebulex.Utils +end diff --git a/test/shared/cache/deprecated_test.exs b/test/shared/cache/deprecated_test.exs deleted file mode 100644 index d8890c66..00000000 --- a/test/shared/cache/deprecated_test.exs +++ /dev/null @@ -1,31 +0,0 @@ -defmodule Nebulex.Cache.DeprecatedTest do - import Nebulex.CacheCase - - deftests do - describe "size/0" do - test "returns the current number of entries in cache", %{cache: cache} do - for x <- 1..100, do: cache.put(x, x) - assert cache.size() == 100 - - for x <- 1..50, do: cache.delete(x) - assert cache.size() == 50 - - for x <- 51..60, do: assert(cache.get(x) == x) - assert cache.size() == 50 - end - end - - describe "flush/0" do - test "evicts all entries from cache", %{cache: cache} do - Enum.each(1..2, fn _ -> - for x <- 1..100, do: cache.put(x, x) - - assert cache.flush() == 100 - :ok = Process.sleep(500) - - for x <- 1..100, do: refute(cache.get(x)) - end) - end - end - end -end diff --git a/test/shared/cache/entry_expiration_test.exs b/test/shared/cache/entry_expiration_test.exs deleted file mode 100644 index 
90f34e15..00000000 --- a/test/shared/cache/entry_expiration_test.exs +++ /dev/null @@ -1,226 +0,0 @@ -defmodule Nebulex.Cache.EntryExpirationTest do - import Nebulex.CacheCase - - deftests do - describe "ttl option is given to" do - test "put", %{cache: cache} do - assert cache.put("foo", "bar", ttl: 500) == :ok - assert cache.has_key?("foo") - - Process.sleep(600) - refute cache.has_key?("foo") - end - - test "put_all", %{cache: cache} do - entries = [{0, nil} | for(x <- 1..3, do: {x, x})] - assert cache.put_all(entries, ttl: 1000) - - refute cache.get(0) - for x <- 1..3, do: assert(x == cache.get(x)) - :ok = Process.sleep(1200) - for x <- 1..3, do: refute(cache.get(x)) - end - - test "put_new_all", %{cache: cache} do - assert cache.put_new_all(%{"apples" => 1, "bananas" => 3}, ttl: 1000) - assert cache.get("apples") == 1 - assert cache.get("bananas") == 3 - - refute cache.put_new_all(%{"apples" => 3, "oranges" => 1}) - assert cache.get("apples") == 1 - assert cache.get("bananas") == 3 - refute cache.get("oranges") - - :ok = Process.sleep(1200) - refute cache.get("apples") - refute cache.get("bananas") - end - - test "take", %{cache: cache} do - :ok = cache.put("foo", "bar", ttl: 500) - :ok = Process.sleep(600) - - refute cache.take(1) - end - - test "take!", %{cache: cache} do - :ok = cache.put(1, 1, ttl: 100) - :ok = Process.sleep(500) - - assert_raise KeyError, fn -> - cache.take!(1) - end - end - - test "incr (initializes default value if ttl is expired)", %{cache: cache} do - assert cache.incr(:counter, 1, ttl: 200) == 1 - assert cache.incr(:counter) == 2 - - :ok = Process.sleep(210) - - assert cache.incr(:counter, 1, ttl: 200) == 1 - assert cache.incr(:counter) == 2 - end - end - - describe "ttl" do - test "returns the remaining ttl for the given key", %{cache: cache} do - assert cache.put(:a, 1, ttl: 500) == :ok - assert cache.ttl(:a) > 0 - assert cache.put(:b, 2) == :ok - - :ok = Process.sleep(10) - assert cache.ttl(:a) > 0 - assert cache.ttl(:b) == 
:infinity - - :ok = Process.sleep(600) - refute cache.ttl(:a) - assert cache.ttl(:b) == :infinity - end - - test "returns nil if key does not exist", %{cache: cache} do - refute cache.ttl(:non_existent) - end - end - - describe "expire" do - test "alters the expiration time for the given key", %{cache: cache} do - assert cache.put(:a, 1, ttl: 500) == :ok - assert cache.ttl(:a) > 0 - - assert cache.expire(:a, 1000) - assert cache.ttl(:a) > 100 - - assert cache.expire(:a, :infinity) - assert cache.ttl(:a) == :infinity - - refute cache.expire(:b, 5) - end - - test "returns false if key does not exist", %{cache: cache} do - assert cache.expire(:non_existent, 1000) == false - end - - test "raises when ttl is invalid", %{cache: cache} do - assert_raise ArgumentError, ~r"expected ttl to be a valid timeout", fn -> - cache.expire(:a, "hello") - end - end - end - - describe "touch" do - test "updates the last access time for the given entry", %{cache: cache} do - assert cache.put(:touch, 1, ttl: 1000) == :ok - - :ok = Process.sleep(100) - assert cache.touch(:touch) - - :ok = Process.sleep(200) - assert cache.touch(:touch) - assert cache.get(:touch) == 1 - - :ok = Process.sleep(1100) - refute cache.get(:touch) - end - - test "returns false if key does not exist", %{cache: cache} do - assert cache.touch(:non_existent) == false - end - end - - describe "expiration" do - test "single entry put with ttl", %{cache: cache} do - assert cache.put(1, 11, ttl: 1000) == :ok - assert cache.get!(1) == 11 - - for _ <- 3..1 do - assert cache.ttl(1) > 0 - Process.sleep(200) - end - - :ok = Process.sleep(500) - refute cache.ttl(1) - assert cache.put(1, 11, ttl: 1000) == :ok - assert cache.ttl(1) > 0 - end - - test "multiple entries put with ttl", %{cache: cache} do - assert cache.put(1, 11, ttl: 1000) == :ok - assert cache.get!(1) == 11 - - :ok = Process.sleep(10) - assert cache.get(1) == 11 - :ok = Process.sleep(1100) - refute cache.get(1) - - ops = [ - put: ["foo", "bar", [ttl: 1000]], - 
put_all: [[{"foo", "bar"}], [ttl: 1000]] - ] - - for {action, args} <- ops do - assert apply(cache, action, args) == :ok - :ok = Process.sleep(10) - assert cache.get("foo") == "bar" - :ok = Process.sleep(1200) - refute cache.get("foo") - - assert apply(cache, action, args) == :ok - :ok = Process.sleep(10) - assert cache.get("foo") == "bar" - :ok = Process.sleep(1200) - refute cache.get("foo") - end - end - end - - describe "get_and_update with ttl" do - test "existing entry", %{cache: cache} do - assert cache.put(1, 1, ttl: 1000) == :ok - assert cache.ttl(1) > 0 - - :ok = Process.sleep(10) - - assert cache.get_and_update(1, &cache.get_and_update_fun/1) == {1, 2} - assert cache.ttl(1) == :infinity - - :ok = Process.sleep(1200) - assert cache.get(1) == 2 - end - end - - describe "update with ttl" do - test "existing entry", %{cache: cache} do - assert cache.put(1, 1, ttl: 1000) == :ok - assert cache.ttl(1) > 0 - - :ok = Process.sleep(10) - - assert cache.update(1, 10, &Integer.to_string/1) == "1" - assert cache.ttl(1) == :infinity - - :ok = Process.sleep(1200) - assert cache.get(1) == "1" - end - end - - describe "incr with ttl" do - test "increments a counter", %{cache: cache} do - assert cache.incr(:counter, 1, ttl: 1000) == 1 - assert cache.ttl(1) > 0 - - :ok = Process.sleep(1200) - refute cache.get(:counter) - end - - test "increments a counter and then set ttl", %{cache: cache} do - assert cache.incr(:counter, 1) == 1 - assert cache.ttl(:counter) == :infinity - - assert cache.expire(:counter, 500) - :ok = Process.sleep(600) - refute cache.get(:counter) - end - end - end -end diff --git a/test/shared/cache/entry_prop_test.exs b/test/shared/cache/entry_prop_test.exs deleted file mode 100644 index ccdc1f2b..00000000 --- a/test/shared/cache/entry_prop_test.exs +++ /dev/null @@ -1,32 +0,0 @@ -defmodule Nebulex.Cache.EntryPropTest do - import Nebulex.CacheCase - - deftests do - use ExUnitProperties - - describe "key/value entries" do - property "any term", %{cache: 
cache} do - check all term <- term() do - refute cache.get(term) - - refute cache.replace(term, term) - assert cache.put(term, term) == :ok - refute cache.put_new(term, term) - assert cache.get(term) == term - - assert cache.replace(term, "replaced") - assert cache.get(term) == "replaced" - - assert cache.take(term) == "replaced" - refute cache.take(term) - - assert cache.put_new(term, term) - assert cache.get(term) == term - - assert cache.delete(term) == :ok - refute cache.get(term) - end - end - end - end -end diff --git a/test/shared/cache/entry_test.exs b/test/shared/cache/entry_test.exs deleted file mode 100644 index f1e045c2..00000000 --- a/test/shared/cache/entry_test.exs +++ /dev/null @@ -1,385 +0,0 @@ -defmodule Nebulex.Cache.EntryTest do - import Nebulex.CacheCase - - deftests do - describe "put/3" do - test "puts the given entry into the cache", %{cache: cache} do - for x <- 1..4, do: assert(cache.put(x, x) == :ok) - - assert cache.get(1) == 1 - assert cache.get(2) == 2 - - for x <- 3..4, do: assert(cache.put(x, x * x) == :ok) - assert cache.get(3) == 9 - assert cache.get(4) == 16 - end - - test "nil value has not any effect", %{cache: cache} do - assert cache.put("foo", nil) == :ok - refute cache.get("foo") - end - - test "raises when invalid option is given", %{cache: cache} do - assert_raise ArgumentError, ~r"expected ttl: to be a valid timeout", fn -> - cache.put("hello", "world", ttl: "1") - end - end - end - - describe "put_new/3" do - test "puts the given entry into the cache if the key does not exist", %{cache: cache} do - assert cache.put_new("foo", "bar") - assert cache.get("foo") == "bar" - end - - test "do nothing if key does exist already", %{cache: cache} do - :ok = cache.put("foo", "bar") - - refute cache.put_new("foo", "bar bar") - assert cache.get("foo") == "bar" - end - - test "nil value has not any effect", %{cache: cache} do - assert cache.put_new(:mykey, nil) - refute cache.get(:mykey) - end - - test "raises when invalid option is 
given", %{cache: cache} do - assert_raise ArgumentError, ~r"expected ttl: to be a valid timeout", fn -> - cache.put_new("hello", "world", ttl: "1") - end - end - end - - describe "put_new!/3" do - test "puts the given entry into the cache if the key does not exist", %{cache: cache} do - assert cache.put_new!("hello", "world") - assert cache.get("hello") == "world" - end - - test "raises when the key does exist in cache", %{cache: cache} do - :ok = cache.put("hello", "world") - - message = ~r"key \"hello\" already exists in cache" - - assert_raise Nebulex.KeyAlreadyExistsError, message, fn -> - cache.put_new!("hello", "world world") - end - end - end - - describe "replace/3" do - test "replaces the cached entry with a new value", %{cache: cache} do - refute cache.replace("foo", "bar") - - assert cache.put("foo", "bar") == :ok - assert cache.get("foo") == "bar" - - assert cache.replace("foo", "bar bar") - assert cache.get("foo") == "bar bar" - end - - test "nil value has not any effect", %{cache: cache} do - :ok = cache.put("hello", "world") - - assert cache.replace("hello", nil) - assert cache.get("hello") == "world" - end - - test "raises when invalid option is given", %{cache: cache} do - assert_raise ArgumentError, ~r"expected ttl: to be a valid timeout", fn -> - cache.replace("hello", "world", ttl: "1") - end - end - end - - describe "replace!/3" do - test "replaces the cached entry with a new value", %{cache: cache} do - :ok = cache.put("foo", "bar") - - assert cache.replace!("foo", "bar bar") - assert cache.get("foo") == "bar bar" - end - - test "raises when the key does not exist in cache", %{cache: cache} do - assert_raise KeyError, fn -> - cache.replace!("foo", "bar") - end - end - end - - describe "put_all/2" do - test "puts the given entries at once", %{cache: cache} do - assert cache.put_all(%{"apples" => 1, "bananas" => 3}) - assert cache.put_all(blueberries: 2, strawberries: 5) - assert cache.get("apples") == 1 - assert cache.get("bananas") == 3 - 
assert cache.get(:blueberries) == 2 - assert cache.get(:strawberries) == 5 - end - - test "empty list or map has not any effect", %{cache: cache} do - assert cache.put_all([]) - assert cache.put_all(%{}) - assert count = cache.count_all() - assert cache.delete_all() == count - end - - test "puts the given entries using different data types at once", %{cache: cache} do - entries = - Enum.reduce(1..100, %{}, fn elem, acc -> - sample = %{ - elem => elem, - :"atom#{elem}" => elem, - "#{elem}" => elem, - {:tuple, elem} => elem, - <<100, elem>> => elem, - [elem] => elem - } - - Map.merge(acc, sample) - end) - - assert cache.put_all(entries) == :ok - for {k, v} <- entries, do: assert(cache.get(k) == v) - end - - test "raises when invalid option is given", %{cache: cache} do - assert_raise ArgumentError, ~r"expected ttl: to be a valid timeout", fn -> - cache.put_all(%{"apples" => 1, "bananas" => 3}, ttl: "1") - end - end - end - - describe "put_new_all/2" do - test "puts the given entries only if none of the keys does exist already", %{cache: cache} do - assert cache.put_new_all(%{"apples" => 1, "bananas" => 3}) - assert cache.get("apples") == 1 - assert cache.get("bananas") == 3 - - refute cache.put_new_all(%{"apples" => 3, "oranges" => 1}) - assert cache.get("apples") == 1 - assert cache.get("bananas") == 3 - refute cache.get("oranges") - end - - test "raises when invalid option is given", %{cache: cache} do - assert_raise ArgumentError, ~r"expected ttl: to be a valid timeout", fn -> - cache.put_all(%{"apples" => 1, "bananas" => 3}, ttl: "1") - end - end - end - - describe "get/2" do - test "retrieves a cached entry", %{cache: cache} do - for x <- 1..5 do - :ok = cache.put(x, x) - assert cache.get(x) == x - end - end - - test "returns nil if key does not exist in cache", %{cache: cache} do - refute cache.get("non-existent") - end - end - - describe "get!/2" do - test "retrieves a cached entry", %{cache: cache} do - for x <- 1..5 do - :ok = cache.put(x, x) - assert 
cache.get!(x) == x - end - end - - test "raises when the key does not exist in cache", %{cache: cache} do - assert_raise KeyError, fn -> - cache.get!("non-existent") - end - end - end - - describe "get_all/2" do - test "returns a map with the given keys", %{cache: cache} do - assert cache.put_all(a: 1, c: 3) - assert cache.get_all([:a, :b, :c]) == %{a: 1, c: 3} - assert cache.delete_all() == 2 - end - - test "returns an empty map when none of the given keys is in cache", %{cache: cache} do - assert map_size(cache.get_all(["foo", "bar", 1, :a])) == 0 - end - - test "returns an empty map when the given key list is empty", %{cache: cache} do - assert map_size(cache.get_all([])) == 0 - end - end - - describe "delete/2" do - test "deletes the given key", %{cache: cache} do - for x <- 1..3, do: cache.put(x, x * 2) - - assert cache.get(1) == 2 - assert cache.delete(1) == :ok - refute cache.get(1) - - assert cache.get(2) == 4 - assert cache.get(3) == 6 - - assert cache.delete(:non_existent) == :ok - refute cache.get(:non_existent) - end - end - - describe "take/2" do - test "returns the given key and removes it from cache", %{cache: cache} do - for x <- 1..5 do - :ok = cache.put(x, x) - assert cache.take(x) == x - refute cache.take(x) - end - end - - test "returns nil if the key does not exist in cache", %{cache: cache} do - refute cache.take(:non_existent) - refute cache.take(nil) - end - end - - describe "take!/2" do - test "returns the given key and removes it from cache", %{cache: cache} do - assert cache.put(1, 1) == :ok - assert cache.take!(1) == 1 - end - - test "raises when the key does not exist in cache", %{cache: cache} do - assert_raise KeyError, fn -> - cache.take!(:non_existent) - end - - assert_raise KeyError, fn -> - cache.take!(nil) - end - end - end - - describe "has_key?/1" do - test "returns true if key does exist in cache", %{cache: cache} do - for x <- 1..5 do - :ok = cache.put(x, x) - assert cache.has_key?(x) - end - end - - test "returns false if 
key does not exist in cache", %{cache: cache} do - refute cache.has_key?(:non_existent) - refute cache.has_key?(nil) - end - end - - describe "update/4" do - test "updates an entry under a key applying a function on the value", %{cache: cache} do - :ok = cache.put("foo", "123") - :ok = cache.put("bar", "foo") - - assert cache.update("foo", 1, &String.to_integer/1) == 123 - assert cache.update("bar", "init", &String.to_atom/1) == :foo - end - - test "creates the entry with the default value if key does not exist", %{cache: cache} do - assert cache.update("foo", "123", &Integer.to_string/1) == "123" - end - - test "has not any effect if the given value is nil", %{cache: cache} do - refute cache.update("bar", nil, &Integer.to_string/1) - refute cache.get("bar") - end - end - - describe "incr/3" do - test "increments a counter by the given amount", %{cache: cache} do - assert cache.incr(:counter) == 1 - assert cache.incr(:counter) == 2 - assert cache.incr(:counter, 2) == 4 - assert cache.incr(:counter, 3) == 7 - assert cache.incr(:counter, 0) == 7 - - assert :counter |> cache.get() |> to_int() == 7 - - assert cache.incr(:counter, -1) == 6 - assert cache.incr(:counter, -1) == 5 - assert cache.incr(:counter, -2) == 3 - assert cache.incr(:counter, -3) == 0 - end - - test "increments a counter by the given amount with default", %{cache: cache} do - assert cache.incr(:counter1, 1, default: 10) == 11 - assert cache.incr(:counter2, 2, default: 10) == 12 - assert cache.incr(:counter3, -2, default: 10) == 8 - end - - test "increments a counter by the given amount ignoring the default", %{cache: cache} do - assert cache.incr(:counter) == 1 - assert cache.incr(:counter, 1, default: 10) == 2 - assert cache.incr(:counter, -1, default: 100) == 1 - end - - test "raises when amount is invalid", %{cache: cache} do - assert_raise ArgumentError, ~r"expected amount to be an integer", fn -> - cache.incr(:counter, "foo") - end - end - - test "raises when default is invalid", %{cache: cache} 
do - assert_raise ArgumentError, ~r"expected default: to be an integer", fn -> - cache.incr(:counter, 1, default: :invalid) - end - end - end - - describe "decr/3" do - test "decrements a counter by the given amount", %{cache: cache} do - assert cache.decr(:counter) == -1 - assert cache.decr(:counter) == -2 - assert cache.decr(:counter, 2) == -4 - assert cache.decr(:counter, 3) == -7 - assert cache.decr(:counter, 0) == -7 - - assert :counter |> cache.get() |> to_int() == -7 - - assert cache.decr(:counter, -1) == -6 - assert cache.decr(:counter, -1) == -5 - assert cache.decr(:counter, -2) == -3 - assert cache.decr(:counter, -3) == 0 - end - - test "decrements a counter by the given amount with default", %{cache: cache} do - assert cache.decr(:counter1, 1, default: 10) == 9 - assert cache.decr(:counter2, 2, default: 10) == 8 - assert cache.decr(:counter3, -2, default: 10) == 12 - end - - test "decrements a counter by the given amount ignoring the default", %{cache: cache} do - assert cache.decr(:counter) == -1 - assert cache.decr(:counter, 1, default: 10) == -2 - assert cache.decr(:counter, -1, default: 100) == -1 - end - - test "raises when amount is invalid", %{cache: cache} do - assert_raise ArgumentError, ~r"expected amount to be an integer", fn -> - cache.decr(:counter, "foo") - end - end - - test "raises when default is invalid", %{cache: cache} do - assert_raise ArgumentError, ~r"expected default: to be an integer", fn -> - cache.decr(:counter, 1, default: :invalid) - end - end - end - - ## Helpers - - defp to_int(data) when is_integer(data), do: data - defp to_int(data) when is_binary(data), do: String.to_integer(data) - end -end diff --git a/test/shared/cache/kv_error_test.exs b/test/shared/cache/kv_error_test.exs new file mode 100644 index 00000000..85851e61 --- /dev/null +++ b/test/shared/cache/kv_error_test.exs @@ -0,0 +1,196 @@ +defmodule Nebulex.Cache.KVErrorTest do + import Nebulex.CacheCase + + deftests do + import Nebulex.CacheCase, only: 
[assert_error_module: 2, assert_error_reason: 2] + + describe "put/3" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = + cache.put("hello", "world") + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "put_new/3" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = + cache.put_new("hello", "world") + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "put_new!/3" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.put_new!("hello", "world") + end + end + end + + describe "replace/3" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = + cache.replace("hello", "world") + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "replace!/3" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.replace!("hello", "world") + end + end + end + + describe "put_all/2" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = + cache.put_all(%{"apples" => 1, "bananas" => 3}) + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "put_all!/2" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.put_all!(other: 1) + end + end + end + + describe "put_new_all/2" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = + cache.put_new_all(%{"apples" => 1, "bananas" => 3}) + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "put_new_all!/2" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, 
fn -> + cache.put_new_all!(other: 1) + end + end + end + + describe "fetch/2" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = cache.fetch(1) + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "fetch!/2" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.fetch!("raise") + end + end + end + + describe "get/2" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = cache.get("error") + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "get!/2" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.get!("raise") + end + end + end + + describe "delete/2" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = cache.delete("error") + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "delete!/2" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.delete!("raise") + end + end + end + + describe "take/2" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = cache.take("error") + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "take!/2" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.take!("raise") + end + end + end + + describe "has_key?/1" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = cache.has_key?("error") + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "update!/4" do + test "raises because put error", %{cache: cache} do + assert_raise 
Nebulex.Error, fn -> + cache.update!("error", 1, &String.to_integer/1) + end + end + + test "raises because fetch error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.update!("error", 1, &String.to_integer/1) + end + end + end + + describe "incr!/3" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.incr!(:raise) + end + end + end + + describe "decr!/3" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.decr!(:raise) + end + end + end + end +end diff --git a/test/shared/cache/kv_expiration_error_test.exs b/test/shared/cache/kv_expiration_error_test.exs new file mode 100644 index 00000000..9c9e042d --- /dev/null +++ b/test/shared/cache/kv_expiration_error_test.exs @@ -0,0 +1,21 @@ +defmodule Nebulex.Cache.KVExpirationErrorTest do + import Nebulex.CacheCase + + deftests do + describe "expire!/2" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.expire!(:raise, 100) + end + end + end + + describe "touch!/1" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.touch!(:raise) + end + end + end + end +end diff --git a/test/shared/cache/kv_expiration_test.exs b/test/shared/cache/kv_expiration_test.exs new file mode 100644 index 00000000..625a50c3 --- /dev/null +++ b/test/shared/cache/kv_expiration_test.exs @@ -0,0 +1,249 @@ +defmodule Nebulex.Cache.KVExpirationTest do + import Nebulex.CacheCase + + deftests do + describe "ttl option is given to" do + test "put", %{cache: cache} do + assert cache.put!("foo", "bar", ttl: 500) == :ok + assert cache.has_key?("foo") == {:ok, true} + + :ok = Process.sleep(600) + + assert cache.has_key?("foo") == {:ok, false} + end + + test "put_all", %{cache: cache} do + entries = [{0, nil} | for(x <- 1..3, do: {x, x})] + + assert cache.put_all!(entries, ttl: 1000) == :ok + + refute cache.get!(0) + + for x <- 1..3, do: assert(cache.fetch!(x) == x) + + :ok = 
Process.sleep(1200) + + for x <- 1..3, do: refute(cache.get!(x)) + end + + test "put_new_all", %{cache: cache} do + assert cache.put_new_all!(%{"apples" => 1, "bananas" => 3}, ttl: 1000) == true + assert cache.fetch!("apples") == 1 + assert cache.fetch!("bananas") == 3 + + assert cache.put_new_all!(%{"apples" => 3, "oranges" => 1}) == false + assert cache.fetch!("apples") == 1 + assert cache.fetch!("bananas") == 3 + refute cache.get!("oranges") + + :ok = Process.sleep(1200) + + refute cache.get!("apples") + refute cache.get!("bananas") + end + + test "take", %{cache: cache} do + :ok = cache.put!("foo", "bar", ttl: 500) + + :ok = Process.sleep(600) + + assert {:error, %Nebulex.KeyError{key: "foo"}} = cache.take("foo") + end + + test "take!", %{cache: cache} do + :ok = cache.put!(1, 1, ttl: 100) + + :ok = Process.sleep(500) + + assert_raise Nebulex.KeyError, ~r"key 1", fn -> + cache.take!(1) + end + end + + test "incr! (initializes default value if ttl is expired)", %{cache: cache} do + assert cache.incr!(:counter, 1, ttl: 200) == 1 + assert cache.incr!(:counter) == 2 + + :ok = Process.sleep(210) + + assert cache.incr!(:counter, 1, ttl: 200) == 1 + assert cache.incr!(:counter) == 2 + end + end + + describe "ttl!/1" do + test "returns the remaining ttl for the given key", %{cache: cache} do + assert cache.put!(:a, 1, ttl: 500) == :ok + assert cache.ttl!(:a) > 0 + assert cache.put!(:b, 2) == :ok + + :ok = Process.sleep(10) + + assert cache.ttl!(:a) > 0 + assert cache.ttl!(:b) == :infinity + + :ok = Process.sleep(600) + + assert {:error, %Nebulex.KeyError{key: :a}} = cache.ttl(:a) + assert cache.ttl!(:b) == :infinity + end + + test "raises Nebulex.KeyError if key does not exist", %{cache: cache} do + msg = ~r|key :non_existent not found| + + assert_raise Nebulex.KeyError, msg, fn -> + cache.ttl!(:non_existent) + end + end + end + + describe "expire!/2" do + test "alters the expiration time for the given key", %{cache: cache} do + assert cache.put!(:a, 1, ttl: 500) == 
:ok + assert cache.ttl!(:a) > 0 + + assert cache.expire!(:a, 1000) == true + assert cache.ttl!(:a) > 100 + + assert cache.expire!(:a, :infinity) == true + assert cache.ttl!(:a) == :infinity + end + + test "returns false if key does not exist", %{cache: cache} do + assert cache.expire!(:non_existent, 100) == false + end + + test "raises when ttl is invalid", %{cache: cache} do + assert_raise ArgumentError, ~r"expected ttl to be a valid timeout", fn -> + cache.expire!(:a, "hello") + end + end + end + + describe "touch!/1" do + test "updates the last access time for the given entry", %{cache: cache} do + assert cache.put!(:touch, 1, ttl: 1000) == :ok + + :ok = Process.sleep(100) + + assert cache.touch!(:touch) == true + + :ok = Process.sleep(200) + + assert cache.touch!(:touch) == true + assert cache.fetch!(:touch) == 1 + + :ok = Process.sleep(1100) + + refute cache.get!(:touch) + end + + test "returns false if key does not exist", %{cache: cache} do + assert cache.touch!(:non_existent) == false + end + end + + describe "expiration" do + test "single entry put with ttl", %{cache: cache} do + assert cache.put!(1, 11, ttl: 1000) == :ok + assert cache.fetch!(1) == 11 + + for _ <- 3..1 do + assert cache.ttl!(1) > 0 + + Process.sleep(200) + end + + :ok = Process.sleep(500) + + assert {:error, %Nebulex.KeyError{key: 1}} = cache.ttl(1) + assert cache.put!(1, 11, ttl: 1000) == :ok + assert cache.ttl!(1) > 0 + end + + test "multiple entries put with ttl", %{cache: cache} do + assert cache.put!(1, 11, ttl: 1000) == :ok + assert cache.fetch!(1) == 11 + + :ok = Process.sleep(10) + + assert cache.fetch!(1) == 11 + + :ok = Process.sleep(1100) + + refute cache.get!(1) + + ops = [ + put!: ["foo", "bar", [ttl: 1000]], + put_all!: [[{"foo", "bar"}], [ttl: 1000]] + ] + + for {action, args} <- ops do + assert apply(cache, action, args) == :ok + + :ok = Process.sleep(10) + + assert cache.fetch!("foo") == "bar" + + :ok = Process.sleep(1200) + + refute cache.get!("foo") + end + end + end + 
+ describe "get_and_update with ttl" do + test "existing entry", %{cache: cache} do + assert cache.put!(1, 1, ttl: 1000) == :ok + assert cache.ttl!(1) > 0 + + :ok = Process.sleep(10) + + assert cache.get_and_update!(1, &cache.get_and_update_fun/1) == {1, 2} + assert cache.ttl!(1) == :infinity + + :ok = Process.sleep(1200) + + assert cache.fetch!(1) == 2 + end + end + + describe "update with ttl" do + test "existing entry", %{cache: cache} do + assert cache.put!(1, 1, ttl: 1000) == :ok + assert cache.ttl!(1) > 0 + + :ok = Process.sleep(10) + + assert cache.update!(1, 10, &Integer.to_string/1) == "1" + assert cache.ttl!(1) == :infinity + + :ok = Process.sleep(1200) + + assert cache.fetch!(1) == "1" + end + end + + describe "incr with ttl" do + test "increments a counter", %{cache: cache} do + assert cache.incr!(:counter, 1, ttl: 1000) == 1 + assert cache.ttl!(:counter) > 0 + + :ok = Process.sleep(1200) + + refute cache.get!(:counter) + end + + test "increments a counter and then set ttl", %{cache: cache} do + assert cache.incr!(:counter, 1) == 1 + assert cache.ttl!(:counter) == :infinity + + assert cache.expire!(:counter, 500) == true + + :ok = Process.sleep(600) + + refute cache.get!(:counter) + end + end + end +end diff --git a/test/shared/cache/kv_prop_test.exs b/test/shared/cache/kv_prop_test.exs new file mode 100644 index 00000000..1b25031c --- /dev/null +++ b/test/shared/cache/kv_prop_test.exs @@ -0,0 +1,33 @@ +defmodule Nebulex.Cache.KVPropTest do + import Nebulex.CacheCase + + deftests do + use ExUnitProperties + + describe "key/value entries" do + property "any term", %{cache: cache} do + check all term <- term() do + refute cache.get!(term) + + assert cache.replace!(term, term) == false + assert cache.put!(term, term) == :ok + assert cache.put_new!(term, term) == false + assert cache.fetch!(term) == term + + assert cache.replace!(term, "replaced") == true + assert cache.fetch!(term) == "replaced" + + assert cache.take!(term) == "replaced" + assert {:error, 
%Nebulex.KeyError{key: key}} = cache.take(term) + assert key == term + + assert cache.put_new!(term, term) == true + assert cache.fetch!(term) == term + + assert cache.delete!(term) == :ok + refute cache.get!(term) + end + end + end + end +end diff --git a/test/shared/cache/kv_test.exs b/test/shared/cache/kv_test.exs new file mode 100644 index 00000000..9067028a --- /dev/null +++ b/test/shared/cache/kv_test.exs @@ -0,0 +1,573 @@ +defmodule Nebulex.Cache.KVTest do + import Nebulex.CacheCase + + deftests do + describe "put/3" do + test "puts the given entry into the cache", %{cache: cache} do + for x <- 1..4, do: assert(cache.put(x, x) == :ok) + + assert cache.fetch!(1) == 1 + assert cache.fetch!(2) == 2 + + for x <- 3..4, do: assert(cache.put(x, x * x) == :ok) + + assert cache.fetch!(3) == 9 + assert cache.fetch!(4) == 16 + end + + test "puts a nil value", %{cache: cache} do + assert cache.put("foo", nil) == :ok + assert cache.fetch("foo") == {:ok, nil} + end + + test "puts a boolean value", %{cache: cache} do + assert cache.put(:boolean, true) == :ok + assert cache.fetch(:boolean) == {:ok, true} + + assert cache.put(:boolean, false) == :ok + assert cache.fetch(:boolean) == {:ok, false} + end + + test "raises when invalid option is given", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for :ttl option", fn -> + cache.put("hello", "world", ttl: "1") + end + end + + test "with dynamic_cache", %{cache: cache} = ctx do + if name = Map.get(ctx, :name) do + assert cache.put(name, "foo", "bar", []) == :ok + assert cache.fetch!(name, "foo", []) == "bar" + assert cache.delete(name, "foo", []) == :ok + end + end + + test "with dynamic_cache raises an exception", %{cache: cache} do + assert_raise Nebulex.Error, ~r"could not lookup", fn -> + cache.put!(:invalid, "foo", "bar", []) + end + end + end + + describe "put!/3" do + test "puts the given entry into the cache", %{cache: cache} do + for x <- 1..4, do: assert(cache.put!(x, x) == :ok) + + assert 
cache.fetch!(1) == 1 + assert cache.fetch!(2) == 2 + + for x <- 3..4, do: assert(cache.put!(x, x * x) == :ok) + + assert cache.fetch!(3) == 9 + assert cache.fetch!(4) == 16 + end + end + + describe "put_new/3" do + test "puts the given entry into the cache if the key does not exist", %{cache: cache} do + assert cache.put_new("foo", "bar") == {:ok, true} + assert cache.fetch!("foo") == "bar" + end + + test "do nothing if key does exist already", %{cache: cache} do + :ok = cache.put("foo", "bar") + + assert cache.put_new("foo", "bar bar") == {:ok, false} + assert cache.fetch!("foo") == "bar" + end + + test "puts a new nil value", %{cache: cache} do + assert cache.put_new(:mykey, nil) == {:ok, true} + assert cache.fetch(:mykey) == {:ok, nil} + end + + test "puts a boolean value", %{cache: cache} do + assert cache.put_new(true, true) == {:ok, true} + assert cache.fetch(true) == {:ok, true} + + assert cache.put_new(false, false) == {:ok, true} + assert cache.fetch(false) == {:ok, false} + end + + test "raises when invalid option is given", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for :ttl option", fn -> + cache.put_new("hello", "world", ttl: "1") + end + end + end + + describe "put_new!/3" do + test "puts the given entry into the cache if the key does not exist", %{cache: cache} do + assert cache.put_new!("hello", "world") == true + assert cache.fetch!("hello") == "world" + end + + test "raises false if the key does exist already", %{cache: cache} do + assert cache.put_new!("hello", "world") == true + assert cache.put_new!("hello", "world") == false + end + end + + describe "replace/3" do + test "replaces the cached entry with a new value", %{cache: cache} do + assert cache.replace("foo", "bar") == {:ok, false} + + assert cache.put("foo", "bar") == :ok + assert cache.fetch!("foo") == "bar" + + assert cache.replace("foo", "bar bar") == {:ok, true} + assert cache.fetch!("foo") == "bar bar" + end + + test "existing value with nil", %{cache: cache} 
do + :ok = cache.put("hello", "world") + + assert cache.replace("hello", nil) == {:ok, true} + assert cache.fetch("hello") == {:ok, nil} + end + + test "existing boolean value", %{cache: cache} do + :ok = cache.put(:boolean, true) + + assert cache.replace(:boolean, false) == {:ok, true} + assert cache.fetch(:boolean) == {:ok, false} + + assert cache.replace(:boolean, true) == {:ok, true} + assert cache.fetch(:boolean) == {:ok, true} + end + + test "raises when invalid option is given", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for :ttl option", fn -> + cache.replace("hello", "world", ttl: "1") + end + end + end + + describe "replace!/3" do + test "replaces the cached entry with a new value", %{cache: cache} do + assert cache.put("foo", "bar") == :ok + assert cache.replace!("foo", "bar bar") == true + assert cache.fetch!("foo") == "bar bar" + end + + test "returns false when the key is not found", %{cache: cache} do + assert cache.replace!("foo", "bar") == false + end + end + + describe "put_all/2" do + test "puts the given entries at once", %{cache: cache} do + assert cache.put_all(%{"apples" => 1, "bananas" => 3}) == :ok + assert cache.put_all(blueberries: 2, strawberries: 5) == :ok + assert cache.fetch!("apples") == 1 + assert cache.fetch!("bananas") == 3 + assert cache.fetch!(:blueberries) == 2 + assert cache.fetch!(:strawberries) == 5 + end + + test "empty list or map has not any effect", %{cache: cache} do + assert cache.put_all([]) == :ok + assert cache.put_all(%{}) == :ok + + assert count = cache.count_all() + assert cache.delete_all() == count + end + + test "puts the given entries using different data types at once", %{cache: cache} do + entries = + Enum.reduce(1..100, %{}, fn elem, acc -> + sample = %{ + elem => elem, + :"atom#{elem}" => elem, + "#{elem}" => elem, + {:tuple, elem} => elem, + <<100, elem>> => elem, + [elem] => elem, + true => true, + false => false + } + + Map.merge(acc, sample) + end) + + assert 
cache.put_all(entries) == :ok + + for {k, v} <- entries, do: assert(cache.fetch!(k) == v) + end + + test "raises when invalid option is given", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for :ttl option", fn -> + cache.put_all(%{"apples" => 1, "bananas" => 3}, ttl: "1") + end + end + end + + describe "put_all!/2" do + test "puts the given entries at once", %{cache: cache} do + assert cache.put_all!(%{"apples" => 1, "bananas" => 3}) == :ok + assert cache.put_all!(blueberries: 2, strawberries: 5) == :ok + assert cache.fetch!("apples") == 1 + assert cache.fetch!("bananas") == 3 + assert cache.fetch!(:blueberries) == 2 + assert cache.fetch!(:strawberries) == 5 + end + end + + describe "put_new_all/2" do + test "puts the given entries only if none of the keys does exist already", %{cache: cache} do + assert cache.put_new_all(%{"apples" => 1, "bananas" => 3}) == {:ok, true} + assert cache.fetch!("apples") == 1 + assert cache.fetch!("bananas") == 3 + + assert cache.put_new_all(%{"apples" => 3, "oranges" => 1}) == {:ok, false} + assert cache.fetch!("apples") == 1 + assert cache.fetch!("bananas") == 3 + refute cache.get!("oranges") + end + + test "puts a boolean values", %{cache: cache} do + assert cache.put_new_all(%{true => true, false => false}) == {:ok, true} + assert cache.fetch!(true) == true + assert cache.fetch!(false) == false + end + + test "raises when invalid option is given", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for :ttl option", fn -> + cache.put_new_all(%{"apples" => 1, "bananas" => 3}, ttl: "1") + end + end + end + + describe "put_new_all!/2" do + test "puts the given entries only if none of the keys does exist already", %{cache: cache} do + assert cache.put_new_all!(%{"apples" => 1, "bananas" => 3}) == true + assert cache.fetch!("apples") == 1 + assert cache.fetch!("bananas") == 3 + end + + test "raises an error if any of the keys does exist already", %{cache: cache} do + assert 
cache.put_new_all!(%{"apples" => 1, "bananas" => 3}) == true + assert cache.put_new_all!(%{"apples" => 3, "oranges" => 1}) == false + end + end + + describe "fetch/2" do + test "retrieves a cached entry", %{cache: cache} do + for x <- 1..5 do + :ok = cache.put(x, x) + + assert cache.fetch(x) == {:ok, x} + end + end + + test "returns {:error, :not_found} if key does not exist in cache", %{cache: cache} do + assert {:error, %Nebulex.KeyError{key: "non-existent"}} = cache.fetch("non-existent") + end + end + + describe "fetch!/2" do + test "retrieves a cached entry", %{cache: cache} do + for x <- 1..5 do + :ok = cache.put(x, x) + + assert cache.fetch!(x) == x + end + end + + test "raises when the key does not exist in cache", %{cache: cache} do + msg = ~r|key "non-existent" not found| + + assert_raise Nebulex.KeyError, msg, fn -> + cache.fetch!("non-existent") + end + end + end + + describe "get/2" do + test "retrieves a cached entry", %{cache: cache} do + for x <- 1..5 do + :ok = cache.put(x, x) + + assert cache.get(x) == {:ok, x} + end + end + + test "returns default if key does not exist in cache", %{cache: cache} do + assert cache.get("non-existent") == {:ok, nil} + assert cache.get("non-existent", "default") == {:ok, "default"} + end + end + + describe "get!/2" do + test "retrieves a cached entry", %{cache: cache} do + for x <- 1..5 do + :ok = cache.put(x, x) + + assert cache.get!(x) == x + end + end + + test "returns default if key does not exist in cache", %{cache: cache} do + refute cache.get!("non-existent") + assert cache.get!("non-existent", "default") == "default" + end + end + + describe "delete/2" do + test "deletes the given key", %{cache: cache} do + for x <- 1..3, do: cache.put(x, x * 2) + + assert cache.fetch!(1) == 2 + assert cache.delete(1) == :ok + refute cache.get!(1) + + assert cache.fetch!(2) == 4 + assert cache.fetch!(3) == 6 + + assert cache.delete(:non_existent) == :ok + refute cache.get!(:non_existent) + end + + test "deletes boolean and nil 
values", %{cache: cache} do + :ok = cache.put_all(true: true, false: false, nil: nil) + + assert cache.fetch!(true) == true + assert cache.fetch!(false) == false + assert cache.fetch!(nil) == nil + + assert cache.delete(true) == :ok + assert cache.delete(false) == :ok + assert cache.delete(nil) == :ok + + refute cache.get!(true) + refute cache.get!(false) + refute cache.get!(nil) + end + end + + describe "delete!/2" do + test "deletes the given key", %{cache: cache} do + assert cache.put("foo", "bar") == :ok + + assert cache.fetch!("foo") == "bar" + assert cache.delete!("foo") == :ok + refute cache.get!("foo") + end + end + + describe "take/2" do + test "returns the given key and removes it from cache", %{cache: cache} do + for x <- 1..5 do + :ok = cache.put(x, x) + + assert cache.take(x) == {:ok, x} + assert {:error, %Nebulex.KeyError{key: ^x}} = cache.take(x) + end + end + + test "returns boolean and nil values", %{cache: cache} do + :ok = cache.put_all(true: true, false: false, nil: nil) + + assert cache.take(true) == {:ok, true} + assert cache.take(false) == {:ok, false} + assert cache.take(nil) == {:ok, nil} + + refute cache.get!(true) + refute cache.get!(false) + refute cache.get!(nil) + end + + test "returns nil if the key does not exist in cache", %{cache: cache} do + assert {:error, %Nebulex.KeyError{key: :non_existent}} = cache.take(:non_existent) + assert {:error, %Nebulex.KeyError{key: nil}} = cache.take(nil) + end + end + + describe "take!/2" do + test "returns the given key and removes it from cache", %{cache: cache} do + assert cache.put(1, 1) == :ok + assert cache.take!(1) == 1 + assert cache.get!(1) == nil + end + + test "raises when the key does not exist in cache", %{cache: cache} do + msg = ~r|key "non-existent" not found| + + assert_raise Nebulex.KeyError, msg, fn -> + cache.take!("non-existent") + end + end + end + + describe "has_key?/1" do + test "returns true if key does exist in cache", %{cache: cache} do + for x <- 1..5 do + :ok = 
cache.put(x, x) + + assert cache.has_key?(x) == {:ok, true} + end + end + + test "returns boolean and nil values", %{cache: cache} do + :ok = cache.put_all(true: true, false: false, nil: nil) + + assert cache.has_key?(true) == {:ok, true} + assert cache.has_key?(false) == {:ok, true} + assert cache.has_key?(nil) == {:ok, true} + end + + test "returns false if key does not exist in cache", %{cache: cache} do + assert cache.has_key?(:non_existent) == {:ok, false} + assert cache.has_key?(nil) == {:ok, false} + end + end + + describe "update!/4" do + test "updates an entry under a key applying a function on the value", %{cache: cache} do + :ok = cache.put("foo", "123") + :ok = cache.put("bar", "foo") + + assert cache.update!("foo", 1, &String.to_integer/1) == 123 + assert cache.update!("bar", "init", &String.to_atom/1) == :foo + end + + test "creates the entry with the default value if key does not exist", %{cache: cache} do + assert cache.update!("foo", "123", &Integer.to_string/1) == "123" + end + + test "updates existing value with nil", %{cache: cache} do + assert cache.update!("bar", nil, &Integer.to_string/1) == nil + assert cache.fetch!("bar") == nil + end + + test "raises because the cache is not started", %{cache: cache} do + :ok = cache.stop() + + assert_raise Nebulex.Error, fn -> + cache.update!("error", 1, &String.to_integer/1) + end + end + end + + describe "incr/3" do + test "increments a counter by the given amount", %{cache: cache} do + assert cache.incr(:counter) == {:ok, 1} + assert cache.incr(:counter) == {:ok, 2} + assert cache.incr(:counter, 2) == {:ok, 4} + assert cache.incr(:counter, 3) == {:ok, 7} + assert cache.incr(:counter, 0) == {:ok, 7} + + assert :counter |> cache.fetch!() |> to_int() == 7 + + assert cache.incr(:counter, -1) == {:ok, 6} + assert cache.incr(:counter, -1) == {:ok, 5} + assert cache.incr(:counter, -2) == {:ok, 3} + assert cache.incr(:counter, -3) == {:ok, 0} + end + + test "increments a counter by the given amount with 
default", %{cache: cache} do + assert cache.incr(:counter1, 1, default: 10) == {:ok, 11} + assert cache.incr(:counter2, 2, default: 10) == {:ok, 12} + assert cache.incr(:counter3, -2, default: 10) == {:ok, 8} + end + + test "increments a counter by the given amount ignoring the default", %{cache: cache} do + assert cache.incr(:counter) == {:ok, 1} + assert cache.incr(:counter, 1, default: 10) == {:ok, 2} + assert cache.incr(:counter, -1, default: 100) == {:ok, 1} + end + + test "raises when amount is invalid", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for amount argument", fn -> + cache.incr(:counter, "foo") + end + end + + test "raises when default is invalid", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for :default option: expected integer", fn -> + cache.incr(:counter, 1, default: :invalid) + end + end + end + + describe "incr!/3" do + test "increments a counter by the given amount", %{cache: cache} do + assert cache.incr!(:counter) == 1 + assert cache.incr!(:counter) == 2 + assert cache.incr!(:counter, 2) == 4 + assert cache.incr!(:counter, 3) == 7 + assert cache.incr!(:counter, 0) == 7 + + assert :counter |> cache.fetch!() |> to_int() == 7 + + assert cache.incr!(:counter, -1) == 6 + assert cache.incr!(:counter, -1) == 5 + assert cache.incr!(:counter, -2) == 3 + assert cache.incr!(:counter, -3) == 0 + end + end + + describe "decr/3" do + test "decrements a counter by the given amount", %{cache: cache} do + assert cache.decr(:counter) == {:ok, -1} + assert cache.decr(:counter) == {:ok, -2} + assert cache.decr(:counter, 2) == {:ok, -4} + assert cache.decr(:counter, 3) == {:ok, -7} + assert cache.decr(:counter, 0) == {:ok, -7} + + assert :counter |> cache.fetch!() |> to_int() == -7 + + assert cache.decr(:counter, -1) == {:ok, -6} + assert cache.decr(:counter, -1) == {:ok, -5} + assert cache.decr(:counter, -2) == {:ok, -3} + assert cache.decr(:counter, -3) == {:ok, 0} + end + + test "decrements a counter by the given 
amount with default", %{cache: cache} do + assert cache.decr(:counter1, 1, default: 10) == {:ok, 9} + assert cache.decr(:counter2, 2, default: 10) == {:ok, 8} + assert cache.decr(:counter3, -2, default: 10) == {:ok, 12} + end + + test "decrements a counter by the given amount ignoring the default", %{cache: cache} do + assert cache.decr(:counter) == {:ok, -1} + assert cache.decr(:counter, 1, default: 10) == {:ok, -2} + assert cache.decr(:counter, -1, default: 100) == {:ok, -1} + end + + test "raises when amount is invalid", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for amount argument", fn -> + cache.decr(:counter, "foo") + end + end + + test "raises when default is invalid", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for :default option: expected integer", fn -> + cache.decr(:counter, 1, default: :invalid) + end + end + end + + describe "decr!/3" do + test "decrements a counter by the given amount", %{cache: cache} do + assert cache.decr!(:counter) == -1 + assert cache.decr!(:counter) == -2 + assert cache.decr!(:counter, 2) == -4 + assert cache.decr!(:counter, 3) == -7 + assert cache.decr!(:counter, 0) == -7 + + assert :counter |> cache.fetch!() |> to_int() == -7 + + assert cache.decr!(:counter, -1) == -6 + assert cache.decr!(:counter, -1) == -5 + assert cache.decr!(:counter, -2) == -3 + assert cache.decr!(:counter, -3) == 0 + end + end + + ## Helpers + + defp to_int(data) when is_integer(data), do: data + defp to_int(data) when is_binary(data), do: String.to_integer(data) + end +end diff --git a/test/shared/cache/persistence_error_test.exs b/test/shared/cache/persistence_error_test.exs index cd246e53..af997963 100644 --- a/test/shared/cache/persistence_error_test.exs +++ b/test/shared/cache/persistence_error_test.exs @@ -2,12 +2,43 @@ defmodule Nebulex.Cache.PersistenceErrorTest do import Nebulex.CacheCase deftests "persistence error" do - test "dump: invalid path", %{cache: cache} do - assert 
cache.dump("/invalid/path") == {:error, :enoent} + test "dump/2 fails because invalid path", %{cache: cache} do + assert cache.dump("/invalid/path") == + {:error, + %Nebulex.Error{ + module: Nebulex.Error, + opts: [cache: cache], + reason: %File.Error{action: "open", path: "/invalid/path", reason: :enoent} + }} end - test "load: invalid path", %{cache: cache} do - assert cache.load("wrong_file") == {:error, :enoent} + test "dump!/2 raises because invalid path", %{cache: cache} do + err = """ + the following exception occurred when executing a command. + + ** (File.Error) could not open \"/invalid/path\": no such file or directory + + """ + + assert_raise Nebulex.Error, err, fn -> + cache.dump!("/invalid/path") + end + end + + test "load/2 error because invalid path", %{cache: cache} do + assert cache.load("wrong_file") == + {:error, + %Nebulex.Error{ + module: Nebulex.Error, + opts: [cache: cache], + reason: %File.Error{action: "open", path: "wrong_file", reason: :enoent} + }} + end + + test "load!/2 raises because invalid path", %{cache: cache} do + assert_raise Nebulex.Error, ~r|could not open "wrong_file": no such file|, fn -> + cache.load!("wrong_file") + end end end end diff --git a/test/shared/cache/persistence_test.exs b/test/shared/cache/persistence_test.exs index 5d77b954..7375a10f 100644 --- a/test/shared/cache/persistence_test.exs +++ b/test/shared/cache/persistence_test.exs @@ -2,36 +2,36 @@ defmodule Nebulex.Cache.PersistenceTest do import Nebulex.CacheCase deftests "persistence" do - test "dump and load", %{cache: cache} do + test "dump and load", %{cache: cache} = attrs do tmp = System.tmp_dir!() - path = "#{tmp}/#{cache}" + path = "#{tmp}/#{attrs[:name] || cache}" try do - assert cache.count_all() == 0 + assert cache.count_all!() == 0 assert cache.dump(path) == :ok assert File.exists?(path) assert cache.load(path) == :ok - assert cache.count_all() == 0 + assert cache.count_all!() == 0 count = 100 unexpired = for x <- 1..count, into: %{}, do: {x, x} 
assert cache.put_all(unexpired) == :ok assert cache.put_all(%{a: 1, b: 2}, ttl: 10) == :ok - assert cache.put_all(%{c: 1, d: 2}, ttl: 3_600_000) == :ok - assert cache.count_all() == count + 4 + assert cache.put_all(%{c: 1, d: 2}, ttl: :timer.hours(1)) == :ok + assert cache.count_all!() == count + 4 :ok = Process.sleep(1000) assert cache.dump(path) == :ok assert File.exists?(path) - assert cache.delete_all() == count + 4 - assert cache.count_all() == 0 + assert cache.delete_all!() == count + 4 + assert cache.count_all!() == 0 assert cache.load(path) == :ok - assert cache.get_all(1..count) == unexpired - assert cache.get_all([:a, :b, :c, :d]) == %{c: 1, d: 2} - assert cache.count_all() == count + 2 + assert cache.get_all!(in: Enum.to_list(1..count)) |> Map.new() == unexpired + assert cache.get_all!(in: [:a, :b, :c, :d]) |> Map.new() == %{c: 1, d: 2} + assert cache.count_all!() == count + 2 after File.rm_rf!(path) end diff --git a/test/shared/cache/queryable_expiration_test.exs b/test/shared/cache/queryable_expiration_test.exs new file mode 100644 index 00000000..2318d824 --- /dev/null +++ b/test/shared/cache/queryable_expiration_test.exs @@ -0,0 +1,56 @@ +defmodule Nebulex.Cache.QueryableExpirationTest do + import Nebulex.CacheCase + + deftests do + import Nebulex.CacheCase + + describe "expired entries are not matched and returned" do + test "on: get_all! or stream!", %{cache: cache} do + :ok = cache.put_all!(a: 1, b: 2) + :ok = cache.put(:c, 3, ttl: 1000) + + keys = [:a, :b, :c] + + assert cache.get_all!(select: :key) |> :lists.usort() == keys + assert cache.stream!(select: :key) |> Enum.to_list() |> :lists.usort() == keys + + wait_until(fn -> + assert cache.get_all!(select: :key) |> :lists.usort() == [:a, :b] + + assert stream = cache.stream!(select: :key) + assert stream |> Enum.to_list() |> :lists.usort() == [:a, :b] + end) + end + + test "on: get_all! or stream! 
[in: keys]", %{cache: cache} do + :ok = cache.put_all!(a: 1, b: 2) + :ok = cache.put(:c, 3, ttl: 1000) + + keys = [:a, :b, :c] + + assert cache.get_all!(in: keys, select: :key) |> :lists.usort() == keys + assert cache.stream!(in: keys, select: :key) |> Enum.to_list() |> :lists.usort() == keys + + wait_until(fn -> + assert cache.get_all!(in: keys, select: :key) |> :lists.usort() == [:a, :b] + + assert stream = cache.stream!(in: keys, select: :key) + assert stream |> Enum.to_list() |> :lists.usort() == [:a, :b] + end) + end + + test "on: delete_all! [in: keys]", %{cache: cache} do + :ok = cache.put_all!(a: 1, b: 2) + :ok = cache.put(:c, 3, ttl: 1000) + + assert cache.count_all!() == 3 + assert cache.delete_all!(in: [:a]) == 1 + assert cache.count_all!() == 2 + + wait_until(fn -> + assert cache.get_all!(select: :key) |> :lists.usort() == [:b] + end) + end + end + end +end diff --git a/test/shared/cache/queryable_test.exs b/test/shared/cache/queryable_test.exs index 22d9c000..a43a32f4 100644 --- a/test/shared/cache/queryable_test.exs +++ b/test/shared/cache/queryable_test.exs @@ -4,88 +4,267 @@ defmodule Nebulex.Cache.QueryableTest do deftests do import Nebulex.CacheCase - describe "all/2" do - test "returns all keys in cache", %{cache: cache} do + describe "get_all/2" do + test "ok: matches all cached entries", %{cache: cache} do set1 = cache_put(cache, 1..50) set2 = cache_put(cache, 51..100) - for x <- 1..100, do: assert(cache.get(x) == x) + for x <- 1..100, do: assert(cache.fetch!(x) == x) + expected = set1 ++ set2 - assert :lists.usort(cache.all()) == expected + assert cache.get_all!() |> :lists.usort() == List.zip([expected, expected]) + assert cache.get_all!(select: :key) |> :lists.usort() == expected + assert cache.get_all!(select: :value) |> :lists.usort() == expected set3 = Enum.to_list(20..60) - :ok = Enum.each(set3, &cache.delete(&1)) + :ok = Enum.each(set3, &cache.delete!(&1)) expected = :lists.usort(expected -- set3) - assert :lists.usort(cache.all()) == 
expected + assert cache.get_all!() |> :lists.usort() == List.zip([expected, expected]) + assert cache.get_all!(select: :key) |> :lists.usort() == expected + assert cache.get_all!(select: :value) |> :lists.usort() == expected + end + + test "ok: returns an empty list when the cache is empty", %{cache: cache} do + assert cache.get_all!() == [] + end + + test "error: raises an exception because of an invalid query", %{cache: cache} do + assert_raise Nebulex.QueryError, fn -> + cache.get_all(query: :invalid) + end + end + + test "error: invalid option value for query spec", %{cache: cache} do + for opt <- [:select, :in] do + msg = ~r"invalid value for #{inspect(opt)} option" + + assert_raise NimbleOptions.ValidationError, msg, fn -> + cache.get_all([{opt, :invalid}]) + end + end + end + + test "error: unknown option in query spec", %{cache: cache} do + assert_raise NimbleOptions.ValidationError, ~r"unknown options", fn -> + cache.get_all(foo: :bar) + end + end + + test "error: invalid option entry for query spec", %{cache: cache} do + assert_raise ArgumentError, ~r"expected a keyword list, but an entry in the list", fn -> + cache.get_all([:invalid]) + end + end + + test "error: invalid query spec", %{cache: cache} do + msg = ~r"invalid query spec: expected a keyword list, got: :invalid" + + assert_raise ArgumentError, msg, fn -> + cache.get_all(:invalid) + end end end describe "stream/2" do @entries for x <- 1..10, into: %{}, do: {x, x * 2} - test "returns all keys in cache", %{cache: cache} do + test "ok: returns all keys in cache", %{cache: cache} do :ok = cache.put_all(@entries) - assert nil - |> cache.stream() + assert cache.stream!(select: :key) |> Enum.to_list() |> :lists.usort() == Map.keys(@entries) end - test "returns all values in cache", %{cache: cache} do + test "ok: returns all values in cache", %{cache: cache} do :ok = cache.put_all(@entries) - assert nil - |> cache.stream(return: :value, page_size: 3) + assert [select: :value] + |> 
cache.stream!(max_entries: 2) |> Enum.to_list() |> :lists.usort() == Map.values(@entries) end - test "returns all key/value pairs in cache", %{cache: cache} do + test "ok: returns all key/value pairs in cache", %{cache: cache} do :ok = cache.put_all(@entries) - assert nil - |> cache.stream(return: {:key, :value}, page_size: 3) + assert [select: {:key, :value}] + |> cache.stream!(max_entries: 2) |> Enum.to_list() - |> :lists.usort() == :maps.to_list(@entries) + |> :lists.usort() == Map.to_list(@entries) + end + + test "ok: returns an empty list when the cache is empty", %{cache: cache} do + assert cache.stream!() |> Enum.to_list() == [] end - test "raises when query is invalid", %{cache: cache} do + test "error: raises an exception because of an invalid query", %{cache: cache} do assert_raise Nebulex.QueryError, fn -> - :invalid_query - |> cache.stream() - |> Enum.to_list() + cache.stream!(query: :invalid_query) end end end describe "delete_all/2" do - test "evicts all entries in the cache", %{cache: cache} do + test "ok: evicts all entries in the cache", %{cache: cache} do Enum.each(1..2, fn _ -> entries = cache_put(cache, 1..50) - assert cache.all() |> :lists.usort() |> length() == length(entries) + assert cache.get_all!() |> :lists.usort() |> length() == length(entries) - cached = cache.count_all() - assert cache.delete_all() == cached - assert cache.count_all() == 0 + cached = cache.count_all!() + assert cache.delete_all!() == cached + assert cache.count_all!() == 0 end) end + + test "ok: deleted count is 0 when the cache is empty", %{cache: cache} do + assert cache.delete_all!() == 0 + end + + test "error: raises an exception because of an invalid query", %{cache: cache} do + assert_raise Nebulex.QueryError, fn -> + cache.delete_all(query: :invalid) + end + end end describe "count_all/2" do - test "returns the total number of cached entries", %{cache: cache} do + test "ok: returns the total number of cached entries", %{cache: cache} do for x <- 1..100, do: 
cache.put(x, x) - total = cache.all() |> length() - assert cache.count_all() == total + total = cache.get_all!() |> length() + assert cache.count_all!() == total + + for x <- 1..50, do: cache.delete!(x) + total = cache.get_all!() |> length() + assert cache.count_all!() == total + + for x <- 51..60, do: assert(cache.fetch!(x) == x) + end + + test "ok: count is 0 when the cache is empty", %{cache: cache} do + assert cache.count_all!() == 0 + end + + test "error: raises an exception because of an invalid query", %{cache: cache} do + assert_raise Nebulex.QueryError, fn -> + cache.count_all(query: :invalid) + end + end + end - for x <- 1..50, do: cache.delete(x) - total = cache.all() |> length() - assert cache.count_all() == total + describe "get_all!/2 - [in: keys]" do + test "ok: returns the entries associated to the requested keys", %{cache: cache} do + assert cache.put_all(a: 1, c: 3) == :ok - for x <- 51..60, do: assert(cache.get(x) == x) + keys = [:a, :b, :c] + + assert cache.get_all!(in: keys) |> Map.new() == %{a: 1, c: 3} + assert cache.get_all!(in: keys, select: :key) |> :lists.usort() == [:a, :c] + assert cache.get_all!(in: keys, select: :value) |> :lists.usort() == [1, 3] + + assert cache.delete_all!() == 2 + end + + test "ok: returns an empty list when none of the given keys is in cache", %{cache: cache} do + assert cache.get_all!(in: ["foo", "bar", 1, :a]) == [] + end + + test "ok: returns an empty list when the given key list is empty", %{cache: cache} do + assert cache.get_all!(in: []) == [] + end + + test "error: raises an exception because invalid query spec", %{cache: cache} do + assert_raise NimbleOptions.ValidationError, ~r"invalid value for :in option", fn -> + cache.get_all!(in: :invalid) + end + end + end + + describe "stream!/2 - [in: keys]" do + test "ok: returns the entries associated to the requested keys", %{cache: cache} do + entries = for x <- 1..10, into: %{}, do: {x, x * 2} + assert cache.put_all(entries) == :ok + + keys = [1, 2, 3, 4, 5, 
11, 100] + expected_keys = Map.take(entries, keys) |> Map.keys() + expected_values = Map.take(entries, keys) |> Map.values() + + assert cache.stream!(in: keys) |> Map.new() == Map.take(entries, keys) + + assert cache.stream!(in: keys, select: :key) |> Enum.to_list() |> :lists.usort() == + expected_keys + + assert cache.stream!(in: keys, select: :value) |> Enum.to_list() |> :lists.usort() == + expected_values + + assert cache.delete_all!() == 10 + end + + test "ok: returns an empty list when none of the given keys is in cache", %{cache: cache} do + assert cache.stream!(in: ["foo", "bar", 1, :a]) |> Enum.to_list() == [] + end + + test "ok: returns an empty list when the given key list is empty", %{cache: cache} do + assert cache.stream!(in: []) |> Enum.to_list() == [] + end + + test "error: raises an exception because invalid query spec", %{cache: cache} do + assert_raise NimbleOptions.ValidationError, ~r"invalid value for :in option", fn -> + cache.stream!(in: :invalid) + end + end + end + + describe "count_all!/2 - [in: keys])" do + test "ok: returns the count of the requested keys", %{cache: cache} do + assert cache.put_all(a: 1, c: 3, d: 4) == :ok + + assert cache.count_all!(in: [:a, :b, :c]) == 2 + + assert cache.delete_all!() == 3 + end + + test "ok: returns 0 when none of the given keys is in cache", %{cache: cache} do + assert cache.count_all!(in: ["foo", "bar", 1, :a]) == 0 + end + + test "ok: returns 0 when the given key list is empty", %{cache: cache} do + assert cache.count_all!(in: []) == 0 + end + + test "error: raises an exception because invalid query spec", %{cache: cache} do + assert_raise NimbleOptions.ValidationError, ~r"invalid value for :in option", fn -> + cache.count_all!(in: :invalid) + end + end + end + + describe "delete_all!/2 - [in: keys]" do + test "ok: returns the count of the deleted keys", %{cache: cache} do + assert cache.put_all(a: 1, c: 3, d: 4) == :ok + + assert cache.delete_all!(in: [:a, :b, :c]) == 2 + assert cache.get_all!() == 
[d: 4] + + assert cache.delete_all!() == 1 + assert cache.get_all!() == [] + end + + test "ok: returns 0 when none of the given keys is in cache", %{cache: cache} do + assert cache.delete_all!(in: ["foo", "bar", 1, :a]) == 0 + end + + test "ok: returns 0 when the given key list is empty", %{cache: cache} do + assert cache.delete_all!(in: []) == 0 + end + + test "error: raises an exception because invalid query spec", %{cache: cache} do + assert_raise NimbleOptions.ValidationError, ~r"invalid value for :in option", fn -> + cache.delete_all!(in: :invalid) + end end end end diff --git a/test/shared/cache/transaction_test.exs b/test/shared/cache/transaction_test.exs index 7a7d9343..b3f0a905 100644 --- a/test/shared/cache/transaction_test.exs +++ b/test/shared/cache/transaction_test.exs @@ -4,57 +4,65 @@ defmodule Nebulex.Cache.TransactionTest do deftests do describe "transaction" do test "ok: single transaction", %{cache: cache} do - refute cache.transaction(fn -> - with :ok <- cache.put(1, 11), - 11 <- cache.get!(1), - :ok <- cache.delete(1) do - cache.get(1) - end - end) + assert cache.transaction(fn -> + :ok = cache.put!(1, 11) + + 11 = cache.fetch!(1) + + :ok = cache.delete!(1) + + cache.get!(1) + end) == {:ok, nil} end test "ok: nested transaction", %{cache: cache} do - refute cache.transaction( - [keys: [1]], + assert cache.transaction( fn -> cache.transaction( - [keys: [2]], fn -> - with :ok <- cache.put(1, 11), - 11 <- cache.get!(1), - :ok <- cache.delete(1) do - cache.get(1) - end - end + :ok = cache.put!(1, 11) + + 11 = cache.fetch!(1) + + :ok = cache.delete!(1) + + cache.get!(1) + end, + keys: [2] ) - end - ) + end, + keys: [1] + ) == {:ok, {:ok, nil}} end test "ok: single transaction with read and write operations", %{cache: cache} do assert cache.put(:test, ["old value"]) == :ok - assert cache.get(:test) == ["old value"] + assert cache.fetch!(:test) == ["old value"] assert cache.transaction( - [keys: [:test]], fn -> - ["old value"] = value = 
cache.get(:test) - :ok = cache.put(:test, ["new value" | value]) - cache.get(:test) - end - ) == ["new value", "old value"] + ["old value"] = value = cache.fetch!(:test) + + :ok = cache.put!(:test, ["new value" | value]) + + cache.fetch!(:test) + end, + keys: [:test] + ) == {:ok, ["new value", "old value"]} - assert cache.get(:test) == ["new value", "old value"] + assert cache.fetch!(:test) == ["new value", "old value"] end - test "raises exception", %{cache: cache} do + test "error: exception is raised", %{cache: cache} do assert_raise MatchError, fn -> cache.transaction(fn -> - with :ok <- cache.put(1, 11), - 11 <- cache.get!(1), - :ok <- cache.delete(1) do - :ok = cache.get(1) - end + :ok = cache.put!(1, 11) + + 11 = cache.fetch!(1) + + :ok = cache.delete!(1) + + :ok = cache.get(1) end) end end @@ -66,34 +74,39 @@ defmodule Nebulex.Cache.TransactionTest do _ = cache.put_dynamic_cache(name) cache.transaction( - [keys: [key], retries: 1], fn -> :ok = cache.put(key, true) - Process.sleep(2000) - end + + Process.sleep(1100) + end, + keys: [key], + retries: 1 ) end) :ok = Process.sleep(200) - assert_raise RuntimeError, "transaction aborted", fn -> - cache.transaction( - [keys: [key], retries: 1], - fn -> - cache.get(key) - end - ) + assert_raise Nebulex.Error, ~r|transaction aborted|, fn -> + {:error, %Nebulex.Error{} = reason} = + cache.transaction( + fn -> cache.get(key) end, + keys: [key], + retries: 1 + ) + + raise reason end end end describe "in_transaction?" 
do test "returns true if calling process is already within a transaction", %{cache: cache} do - refute cache.in_transaction?() + assert cache.in_transaction?() == {:ok, false} cache.transaction(fn -> - :ok = cache.put(1, 11, return: :key) - true = cache.in_transaction?() + :ok = cache.put(1, 11) + + assert cache.in_transaction?() == {:ok, true} end) end end diff --git a/test/shared/cache_test.exs b/test/shared/cache_test_case.exs similarity index 58% rename from test/shared/cache_test.exs rename to test/shared/cache_test_case.exs index 792323ca..2c8c6428 100644 --- a/test/shared/cache_test.exs +++ b/test/shared/cache_test_case.exs @@ -1,18 +1,18 @@ -defmodule Nebulex.CacheTest do +defmodule Nebulex.CacheTestCase do @moduledoc """ Shared Tests """ defmacro __using__(_opts) do quote do - use Nebulex.Cache.EntryTest - use Nebulex.Cache.EntryExpirationTest - use Nebulex.Cache.EntryPropTest + use Nebulex.Cache.KVTest + use Nebulex.Cache.KVExpirationTest + use Nebulex.Cache.KVPropTest use Nebulex.Cache.QueryableTest + use Nebulex.Cache.QueryableExpirationTest use Nebulex.Cache.TransactionTest use Nebulex.Cache.PersistenceTest use Nebulex.Cache.PersistenceErrorTest - use Nebulex.Cache.DeprecatedTest end end end diff --git a/test/shared/local_test.exs b/test/shared/local_test.exs deleted file mode 100644 index b45599c0..00000000 --- a/test/shared/local_test.exs +++ /dev/null @@ -1,528 +0,0 @@ -defmodule Nebulex.LocalTest do - import Nebulex.CacheCase - - deftests do - import Ex2ms - import Nebulex.CacheCase - - alias Nebulex.{Adapter, Entry} - - describe "error" do - test "on init because invalid backend", %{cache: cache} do - _ = Process.flag(:trap_exit, true) - - assert {:error, {%RuntimeError{message: msg}, _}} = - cache.start_link(name: :invalid_backend, backend: :xyz) - - assert msg == - "expected backend: option to be one of the supported " <> - "backends [:ets, :shards], got: :xyz" - end - - test "because cache is stopped", %{cache: cache} do - :ok = cache.stop() - 
- msg = ~r"could not lookup Nebulex cache" - assert_raise Nebulex.RegistryLookupError, msg, fn -> cache.put(1, 13) end - assert_raise Nebulex.RegistryLookupError, msg, fn -> cache.get(1) end - assert_raise Nebulex.RegistryLookupError, msg, fn -> cache.delete(1) end - end - end - - describe "entry:" do - test "get_and_update", %{cache: cache} do - fun = fn - nil -> {nil, 1} - val -> {val, val * 2} - end - - assert cache.get_and_update(1, fun) == {nil, 1} - - assert cache.get_and_update(1, &{&1, &1 * 2}) == {1, 2} - assert cache.get_and_update(1, &{&1, &1 * 3}) == {2, 6} - assert cache.get_and_update(1, &{&1, nil}) == {6, 6} - assert cache.get(1) == 6 - assert cache.get_and_update(1, fn _ -> :pop end) == {6, nil} - assert cache.get_and_update(1, fn _ -> :pop end) == {nil, nil} - assert cache.get_and_update(3, &{&1, 3}) == {nil, 3} - - assert_raise ArgumentError, fn -> - cache.get_and_update(1, fn _ -> :other end) - end - end - - test "incr and update", %{cache: cache} do - assert cache.incr(:counter) == 1 - assert cache.incr(:counter) == 2 - - assert cache.get_and_update(:counter, &{&1, &1 * 2}) == {2, 4} - assert cache.incr(:counter) == 5 - - assert cache.update(:counter, 1, &(&1 * 2)) == 10 - assert cache.incr(:counter, -10) == 0 - - assert cache.put("foo", "bar") == :ok - - assert_raise ArgumentError, fn -> - cache.incr("foo") - end - end - - test "incr with ttl", %{cache: cache} do - assert cache.incr(:counter_with_ttl, 1, ttl: 1000) == 1 - assert cache.incr(:counter_with_ttl) == 2 - assert cache.get(:counter_with_ttl) == 2 - - :ok = Process.sleep(1010) - refute cache.get(:counter_with_ttl) - - assert cache.incr(:counter_with_ttl, 1, ttl: 5000) == 1 - assert cache.ttl(:counter_with_ttl) > 1000 - - assert cache.expire(:counter_with_ttl, 500) - :ok = Process.sleep(600) - refute cache.get(:counter_with_ttl) - end - - test "incr existing entry", %{cache: cache} do - assert cache.put(:counter, 0) == :ok - assert cache.incr(:counter) == 1 - assert cache.incr(:counter, 
2) == 3 - end - end - - describe "queryable:" do - test "ETS match_spec queries", %{cache: cache, name: name} do - values = cache_put(cache, 1..5, &(&1 * 2)) - _ = new_generation(cache, name) - values = values ++ cache_put(cache, 6..10, &(&1 * 2)) - - assert nil - |> cache.stream(page_size: 3, return: :value) - |> Enum.to_list() - |> :lists.usort() == values - - {_, expected} = Enum.split(values, 5) - - test_ms = - fun do - {_, _, value, _, _} when value > 10 -> value - end - - for action <- [:all, :stream] do - assert all_or_stream(cache, action, test_ms, page_size: 3, return: :value) == expected - - msg = ~r"invalid match spec" - - assert_raise Nebulex.QueryError, msg, fn -> - all_or_stream(cache, action, :invalid_query) - end - end - end - - test "expired and unexpired queries", %{cache: cache} do - for action <- [:all, :stream] do - expired = cache_put(cache, 1..5, &(&1 * 2), ttl: 1000) - unexpired = cache_put(cache, 6..10, &(&1 * 2)) - - all = expired ++ unexpired - - opts = [page_size: 3, return: :value] - - assert all_or_stream(cache, action, nil, opts) == all - assert all_or_stream(cache, action, :unexpired, opts) == all - assert all_or_stream(cache, action, :expired, opts) == [] - - :ok = Process.sleep(1100) - - assert all_or_stream(cache, action, :unexpired, opts) == unexpired - assert all_or_stream(cache, action, :expired, opts) == expired - end - end - - test "all entries", %{cache: cache} do - assert cache.put_all([a: 1, b: 2, c: 3], ttl: 5000) == :ok - - assert all = cache.all(:unexpired, return: :entry) - assert length(all) == 3 - - for %Entry{} = entry <- all do - assert Entry.ttl(entry) > 0 - end - end - - test "delete all expired and unexpired entries", %{cache: cache} do - _ = cache_put(cache, 1..5, & &1, ttl: 1500) - _ = cache_put(cache, 6..10) - - assert cache.delete_all(:expired) == 0 - assert cache.count_all(:expired) == 0 - - :ok = Process.sleep(1600) - - assert cache.delete_all(:expired) == 5 - assert cache.count_all(:expired) == 0 - assert 
cache.count_all(:unexpired) == 5 - - assert cache.delete_all(:unexpired) == 5 - assert cache.count_all(:unexpired) == 0 - assert cache.count_all() == 0 - end - - test "delete all matched entries", %{cache: cache, name: name} do - values = cache_put(cache, 1..5) - - _ = new_generation(cache, name) - - values = values ++ cache_put(cache, 6..10) - - assert cache.count_all() == 10 - - test_ms = - fun do - {_, _, value, _, _} when rem(value, 2) == 0 -> value - end - - {expected, rem} = Enum.split_with(values, &(rem(&1, 2) == 0)) - - assert cache.count_all(test_ms) == 5 - assert cache.all(test_ms) |> Enum.sort() == Enum.sort(expected) - - assert cache.delete_all(test_ms) == 5 - assert cache.count_all(test_ms) == 0 - assert cache.all() |> Enum.sort() == Enum.sort(rem) - end - - test "delete all entries with special query {:in, keys}", %{cache: cache} do - entries = for x <- 1..10, into: %{}, do: {x, x} - - :ok = cache.put_all(entries) - - assert cache.count_all() == 10 - - assert cache.delete_all({:in, [2, 4, 6, 8, 10, 12]}) == 5 - - assert cache.count_all() == 5 - assert cache.all() |> Enum.sort() == [1, 3, 5, 7, 9] - end - - test "delete all entries with special query {:in, keys} (nested tuples)", %{cache: cache} do - [ - {1, {:foo, "bar"}}, - {2, {nil, nil}}, - {3, {nil, {nil, nil}}}, - {4, {nil, {nil, nil, {nil, nil}}}}, - {5, {:a, {:b, {:c, {:d, {:e, "f"}}}}}}, - {6, {:a, :b, {:c, :d, {:e, :f, {:g, :h, {:i, :j, "k"}}}}}} - ] - |> Enum.each(fn {k, v} -> - :ok = cache.put(k, v) - - assert cache.count_all() == 1 - assert cache.delete_all({:in, [k]}) == 1 - assert cache.count_all() == 0 - end) - end - end - - describe "older generation hitted on" do - test "put/3 (key is removed from older generation)", %{cache: cache, name: name} do - :ok = cache.put("foo", "bar") - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, "foo") - assert get_from_old(cache, name, "foo") == "bar" - - :ok = cache.put("foo", "bar bar") - - assert get_from_new(cache, name, 
"foo") == "bar bar" - refute get_from_old(cache, name, "foo") - end - - test "put_new/3 (fallback to older generation)", %{cache: cache, name: name} do - assert cache.put_new("foo", "bar") == true - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, "foo") - assert get_from_old(cache, name, "foo") == "bar" - - assert cache.put_new("foo", "bar") == false - - refute get_from_new(cache, name, "foo") - assert get_from_old(cache, name, "foo") == "bar" - - _ = new_generation(cache, name) - - assert cache.put_new("foo", "bar") == true - - assert get_from_new(cache, name, "foo") == "bar" - refute get_from_old(cache, name, "foo") - end - - test "replace/3 (fallback to older generation)", %{cache: cache, name: name} do - assert cache.replace("foo", "bar bar") == false - - :ok = cache.put("foo", "bar") - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, "foo") - assert get_from_old(cache, name, "foo") == "bar" - - assert cache.replace("foo", "bar bar") == true - - assert get_from_new(cache, name, "foo") == "bar bar" - refute get_from_old(cache, name, "foo") - - _ = new_generation(cache, name) - _ = new_generation(cache, name) - - assert cache.replace("foo", "bar bar") == false - end - - test "put_all/2 (keys are removed from older generation)", %{cache: cache, name: name} do - entries = Enum.map(1..100, &{{:key, &1}, &1}) - - :ok = cache.put_all(entries) - - _ = new_generation(cache, name) - - Enum.each(entries, fn {k, v} -> - refute get_from_new(cache, name, k) - assert get_from_old(cache, name, k) == v - end) - - :ok = cache.put_all(entries) - - Enum.each(entries, fn {k, v} -> - assert get_from_new(cache, name, k) == v - refute get_from_old(cache, name, k) - end) - end - - test "put_new_all/2 (fallback to older generation)", %{cache: cache, name: name} do - entries = Enum.map(1..100, &{&1, &1}) - - assert cache.put_new_all(entries) == true - - _ = new_generation(cache, name) - - Enum.each(entries, fn {k, v} -> - refute 
get_from_new(cache, name, k) - assert get_from_old(cache, name, k) == v - end) - - assert cache.put_new_all(entries) == false - - Enum.each(entries, fn {k, v} -> - refute get_from_new(cache, name, k) - assert get_from_old(cache, name, k) == v - end) - - _ = new_generation(cache, name) - - assert cache.put_new_all(entries) == true - - Enum.each(entries, fn {k, v} -> - assert get_from_new(cache, name, k) == v - refute get_from_old(cache, name, k) - end) - end - - test "expire/3 (fallback to older generation)", %{cache: cache, name: name} do - assert cache.put("foo", "bar") == :ok - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, "foo") - assert get_from_old(cache, name, "foo") == "bar" - - assert cache.expire("foo", 200) == true - - assert get_from_new(cache, name, "foo") == "bar" - refute get_from_old(cache, name, "foo") - - :ok = Process.sleep(210) - - refute cache.get("foo") - end - - test "incr/3 (fallback to older generation)", %{cache: cache, name: name} do - assert cache.put(:counter, 0, ttl: 200) == :ok - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, :counter) - assert get_from_old(cache, name, :counter) == 0 - - assert cache.incr(:counter) == 1 - assert cache.incr(:counter) == 2 - - assert get_from_new(cache, name, :counter) == 2 - refute get_from_old(cache, name, :counter) - - :ok = Process.sleep(210) - - assert cache.incr(:counter) == 1 - end - - test "all/2 (no duplicates)", %{cache: cache, name: name} do - entries = for x <- 1..20, into: %{}, do: {x, x} - keys = Map.keys(entries) |> Enum.sort() - - :ok = cache.put_all(entries) - - assert cache.count_all() == 20 - assert cache.all() |> Enum.sort() == keys - - _ = new_generation(cache, name) - - :ok = cache.put_all(entries) - - assert cache.count_all() == 20 - assert cache.all() |> Enum.sort() == keys - - _ = new_generation(cache, name) - - more_entries = for x <- 10..30, into: %{}, do: {x, x} - more_keys = Map.keys(more_entries) |> Enum.sort() - - :ok = 
cache.put_all(more_entries) - - assert cache.count_all() == 30 - assert cache.all() |> Enum.sort() == (keys ++ more_keys) |> Enum.uniq() - - _ = new_generation(cache, name) - - assert cache.count_all() == 21 - assert cache.all() |> Enum.sort() == more_keys - end - end - - describe "generation" do - test "created with unexpired entries", %{cache: cache, name: name} do - assert cache.put("foo", "bar") == :ok - assert cache.get("foo") == "bar" - assert cache.ttl("foo") == :infinity - - _ = new_generation(cache, name) - - assert cache.get("foo") == "bar" - end - - test "lifecycle", %{cache: cache, name: name} do - # should be empty - refute cache.get(1) - - # set some entries - for x <- 1..2, do: cache.put(x, x) - - # fetch one entry from new generation - assert cache.get(1) == 1 - - # fetch non-existent entries - refute cache.get(3) - refute cache.get(:non_existent) - - # create a new generation - _ = new_generation(cache, name) - - # both entries should be in the old generation - refute get_from_new(cache, name, 1) - refute get_from_new(cache, name, 2) - assert get_from_old(cache, name, 1) == 1 - assert get_from_old(cache, name, 2) == 2 - - # fetch entry 1 and put it into the new generation - assert cache.get(1) == 1 - assert get_from_new(cache, name, 1) == 1 - refute get_from_new(cache, name, 2) - refute get_from_old(cache, name, 1) - assert get_from_old(cache, name, 2) == 2 - - # create a new generation, the old generation should be deleted - _ = new_generation(cache, name) - - # entry 1 should be into the old generation and entry 2 deleted - refute get_from_new(cache, name, 1) - refute get_from_new(cache, name, 2) - assert get_from_old(cache, name, 1) == 1 - refute get_from_old(cache, name, 2) - end - - test "creation with ttl", %{cache: cache, name: name} do - assert cache.put(1, 1, ttl: 1000) == :ok - assert cache.get(1) == 1 - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, 1) - assert get_from_old(cache, name, 1) == 1 - assert 
cache.get(1) == 1 - - :ok = Process.sleep(1100) - - refute cache.get(1) - refute get_from_new(cache, name, 1) - refute get_from_old(cache, name, 1) - end - end - - ## Helpers - - defp new_generation(cache, name) do - cache.with_dynamic_cache(name, fn -> - cache.new_generation() - end) - end - - defp get_from_new(cache, name, key) do - cache.with_dynamic_cache(name, fn -> - get_from(cache.newer_generation(), name, key) - end) - end - - defp get_from_old(cache, name, key) do - cache.with_dynamic_cache(name, fn -> - cache.generations() - |> List.last() - |> get_from(name, key) - end) - end - - defp get_from(gen, name, key) do - Adapter.with_meta(name, fn _, %{backend: backend} -> - case backend.lookup(gen, key) do - [] -> nil - [{_, ^key, val, _, _}] -> val - end - end) - end - - defp all_or_stream(cache, action, ms, opts \\ []) - - defp all_or_stream(cache, :all, ms, opts) do - ms - |> cache.all(opts) - |> handle_query_result() - end - - defp all_or_stream(cache, :stream, ms, opts) do - ms - |> cache.stream(opts) - |> handle_query_result() - end - - defp handle_query_result(list) when is_list(list) do - :lists.usort(list) - end - - defp handle_query_result(stream) do - stream - |> Enum.to_list() - |> :lists.usort() - end - end -end diff --git a/test/shared/multilevel_test.exs b/test/shared/multilevel_test.exs deleted file mode 100644 index a5998b5b..00000000 --- a/test/shared/multilevel_test.exs +++ /dev/null @@ -1,286 +0,0 @@ -defmodule Nebulex.MultilevelTest do - import Nebulex.CacheCase - - deftests do - describe "c:init/1" do - test "fails because missing levels config", %{cache: cache} do - _ = Process.flag(:trap_exit, true) - - assert {:error, {%ArgumentError{message: msg}, _}} = cache.start_link(name: :missing_levels) - - assert Regex.match?( - ~r"expected levels: to be a list with at least one level definition", - msg - ) - end - end - - describe "entry:" do - test "put/3", %{cache: cache} do - assert cache.put(1, 1) == :ok - assert cache.get(1, level: 1) == 
1 - assert cache.get(1, level: 2) == 1 - assert cache.get(1, level: 3) == 1 - - assert cache.put(2, 2, level: 2) == :ok - assert cache.get(2, level: 2) == 2 - refute cache.get(2, level: 1) - refute cache.get(2, level: 3) - - assert cache.put("foo", nil) == :ok - refute cache.get("foo") - end - - test "put_new/3", %{cache: cache} do - assert cache.put_new(1, 1) - refute cache.put_new(1, 2) - assert cache.get(1, level: 1) == 1 - assert cache.get(1, level: 2) == 1 - assert cache.get(1, level: 3) == 1 - - assert cache.put_new(2, 2, level: 2) - assert cache.get(2, level: 2) == 2 - refute cache.get(2, level: 1) - refute cache.get(2, level: 3) - - assert cache.put_new("foo", nil) - refute cache.get("foo") - end - - test "put_all/2", %{cache: cache} do - assert cache.put_all( - for x <- 1..3 do - {x, x} - end, - ttl: 1000 - ) == :ok - - for x <- 1..3, do: assert(cache.get(x) == x) - :ok = Process.sleep(1100) - for x <- 1..3, do: refute(cache.get(x)) - - assert cache.put_all(%{"apples" => 1, "bananas" => 3}) == :ok - assert cache.put_all(blueberries: 2, strawberries: 5) == :ok - assert cache.get("apples") == 1 - assert cache.get("bananas") == 3 - assert cache.get(:blueberries) == 2 - assert cache.get(:strawberries) == 5 - - assert cache.put_all([]) == :ok - assert cache.put_all(%{}) == :ok - - refute cache.put_new_all(%{"apples" => 100}) - assert cache.get("apples") == 1 - end - - test "get_all/2", %{cache: cache} do - assert cache.put_all(a: 1, c: 3) == :ok - assert cache.get_all([:a, :b, :c]) == %{a: 1, c: 3} - end - - test "delete/2", %{cache: cache} do - assert cache.put(1, 1) - assert cache.put(2, 2, level: 2) - - assert cache.delete(1) == :ok - refute cache.get(1, level: 1) - refute cache.get(1, level: 2) - refute cache.get(1, level: 3) - - assert cache.delete(2, level: 2) == :ok - refute cache.get(2, level: 1) - refute cache.get(2, level: 2) - refute cache.get(2, level: 3) - end - - test "take/2", %{cache: cache} do - assert cache.put(1, 1) == :ok - assert 
cache.put(2, 2, level: 2) == :ok - assert cache.put(3, 3, level: 3) == :ok - - assert cache.take(1) == 1 - assert cache.take(2) == 2 - assert cache.take(3) == 3 - - refute cache.get(1, level: 1) - refute cache.get(1, level: 2) - refute cache.get(1, level: 3) - refute cache.get(2, level: 2) - refute cache.get(3, level: 3) - end - - test "has_key?/1", %{cache: cache} do - assert cache.put(1, 1) == :ok - assert cache.put(2, 2, level: 2) == :ok - assert cache.put(3, 3, level: 3) == :ok - - assert cache.has_key?(1) - assert cache.has_key?(2) - assert cache.has_key?(3) - refute cache.has_key?(4) - end - - test "ttl/1", %{cache: cache} do - assert cache.put(:a, 1, ttl: 1000) == :ok - assert cache.ttl(:a) > 0 - assert cache.put(:b, 2) == :ok - - :ok = Process.sleep(10) - assert cache.ttl(:a) > 0 - assert cache.ttl(:b) == :infinity - refute cache.ttl(:c) - - :ok = Process.sleep(1100) - refute cache.ttl(:a) - end - - test "expire/2", %{cache: cache} do - assert cache.put(:a, 1) == :ok - assert cache.ttl(:a) == :infinity - - assert cache.expire(:a, 1000) - ttl = cache.ttl(:a) - assert ttl > 0 and ttl <= 1000 - - assert cache.get(:a, level: 1) == 1 - assert cache.get(:a, level: 2) == 1 - assert cache.get(:a, level: 3) == 1 - - :ok = Process.sleep(1100) - refute cache.get(:a) - refute cache.get(:a, level: 1) - refute cache.get(:a, level: 2) - refute cache.get(:a, level: 3) - end - - test "touch/1", %{cache: cache} do - assert cache.put(:touch, 1, ttl: 1000, level: 2) == :ok - - :ok = Process.sleep(10) - assert cache.touch(:touch) - - :ok = Process.sleep(200) - assert cache.touch(:touch) - assert cache.get(:touch) == 1 - - :ok = Process.sleep(1100) - refute cache.get(:touch) - - refute cache.touch(:non_existent) - end - - test "get_and_update/3", %{cache: cache} do - assert cache.put(1, 1, level: 1) == :ok - assert cache.put(2, 2) == :ok - - assert cache.get_and_update(1, &{&1, &1 * 2}, level: 1) == {1, 2} - assert cache.get(1, level: 1) == 2 - refute cache.get(1, level: 3) - 
refute cache.get(1, level: 3) - - assert cache.get_and_update(2, &{&1, &1 * 2}) == {2, 4} - assert cache.get(2, level: 1) == 4 - assert cache.get(2, level: 2) == 4 - assert cache.get(2, level: 3) == 4 - - assert cache.get_and_update(1, fn _ -> :pop end, level: 1) == {2, nil} - refute cache.get(1, level: 1) - - assert cache.get_and_update(2, fn _ -> :pop end) == {4, nil} - refute cache.get(2, level: 1) - refute cache.get(2, level: 2) - refute cache.get(2, level: 3) - end - - test "update/4", %{cache: cache} do - assert cache.put(1, 1, level: 1) == :ok - assert cache.put(2, 2) == :ok - - assert cache.update(1, 1, &(&1 * 2), level: 1) == 2 - assert cache.get(1, level: 1) == 2 - refute cache.get(1, level: 2) - refute cache.get(1, level: 3) - - assert cache.update(2, 1, &(&1 * 2)) == 4 - assert cache.get(2, level: 1) == 4 - assert cache.get(2, level: 2) == 4 - assert cache.get(2, level: 3) == 4 - end - - test "incr/3", %{cache: cache} do - assert cache.incr(1) == 1 - assert cache.get(1, level: 1) == 1 - assert cache.get(1, level: 2) == 1 - assert cache.get(1, level: 3) == 1 - - assert cache.incr(2, 2, level: 2) == 2 - assert cache.get(2, level: 2) == 2 - refute cache.get(2, level: 1) - refute cache.get(2, level: 3) - - assert cache.incr(3, 3) == 3 - assert cache.get(3, level: 1) == 3 - assert cache.get(3, level: 2) == 3 - assert cache.get(3, level: 3) == 3 - - assert cache.incr(4, 5) == 5 - assert cache.incr(4, -5) == 0 - assert cache.get(4, level: 1) == 0 - assert cache.get(4, level: 2) == 0 - assert cache.get(4, level: 3) == 0 - end - end - - describe "queryable:" do - test "all/2 and stream/2", %{cache: cache} do - for x <- 1..30, do: cache.put(x, x, level: 1) - for x <- 20..60, do: cache.put(x, x, level: 2) - for x <- 50..100, do: cache.put(x, x, level: 3) - - expected = :lists.usort(for x <- 1..100, do: x) - assert :lists.usort(cache.all()) == expected - - stream = cache.stream() - - assert stream - |> Enum.to_list() - |> :lists.usort() == expected - - del = - for 
x <- 20..60 do - assert cache.delete(x) == :ok - x - end - - expected = :lists.usort(expected -- del) - assert :lists.usort(cache.all()) == expected - end - - test "delete_all/2", %{cache: cache} do - for x <- 1..30, do: cache.put(x, x, level: 1) - for x <- 21..60, do: cache.put(x, x, level: 2) - for x <- 51..100, do: cache.put(x, x, level: 3) - - assert count = cache.count_all() - assert cache.delete_all() == count - assert cache.all() == [] - end - - test "count_all/2", %{cache: cache} do - assert cache.count_all() == 0 - for x <- 1..10, do: cache.put(x, x, level: 1) - for x <- 11..20, do: cache.put(x, x, level: 2) - for x <- 21..30, do: cache.put(x, x, level: 3) - assert cache.count_all() == 30 - - for x <- [1, 11, 21], do: cache.delete(x, level: 1) - assert cache.count_all() == 29 - - assert cache.delete(1, level: 1) == :ok - assert cache.delete(11, level: 2) == :ok - assert cache.delete(21, level: 3) == :ok - assert cache.count_all() == 27 - end - end - end -end diff --git a/test/support/cache_case.ex b/test/support/cache_case.exs similarity index 89% rename from test/support/cache_case.ex rename to test/support/cache_case.exs index 3455069d..59e7796e 100644 --- a/test/support/cache_case.ex +++ b/test/support/cache_case.exs @@ -1,6 +1,8 @@ defmodule Nebulex.CacheCase do @moduledoc false + use ExUnit.CaseTemplate + alias Nebulex.Telemetry @doc false @@ -44,6 +46,7 @@ defmodule Nebulex.CacheCase do on_exit(fn -> try do :ok = Process.sleep(20) + if Process.alive?(pid), do: Supervisor.stop(pid, :normal, 5000) catch :exit, _ -> :noop @@ -65,11 +68,13 @@ defmodule Nebulex.CacheCase do default_dynamic_cache = cache.get_dynamic_cache() {:ok, pid} = cache.start_link([name: name] ++ opts) + _ = cache.put_dynamic_cache(name) on_exit(fn -> try do :ok = Process.sleep(20) + if Process.alive?(pid), do: Supervisor.stop(pid, :normal, 5000) catch :exit, _ -> :noop @@ -86,13 +91,16 @@ defmodule Nebulex.CacheCase do @doc false def test_with_dynamic_cache(cache, opts \\ [], 
callback) do default_dynamic_cache = cache.get_dynamic_cache() + {:ok, pid} = cache.start_link(opts) try do _ = cache.put_dynamic_cache(pid) + callback.() after _ = cache.put_dynamic_cache(default_dynamic_cache) + Supervisor.stop(pid) end end @@ -107,6 +115,7 @@ defmodule Nebulex.CacheCase do rescue _ -> :ok = Process.sleep(delay) + wait_until(retries - 1, delay, fun) end @@ -114,7 +123,9 @@ defmodule Nebulex.CacheCase do def cache_put(cache, lst, fun \\ & &1, opts \\ []) do for key <- lst do value = fun.(key) + :ok = cache.put(key, value, opts) + value end end @@ -138,4 +149,18 @@ defmodule Nebulex.CacheCase do def handle_event(event, measurements, metadata, %{pid: pid}) do send(pid, {event, measurements, metadata}) end + + @doc false + def assert_error_module(ctx, error_module) do + fun = Map.get(ctx, :error_module, fn m -> assert m == Nebulex.Error end) + + fun.(error_module) + end + + @doc false + def assert_error_reason(ctx, error_reason) do + fun = Map.get(ctx, :error_reason, fn r -> assert r == :error end) + + fun.(error_reason) + end end diff --git a/test/support/cluster.ex b/test/support/cluster.ex deleted file mode 100644 index a8125f96..00000000 --- a/test/support/cluster.ex +++ /dev/null @@ -1,88 +0,0 @@ -defmodule Nebulex.Cluster do - @moduledoc """ - Taken from `Phoenix.PubSub.Cluster`. 
- Copyright (c) 2014 Chris McCord - """ - - def spawn(nodes) do - # Turn node into a distributed node with the given long name - _ = :net_kernel.start([:"primary@127.0.0.1"]) - - # Allow spawned nodes to fetch all code from this node - _ = :erl_boot_server.start([]) - _ = allow_boot(to_charlist("127.0.0.1")) - - nodes - |> Enum.map(&Task.async(fn -> spawn_node(&1) end)) - |> Enum.map(&Task.await(&1, 30_000)) - end - - def spawn_node(node_host) do - {:ok, node} = start_peer(node_host) - - _ = add_code_paths(node) - _ = transfer_configuration(node) - _ = ensure_applications_started(node) - - {:ok, node} - end - - if Code.ensure_loaded?(:peer) do - defp start_peer(node_host) do - {:ok, _pid, node} = - :peer.start(%{ - name: node_name(node_host), - host: to_charlist("127.0.0.1"), - args: [inet_loader_args()] - }) - - {:ok, node} - end - else - defp start_peer(node_host) do - :slave.start(to_charlist("127.0.0.1"), node_name(node_host), inet_loader_args()) - end - end - - defp rpc(node, module, function, args) do - :rpc.block_call(node, module, function, args) - end - - defp inet_loader_args do - to_charlist("-loader inet -hosts 127.0.0.1 -setcookie #{:erlang.get_cookie()}") - end - - defp allow_boot(host) do - {:ok, ipv4} = :inet.parse_ipv4_address(host) - :erl_boot_server.add_slave(ipv4) - end - - defp add_code_paths(node) do - rpc(node, :code, :add_paths, [:code.get_path()]) - end - - defp transfer_configuration(node) do - for {app_name, _, _} <- Application.loaded_applications() do - for {key, val} <- Application.get_all_env(app_name) do - rpc(node, Application, :put_env, [app_name, key, val]) - end - end - end - - defp ensure_applications_started(node) do - rpc(node, Application, :ensure_all_started, [:mix]) - rpc(node, Mix, :env, [Mix.env()]) - - for {app_name, _, _} <- Application.loaded_applications(), app_name not in [:dialyxir] do - rpc(node, Application, :ensure_all_started, [app_name]) - end - end - - defp node_name(node_host) do - node_host - |> to_string() 
- |> String.split("@") - |> Enum.at(0) - |> String.to_atom() - end -end diff --git a/test/support/fake_adapter.exs b/test/support/fake_adapter.exs new file mode 100644 index 00000000..9d72caaf --- /dev/null +++ b/test/support/fake_adapter.exs @@ -0,0 +1,86 @@ +defmodule Nebulex.FakeAdapter do + @moduledoc false + + ## Nebulex.Adapter + + @doc false + defmacro __before_compile__(_), do: :ok + + @doc false + def init(_opts) do + child_spec = Supervisor.child_spec({Agent, fn -> :ok end}, id: Agent) + + {:ok, child_spec, %{}} + end + + ## Nebulex.Adapter.KV + + @doc false + def fetch(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def put(_, :error, :timeout, _, _, _) do + {:error, %Nebulex.Error{reason: :timeout}} + end + + def put(_, :error, reason, _, _, _) do + {:error, reason} + end + + def put(_, _, _, _, _, _) do + {:error, %Nebulex.Error{reason: :error}} + end + + @doc false + def delete(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def take(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def has_key?(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def ttl(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def expire(_, _, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def touch(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def update_counter(_, _, _, _, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def put_all(_, _, _, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + ## Nebulex.Adapter.Queryable + + @doc false + def execute(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def stream(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + ## Nebulex.Adapter.Persistence + + @doc false + def dump(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def load(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + ## 
Nebulex.Adapter.Info + + @doc false + def info(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + ## Nebulex.Adapter.Transaction + + @doc false + def transaction(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def in_transaction?(_, _), do: {:error, %Nebulex.Error{reason: :error}} +end diff --git a/test/support/node_case.ex b/test/support/node_case.ex deleted file mode 100644 index 179cc4a5..00000000 --- a/test/support/node_case.ex +++ /dev/null @@ -1,43 +0,0 @@ -defmodule Nebulex.NodeCase do - @moduledoc """ - Based on `Phoenix.PubSub.NodeCase`. - Copyright (c) 2014 Chris McCord - """ - - @timeout 5000 - - defmacro __using__(_opts) do - quote do - use ExUnit.Case, async: true - import unquote(__MODULE__) - @moduletag :clustered - - @timeout unquote(@timeout) - end - end - - def start_caches(nodes, caches) do - for node <- nodes, {cache, opts} <- caches do - {:ok, pid} = start_cache(node, cache, opts) - {node, cache, pid} - end - end - - def start_cache(node, cache, opts \\ []) do - rpc(node, cache, :start_link, [opts]) - end - - def stop_caches(node_pid_list) do - Enum.each(node_pid_list, fn {node, _cache, pid} -> - stop_cache(node, pid) - end) - end - - def stop_cache(node, pid) do - rpc(node, Supervisor, :stop, [pid, :normal, @timeout]) - end - - def rpc(node, module, function, args) do - :rpc.block_call(node, module, function, args) - end -end diff --git a/test/support/test_adapter.exs b/test/support/test_adapter.exs new file mode 100644 index 00000000..2ad92b5f --- /dev/null +++ b/test/support/test_adapter.exs @@ -0,0 +1,611 @@ +defmodule Nebulex.TestAdapter do + @moduledoc """ + Adapter for testing purposes. 
+ """ + + defmodule Entry do + @moduledoc false + + defstruct value: nil, touched: nil, exp: nil + + alias Nebulex.Time + + @doc false + def new(value, ttl \\ :infinity, touched \\ Time.now()) do + %__MODULE__{ + value: value, + touched: touched, + exp: exp(ttl) + } + end + + @doc false + def exp(now \\ Time.now(), ttl) + + def exp(_now, :infinity), do: :infinity + def exp(now, ttl), do: now + ttl + end + + # Provide Cache Implementation + @behaviour Nebulex.Adapter + @behaviour Nebulex.Adapter.KV + @behaviour Nebulex.Adapter.Queryable + @behaviour Nebulex.Adapter.Persistence + + # Inherit default transaction implementation + use Nebulex.Adapter.Transaction + + # Inherit default info implementation + use Nebulex.Adapters.Common.Info + + import Nebulex.Utils + + alias Nebulex.Adapters.Common.Info.Stats + alias __MODULE__.{Entry, KV} + alias Nebulex.Time + + ## Nebulex.Adapter + + @impl true + defmacro __before_compile__(_env), do: :ok + + @impl true + def init(opts) do + # Required options + telemetry = Keyword.fetch!(opts, :telemetry) + telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) + + # Init stats_counter + stats_counter = + if Keyword.get(opts, :stats, true) == true do + Stats.init(telemetry_prefix) + end + + # Adapter meta + metadata = %{ + telemetry: telemetry, + telemetry_prefix: telemetry_prefix, + stats_counter: stats_counter, + started_at: DateTime.utc_now() + } + + # KV server + child_spec = Supervisor.child_spec({KV, [adapter_meta: metadata] ++ opts}, id: KV) + + {:ok, child_spec, metadata} + end + + ## Nebulex.Adapter.KV + + @impl true + def fetch(adapter_meta, key, _opts) do + with {:ok, %Entry{value: value}} <- do_fetch(adapter_meta, key) do + {:ok, value} + end + end + + defp do_fetch(_adapter_meta, {:eval, fun}) do + fun.() + end + + defp do_fetch(adapter_meta, key) do + adapter_meta.pid + |> GenServer.call({:fetch, key}) + |> validate_ttl(key, adapter_meta) + end + + defp validate_ttl({:ok, %Entry{exp: :infinity} = entry}, _key, 
_adapter_meta) do + {:ok, entry} + end + + defp validate_ttl( + {:ok, %Entry{exp: exp} = entry}, + key, + adapter_meta + ) + when is_integer(exp) do + if Time.now() >= exp do + :ok = delete(adapter_meta, key, []) + + wrap_error Nebulex.KeyError, key: key, reason: :expired + else + {:ok, entry} + end + end + + defp validate_ttl(:error, key, _) do + wrap_error Nebulex.KeyError, key: key, reason: :not_found + end + + @impl true + def put(adapter_meta, key, value, ttl, on_write, _opts) do + do_put(adapter_meta.pid, key, Entry.new(value, ttl), on_write) + end + + defp do_put(pid, key, entry, :put) do + GenServer.call(pid, {:put, key, entry}) + end + + defp do_put(pid, key, entry, :put_new) do + GenServer.call(pid, {:put_new, key, entry}) + end + + defp do_put(pid, key, entry, :replace) do + GenServer.call(pid, {:replace, key, entry}) + end + + @impl true + def put_all(adapter_meta, entries, ttl, on_write, _opts) do + entries = for {key, value} <- entries, into: %{}, do: {key, Entry.new(value, ttl)} + + do_put_all(adapter_meta.pid, entries, on_write) + end + + defp do_put_all(pid, entries, :put) do + GenServer.call(pid, {:put_all, entries}) + end + + defp do_put_all(pid, entries, :put_new) do + GenServer.call(pid, {:put_new_all, entries}) + end + + @impl true + def delete(adapter_meta, key, _opts) do + GenServer.call(adapter_meta.pid, {:delete, key}) + end + + @impl true + def take(adapter_meta, key, _opts) do + with {:ok, %Entry{value: value}} <- + adapter_meta.pid + |> GenServer.call({:pop, key}) + |> validate_ttl(key, adapter_meta) do + {:ok, value} + end + end + + @impl true + def update_counter(adapter_meta, key, amount, ttl, default, _opts) do + _ = do_fetch(adapter_meta, key) + + GenServer.call( + adapter_meta.pid, + {:update_counter, key, amount, Entry.new(default + amount, ttl)} + ) + end + + @impl true + def has_key?(adapter_meta, key, _opts) do + case fetch(%{adapter_meta | telemetry: false}, key, []) do + {:ok, _} -> {:ok, true} + {:error, _} -> {:ok, false} 
+ end + end + + @impl true + def ttl(adapter_meta, key, _opts) do + with {:ok, entry} <- do_fetch(adapter_meta, key) do + {:ok, entry_ttl(entry)} + end + end + + @impl true + def expire(adapter_meta, key, ttl, _opts) do + GenServer.call(adapter_meta.pid, {:expire, key, ttl}) + end + + @impl true + def touch(adapter_meta, key, _opts) do + GenServer.call(adapter_meta.pid, {:touch, key}) + end + + ## Nebulex.Adapter.Queryable + + @impl true + def execute(adapter_meta, query_meta, opts) + + def execute(_adapter_meta, %{op: :get_all, query: {:in, []}}, _opts) do + {:ok, []} + end + + def execute(_adapter_meta, %{op: op, query: {:in, []}}, _opts) + when op in [:count_all, :delete_all] do + {:ok, 0} + end + + def execute(%{pid: pid}, query_meta, _opts) do + with :ok <- assert_query(query_meta.query) do + GenServer.call(pid, {:q, query_meta}) + end + end + + @impl true + def stream(%{pid: pid}, query_meta, opts) do + max_entries = Keyword.fetch!(opts, :max_entries) + + with :ok <- assert_query(query_meta.query) do + GenServer.call(pid, {:q, query_meta, max_entries}) + end + end + + defp assert_query({:q, nil}) do + :ok + end + + defp assert_query({:in, keys}) when is_list(keys) do + :ok + end + + defp assert_query({:q, q}) do + raise Nebulex.QueryError, query: q + end + + ## Nebulex.Adapter.Persistence + + @impl true + def dump(%{cache: cache}, path, opts) do + with_file(cache, path, [:write], fn io_dev -> + with {:ok, stream} <- cache.stream() do + stream + |> Stream.chunk_every(Keyword.get(opts, :entries_per_line, 10)) + |> Enum.each(fn chunk -> + bin = + chunk + |> :erlang.term_to_binary(get_compression(opts)) + |> Base.encode64() + + :ok = IO.puts(io_dev, bin) + end) + end + end) + end + + @impl true + def load(%{cache: cache}, path, opts) do + with_file(cache, path, [:read], fn io_dev -> + io_dev + |> IO.stream(:line) + |> Stream.map(&String.trim/1) + |> Enum.each(fn line -> + entries = + line + |> Base.decode64!() + |> :erlang.binary_to_term([:safe]) + + 
cache.put_all(entries, opts) + end) + end) + end + + # sobelow_skip ["Traversal.FileModule"] + defp with_file(cache, path, modes, function) do + case File.open(path, modes) do + {:ok, io_device} -> + try do + function.(io_device) + after + :ok = File.close(io_device) + end + + {:error, reason} -> + reason = %File.Error{reason: reason, action: "open", path: path} + + wrap_error Nebulex.Error, reason: reason, cache: cache + end + end + + defp get_compression(opts) do + case Keyword.get(opts, :compression) do + value when is_integer(value) and value >= 0 and value < 10 -> + [compressed: value] + + _ -> + [:compressed] + end + end + + ## Nebulex.Adapter.Info + + @impl true + def info(adapter_meta, spec, opts) do + cond do + spec == :all -> + with {:ok, info} <- GenServer.call(adapter_meta.pid, {:info, [:memory]}), + {:ok, base_info} <- super(adapter_meta, :all, opts) do + {:ok, Map.merge(base_info, info)} + end + + spec == :memory -> + GenServer.call(adapter_meta.pid, {:info, spec}) + + is_list(spec) and Enum.member?(spec, :memory) -> + with {:ok, info} <- GenServer.call(adapter_meta.pid, {:info, [:memory]}), + spec = Enum.reject(spec, &(&1 == :memory)), + {:ok, base_info} <- super(adapter_meta, spec, opts) do + {:ok, Map.merge(base_info, info)} + end + + true -> + super(adapter_meta, spec, opts) + end + end + + ## Helpers + + defp entry_ttl(%Entry{exp: :infinity}), do: :infinity + defp entry_ttl(%Entry{exp: exp}), do: exp - Time.now() +end + +defmodule Nebulex.TestAdapter.KV do + @moduledoc false + + use GenServer + + import Nebulex.Utils, only: [wrap_error: 2] + + alias Nebulex.TestAdapter.Entry + alias Nebulex.Time + + ## Internals + + # Internal state + defstruct map: nil, adapter_meta: nil + + ## API + + @spec start_link(keyword) :: GenServer.on_start() + def start_link(opts) do + GenServer.start_link(__MODULE__, opts) + end + + ## GenServer callbacks + + @impl true + def init(opts) do + {:ok, %__MODULE__{map: %{}, adapter_meta: Keyword.fetch!(opts, 
:adapter_meta)}} + end + + @impl true + def handle_call(request, from, state) + + def handle_call({:fetch, key}, _from, %__MODULE__{map: map} = state) do + {:reply, Map.fetch(map, key), state} + end + + def handle_call({:put, key, value}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, true}, %{state | map: Map.put(map, key, value)}} + end + + def handle_call({:put_new, key, value}, _from, %__MODULE__{map: map} = state) do + case Map.has_key?(map, key) do + true -> + {:reply, {:ok, false}, state} + + false -> + {:reply, {:ok, true}, %{state | map: Map.put_new(map, key, value)}} + end + end + + def handle_call({:replace, key, value}, _from, %__MODULE__{map: map} = state) do + case Map.has_key?(map, key) do + true -> + {:reply, {:ok, true}, %{state | map: Map.replace(map, key, value)}} + + false -> + {:reply, {:ok, false}, state} + end + end + + def handle_call({:put_all, entries}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, true}, %{state | map: Map.merge(map, entries)}} + end + + def handle_call({:put_new_all, entries}, _from, %__MODULE__{map: map} = state) do + case Enum.any?(map, fn {k, _} -> Map.has_key?(entries, k) end) do + true -> + {:reply, {:ok, false}, state} + + false -> + {:reply, {:ok, true}, %{state | map: Map.merge(map, entries)}} + end + end + + def handle_call({:delete, key}, _from, %__MODULE__{map: map} = state) do + {:reply, :ok, %{state | map: Map.delete(map, key)}} + end + + def handle_call({:pop, key}, _from, %__MODULE__{map: map} = state) do + ref = make_ref() + + case Map.pop(map, key, ref) do + {^ref, _map} -> + {:reply, :error, state} + + {value, map} -> + {:reply, {:ok, value}, %{state | map: map}} + end + end + + def handle_call({:update_counter, key, amount, default}, _from, %__MODULE__{map: map} = state) do + case Map.fetch(map, key) do + {:ok, %{value: value}} when not is_integer(value) -> + error = wrap_error Nebulex.Error, reason: :badarith, cache: nil + + {:reply, error, map} + + _other -> + map = 
Map.update(map, key, default, &%{&1 | value: &1.value + amount}) + counter = Map.fetch!(map, key) + + {:reply, {:ok, counter.value}, %{state | map: map}} + end + end + + def handle_call({:expire, key, ttl}, _from, %__MODULE__{map: map} = state) do + case Map.has_key?(map, key) do + true -> + {:reply, {:ok, true}, %{state | map: Map.update!(map, key, &%{&1 | exp: Entry.exp(ttl)})}} + + false -> + {:reply, {:ok, false}, state} + end + end + + def handle_call({:touch, key}, _from, %__MODULE__{map: map} = state) do + case Map.has_key?(map, key) do + true -> + {:reply, {:ok, true}, %{state | map: Map.update!(map, key, &%{&1 | touched: Time.now()})}} + + false -> + {:reply, {:ok, false}, state} + end + end + + def handle_call( + {:q, %{op: :get_all, query: {:q, nil}, select: select}}, + _from, + %__MODULE__{map: map} = state + ) do + map = + map + |> filter_unexpired() + |> return(Enum, select) + + {:reply, {:ok, map}, state} + end + + def handle_call( + {:q, %{op: :get_all, query: {:in, keys}, select: select}}, + _from, + %__MODULE__{map: map} = state + ) do + map = + map + |> Map.take(keys) + |> filter_unexpired() + |> return(Enum, select) + + {:reply, {:ok, map}, state} + end + + def handle_call( + {:q, %{op: :count_all, query: {:q, nil}}}, + _from, + %__MODULE__{map: map} = state + ) do + {:reply, {:ok, map_size(map)}, state} + end + + def handle_call( + {:q, %{op: :count_all, query: {:in, keys}}}, + _from, + %__MODULE__{map: map} = state + ) do + count = map |> Map.take(keys) |> map_size() + + {:reply, {:ok, count}, state} + end + + def handle_call( + {:q, %{op: :delete_all, query: {:q, nil}}}, + _from, + %__MODULE__{map: map} = state + ) do + {:reply, {:ok, map_size(map)}, %{state | map: %{}}} + end + + def handle_call( + {:q, %{op: :delete_all, query: {:in, keys}}}, + _from, + %__MODULE__{map: map} = state + ) do + total_count = map_size(map) + map = Map.drop(map, keys) + + {:reply, {:ok, total_count - map_size(map)}, %{state | map: map}} + end + + def 
handle_call( + {:q, %{op: :stream, query: {:q, nil}, select: select}, max_entries}, + _from, + %__MODULE__{map: map} = state + ) do + {:reply, {:ok, stream_unexpired(map, max_entries, select)}, state} + end + + def handle_call( + {:q, %{op: :stream, query: {:in, keys}, select: select}, max_entries}, + _from, + %__MODULE__{map: map} = state + ) do + now = Time.now() + + stream = + map + |> Stream.filter(fn {k, %Entry{exp: exp}} -> + Enum.member?(keys, k) and exp > now + end) + |> return(Stream, select) + |> Stream.chunk_every(max_entries) + + {:reply, {:ok, stream}, state} + end + + def handle_call({:info, :memory}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, memory(map)}, state} + end + + def handle_call({:info, [_ | _]}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, %{memory: memory(map)}}, state} + end + + ## Private Functions + + defp stream_unexpired(map, max_entries, select) do + now = Time.now() + + map + |> Stream.filter(fn {_, %Entry{exp: exp}} -> exp > now end) + |> return(Stream, select) + |> Stream.chunk_every(max_entries) + end + + defp filter_unexpired(enum) do + now = Time.now() + + for {k, %Entry{exp: exp} = e} <- enum, exp > now, into: %{} do + {k, e} + end + end + + defp return(map, module, select) do + case select do + :key -> + module.map(map, fn {k, _e} -> k end) + + :value -> + module.map(map, fn {_k, e} -> e.value end) + + {:key, :value} -> + module.map(map, fn {k, e} -> {k, e.value} end) + end + end + + defp memory(map) when map_size(map) == 0 do + %{ + # Fixed + total: 1_000_000, + # Empty + used: 0 + } + end + + defp memory(map) do + %{ + # Fixed + total: 1_000_000, + # Fake size + used: map |> :erlang.term_to_binary() |> byte_size() + } + end +end diff --git a/test/support/test_cache.ex b/test/support/test_cache.ex deleted file mode 100644 index d0761c80..00000000 --- a/test/support/test_cache.ex +++ /dev/null @@ -1,216 +0,0 @@ -defmodule Nebulex.TestCache do - @moduledoc false - - defmodule Common do - @moduledoc 
false - - defmacro __using__(_opts) do - quote do - def get_and_update_fun(nil), do: {nil, 1} - def get_and_update_fun(current) when is_integer(current), do: {current, current * 2} - - def get_and_update_bad_fun(_), do: :other - end - end - end - - defmodule TestHook do - @moduledoc false - use GenServer - - alias Nebulex.Hook - - @actions [:get, :put] - - def start_link(opts \\ []) do - GenServer.start_link(__MODULE__, opts, name: __MODULE__) - end - - ## Hook Function - - def track(%Hook{step: :before, name: name}) when name in @actions do - System.system_time(:microsecond) - end - - def track(%Hook{step: :after_return, name: name} = event) when name in @actions do - GenServer.cast(__MODULE__, {:track, event}) - end - - def track(hook), do: hook - - ## Error Hook Function - - def hook_error(%Hook{name: :get}), do: raise(ArgumentError, "error") - - def hook_error(hook), do: hook - - ## GenServer - - @impl GenServer - def init(_opts) do - {:ok, %{}} - end - - @impl GenServer - def handle_cast({:track, %Hook{acc: start} = hook}, state) do - _ = send(:hooked_cache, %{hook | acc: System.system_time(:microsecond) - start}) - {:noreply, state} - end - end - - defmodule Cache do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - - use Nebulex.TestCache.Common - end - - defmodule Partitioned do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - - use Nebulex.TestCache.Common - end - - defmodule Replicated do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated - - use Nebulex.TestCache.Common - end - - defmodule Multilevel do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Multilevel - - defmodule L1 do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule L2 do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - 
adapter: Nebulex.Adapters.Replicated - end - - defmodule L3 do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - end - end - - ## Mocks - - defmodule AdapterMock do - @moduledoc false - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - - @impl true - defmacro __before_compile__(_), do: :ok - - @impl true - def init(opts) do - child = { - {Agent, System.unique_integer([:positive, :monotonic])}, - {Agent, :start_link, [fn -> :ok end, [name: opts[:child_name]]]}, - :permanent, - 5_000, - :worker, - [Agent] - } - - {:ok, child, %{}} - end - - @impl true - def get(_, key, _) do - if is_integer(key) do - raise ArgumentError, "Error" - else - :ok - end - end - - @impl true - def put(_, _, _, _, _, _) do - :ok = Process.sleep(1000) - true - end - - @impl true - def delete(_, _, _), do: :ok - - @impl true - def take(_, _, _), do: nil - - @impl true - def has_key?(_, _), do: true - - @impl true - def ttl(_, _), do: nil - - @impl true - def expire(_, _, _), do: true - - @impl true - def touch(_, _), do: true - - @impl true - def update_counter(_, _, _, _, _, _), do: 1 - - @impl true - def get_all(_, _, _) do - :ok = Process.sleep(1000) - %{} - end - - @impl true - def put_all(_, _, _, _, _), do: Process.exit(self(), :normal) - - @impl true - def execute(_, :count_all, _, _) do - _ = Process.exit(self(), :normal) - 0 - end - - def execute(_, :delete_all, _, _) do - Process.sleep(2000) - 0 - end - - @impl true - def stream(_, _, _), do: 1..10 - end - - defmodule PartitionedMock do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned, - primary_storage_adapter: Nebulex.TestCache.AdapterMock - end - - defmodule ReplicatedMock do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated, - primary_storage_adapter: Nebulex.TestCache.AdapterMock - end -end diff --git 
a/test/support/test_cache.exs b/test/support/test_cache.exs new file mode 100644 index 00000000..2302dd8e --- /dev/null +++ b/test/support/test_cache.exs @@ -0,0 +1,115 @@ +defmodule Nebulex.TestCache do + @moduledoc false + + defmodule Common do + @moduledoc false + + defmacro __using__(_opts) do + quote do + def get_and_update_fun(nil), do: {nil, 1} + def get_and_update_fun(current) when is_integer(current), do: {current, current * 2} + + def get_and_update_bad_fun(_), do: :other + end + end + end + + defmodule Cache do + @moduledoc false + use Nebulex.Cache, + otp_app: :nebulex, + adapter: Nebulex.TestAdapter + + use Nebulex.TestCache.Common + end + + ## Mocks + + defmodule AdapterMock do + @moduledoc false + @behaviour Nebulex.Adapter + @behaviour Nebulex.Adapter.KV + @behaviour Nebulex.Adapter.Queryable + + @impl true + defmacro __before_compile__(_), do: :ok + + @impl true + def init(opts) do + child = { + {Agent, System.unique_integer([:positive, :monotonic])}, + {Agent, :start_link, [fn -> :ok end, [name: opts[:child_name]]]}, + :permanent, + 5_000, + :worker, + [Agent] + } + + {:ok, child, %{}} + end + + @impl true + def fetch(_, key, _) do + if is_integer(key) do + raise ArgumentError, "Error" + else + {:ok, :ok} + end + end + + @impl true + def put(_, _, _, _, _, _) do + :ok = Process.sleep(1000) + + {:ok, true} + end + + @impl true + def delete(_, _, _), do: :ok + + @impl true + def take(_, _, _), do: {:ok, nil} + + @impl true + def has_key?(_, _, _), do: {:ok, true} + + @impl true + def ttl(_, _, _), do: {:ok, nil} + + @impl true + def expire(_, _, _, _), do: {:ok, true} + + @impl true + def touch(_, _, _), do: {:ok, true} + + @impl true + def update_counter(_, _, _, _, _, _), do: {:ok, 1} + + @impl true + def put_all(_, _, _, _, _) do + {:ok, Process.exit(self(), :normal)} + end + + @impl true + def execute(_, %{op: :get_all}, _) do + :ok = Process.sleep(1000) + + {:ok, []} + end + + def execute(_, %{op: :count_all}, _) do + _ = Process.exit(self(), 
:normal) + + {:ok, 0} + end + + def execute(_, %{op: :delete_all}, _) do + :ok = Process.sleep(2000) + + {:ok, 0} + end + + @impl true + def stream(_, _, _), do: {:ok, 1..10} + end +end diff --git a/test/test_helper.exs b/test/test_helper.exs index 0b1736e8..2adc0d1f 100644 --- a/test/test_helper.exs +++ b/test/test_helper.exs @@ -1,25 +1,31 @@ -# Start Telemetry -_ = Application.start(:telemetry) +# Load support modules +Code.require_file("support/test_adapter.exs", __DIR__) +Code.require_file("support/fake_adapter.exs", __DIR__) +Code.require_file("support/test_cache.exs", __DIR__) +Code.require_file("support/cache_case.exs", __DIR__) -# Set nodes -nodes = [:"node1@127.0.0.1", :"node2@127.0.0.1", :"node3@127.0.0.1", :"node4@127.0.0.1"] -:ok = Application.put_env(:nebulex, :nodes, nodes) - -# Load shared tests +# Load shared test cases for file <- File.ls!("test/shared/cache") do Code.require_file("./shared/cache/" <> file, __DIR__) end +# Load shared test cases for file <- File.ls!("test/shared"), not File.dir?("test/shared/" <> file) do Code.require_file("./shared/" <> file, __DIR__) end -# Spawn remote nodes -unless :clustered in Keyword.get(ExUnit.configuration(), :exclude, []) do - Nebulex.Cluster.spawn(nodes) -end +# Mocks +[ + Mix.Project, + Nebulex.Cache.Registry +] +|> Enum.each(&Mimic.copy/1) + +# Start Telemetry +_ = Application.start(:telemetry) -# For mix tests +# For tasks/generators testing +Mix.start() Mix.shell(Mix.Shell.Process) # Start ExUnit