diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 65be572e..aec0bee3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -84,15 +84,11 @@ jobs: if: ${{ matrix.style }} - name: Run tests - run: | - epmd -daemon - mix test --trace + run: mix test if: ${{ !matrix.coverage }} - name: Run tests with coverage - run: | - epmd -daemon - mix coveralls.github + run: mix coveralls.github if: ${{ matrix.coverage }} - name: Run sobelow diff --git a/guides/creating-new-adapter.md b/guides/creating-new-adapter.md index 09cc2539..b5bed1cd 100644 --- a/guides/creating-new-adapter.md +++ b/guides/creating-new-adapter.md @@ -108,7 +108,7 @@ end We won't be writing tests ourselves. Instead, we will use shared tests from the Nebulex parent repo. To do so, we will create a helper module in `test/shared/cache_test.exs` that will `use` test suites for behaviour we are -going to implement. The minimal set of behaviours is `Entry` and `Queryable` so +going to implement. The minimal set of behaviours is `KV` and `Queryable` so we'll go with them. ```elixir @@ -119,7 +119,7 @@ defmodule NebulexMemoryAdapter.CacheTest do defmacro __using__(_opts) do quote do - use Nebulex.Cache.EntryTest + use Nebulex.Cache.KVTest use Nebulex.Cache.QueryableTest end end @@ -187,7 +187,7 @@ Another try ```console mix test == Compilation error in file test/nebulex_memory_adapter_test.exs == -** (CompileError) test/nebulex_memory_adapter_test.exs:3: module Nebulex.Cache.EntryTest is not loaded and could not be found +** (CompileError) test/nebulex_memory_adapter_test.exs:3: module Nebulex.Cache.KVTest is not loaded and could not be found (elixir 1.13.2) expanding macro: Kernel.use/1 test/nebulex_memory_adapter_test.exs:3: NebulexMemoryAdapterTest (module) expanding macro: NebulexMemoryAdapter.CacheTest.__using__/1 @@ -305,7 +305,7 @@ one-by-one or define them all in bulk. 
For posterity, we put a complete ```elixir defmodule NebulexMemoryAdapter do @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry + @behaviour Nebulex.Adapter.KV @behaviour Nebulex.Adapter.Queryable import Nebulex.Helpers @@ -320,17 +320,17 @@ defmodule NebulexMemoryAdapter do {:ok, child_spec, %{}} end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def fetch(adapter_meta, key, _opts) do wrap_ok Agent.get(adapter_meta.pid, &Map.get(&1, key)) end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def get_all(adapter_meta, keys, _opts) do wrap_ok Agent.get(adapter_meta.pid, &Map.take(&1, keys)) end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def put(adapter_meta, key, value, ttl, :put_new, opts) do if get(adapter_meta, key, []) do false @@ -358,7 +358,7 @@ defmodule NebulexMemoryAdapter do wrap_ok true end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def put_all(adapter_meta, entries, ttl, :put_new, opts) do if get_all(adapter_meta, Map.keys(entries), []) == %{} do put_all(adapter_meta, entries, ttl, :put, opts) @@ -378,12 +378,12 @@ defmodule NebulexMemoryAdapter do wrap_ok true end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def delete(adapter_meta, key, _opts) do wrap_ok Agent.update(adapter_meta.pid, &Map.delete(&1, key)) end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def take(adapter_meta, key, _opts) do value = get(adapter_meta, key, []) @@ -392,7 +392,7 @@ defmodule NebulexMemoryAdapter do wrap_ok value end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def update_counter(adapter_meta, key, amount, _ttl, default, _opts) do Agent.update(adapter_meta.pid, fn state -> Map.update(state, key, default + amount, fn v -> v + amount end) @@ -401,22 +401,22 @@ defmodule NebulexMemoryAdapter do wrap_ok get(adapter_meta, key, []) end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def has_key?(adapter_meta, key, _opts) do wrap_ok Agent.get(adapter_meta.pid, &Map.has_key?(&1, 
key)) end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def ttl(_adapter_meta, _key, _opts) do wrap_ok nil end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def expire(_adapter_meta, _key, _ttl, _opts) do wrap_ok true end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def touch(_adapter_meta, _key, _opts) do wrap_ok true end diff --git a/lib/mix/tasks/nbx.ex b/lib/mix/tasks/nbx.ex index 23424e50..84c60f9b 100644 --- a/lib/mix/tasks/nbx.ex +++ b/lib/mix/tasks/nbx.ex @@ -24,6 +24,7 @@ defmodule Mix.Tasks.Nbx do defp general do _ = Application.ensure_all_started(:nebulex) + Mix.shell().info("Nebulex v#{Application.spec(:nebulex, :vsn)}") Mix.shell().info("In-Process and Distributed Cache Toolkit for Elixir.") diff --git a/lib/nebulex/adapter.ex b/lib/nebulex/adapter.ex index 9cf8a755..11f15afd 100644 --- a/lib/nebulex/adapter.ex +++ b/lib/nebulex/adapter.ex @@ -8,9 +8,6 @@ defmodule Nebulex.Adapter do @typedoc "Adapter" @type t :: module - @typedoc "Metadata type" - @type metadata :: %{optional(atom) => term} - @typedoc """ The metadata returned by the adapter `c:init/1`. @@ -18,11 +15,12 @@ defmodule Nebulex.Adapter do the following keys into the meta: * `:cache` - The cache module. + * `:name` - The name of the cache supervisor process. * `:pid` - The PID returned by the child spec returned in `c:init/1`. * `:adapter` - The defined cache adapter. """ - @type adapter_meta :: metadata + @type adapter_meta :: %{optional(term) => term} ## Callbacks @@ -32,7 +30,8 @@ defmodule Nebulex.Adapter do @macrocallback __before_compile__(env :: Macro.Env.t()) :: Macro.t() @doc """ - Initializes the adapter supervision tree by returning the children. + Initializes the adapter supervision tree by returning the children + and adapter metadata. 
""" @callback init(config :: keyword) :: {:ok, :supervisor.child_spec(), adapter_meta} @@ -115,11 +114,11 @@ defmodule Nebulex.Adapter do ## Private Functions - defp build_defspan(fun, opts) when is_list(opts) do + defp build_defspan(ast, opts) when is_list(opts) do {name, args} = - case Macro.decompose_call(fun) do - {_, _} = pair -> pair - _ -> raise ArgumentError, "invalid syntax in defspan #{Macro.to_string(fun)}" + case Macro.decompose_call(ast) do + {_, _} = parts -> parts + _ -> raise ArgumentError, "invalid syntax in defspan #{Macro.to_string(ast)}" end as = Keyword.get(opts, :as, name) diff --git a/lib/nebulex/adapter/keyslot.ex b/lib/nebulex/adapter/keyslot.ex deleted file mode 100644 index 58d94930..00000000 --- a/lib/nebulex/adapter/keyslot.ex +++ /dev/null @@ -1,51 +0,0 @@ -defmodule Nebulex.Adapter.Keyslot do - @moduledoc """ - This behaviour provides a callback to compute the hash slot for a specific - key based on the number of slots (partitions, nodes, ...). - - The purpose of this module is to allow users to implement a custom - hash-slot function to distribute the keys. It can be used to select - the node/slot where a specific key is supposed to be. - - > It is highly recommended to use a **Consistent Hashing** algorithm. - - ## Example - - defmodule MyApp.Keyslot do - use Nebulex.Adapter.Keyslot - - @impl true - def hash_slot(key, range) do - key - |> :erlang.phash2() - |> :jchash.compute(range) - end - end - - This example uses [Jumping Consistent Hash](https://github.com/cabol/jchash). - """ - - @doc """ - Returns an integer within the range `0..range-1` identifying the hash slot - the specified `key` hashes to. 
- - ## Example - - iex> MyKeyslot.hash_slot("mykey", 10) - 2 - - """ - @callback hash_slot(key :: any, range :: pos_integer) :: non_neg_integer - - @doc false - defmacro __using__(_opts) do - quote do - @behaviour Nebulex.Adapter.Keyslot - - @impl true - defdelegate hash_slot(key, range), to: :erlang, as: :phash2 - - defoverridable hash_slot: 2 - end - end -end diff --git a/lib/nebulex/adapter/entry.ex b/lib/nebulex/adapter/kv.ex similarity index 98% rename from lib/nebulex/adapter/entry.ex rename to lib/nebulex/adapter/kv.ex index 805606cc..4ac6c1f4 100644 --- a/lib/nebulex/adapter/entry.ex +++ b/lib/nebulex/adapter/kv.ex @@ -1,9 +1,9 @@ -defmodule Nebulex.Adapter.Entry do +defmodule Nebulex.Adapter.KV do @moduledoc """ - Specifies the entry API required from adapters. + Specifies the Key/Value API required from adapters. This behaviour specifies all read/write key-based functions, - the ones applied to a specific cache entry. + the ones applied to a specific cache key. """ @typedoc "Proxy type to the adapter meta" diff --git a/lib/nebulex/adapters/local.ex b/lib/nebulex/adapters/local.ex deleted file mode 100644 index 91bd1f29..00000000 --- a/lib/nebulex/adapters/local.ex +++ /dev/null @@ -1,998 +0,0 @@ -defmodule Nebulex.Adapters.Local do - @moduledoc ~S""" - Adapter module for Local Generational Cache; inspired by - [epocxy](https://github.com/duomark/epocxy). - - Generational caching using an ets table (or multiple ones when used with - `:shards`) for each generation of cached data. Accesses hit the newer - generation first, and migrate from the older generation to the newer - generation when retrieved from the stale table. When a new generation - is started, the oldest one is deleted. This is a form of mass garbage - collection which avoids using timers and expiration of individual - cached elements. - - This implementation of generation cache uses only two generations - (which is more than enough) also referred like the `newer` and - the `older`. 
- - ## Overall features - - * Configurable backend (`ets` or `:shards`). - * Expiration – A status based on TTL (Time To Live) option. To maintain - cache performance, expired entries may not be immediately removed or - evicted, they are expired or evicted on-demand, when the key is read. - * Eviction – [Generational Garbage Collection][gc]. - * Sharding – For intensive workloads, the Cache may also be partitioned - (by using `:shards` backend and specifying the `:partitions` option). - * Support for transactions via Erlang global name registration facility. - * Support for stats. - - [gc]: http://hexdocs.pm/nebulex/Nebulex.Adapters.Local.Generation.html - - ## Options - - This adapter supports the following options and all of them can be given via - the cache configuration: - - * `:backend` - Defines the backend or storage to be used for the adapter. - Supported backends are: `:ets` and `:shards`. Defaults to `:ets`. - - * `:read_concurrency` - (boolean) Since this adapter uses ETS tables - internally, this option is used when a new table is created; see - `:ets.new/2`. Defaults to `true`. - - * `:write_concurrency` - (boolean) Since this adapter uses ETS tables - internally, this option is used when a new table is created; see - `:ets.new/2`. Defaults to `true`. - - * `:compressed` - (boolean) Since this adapter uses ETS tables internally, - this option is used when a new table is created; see `:ets.new/2`. - Defaults to `false`. - - * `:backend_type` - This option defines the type of ETS to be used - (Defaults to `:set`). However, it is highly recommended to keep the - default value, since there are commands not supported (unexpected - exception may be raised) for types like `:bag` or `: duplicate_bag`. - Please see the [ETS](https://erlang.org/doc/man/ets.html) docs - for more information. - - * `:partitions` - If it is set, an integer > 0 is expected, otherwise, - it defaults to `System.schedulers_online()`. 
This option is only - available for `:shards` backend. - - * `:gc_interval` - If it is set, an integer > 0 is expected defining the - interval time in milliseconds to garbage collection to run, delete the - oldest generation and create a new one. If this option is not set, - garbage collection is never executed, so new generations must be - created explicitly, e.g.: `MyCache.new_generation(opts)`. - - * `:max_size` - If it is set, an integer > 0 is expected defining the - max number of cached entries (cache limit). If it is not set (`nil`), - the check to release memory is not performed (the default). - - * `:allocated_memory` - If it is set, an integer > 0 is expected defining - the max size in bytes allocated for a cache generation. When this option - is set and the configured value is reached, a new cache generation is - created so the oldest is deleted and force releasing memory space. - If it is not set (`nil`), the cleanup check to release memory is - not performed (the default). - - * `:gc_cleanup_min_timeout` - An integer > 0 defining the min timeout in - milliseconds for triggering the next cleanup and memory check. This will - be the timeout to use when either the max size or max allocated memory - is reached. Defaults to `10_000` (10 seconds). - - * `:gc_cleanup_max_timeout` - An integer > 0 defining the max timeout in - milliseconds for triggering the next cleanup and memory check. This is - the timeout used when the cache starts and there are few entries or the - consumed memory is near to `0`. Defaults to `600_000` (10 minutes). - - ## Usage - - `Nebulex.Cache` is the wrapper around the cache. 
We can define a - local cache as follows: - - defmodule MyApp.LocalCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Local - end - - Where the configuration for the cache must be in your application - environment, usually defined in your `config/config.exs`: - - config :my_app, MyApp.LocalCache, - gc_interval: :timer.hours(12), - max_size: 1_000_000, - allocated_memory: 2_000_000_000, - gc_cleanup_min_timeout: :timer.seconds(10), - gc_cleanup_max_timeout: :timer.minutes(10) - - For intensive workloads, the Cache may also be partitioned using `:shards` - as cache backend (`backend: :shards`) and configuring the desired number of - partitions via the `:partitions` option. Defaults to - `System.schedulers_online()`. - - config :my_app, MyApp.LocalCache, - gc_interval: :timer.hours(12), - max_size: 1_000_000, - allocated_memory: 2_000_000_000, - gc_cleanup_min_timeout: :timer.seconds(10), - gc_cleanup_max_timeout: :timer.minutes(10), - backend: :shards, - partitions: System.schedulers_online() * 2 - - If your application was generated with a supervisor (by passing `--sup` - to `mix new`) you will have a `lib/my_app/application.ex` file containing - the application start callback that defines and starts your supervisor. - You just need to edit the `start/2` function to start the cache as a - supervisor on your application's supervisor: - - def start(_type, _args) do - children = [ - {MyApp.LocalCache, []}, - ... - ] - - See `Nebulex.Cache` for more information. - - ## Eviction configuration - - This section is to understand a bit better how the different configuration - options work and have an idea what values to set; especially if it is the - first time using Nebulex. 
- - ### `:ttl` option - - The `:ttl` option that is used to set the expiration time for a key, it - doesn't work as eviction mechanism, since the local adapter implements a - generational cache, the options that control the eviction process are: - `:gc_interval`, `:gc_cleanup_min_timeout`, `:gc_cleanup_max_timeout`, - `:max_size` and `:allocated_memory`. The `:ttl` is evaluated on-demand - when a key is retrieved, and at that moment if it s expired, then remove - it from the cache, hence, it can not be used as eviction method, it is - more for keep the integrity and consistency in the cache. For this reason, - it is highly recommended to configure always the eviction options mentioned - before. - - ### Caveats when using `:ttl` option: - - * When using the `:ttl` option, ensure it is less than `:gc_interval`, - otherwise, there may be a situation where the key is evicted and the - `:ttl` hasn't happened yet (maybe because the garbage collector ran - before the key had been fetched). - * Assuming you have `:gc_interval` set to 2 hrs, then you put a new key - with `:ttl` set to 1 hr, and 1 minute later the GC runs, that key will - be moved to the older generation so it can be yet retrieved. On the other - hand, if the key is never fetched till the next GC cycle (causing moving - it to the newer generation), since the key is already in the oldest - generation it will be evicted from the cache so it won't be retrievable - anymore. - - ### Garbage collection or eviction options - - This adapter implements a generational cache, which means its main eviction - mechanism is pushing a new cache generation and remove the oldest one. In - this way, we ensure only the most frequently used keys are always available - in the newer generation and the the least frequently used are evicted when - the garbage collector runs, and the garbage collector is triggered upon - these conditions: - - * When the time interval defined by `:gc_interval` is completed. 
- This makes the garbage-collector process to run creating a new - generation and forcing to delete the oldest one. - * When the "cleanup" timeout expires, and then the limits `:max_size` - and `:allocated_memory` are checked, if one of those is reached, - then the garbage collector runs (a new generation is created and - the oldest one is deleted). The cleanup timeout is controlled by - `:gc_cleanup_min_timeout` and `:gc_cleanup_max_timeout`, it works - with an inverse linear backoff, which means the timeout is inverse - proportional to the memory growth; the bigger the cache size is, - the shorter the cleanup timeout will be. - - ### First-time configuration - - For configuring the cache with accurate and/or good values it is important - to know several things in advance, like for example the size of an entry - in average so we can calculate a good value for max size and/or allocated - memory, how intensive will be the load in terms of reads and writes, etc. - The problem is most of these aspects are unknown when it is a new app or - we are using the cache for the first time. Therefore, the following - recommendations will help you to configure the cache for the first time: - - * When configuring the `:gc_interval`, think about how that often the - least frequently used entries should be evicted, or what is the desired - retention period for the cached entries. For example, if `:gc_interval` - is set to 1 hr, it means you will keep in cache only those entries that - are retrieved periodically within a 2 hr period; `gc_interval * 2`, - being 2 the number of generations. Longer than that, the GC will - ensure is always evicted (the oldest generation is always deleted). - If it is the first time using Nebulex, perhaps you can start with - `gc_interval: :timer.hours(12)` (12 hrs), so the max retention - period for the keys will be 1 day; but ensure you also set either the - `:max_size` or `:allocated_memory`. 
- * It is highly recommended to set either `:max_size` or `:allocated_memory` - to ensure the oldest generation is deleted (least frequently used keys - are evicted) when one of these limits is reached and also to avoid - running out of memory. For example, for the `:allocated_memory` we can - set 25% of the total memory, and for the `:max_size` something between - `100_000` and `1_000_000`. - * For `:gc_cleanup_min_timeout` we can set `10_000`, which means when the - cache is reaching the size or memory limit, the polling period for the - cleanup process will be 10 seconds. And for `:gc_cleanup_max_timeout` - we can set `600_000`, which means when the cache is almost empty the - polling period will be close to 10 minutes. - - ## Stats - - This adapter does support stats by using the default implementation - provided by `Nebulex.Adapter.Stats`. The adapter also uses the - `Nebulex.Telemetry.StatsHandler` to aggregate the stats and keep - them updated. Therefore, it requires the Telemetry events are emitted - by the adapter (the `:telemetry` option should not be set to `false` - so the Telemetry events can be dispatched), otherwise, stats won't - work properly. - - ## Queryable API - - Since this adapter is implemented on top of ETS tables, the query must be - a valid match spec given by `:ets.match_spec()`. However, there are some - predefined and/or shorthand queries you can use. See the section - "Predefined queries" below for for information. - - Internally, an entry is represented by the tuple - `{:entry, key, value, touched, ttl}`, which means the match pattern within - the `:ets.match_spec()` must be something like: - `{:entry, :"$1", :"$2", :"$3", :"$4"}`. - In order to make query building easier, you can use `Ex2ms` library. - - ### Predefined queries - - * `nil` - All keys are returned. - - * `:unexpired` - All unexpired keys/entries. - - * `:expired` - All expired keys/entries. 
- - * `{:in, [term]}` - Only the keys in the given key list (`[term]`) - are returned. This predefined query is only supported for - `c:Nebulex.Cache.delete_all/2`. This is the recommended - way of doing bulk delete of keys. - - ## Examples - - # built-in queries - MyCache.all() - MyCache.all(:unexpired) - MyCache.all(:expired) - MyCache.all({:in, ["foo", "bar"]}) - - # using a custom match spec (all values > 10) - spec = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 10}], [{{:"$1", :"$2"}}]}] - MyCache.all(spec) - - # using Ex2ms - import Ex2ms - - spec = - fun do - {_, key, value, _, _} when value > 10 -> {key, value} - end - - MyCache.all(spec) - - The `:return` option applies only for built-in queries, such as: - `nil | :unexpired | :expired`, if you are using a custom `:ets.match_spec()`, - the return value depends on it. - - The same applies to the `stream` function. - - ## Extended API (convenience functions) - - This adapter provides some additional convenience functions to the - `Nebulex.Cache` API. 
- - Creating new generations: - - MyCache.new_generation() - MyCache.new_generation(reset_timer: false) - - Retrieving the current generations: - - MyCache.generations() - - Retrieving the newer generation: - - MyCache.newer_generation() - - """ - - # Provide Cache Implementation - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - - # Inherit default transaction implementation - use Nebulex.Adapter.Transaction - - # Inherit default persistence implementation - use Nebulex.Adapter.Persistence - - # Inherit default stats implementation - use Nebulex.Adapter.Stats - - import Nebulex.Adapter - import Nebulex.Helpers - import Record - - alias Nebulex.Adapter.Stats - alias Nebulex.Adapters.Local.{Backend, Generation, Metadata} - alias Nebulex.{Entry, Time} - - # Cache Entry - defrecord(:entry, - key: nil, - value: nil, - touched: nil, - exp: nil - ) - - # Inline common instructions - @compile {:inline, list_gen: 1, newer_gen: 1, fetch_entry: 3, pop_entry: 3} - - ## Nebulex.Adapter - - @impl true - defmacro __before_compile__(_env) do - quote do - @doc """ - A convenience function for creating new generations. - """ - def new_generation(opts \\ []) do - Generation.new(get_dynamic_cache(), opts) - end - - @doc """ - A convenience function for reset the GC timer. - """ - def reset_generation_timer do - Generation.reset_timer(get_dynamic_cache()) - end - - @doc """ - A convenience function for retrieving the current generations. - """ - def generations do - Generation.list(get_dynamic_cache()) - end - - @doc """ - A convenience function for retrieving the newer generation. 
- """ - def newer_generation do - Generation.newer(get_dynamic_cache()) - end - end - end - - @impl true - def init(opts) do - # Validate options - opts = __MODULE__.Options.validate!(opts) - - # Required options - cache = Keyword.fetch!(opts, :cache) - telemetry = Keyword.fetch!(opts, :telemetry) - telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) - - # Init internal metadata table - meta_tab = opts[:meta_tab] || Metadata.init() - - # Init stats_counter - stats_counter = Stats.init(opts) - - # Resolve the backend to be used - backend = Keyword.fetch!(opts, :backend) - - # Internal option for max nested match specs based on number of keys - purge_chunk_size = Keyword.fetch!(opts, :purge_chunk_size) - - # Build adapter metadata - adapter_meta = %{ - cache: cache, - name: opts[:name] || cache, - telemetry: telemetry, - telemetry_prefix: telemetry_prefix, - meta_tab: meta_tab, - stats_counter: stats_counter, - backend: backend, - purge_chunk_size: purge_chunk_size, - started_at: DateTime.utc_now() - } - - # Build adapter child_spec - child_spec = Backend.child_spec(backend, [adapter_meta: adapter_meta] ++ opts) - - {:ok, child_spec, adapter_meta} - end - - ## Nebulex.Adapter.Entry - - @impl true - defspan fetch(adapter_meta, key, _opts) do - adapter_meta.meta_tab - |> list_gen() - |> do_fetch(key, adapter_meta) - |> return(:value) - end - - defp do_fetch([newer], key, adapter_meta) do - fetch_entry(newer, key, adapter_meta) - end - - defp do_fetch([newer, older], key, adapter_meta) do - with {:error, _} <- fetch_entry(newer, key, adapter_meta), - {:ok, cached} <- pop_entry(older, key, adapter_meta) do - true = adapter_meta.backend.insert(newer, cached) - - {:ok, cached} - end - end - - @impl true - defspan get_all(adapter_meta, keys, opts) do - adapter_meta = %{adapter_meta | telemetry: Map.get(adapter_meta, :in_span?, false)} - - keys - |> Enum.reduce(%{}, fn key, acc -> - case fetch(adapter_meta, key, opts) do - {:ok, val} -> Map.put(acc, key, val) - 
{:error, _} -> acc - end - end) - |> wrap_ok() - end - - @impl true - defspan put(adapter_meta, key, value, ttl, on_write, _opts) do - now = Time.now() - entry = entry(key: key, value: value, touched: now, exp: exp(now, ttl)) - - wrap_ok do_put(on_write, adapter_meta.meta_tab, adapter_meta.backend, entry) - end - - defp do_put(:put, meta_tab, backend, entry) do - put_entries(meta_tab, backend, entry) - end - - defp do_put(:put_new, meta_tab, backend, entry) do - put_new_entries(meta_tab, backend, entry) - end - - defp do_put(:replace, meta_tab, backend, entry(key: key, value: value)) do - update_entry(meta_tab, backend, key, [{3, value}]) - end - - @impl true - defspan put_all(adapter_meta, entries, ttl, on_write, _opts) do - now = Time.now() - exp = exp(now, ttl) - - entries = - for {key, value} <- entries do - entry(key: key, value: value, touched: now, exp: exp) - end - - do_put_all( - on_write, - adapter_meta.meta_tab, - adapter_meta.backend, - adapter_meta.purge_chunk_size, - entries - ) - |> wrap_ok() - end - - defp do_put_all(:put, meta_tab, backend, chunk_size, entries) do - put_entries(meta_tab, backend, entries, chunk_size) - end - - defp do_put_all(:put_new, meta_tab, backend, chunk_size, entries) do - put_new_entries(meta_tab, backend, entries, chunk_size) - end - - @impl true - defspan delete(adapter_meta, key, _opts) do - adapter_meta.meta_tab - |> list_gen() - |> Enum.each(&adapter_meta.backend.delete(&1, key)) - end - - @impl true - defspan take(adapter_meta, key, _opts) do - adapter_meta.meta_tab - |> list_gen() - |> Enum.reduce_while(nil, fn gen, _acc -> - case pop_entry(gen, key, adapter_meta) do - {:ok, res} -> {:halt, return({:ok, res}, :value)} - error -> {:cont, error} - end - end) - end - - @impl true - defspan update_counter(adapter_meta, key, amount, ttl, default, _opts) do - # Current time - now = Time.now() - - # Get needed metadata - meta_tab = adapter_meta.meta_tab - backend = adapter_meta.backend - - # Verify if the key has expired - 
_ = - meta_tab - |> list_gen() - |> do_fetch(key, adapter_meta) - - # Run the counter operation - meta_tab - |> newer_gen() - |> backend.update_counter( - key, - {3, amount}, - entry(key: key, value: default, touched: now, exp: exp(now, ttl)) - ) - |> wrap_ok() - end - - @impl true - def has_key?(adapter_meta, key, _opts) do - case fetch(adapter_meta, key, []) do - {:ok, _} -> {:ok, true} - {:error, _} -> {:ok, false} - end - end - - @impl true - defspan ttl(adapter_meta, key, _opts) do - with {:ok, res} <- adapter_meta.meta_tab |> list_gen() |> do_fetch(key, adapter_meta) do - {:ok, entry_ttl(res)} - end - end - - defp entry_ttl(entry(exp: :infinity)), do: :infinity - - defp entry_ttl(entry(exp: exp)) do - exp - Time.now() - end - - defp entry_ttl(entries) when is_list(entries) do - Enum.map(entries, &entry_ttl/1) - end - - @impl true - defspan expire(adapter_meta, key, ttl, _opts) do - now = Time.now() - - adapter_meta.meta_tab - |> update_entry(adapter_meta.backend, key, [{4, now}, {5, exp(now, ttl)}]) - |> wrap_ok() - end - - @impl true - defspan touch(adapter_meta, key, _opts) do - adapter_meta.meta_tab - |> update_entry(adapter_meta.backend, key, [{4, Time.now()}]) - |> wrap_ok() - end - - ## Nebulex.Adapter.Queryable - - @impl true - defspan execute(adapter_meta, operation, query, opts) do - do_execute(adapter_meta, operation, query, opts) - end - - defp do_execute(%{meta_tab: meta_tab, backend: backend}, :count_all, nil, _opts) do - meta_tab - |> list_gen() - |> Enum.reduce(0, fn gen, acc -> - gen - |> backend.info(:size) - |> Kernel.+(acc) - end) - |> wrap_ok() - end - - defp do_execute(%{meta_tab: meta_tab} = adapter_meta, :delete_all, nil, _opts) do - with {:ok, count_all} <- do_execute(adapter_meta, :count_all, nil, []) do - :ok = Generation.delete_all(meta_tab) - {:ok, count_all} - end - end - - defp do_execute(%{meta_tab: meta_tab} = adapter_meta, :delete_all, {:in, keys}, _opts) - when is_list(keys) do - meta_tab - |> list_gen() - |> Enum.reduce(0, 
fn gen, acc -> - do_delete_all(adapter_meta.backend, gen, keys, adapter_meta.purge_chunk_size) + acc - end) - end - - defp do_execute(%{meta_tab: meta_tab, backend: backend}, operation, query, opts) do - with {:ok, query} <- validate_match_spec(query, opts) do - query = maybe_match_spec_return_true(query, operation) - - {reducer, acc_in} = - case operation do - :all -> {&(backend.select(&1, query) ++ &2), []} - :count_all -> {&(backend.select_count(&1, query) + &2), 0} - :delete_all -> {&(backend.select_delete(&1, query) + &2), 0} - end - - meta_tab - |> list_gen() - |> Enum.reduce(acc_in, reducer) - |> wrap_ok() - end - end - - @impl true - defspan stream(adapter_meta, query, opts) do - with {:ok, query} <- validate_match_spec(query, opts) do - adapter_meta - |> do_stream(query, Keyword.get(opts, :page_size, 20)) - |> wrap_ok() - end - end - - defp do_stream(%{meta_tab: meta_tab, backend: backend}, match_spec, page_size) do - Stream.resource( - fn -> - [newer | _] = generations = list_gen(meta_tab) - result = backend.select(newer, match_spec, page_size) - {result, generations} - end, - fn - {:"$end_of_table", [_gen]} -> - {:halt, []} - - {:"$end_of_table", [_gen | generations]} -> - result = - generations - |> hd() - |> backend.select(match_spec, page_size) - - {[], {result, generations}} - - {{elements, cont}, [_ | _] = generations} -> - {elements, {backend.select(cont), generations}} - end, - & &1 - ) - end - - ## Nebulex.Adapter.Persistence - - @impl true - defspan dump(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - @impl true - defspan load(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - ## Nebulex.Adapter.Transaction - - @impl true - defspan transaction(adapter_meta, opts, fun) do - super(adapter_meta, opts, fun) - end - - @impl true - defspan in_transaction?(adapter_meta) do - super(adapter_meta) - end - - ## Nebulex.Adapter.Stats - - @impl true - defspan stats(adapter_meta) do - with {:ok, %Nebulex.Stats{} 
= stats} <- super(adapter_meta) do - {:ok, %{stats | metadata: Map.put(stats.metadata, :started_at, adapter_meta.started_at)}} - end - end - - ## Helpers - - defp exp(_now, :infinity), do: :infinity - defp exp(now, ttl), do: now + ttl - - defp list_gen(meta_tab) do - Metadata.fetch!(meta_tab, :generations) - end - - defp newer_gen(meta_tab) do - meta_tab - |> Metadata.fetch!(:generations) - |> hd() - end - - defmacrop backend_call(adapter_meta, fun, tab, key) do - quote do - case unquote(adapter_meta).backend.unquote(fun)(unquote(tab), unquote(key)) do - [] -> - wrap_error Nebulex.KeyError, key: unquote(key), cache: unquote(adapter_meta).name - - [entry(exp: :infinity) = entry] -> - {:ok, entry} - - [entry() = entry] -> - validate_exp(entry, unquote(tab), unquote(adapter_meta)) - - entries when is_list(entries) -> - now = Time.now() - - {:ok, for(entry(touched: touched, exp: exp) = e <- entries, now < exp, do: e)} - end - end - end - - defp validate_exp(entry(key: key, exp: exp) = entry, tab, adapter_meta) do - if Time.now() >= exp do - true = adapter_meta.backend.delete(tab, key) - - wrap_error Nebulex.KeyError, key: key, cache: adapter_meta.name, reason: :expired - else - {:ok, entry} - end - end - - defp fetch_entry(tab, key, adapter_meta) do - backend_call(adapter_meta, :lookup, tab, key) - end - - defp pop_entry(tab, key, adapter_meta) do - backend_call(adapter_meta, :take, tab, key) - end - - defp put_entries(meta_tab, backend, entries, chunk_size \\ 0) - - defp put_entries( - meta_tab, - backend, - entry(key: key, value: val, touched: touched, exp: exp) = entry, - _chunk_size - ) do - case list_gen(meta_tab) do - [newer_gen] -> - backend.insert(newer_gen, entry) - - [newer_gen, older_gen] -> - changes = [{3, val}, {4, touched}, {5, exp}] - - with false <- backend.update_element(newer_gen, key, changes) do - true = backend.delete(older_gen, key) - - backend.insert(newer_gen, entry) - end - end - end - - defp put_entries(meta_tab, backend, entries, chunk_size) 
when is_list(entries) do - do_put_entries(meta_tab, backend, entries, fn older_gen -> - keys = Enum.map(entries, fn entry(key: key) -> key end) - - do_delete_all(backend, older_gen, keys, chunk_size) - end) - end - - defp do_put_entries(meta_tab, backend, entry_or_entries, purge_fun) do - case list_gen(meta_tab) do - [newer_gen] -> - backend.insert(newer_gen, entry_or_entries) - - [newer_gen, older_gen] -> - _ = purge_fun.(older_gen) - - backend.insert(newer_gen, entry_or_entries) - end - end - - defp put_new_entries(meta_tab, backend, entries, chunk_size \\ 0) - - defp put_new_entries(meta_tab, backend, entry(key: key) = entry, _chunk_size) do - do_put_new_entries(meta_tab, backend, entry, fn newer_gen, older_gen -> - with true <- backend.insert_new(older_gen, entry) do - true = backend.delete(older_gen, key) - - backend.insert_new(newer_gen, entry) - end - end) - end - - defp put_new_entries(meta_tab, backend, entries, chunk_size) when is_list(entries) do - do_put_new_entries(meta_tab, backend, entries, fn newer_gen, older_gen -> - with true <- backend.insert_new(older_gen, entries) do - keys = Enum.map(entries, fn entry(key: key) -> key end) - - _ = do_delete_all(backend, older_gen, keys, chunk_size) - - backend.insert_new(newer_gen, entries) - end - end) - end - - defp do_put_new_entries(meta_tab, backend, entry_or_entries, purge_fun) do - case list_gen(meta_tab) do - [newer_gen] -> - backend.insert_new(newer_gen, entry_or_entries) - - [newer_gen, older_gen] -> - purge_fun.(newer_gen, older_gen) - end - end - - defp update_entry(meta_tab, backend, key, updates) do - case list_gen(meta_tab) do - [newer_gen] -> - backend.update_element(newer_gen, key, updates) - - [newer_gen, older_gen] -> - with false <- backend.update_element(newer_gen, key, updates), - [entry() = entry] <- backend.take(older_gen, key) do - entry = - Enum.reduce(updates, entry, fn - {3, value}, acc -> entry(acc, value: value) - {4, value}, acc -> entry(acc, touched: value) - {5, value}, acc -> 
entry(acc, exp: value) - end) - - backend.insert(newer_gen, entry) - else - [] -> false - other -> other - end - end - end - - defp do_delete_all(backend, tab, keys, chunk_size) do - do_delete_all(backend, tab, keys, chunk_size, 0) - end - - defp do_delete_all(backend, tab, [key], _chunk_size, deleted) do - true = backend.delete(tab, key) - - deleted + 1 - end - - defp do_delete_all(backend, tab, [k1, k2 | keys], chunk_size, deleted) do - k1 = if is_tuple(k1), do: {k1}, else: k1 - k2 = if is_tuple(k2), do: {k2}, else: k2 - - do_delete_all( - backend, - tab, - keys, - chunk_size, - deleted, - 2, - {:orelse, {:==, :"$1", k1}, {:==, :"$1", k2}} - ) - end - - defp do_delete_all(backend, tab, [], _chunk_size, deleted, _count, acc) do - backend.select_delete(tab, delete_all_match_spec(acc)) + deleted - end - - defp do_delete_all(backend, tab, keys, chunk_size, deleted, count, acc) - when count >= chunk_size do - deleted = backend.select_delete(tab, delete_all_match_spec(acc)) + deleted - - do_delete_all(backend, tab, keys, chunk_size, deleted) - end - - defp do_delete_all(backend, tab, [k | keys], chunk_size, deleted, count, acc) do - k = if is_tuple(k), do: {k}, else: k - - do_delete_all( - backend, - tab, - keys, - chunk_size, - deleted, - count + 1, - {:orelse, acc, {:==, :"$1", k}} - ) - end - - defp return({:ok, entry(value: value)}, :value) do - {:ok, value} - end - - defp return({:ok, entries}, :value) when is_list(entries) do - {:ok, for(entry(value: value) <- entries, do: value)} - end - - defp return(other, _field) do - other - end - - defp validate_match_spec(spec, opts) when spec in [nil, :unexpired, :expired] do - [ - { - entry(key: :"$1", value: :"$2", touched: :"$3", exp: :"$4"), - if(spec = comp_match_spec(spec), do: [spec], else: []), - ret_match_spec(opts) - } - ] - |> wrap_ok() - end - - defp validate_match_spec(spec, _opts) do - case :ets.test_ms(test_ms(), spec) do - {:ok, _result} -> - {:ok, spec} - - {:error, _result} -> - msg = """ - expected 
query to be one of: - - - `nil` - match all entries - - `:unexpired` - match only unexpired entries - - `:expired` - match only expired entries - - `{:in, list_of_keys}` - special form only available for delete_all - - `:ets.match_spec()` - ETS match spec - - but got: - - #{inspect(spec, pretty: true)} - """ - - wrap_error Nebulex.QueryError, message: msg, query: spec - end - end - - defp comp_match_spec(nil), - do: nil - - defp comp_match_spec(:unexpired), - do: {:orelse, {:==, :"$4", :infinity}, {:<, Time.now(), :"$4"}} - - defp comp_match_spec(:expired), - do: {:not, comp_match_spec(:unexpired)} - - defp ret_match_spec(opts) do - case Keyword.get(opts, :return, :key) do - :key -> [:"$1"] - :value -> [:"$2"] - {:key, :value} -> [{{:"$1", :"$2"}}] - :entry -> [%Entry{key: :"$1", value: :"$2", touched: :"$3", exp: :"$4"}] - end - end - - defp maybe_match_spec_return_true([{pattern, conds, _ret}], operation) - when operation in [:delete_all, :count_all] do - [{pattern, conds, [true]}] - end - - defp maybe_match_spec_return_true(match_spec, _operation) do - match_spec - end - - defp delete_all_match_spec(conds) do - [ - { - entry(key: :"$1", value: :"$2", touched: :"$3", exp: :"$4"), - [conds], - [true] - } - ] - end - - defp test_ms, do: entry(key: 1, value: 1, touched: Time.now(), exp: 1000) -end diff --git a/lib/nebulex/adapters/local/backend.ex b/lib/nebulex/adapters/local/backend.ex deleted file mode 100644 index 708ab405..00000000 --- a/lib/nebulex/adapters/local/backend.ex +++ /dev/null @@ -1,83 +0,0 @@ -defmodule Nebulex.Adapters.Local.Backend do - @moduledoc false - - @doc false - defmacro __using__(_opts) do - quote do - alias Nebulex.Adapters.Local.Generation - - defp generation_spec(opts) do - %{ - id: Module.concat([__MODULE__, GC]), - start: {Generation, :start_link, [opts]} - } - end - - defp sup_spec(children) do - %{ - id: Module.concat([__MODULE__, Supervisor]), - start: {Supervisor, :start_link, [children, [strategy: :one_for_all]]}, - type: 
:supervisor - } - end - - defp parse_opts(opts, extra \\ []) do - type = Keyword.fetch!(opts, :backend_type) - - compressed = - case Keyword.fetch!(opts, :compressed) do - true -> [:compressed] - false -> [] - end - - backend_opts = - [ - type, - :public, - {:keypos, 2}, - {:read_concurrency, Keyword.fetch!(opts, :read_concurrency)}, - {:write_concurrency, Keyword.fetch!(opts, :write_concurrency)}, - compressed, - extra - ] - |> List.flatten() - |> Enum.filter(&(&1 != :named_table)) - - Keyword.put(opts, :backend_opts, backend_opts) - end - end - end - - @doc """ - Helper function for returning the child spec for the given backend. - """ - def child_spec(backend, opts) do - backend - |> get_mod() - |> apply(:child_spec, [opts]) - end - - @doc """ - Helper function for creating a new table for the given backend. - """ - def new(backend, meta_tab, tab_opts) do - backend - |> get_mod() - |> apply(:new, [meta_tab, tab_opts]) - end - - @doc """ - Helper function for deleting a table for the given backend. 
- """ - def delete(backend, meta_tab, gen_tab) do - backend - |> get_mod() - |> apply(:delete, [meta_tab, gen_tab]) - end - - defp get_mod(:ets), do: Nebulex.Adapters.Local.Backend.ETS - - if Code.ensure_loaded?(:shards) do - defp get_mod(:shards), do: Nebulex.Adapters.Local.Backend.Shards - end -end diff --git a/lib/nebulex/adapters/local/backend/ets.ex b/lib/nebulex/adapters/local/backend/ets.ex deleted file mode 100644 index d552447d..00000000 --- a/lib/nebulex/adapters/local/backend/ets.ex +++ /dev/null @@ -1,25 +0,0 @@ -defmodule Nebulex.Adapters.Local.Backend.ETS do - @moduledoc false - use Nebulex.Adapters.Local.Backend - - ## API - - @doc false - def child_spec(opts) do - opts - |> parse_opts() - |> generation_spec() - |> List.wrap() - |> sup_spec() - end - - @doc false - def new(_meta_tab, tab_opts) do - :ets.new(__MODULE__, tab_opts) - end - - @doc false - def delete(_meta_tab, gen_tab) do - :ets.delete(gen_tab) - end -end diff --git a/lib/nebulex/adapters/local/backend/shards.ex b/lib/nebulex/adapters/local/backend/shards.ex deleted file mode 100644 index a0d097c2..00000000 --- a/lib/nebulex/adapters/local/backend/shards.ex +++ /dev/null @@ -1,82 +0,0 @@ -if Code.ensure_loaded?(:shards) do - defmodule Nebulex.Adapters.Local.Backend.Shards do - @moduledoc false - - defmodule __MODULE__.DynamicSupervisor do - @moduledoc false - use DynamicSupervisor - - alias Nebulex.Adapters.Local.Metadata - - ## API - - @doc false - def start_link(tab) do - DynamicSupervisor.start_link(__MODULE__, tab) - end - - ## DynamicSupervisor Callbacks - - @impl true - def init(meta_tab) do - :ok = Metadata.put(meta_tab, :shards_sup, self()) - - DynamicSupervisor.init(strategy: :one_for_one) - end - end - - use Nebulex.Adapters.Local.Backend - - alias Nebulex.Adapters.Local.Metadata - - ## API - - @doc false - def child_spec(opts) do - partitions = Keyword.get_lazy(opts, :partitions, &System.schedulers_online/0) - - meta_tab = - opts - |> Keyword.fetch!(:adapter_meta) - |> 
Map.fetch!(:meta_tab) - - sup_spec([ - {__MODULE__.DynamicSupervisor, meta_tab}, - generation_spec(parse_opts(opts, partitions: partitions)) - ]) - end - - @doc false - def new(meta_tab, tab_opts) do - {:ok, _pid, tab} = - meta_tab - |> Metadata.get(:shards_sup) - |> DynamicSupervisor.start_child(table_spec(tab_opts)) - - tab - end - - @doc false - def delete(meta_tab, gen_tab) do - meta_tab - |> Metadata.get(:shards_sup) - |> DynamicSupervisor.terminate_child(:shards_meta.tab_pid(gen_tab)) - end - - @doc false - def start_table(opts) do - tab = :shards.new(__MODULE__, opts) - pid = :shards_meta.tab_pid(tab) - - {:ok, pid, tab} - end - - defp table_spec(opts) do - %{ - id: __MODULE__, - start: {__MODULE__, :start_table, [opts]}, - type: :supervisor - } - end - end -end diff --git a/lib/nebulex/adapters/local/generation.ex b/lib/nebulex/adapters/local/generation.ex deleted file mode 100644 index eca46126..00000000 --- a/lib/nebulex/adapters/local/generation.ex +++ /dev/null @@ -1,568 +0,0 @@ -defmodule Nebulex.Adapters.Local.Generation do - @moduledoc """ - Generational garbage collection process. - - The generational garbage collector manage the heap as several sub-heaps, - known as generations, based on age of the objects. An object is allocated - in the youngest generation, sometimes called the nursery, and is promoted - to an older generation if its lifetime exceeds the threshold of its current - generation (defined by option `:gc_interval`). Every time the GC runs - (triggered by `:gc_interval` timeout), a new cache generation is created - and the oldest one is deleted. - - The only way to create new generations is through this module (this server - is the metadata owner) calling `new/2` function. When a Cache is created, - a generational garbage collector is attached to it automatically, - therefore, this server MUST NOT be started directly. 
- - ## Options - - These options are configured through the `Nebulex.Adapters.Local` adapter: - - * `:gc_interval` - If it is set, an integer > 0 is expected defining the - interval time in milliseconds to garbage collection to run, delete the - oldest generation and create a new one. If this option is not set, - garbage collection is never executed, so new generations must be - created explicitly, e.g.: `MyCache.new_generation(opts)`. - - * `:max_size` - If it is set, an integer > 0 is expected defining the - max number of cached entries (cache limit). If it is not set (`nil`), - the check to release memory is not performed (the default). - - * `:allocated_memory` - If it is set, an integer > 0 is expected defining - the max size in bytes for the cache storage. When this option is set - and the configured value is reached, a new cache generation is created - so the oldest is deleted and force releasing memory space. If it is not - set (`nil`), the cleanup check to release memory is not performed - (the default). - - * `:gc_cleanup_min_timeout` - An integer > 0 defining the min timeout in - milliseconds for triggering the next cleanup and memory check. This will - be the timeout to use when either the max size or max allocated memory - is reached. Defaults to `10_000` (10 seconds). - - * `:gc_cleanup_max_timeout` - An integer > 0 defining the max timeout in - milliseconds for triggering the next cleanup and memory check. This is - the timeout used when the cache starts and there are few entries or the - consumed memory is near to `0`. Defaults to `600_000` (10 minutes). 
- - """ - - # State - defstruct [ - :cache, - :name, - :telemetry, - :telemetry_prefix, - :meta_tab, - :backend, - :backend_opts, - :stats_counter, - :gc_interval, - :gc_heartbeat_ref, - :max_size, - :allocated_memory, - :gc_cleanup_min_timeout, - :gc_cleanup_max_timeout, - :gc_cleanup_ref - ] - - use GenServer - - import Nebulex.Helpers - - alias Nebulex.Adapter - alias Nebulex.Adapter.Stats - alias Nebulex.Adapters.Local.{Backend, Metadata, Options} - alias Nebulex.Telemetry - alias Nebulex.Telemetry.StatsHandler - - @type t :: %__MODULE__{} - @type server_ref :: pid | atom | :ets.tid() - @type opts :: Nebulex.Cache.opts() - - ## API - - @doc """ - Starts the garbage collector for the built-in local cache adapter. - """ - @spec start_link(opts) :: GenServer.on_start() - def start_link(opts) do - GenServer.start_link(__MODULE__, opts) - end - - @doc """ - Creates a new cache generation. Once the max number of generations - is reached, when a new generation is created, the oldest one is - deleted. - - ## Options - - * `:reset_timer` - Indicates if the poll frequency time-out should - be reset or not (default: true). - - ## Example - - Nebulex.Adapters.Local.Generation.new(MyCache) - - Nebulex.Adapters.Local.Generation.new(MyCache, reset_timer: false) - - """ - @spec new(server_ref, opts) :: [atom] - def new(server_ref, opts \\ []) do - # Validate options - opts = Options.validate!(opts) - - do_call(server_ref, {:new_generation, Keyword.fetch!(opts, :reset_timer)}) - end - - @doc """ - Removes or flushes all entries from the cache (including all its generations). - - ## Example - - Nebulex.Adapters.Local.Generation.delete_all(MyCache) - - """ - @spec delete_all(server_ref) :: :ok - def delete_all(server_ref) do - do_call(server_ref, :delete_all) - end - - @doc """ - Reallocates the block of memory that was previously allocated for the given - `server_ref` with the new `size`. In other words, reallocates the max memory - size for a cache generation. 
- - ## Example - - Nebulex.Adapters.Local.Generation.realloc(MyCache, 1_000_000) - - """ - @spec realloc(server_ref, pos_integer) :: :ok - def realloc(server_ref, size) do - do_call(server_ref, {:realloc, size}) - end - - @doc """ - Returns the memory info in a tuple form `{used_mem, total_mem}`. - - ## Example - - Nebulex.Adapters.Local.Generation.memory_info(MyCache) - - """ - @spec memory_info(server_ref) :: {used_mem :: non_neg_integer, total_mem :: non_neg_integer} - def memory_info(server_ref) do - do_call(server_ref, :memory_info) - end - - @doc """ - Resets the timer for pushing new cache generations. - - ## Example - - Nebulex.Adapters.Local.Generation.reset_timer(MyCache) - - """ - def reset_timer(server_ref) do - server_ref - |> server() - |> GenServer.cast(:reset_timer) - end - - @doc """ - Returns the list of the generations in the form `[newer, older]`. - - ## Example - - Nebulex.Adapters.Local.Generation.list(MyCache) - - """ - @spec list(server_ref) :: [:ets.tid()] - def list(server_ref) do - server_ref - |> get_meta_tab() - |> Metadata.get(:generations, []) - end - - @doc """ - Returns the newer generation. - - ## Example - - Nebulex.Adapters.Local.Generation.newer(MyCache) - - """ - @spec newer(server_ref) :: :ets.tid() - def newer(server_ref) do - server_ref - |> get_meta_tab() - |> Metadata.get(:generations, []) - |> hd() - end - - @doc """ - Returns the PID of the GC server for the given `server_ref`. - - ## Example - - Nebulex.Adapters.Local.Generation.server(MyCache) - - """ - @spec server(server_ref) :: pid - def server(server_ref) do - server_ref - |> get_meta_tab() - |> Metadata.fetch!(:gc_pid) - end - - @doc """ - A convenience function for retrieving the state. 
- """ - @spec get_state(server_ref) :: t - def get_state(server_ref) do - server_ref - |> server() - |> GenServer.call(:get_state) - end - - defp do_call(tab, message) do - tab - |> server() - |> GenServer.call(message) - end - - defp get_meta_tab(server_ref) when is_atom(server_ref) or is_pid(server_ref) do - unwrap_or_raise Adapter.with_meta(server_ref, & &1.meta_tab) - end - - defp get_meta_tab(server_ref), do: server_ref - - ## GenServer Callbacks - - @impl true - def init(opts) do - # Trap exit signals to run cleanup process - _ = Process.flag(:trap_exit, true) - - # Initial state - state = struct(__MODULE__, parse_opts(opts)) - - # Init cleanup timer - cleanup_ref = - if state.max_size || state.allocated_memory, - do: start_timer(state.gc_cleanup_max_timeout, nil, :cleanup) - - # Timer ref - {:ok, ref} = - if state.gc_interval, - do: {new_gen(state), start_timer(state.gc_interval)}, - else: {new_gen(state), nil} - - # Update state - state = %{state | gc_cleanup_ref: cleanup_ref, gc_heartbeat_ref: ref} - - {:ok, state, {:continue, :attach_stats_handler}} - end - - defp parse_opts(opts) do - # Get adapter metadata - adapter_meta = Keyword.fetch!(opts, :adapter_meta) - - # Add the GC PID to the meta table - meta_tab = Map.fetch!(adapter_meta, :meta_tab) - :ok = Metadata.put(meta_tab, :gc_pid, self()) - - gc_opts = - opts - |> Keyword.take([ - :gc_interval, - :max_size, - :allocated_memory, - :gc_cleanup_min_timeout, - :gc_cleanup_max_timeout, - :reset_timer - ]) - |> Options.validate!() - - Map.merge(adapter_meta, %{ - backend_opts: Keyword.get(opts, :backend_opts, []), - gc_interval: Keyword.get(gc_opts, :gc_interval), - max_size: Keyword.get(gc_opts, :max_size), - allocated_memory: Keyword.get(gc_opts, :allocated_memory), - gc_cleanup_min_timeout: Keyword.get(gc_opts, :gc_cleanup_min_timeout), - gc_cleanup_max_timeout: Keyword.get(gc_opts, :gc_cleanup_max_timeout) - }) - end - - @impl true - def handle_continue(:attach_stats_handler, %__MODULE__{stats_counter: 
nil} = state) do - {:noreply, state} - end - - def handle_continue(:attach_stats_handler, %__MODULE__{stats_counter: stats_counter} = state) do - _ = - Telemetry.attach_many( - stats_counter, - [state.telemetry_prefix ++ [:command, :stop]], - &StatsHandler.handle_event/4, - stats_counter - ) - - {:noreply, state} - end - - @impl true - def terminate(_reason, state) do - if ref = state.stats_counter, do: Telemetry.detach(ref) - end - - @impl true - def handle_call(:delete_all, _from, %__MODULE__{} = state) do - :ok = new_gen(state) - - :ok = - state.meta_tab - |> list() - |> Enum.each(&state.backend.delete_all_objects(&1)) - - {:reply, :ok, %{state | gc_heartbeat_ref: maybe_reset_timer(true, state)}} - end - - def handle_call({:new_generation, reset_timer?}, _from, state) do - # Create new generation - :ok = new_gen(state) - - # Maybe reset heartbeat timer - heartbeat_ref = maybe_reset_timer(reset_timer?, state) - - {:reply, :ok, %{state | gc_heartbeat_ref: heartbeat_ref}} - end - - def handle_call( - :memory_info, - _from, - %__MODULE__{backend: backend, meta_tab: meta_tab, allocated_memory: allocated} = state - ) do - {:reply, {memory_info(backend, meta_tab), allocated}, state} - end - - def handle_call({:realloc, mem_size}, _from, state) do - {:reply, :ok, %{state | allocated_memory: mem_size}} - end - - def handle_call(:get_state, _from, state) do - {:reply, state, state} - end - - @impl true - def handle_cast(:reset_timer, state) do - {:noreply, %{state | gc_heartbeat_ref: maybe_reset_timer(true, state)}} - end - - @impl true - def handle_info( - :heartbeat, - %__MODULE__{ - gc_interval: gc_interval, - gc_heartbeat_ref: heartbeat_ref - } = state - ) do - # Create new generation - :ok = new_gen(state) - - # Reset heartbeat timer - heartbeat_ref = start_timer(gc_interval, heartbeat_ref) - - {:noreply, %{state | gc_heartbeat_ref: heartbeat_ref}} - end - - def handle_info(:cleanup, state) do - # Check size first, if the cleanup is done, skip checking the memory, - 
# otherwise, check the memory too. - {_, state} = - with {false, state} <- check_size(state) do - check_memory(state) - end - - {:noreply, state} - end - - defp check_size(%__MODULE__{max_size: max_size} = state) when not is_nil(max_size) do - maybe_cleanup(:size, state) - end - - defp check_size(state) do - {false, state} - end - - defp check_memory(%__MODULE__{allocated_memory: allocated} = state) when not is_nil(allocated) do - maybe_cleanup(:memory, state) - end - - defp check_memory(state) do - {false, state} - end - - defp maybe_cleanup( - info, - %__MODULE__{ - cache: cache, - name: name, - gc_cleanup_ref: cleanup_ref, - gc_cleanup_min_timeout: min_timeout, - gc_cleanup_max_timeout: max_timeout, - gc_interval: gc_interval, - gc_heartbeat_ref: heartbeat_ref - } = state - ) do - case cleanup_info(info, state) do - {size, max_size} when size >= max_size -> - # Create a new generation - :ok = new_gen(state) - - # Purge expired entries - _ = cache.delete_all(:expired, dynamic_cache: name) - - # Reset the heartbeat timer - heartbeat_ref = start_timer(gc_interval, heartbeat_ref) - - # Reset the cleanup timer - cleanup_ref = - info - |> cleanup_info(state) - |> elem(0) - |> reset_cleanup_timer(max_size, min_timeout, max_timeout, cleanup_ref) - - {true, %{state | gc_heartbeat_ref: heartbeat_ref, gc_cleanup_ref: cleanup_ref}} - - {size, max_size} -> - # Reset the cleanup timer - cleanup_ref = reset_cleanup_timer(size, max_size, min_timeout, max_timeout, cleanup_ref) - - {false, %{state | gc_cleanup_ref: cleanup_ref}} - end - end - - defp cleanup_info(:size, %__MODULE__{backend: mod, meta_tab: tab, max_size: max}) do - {size_info(mod, tab), max} - end - - defp cleanup_info(:memory, %__MODULE__{backend: mod, meta_tab: tab, allocated_memory: max}) do - {memory_info(mod, tab), max} - end - - ## Private Functions - - defp new_gen(%__MODULE__{ - meta_tab: meta_tab, - backend: backend, - backend_opts: backend_opts, - stats_counter: stats_counter - }) do - # Create new 
generation - gen_tab = Backend.new(backend, meta_tab, backend_opts) - - # Update generation list - case list(meta_tab) do - [newer, older] -> - # Since the older generation is deleted, update evictions count - :ok = Stats.incr(stats_counter, :evictions, backend.info(older, :size)) - - # Update generations - :ok = Metadata.put(meta_tab, :generations, [gen_tab, newer]) - - # Process the older generation: - # - Delete previously stored deprecated generation - # - Flush the older generation - # - Deprecate it (mark it for deletion) - :ok = process_older_gen(meta_tab, backend, older) - - [newer] -> - # Update generations - :ok = Metadata.put(meta_tab, :generations, [gen_tab, newer]) - - [] -> - # update generations - :ok = Metadata.put(meta_tab, :generations, [gen_tab]) - end - end - - # The older generation cannot be removed immediately because there may be - # ongoing operations using it, then it may cause race-condition errors. - # Hence, the idea is to keep it alive till a new generation is pushed, but - # flushing its data before so that we release memory space. By the time a new - # generation is pushed, then it is safe to delete it completely. 
- defp process_older_gen(meta_tab, backend, older) do - if deprecated = Metadata.get(meta_tab, :deprecated) do - # Delete deprecated generation if it does exist - _ = Backend.delete(backend, meta_tab, deprecated) - end - - # Flush older generation to release space so it can be marked for deletion - true = backend.delete_all_objects(older) - - # Keep alive older generation reference into the metadata - Metadata.put(meta_tab, :deprecated, older) - end - - defp start_timer(time, ref \\ nil, event \\ :heartbeat) - - defp start_timer(nil, _, _) do - nil - end - - defp start_timer(time, ref, event) do - _ = if ref, do: Process.cancel_timer(ref) - - Process.send_after(self(), event, time) - end - - defp maybe_reset_timer(_, %__MODULE__{gc_interval: nil} = state) do - state.gc_heartbeat_ref - end - - defp maybe_reset_timer(false, state) do - state.gc_heartbeat_ref - end - - defp maybe_reset_timer(true, %__MODULE__{} = state) do - start_timer(state.gc_interval, state.gc_heartbeat_ref) - end - - defp reset_cleanup_timer(size, max_size, min_timeout, max_timeout, cleanup_ref) do - size - |> linear_inverse_backoff(max_size, min_timeout, max_timeout) - |> start_timer(cleanup_ref, :cleanup) - end - - defp size_info(backend, meta_tab) do - meta_tab - |> list() - |> Enum.reduce(0, &(backend.info(&1, :size) + &2)) - end - - defp memory_info(backend, meta_tab) do - meta_tab - |> list() - |> Enum.reduce(0, fn gen, acc -> - gen - |> backend.info(:memory) - |> Kernel.*(:erlang.system_info(:wordsize)) - |> Kernel.+(acc) - end) - end - - defp linear_inverse_backoff(size, _max_size, _min_timeout, max_timeout) when size <= 0 do - max_timeout - end - - defp linear_inverse_backoff(size, max_size, min_timeout, _max_timeout) when size >= max_size do - min_timeout - end - - defp linear_inverse_backoff(size, max_size, min_timeout, max_timeout) do - round((min_timeout - max_timeout) / max_size * size + max_timeout) - end -end diff --git a/lib/nebulex/adapters/local/metadata.ex 
b/lib/nebulex/adapters/local/metadata.ex deleted file mode 100644 index be232bb7..00000000 --- a/lib/nebulex/adapters/local/metadata.ex +++ /dev/null @@ -1,28 +0,0 @@ -defmodule Nebulex.Adapters.Local.Metadata do - @moduledoc false - - @type tab :: :ets.tid() | atom - - @spec init :: tab - def init do - :ets.new(__MODULE__, [:public, read_concurrency: true]) - end - - @spec get(tab, term, term) :: term - def get(tab, key, default \\ nil) do - :ets.lookup_element(tab, key, 2) - rescue - ArgumentError -> default - end - - @spec fetch!(tab, term) :: term - def fetch!(tab, key) do - :ets.lookup_element(tab, key, 2) - end - - @spec put(tab, term, term) :: :ok - def put(tab, key, value) do - true = :ets.insert(tab, {key, value}) - :ok - end -end diff --git a/lib/nebulex/adapters/local/options.ex b/lib/nebulex/adapters/local/options.ex deleted file mode 100644 index 5dac0085..00000000 --- a/lib/nebulex/adapters/local/options.ex +++ /dev/null @@ -1,132 +0,0 @@ -defmodule Nebulex.Adapters.Local.Options do - @moduledoc """ - Option definitions for the local adapter. - """ - use Nebulex.Cache.Options - - definition = - [ - gc_interval: [ - required: false, - type: :pos_integer, - doc: """ - The interval time in milliseconds to garbage collection to run, - delete the oldest generation and create a new one. - """ - ], - max_size: [ - required: false, - type: :pos_integer, - doc: """ - The max number of cached entries (cache limit). - """ - ], - allocated_memory: [ - required: false, - type: :pos_integer, - doc: """ - The max size in bytes for the cache storage. - """ - ], - gc_cleanup_min_timeout: [ - required: false, - type: :pos_integer, - default: 10_000, - doc: """ - The min timeout in milliseconds for triggering the next cleanup - and memory check. - """ - ], - gc_cleanup_max_timeout: [ - required: false, - type: :pos_integer, - default: 600_000, - doc: """ - The max timeout in milliseconds for triggering the next cleanup - and memory check. 
- """ - ], - reset_timer: [ - required: false, - type: :boolean, - default: true, - doc: """ - Whether the GC timer should be reset or not. - """ - ], - backend: [ - required: false, - type: {:in, [:ets, :shards]}, - default: :ets, - doc: """ - The backend or storage to be used for the adapter. - Supported backends are: `:ets` and `:shards`. - """ - ], - read_concurrency: [ - required: false, - type: :boolean, - default: true, - doc: """ - Since this adapter uses ETS tables internally, this option is used when - a new table is created; see `:ets.new/2`. - """ - ], - write_concurrency: [ - required: false, - type: :boolean, - default: true, - doc: """ - Since this adapter uses ETS tables internally, this option is used when - a new table is created; see `:ets.new/2`. - """ - ], - compressed: [ - required: false, - type: :boolean, - default: false, - doc: """ - Since this adapter uses ETS tables internally, this option is used when - a new table is created; see `:ets.new/2`. - """ - ], - backend_type: [ - required: false, - type: {:in, [:set, :ordered_set, :bag, :duplicate_bag]}, - default: :set, - doc: """ - This option defines the type of ETS to be used internally when - a new table is created; see `:ets.new/2`. - """ - ], - partitions: [ - required: false, - type: :pos_integer, - doc: """ - This option is only available for `:shards` backend and defines - the number of partitions to use. - """ - ], - backend_opts: [ - required: false, - doc: """ - This option is built internally for creating the ETS tables - used by the local adapter underneath. - """ - ], - purge_chunk_size: [ - required: false, - type: :pos_integer, - default: 100, - doc: """ - This option is for limiting the max nested match specs based on number - of keys when purging the older cache generation. 
- """ - ] - ] ++ base_definition() - - @definition NimbleOptions.new!(definition) - - @doc false - def definition, do: @definition -end diff --git a/lib/nebulex/adapters/multilevel.ex b/lib/nebulex/adapters/multilevel.ex deleted file mode 100644 index 80faefee..00000000 --- a/lib/nebulex/adapters/multilevel.ex +++ /dev/null @@ -1,649 +0,0 @@ -defmodule Nebulex.Adapters.Multilevel do - @moduledoc ~S""" - Adapter module for Multi-level Cache. - - This is just a simple layer on top of local or distributed cache - implementations that enables to have a cache hierarchy by levels. - Multi-level caches generally operate by checking the fastest, - level 1 (L1) cache first; if it hits, the adapter proceeds at - high speed. If that first cache misses, the next fastest cache - (level 2, L2) is checked, and so on, before accessing external - memory (that can be handled by a `cacheable` decorator). - - For write functions, the "Write Through" policy is applied by default; - this policy ensures that the data is stored safely as it is written - throughout the hierarchy. However, it is possible to force the write - operation in a specific level (although it is not recommended) via - `level` option, where the value is a positive integer greater than 0. 
- - We can define a multi-level cache as follows: - - defmodule MyApp.Multilevel do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Multilevel - - defmodule L1 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule L2 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - end - end - - Where the configuration for the cache and its levels must be in your - application environment, usually defined in your `config/config.exs`: - - config :my_app, MyApp.Multilevel, - model: :inclusive, - levels: [ - { - MyApp.Multilevel.L1, - gc_interval: :timer.hours(12), - backend: :shards - }, - { - MyApp.Multilevel.L2, - primary: [ - gc_interval: :timer.hours(12), - backend: :shards - ] - } - ] - - If your application was generated with a supervisor (by passing `--sup` - to `mix new`) you will have a `lib/my_app/application.ex` file containing - the application start callback that defines and starts your supervisor. - You just need to edit the `start/2` function to start the cache as a - supervisor on your application's supervisor: - - def start(_type, _args) do - children = [ - {MyApp.Multilevel, []}, - ... - ] - - See `Nebulex.Cache` for more information. - - ## Options - - This adapter supports the following options and all of them can be given via - the cache configuration: - - * `:levels` - This option is to define the levels, a list of tuples - `{cache_level :: Nebulex.Cache.t(), opts :: keyword}`, where - the first element is the module that defines the cache for that - level, and the second one is the options that will be passed to - that level in the `start/link/1` (which depends on the adapter - this level is using). The order in which the levels are defined - is the same the multi-level cache will use. For example, the first - cache in the list will be the L1 cache (level 1) and so on; - the Nth element will be the LN cache. 
This option is mandatory, - if it is not set or empty, an exception will be raised. - - * `:model` - Specifies the cache model: `:inclusive` or `:exclusive`; - defaults to `:inclusive`. In an inclusive cache, the same data can be - present in all caches/levels. In an exclusive cache, data can be present - in only one cache/level and a key cannot be found in the rest of caches - at the same time. This option affects `get` operation only; if - `:cache_model` is `:inclusive`, when the key is found in a level N, - that entry is duplicated backwards (to all previous levels: 1..N-1). - - ## Shared options - - Almost all of the cache functions outlined in `Nebulex.Cache` module - accept the following options: - - * `:level` - It may be an integer greater than 0 that specifies the cache - level where the operation will take place. By default, the evaluation - is performed throughout the whole cache hierarchy (all levels). - - ## Telemetry events - - This adapter emits all recommended Telemetry events, and documented - in `Nebulex.Cache` module (see **"Adapter-specific events"** section). - - Since the multi-level adapter is a layer/wrapper on top of other existing - adapters, each cache level may Telemetry emit events independently. 
- For example, for the cache defined before `MyApp.Multilevel`, the next - events will be emitted for the main multi-level cache: - - * `[:my_app, :multilevel, :command, :start]` - * `[:my_app, :multilevel, :command, :stop]` - * `[:my_app, :multilevel, :command, :exception]` - - For the L1 (configured with the local adapter): - - * `[:my_app, :multilevel, :l1, :command, :start]` - * `[:my_app, :multilevel, :l1, :command, :stop]` - * `[:my_app, :multilevel, :l1, :command, :exception]` - - For the L2 (configured with the partitioned adapter): - - * `[:my_app, :multilevel, :l2, :command, :start]` - * `[:my_app, :multilevel, :l2, :primary, :command, :start]` - * `[:my_app, :multilevel, :l2, :command, :stop]` - * `[:my_app, :multilevel, :l2, :primary, :command, :stop]` - * `[:my_app, :multilevel, :l2, :command, :exception]` - * `[:my_app, :multilevel, :l2, :primary, :command, :exception]` - - See also the [Telemetry guide](http://hexdocs.pm/nebulex/telemetry.html) - for more information and examples. - - ## Stats - - Since the multi-level adapter works as a wrapper for the configured cache - levels, the support for stats depends on the underlying levels. Also, the - measurements are consolidated per level, they are not aggregated. For example, - if we enable the stats for the multi-level cache defined previously and run: - - MyApp.Multilevel.stats() - - The returned stats will look like: - - %Nebulex.Stats{ - measurements: %{ - l1: %{evictions: 0, expirations: 0, hits: 0, misses: 0, writes: 0}, - l2: %{evictions: 0, expirations: 0, hits: 0, misses: 0, writes: 0} - }, - metadata: %{ - l1: %{ - cache: NMyApp.Multilevel.L1, - started_at: ~U[2021-01-10 13:06:04.075084Z] - }, - l2: %{ - cache: MyApp.Multilevel.L2.Primary, - started_at: ~U[2021-01-10 13:06:04.089888Z] - }, - cache: MyApp.Multilevel, - started_at: ~U[2021-01-10 13:06:04.066750Z] - } - } - - **IMPORTANT:** Those cache levels with stats disabled won't be included - into the returned stats (they are skipped). 
If a cache level is using - an adapter that does not support stats, you may get unexpected errors. - Therefore, and as overall recommendation, check out the documentation - for adapters used by the underlying cache levels and ensure they - implement the `Nebulex.Adapter.Stats` behaviour. - - ### Stats with Telemetry - - In case you are using Telemetry metrics, you can define the metrics per - level, for example: - - last_value("nebulex.cache.stats.l1.hits", - event_name: "nebulex.cache.stats", - measurement: &get_in(&1, [:l1, :hits]), - tags: [:cache] - ) - last_value("nebulex.cache.stats.l1.misses", - event_name: "nebulex.cache.stats", - measurement: &get_in(&1, [:l1, :misses]), - tags: [:cache] - ) - - > See the section **"Instrumenting Multi-level caches"** in the - [Telemetry guide](http://hexdocs.pm/nebulex/telemetry.html) - for more information. - - ## Extended API - - This adapter provides one additional convenience function for retrieving - the cache model for the given cache `name`: - - MyCache.model() - MyCache.model(:cache_name) - - ## Caveats of multi-level adapter - - Because this adapter reuses other existing/configured adapters, it inherits - all their limitations too. Therefore, it is highly recommended to check the - documentation of the adapters to use. - """ - - # Provide Cache Implementation - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - @behaviour Nebulex.Adapter.Stats - - # Inherit default transaction implementation - use Nebulex.Adapter.Transaction - - import Nebulex.Adapter - import Nebulex.Helpers - - alias Nebulex.Cache.Cluster - - ## Nebulex.Adapter - - @impl true - defmacro __before_compile__(_env) do - quote do - @doc """ - A convenience function to get the cache model. 
- """ - def model(name \\ __MODULE__) do - with_meta(name, & &1.model) - end - end - end - - @impl true - def init(opts) do - # Validate options - opts = __MODULE__.Options.validate!(opts) - - # Required options - telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) - telemetry = Keyword.fetch!(opts, :telemetry) - cache = Keyword.fetch!(opts, :cache) - name = opts[:name] || cache - - # Maybe use stats - stats = Keyword.fetch!(opts, :stats) - - # Get cache levels - levels = Keyword.fetch!(opts, :levels) - - # Get multilevel-cache model - model = Keyword.fetch!(opts, :model) - - # Build multi-level specs - {children, meta_list, _} = children(levels, telemetry_prefix, telemetry, stats) - - # Build adapter spec - child_spec = - Nebulex.Adapters.Supervisor.child_spec( - name: normalize_module_name([name, Supervisor]), - strategy: :one_for_one, - children: children - ) - - adapter_meta = %{ - telemetry_prefix: telemetry_prefix, - telemetry: telemetry, - name: name, - levels: meta_list, - model: model, - stats: stats, - started_at: DateTime.utc_now() - } - - {:ok, child_spec, adapter_meta} - end - - # sobelow_skip ["DOS.BinToAtom"] - defp children(levels, telemetry_prefix, telemetry, stats) do - levels - |> Enum.reverse() - |> Enum.reduce({[], [], length(levels)}, fn {l_cache, l_opts}, {child_acc, meta_acc, n} -> - l_opts = - Keyword.merge( - [ - telemetry_prefix: telemetry_prefix ++ [:"l#{n}"], - telemetry: telemetry, - stats: stats - ], - l_opts - ) - - meta = %{cache: l_cache, name: l_opts[:name]} - - {[{l_cache, l_opts} | child_acc], [meta | meta_acc], n - 1} - end) - end - - ## Nebulex.Adapter.Entry - - @impl true - defspan fetch(adapter_meta, key, opts) do - default = wrap_error Nebulex.KeyError, key: key, cache: adapter_meta.name - - fun = fn level, {default, prev} -> - case with_dynamic_cache(level, :fetch, [key, opts]) do - {:ok, _} = ok -> - {:halt, {ok, [level | prev]}} - - {:error, %Nebulex.KeyError{}} -> - {:cont, {default, [level | prev]}} - - {:error, 
_} = error -> - {:halt, {error, [level | prev]}} - end - end - - opts - |> levels(adapter_meta.levels) - |> Enum.reduce_while({default, []}, fun) - |> maybe_replicate(key, adapter_meta.model) - end - - @impl true - defspan get_all(adapter_meta, keys, opts) do - fun = fn level, {{:ok, map_acc}, keys_acc} -> - case with_dynamic_cache(level, :get_all, [keys_acc, opts]) do - {:ok, map} -> - map_acc = Map.merge(map_acc, map) - - case keys_acc -- Map.keys(map) do - [] -> {:halt, {{:ok, map_acc}, []}} - keys_acc -> {:cont, {{:ok, map_acc}, keys_acc}} - end - - {:error, _} = error -> - {:halt, {error, keys_acc}} - end - end - - opts - |> levels(adapter_meta.levels) - |> Enum.reduce_while({{:ok, %{}}, keys}, fun) - |> elem(0) - end - - @impl true - defspan put(adapter_meta, key, value, _ttl, on_write, opts) do - case on_write do - :put -> - with :ok <- eval(adapter_meta, :put, [key, value, opts], opts) do - {:ok, true} - end - - :put_new -> - eval(adapter_meta, :put_new, [key, value, opts], opts) - - :replace -> - eval(adapter_meta, :replace, [key, value, opts], opts) - end - end - - @impl true - defspan put_all(adapter_meta, entries, _ttl, on_write, opts) do - action = if on_write == :put_new, do: :put_new_all, else: :put_all - - reducer = fn level, {_, level_acc} -> - case with_dynamic_cache(level, action, [entries, opts]) do - :ok -> - {:cont, {{:ok, true}, [level | level_acc]}} - - {:ok, true} -> - {:cont, {{:ok, true}, [level | level_acc]}} - - {:ok, false} -> - _ = delete_from_levels(level_acc, entries) - {:halt, {{:ok, false}, level_acc}} - - {:error, _} = error -> - _ = delete_from_levels(level_acc, entries) - {:halt, {error, level_acc}} - end - end - - opts - |> levels(adapter_meta.levels) - |> Enum.reduce_while({true, []}, reducer) - |> elem(0) - end - - @impl true - defspan delete(adapter_meta, key, opts) do - eval(adapter_meta, :delete, [key, opts], Keyword.put(opts, :reverse, true)) - end - - @impl true - defspan take(adapter_meta, key, opts) do - default = 
wrap_error Nebulex.KeyError, key: key, cache: adapter_meta.name - - opts - |> levels(adapter_meta.levels) - |> do_take(default, key, opts) - end - - defp do_take([], result, _key, _opts), do: result - - defp do_take([l_meta | rest], {:error, %Nebulex.KeyError{}}, key, opts) do - result = with_dynamic_cache(l_meta, :take, [key, opts]) - - do_take(rest, result, key, opts) - end - - defp do_take(levels, result, key, _opts) do - _ = eval(levels, :delete, [key, []], reverse: true) - - result - end - - @impl true - defspan has_key?(adapter_meta, key, opts) do - Enum.reduce_while(adapter_meta.levels, {:ok, false}, fn l_meta, acc -> - case with_dynamic_cache(l_meta, :has_key?, [key, opts]) do - {:ok, true} -> {:halt, {:ok, true}} - {:ok, false} -> {:cont, acc} - {:error, _} = error -> {:halt, error} - end - end) - end - - @impl true - defspan update_counter(adapter_meta, key, amount, _ttl, _default, opts) do - eval(adapter_meta, :incr, [key, amount, opts], opts) - end - - @impl true - defspan ttl(adapter_meta, key, opts) do - default = wrap_error Nebulex.KeyError, key: key, cache: adapter_meta.name - - Enum.reduce_while(adapter_meta.levels, default, fn l_meta, acc -> - case with_dynamic_cache(l_meta, :ttl, [key, opts]) do - {:ok, _} = ok -> {:halt, ok} - {:error, %Nebulex.KeyError{}} -> {:cont, acc} - {:error, _} = error -> {:halt, error} - end - end) - end - - @impl true - defspan expire(adapter_meta, key, ttl, opts) do - eval_while(adapter_meta, :expire, [key, ttl, opts], {:ok, false}, &(&1 or &2)) - end - - @impl true - defspan touch(adapter_meta, key, opts) do - eval_while(adapter_meta, :touch, [key, opts], {:ok, false}, &(&1 or &2)) - end - - ## Nebulex.Adapter.Queryable - - @impl true - defspan execute(adapter_meta, operation, query, opts) do - do_execute(adapter_meta.levels, operation, query, opts) - end - - defp do_execute(levels, operation, query, opts) do - {levels, reducer, acc_in} = - case operation do - :all -> {levels, &(&1 ++ &2), []} - :delete_all -> 
{Enum.reverse(levels), &(&1 + &2), 0} - _ -> {levels, &(&1 + &2), 0} - end - - Enum.reduce_while(levels, {:ok, acc_in}, fn level, {:ok, acc} -> - case with_dynamic_cache(level, operation, [query, opts]) do - {:ok, result} -> {:cont, {:ok, reducer.(result, acc)}} - {:error, _} = error -> {:halt, error} - end - end) - end - - @impl true - defspan stream(adapter_meta, query, opts) do - Stream.resource( - fn -> - adapter_meta.levels - end, - fn - [] -> - {:halt, []} - - [level | levels] -> - elements = - level - |> with_dynamic_cache(:stream!, [query, opts]) - |> Enum.to_list() - - {elements, levels} - end, - & &1 - ) - |> wrap_ok() - end - - ## Nebulex.Adapter.Transaction - - @impl true - defspan transaction(adapter_meta, opts, fun) do - # Perhaps one of the levels is a distributed adapter, - # then ensure the lock on the right cluster nodes. - nodes = - adapter_meta.levels - |> Enum.reduce([node()], fn %{name: name, cache: cache}, acc -> - if cache.__adapter__ in [Nebulex.Adapters.Partitioned, Nebulex.Adapters.Replicated] do - Cluster.get_nodes(name || cache) ++ acc - else - acc - end - end) - |> Enum.uniq() - - super(adapter_meta, Keyword.put(opts, :nodes, nodes), fun) - end - - @impl true - defspan in_transaction?(adapter_meta) do - super(adapter_meta) - end - - ## Nebulex.Adapter.Stats - - @impl true - defspan stats(adapter_meta) do - if adapter_meta.stats do - init_acc = %Nebulex.Stats{ - metadata: %{ - cache: adapter_meta.name || adapter_meta.cache, - started_at: adapter_meta.started_at - } - } - - adapter_meta.levels - |> Enum.with_index(1) - |> Enum.reduce_while({:ok, init_acc}, &update_stats/2) - else - wrap_error Nebulex.Error, - reason: {:stats_error, adapter_meta[:name] || adapter_meta[:cache]} - end - end - - # We can safely disable this warning since the atom created dynamically is - # always re-used; the number of levels is limited and known before hand. 
- # sobelow_skip ["DOS.BinToAtom"] - defp update_stats({meta, idx}, {:ok, stats_acc}) do - case with_dynamic_cache(meta, :stats, []) do - {:ok, stats} -> - level_idx = :"l#{idx}" - measurements = Map.put(stats_acc.measurements, level_idx, stats.measurements) - metadata = Map.put(stats_acc.metadata, level_idx, stats.metadata) - - {:cont, {:ok, %{stats_acc | measurements: measurements, metadata: metadata}}} - - {:error, %Nebulex.Error{reason: {:stats_error, _}}} -> - {:cont, {:ok, stats_acc}} - - {:error, _} = error -> - {:halt, error} - end - end - - ## Helpers - - defp with_dynamic_cache(%{cache: cache, name: nil}, action, args) do - apply(cache, action, args) - end - - defp with_dynamic_cache(%{cache: cache, name: name}, action, args) do - cache.with_dynamic_cache(name, fn -> - apply(cache, action, args) - end) - end - - defp eval(%{levels: levels}, fun, args, opts) do - eval(levels, fun, args, opts) - end - - defp eval(levels, fun, args, opts) when is_list(levels) do - opts - |> levels(levels) - |> eval(fun, args) - end - - defp eval([level_meta | next], fun, args) do - Enum.reduce_while(next, with_dynamic_cache(level_meta, fun, args), fn - _l_meta, {:error, _} = error -> - {:halt, error} - - l_meta, ok -> - {:cont, ^ok = with_dynamic_cache(l_meta, fun, args)} - end) - end - - defp eval_while(%{levels: levels}, fun, args, init, reducer) do - Enum.reduce_while(levels, init, fn l_meta, {:ok, acc} -> - case with_dynamic_cache(l_meta, fun, args) do - {:ok, bool} -> {:cont, {:ok, reducer.(bool, acc)}} - {:error, _} = error -> {:halt, error} - end - end) - end - - defp levels(opts, levels) do - levels = - case Keyword.get(opts, :level) do - nil -> levels - level -> [Enum.at(levels, level - 1)] - end - - if Keyword.get(opts, :reverse) do - Enum.reverse(levels) - else - levels - end - end - - defp delete_from_levels(levels, entries) do - for level_meta <- levels, {key, _} <- entries do - with_dynamic_cache(level_meta, :delete, [key, []]) - end - end - - defp 
maybe_replicate({{:ok, value} = ok, [level_meta | [_ | _] = levels]}, key, :inclusive) do - with {:ok, ttl} <- with_dynamic_cache(level_meta, :ttl, [key]) do - :ok = - Enum.each(levels, fn l_meta -> - _ = with_dynamic_cache(l_meta, :put, [key, value, [ttl: ttl]]) - end) - - ok - end - end - - defp maybe_replicate({result, _levels}, _key, _model) do - result - end -end diff --git a/lib/nebulex/adapters/multilevel/options.ex b/lib/nebulex/adapters/multilevel/options.ex deleted file mode 100644 index 99aca7d0..00000000 --- a/lib/nebulex/adapters/multilevel/options.ex +++ /dev/null @@ -1,30 +0,0 @@ -defmodule Nebulex.Adapters.Multilevel.Options do - @moduledoc """ - Option definitions for the multilevel adapter. - """ - use Nebulex.Cache.Options - - definition = - [ - levels: [ - required: true, - type: :non_empty_keyword_list, - doc: """ - The list of the cache levels. - """ - ], - model: [ - required: false, - type: {:in, [:inclusive, :exclusive]}, - default: :inclusive, - doc: """ - Specifies the cache model: `:inclusive` or `:exclusive`. 
- """ - ] - ] ++ base_definition() - - @definition NimbleOptions.new!(definition) - - @doc false - def definition, do: @definition -end diff --git a/lib/nebulex/adapters/nil.ex b/lib/nebulex/adapters/nil.ex index a13cac27..234d3bc9 100644 --- a/lib/nebulex/adapters/nil.ex +++ b/lib/nebulex/adapters/nil.ex @@ -62,7 +62,7 @@ defmodule Nebulex.Adapters.Nil do # Provide Cache Implementation @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry + @behaviour Nebulex.Adapter.KV @behaviour Nebulex.Adapter.Queryable @behaviour Nebulex.Adapter.Persistence @behaviour Nebulex.Adapter.Stats @@ -79,12 +79,12 @@ defmodule Nebulex.Adapters.Nil do @impl true def init(_opts) do - child_spec = Supervisor.child_spec({Agent, fn -> :ok end}, id: {Agent, 1}) + child_spec = Supervisor.child_spec({Agent, fn -> :ok end}, id: Agent) {:ok, child_spec, %{}} end - ## Nebulex.Adapter.Entry + ## Nebulex.Adapter.KV @impl true def fetch(adapter_meta, key, _) do diff --git a/lib/nebulex/adapters/partitioned.ex b/lib/nebulex/adapters/partitioned.ex deleted file mode 100644 index 37cdaddf..00000000 --- a/lib/nebulex/adapters/partitioned.ex +++ /dev/null @@ -1,787 +0,0 @@ -defmodule Nebulex.Adapters.Partitioned do - @moduledoc ~S""" - Built-in adapter for partitioned cache topology. - - ## Overall features - - * Partitioned cache topology (Sharding Distribution Model). - * Configurable primary storage adapter. - * Configurable Keyslot to distributed the keys across the cluster members. - * Support for transactions via Erlang global name registration facility. - * Stats support rely on the primary storage adapter. - - ## Partitioned Cache Topology - - There are several key points to consider about a partitioned cache: - - * _**Partitioned**_: The data in a distributed cache is spread out over - all the servers in such a way that no two servers are responsible for - the same piece of cached data. 
This means that the size of the cache - and the processing power associated with the management of the cache - can grow linearly with the size of the cluster. Also, it means that - operations against data in the cache can be accomplished with a - "single hop," in other words, involving at most one other server. - - * _**Load-Balanced**_: Since the data is spread out evenly over the - servers, the responsibility for managing the data is automatically - load-balanced across the cluster. - - * _**Ownership**_: Exactly one node in the cluster is responsible for each - piece of data in the cache. - - * _**Point-To-Point**_: The communication for the partitioned cache is all - point-to-point, enabling linear scalability. - - * _**Location Transparency**_: Although the data is spread out across - cluster nodes, the exact same API is used to access the data, and the - same behavior is provided by each of the API methods. This is called - location transparency, which means that the developer does not have to - code based on the topology of the cache, since the API and its behavior - will be the same with a local cache, a replicated cache, or a distributed - cache. - - * _**Failover**_: Failover of a distributed cache involves promoting backup - data to be primary storage. When a cluster node fails, all remaining - cluster nodes determine what data each holds in backup that the failed - cluster node had primary responsible for when it died. Those data becomes - the responsibility of whatever cluster node was the backup for the data. - However, this adapter does not provide fault-tolerance implementation, - each piece of data is kept in a single node/machine (via sharding), then, - if a node fails, the data kept by this node won't be available for the - rest of the cluster members. - - > Based on **"Distributed Caching Essential Lessons"** by **Cameron Purdy** - and [Coherence Partitioned Cache Service][oracle-pcs]. 
- - [oracle-pcs]: https://docs.oracle.com/cd/E13924_01/coh.340/e13819/partitionedcacheservice.htm - - ## Additional implementation notes - - `:pg2` or `:pg` (>= OTP 23) is used under-the-hood by the adapter to manage - the cluster nodes. When the partitioned cache is started in a node, it creates - a group and joins it (the cache supervisor PID is joined to the group). Then, - when a function is invoked, the adapter picks a node from the group members, - and then the function is executed on that specific node. In the same way, - when a partitioned cache supervisor dies (the cache is stopped or killed for - some reason), the PID of that process is automatically removed from the PG - group; this is why it's recommended to use consistent hashing for distributing - the keys across the cluster nodes. - - > **NOTE:** `pg2` will be replaced by `pg` in future, since the `pg2` module - is deprecated as of OTP 23 and scheduled for removal in OTP 24. - - This adapter depends on a local cache adapter (primary storage), it adds - a thin layer on top of it in order to distribute requests across a group - of nodes, where is supposed the local cache is running already. However, - you don't need to define any additional cache module for the primary - storage, instead, the adapter initializes it automatically (it adds the - primary storage as part of the supervision tree) based on the given - options within the `primary_storage_adapter:` argument. - - ## Usage - - When used, the Cache expects the `:otp_app` and `:adapter` as options. - The `:otp_app` should point to an OTP application that has the cache - configuration. For example: - - defmodule MyApp.PartitionedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Partitioned - end - - Optionally, you can configure the desired primary storage adapter with the - option `:primary_storage_adapter`; defaults to `Nebulex.Adapters.Local`. 
- - defmodule MyApp.PartitionedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Partitioned, - primary_storage_adapter: Nebulex.Adapters.Local - end - - Also, you can provide a custom keyslot function: - - defmodule MyApp.PartitionedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Partitioned, - primary_storage_adapter: Nebulex.Adapters.Local - - @behaviour Nebulex.Adapter.Keyslot - - @impl true - def hash_slot(key, range) do - key - |> :erlang.phash2() - |> :jchash.compute(range) - end - end - - Where the configuration for the cache must be in your application environment, - usually defined in your `config/config.exs`: - - config :my_app, MyApp.PartitionedCache, - keyslot: MyApp.PartitionedCache, - primary: [ - gc_interval: 3_600_000, - backend: :shards - ] - - If your application was generated with a supervisor (by passing `--sup` - to `mix new`) you will have a `lib/my_app/application.ex` file containing - the application start callback that defines and starts your supervisor. - You just need to edit the `start/2` function to start the cache as a - supervisor on your application's supervisor: - - def start(_type, _args) do - children = [ - {MyApp.PartitionedCache, []}, - ... - ] - - See `Nebulex.Cache` for more information. - - ## Options - - This adapter supports the following options and all of them can be given via - the cache configuration: - - * `:primary` - The options that will be passed to the adapter associated - with the local primary storage. These options will depend on the local - adapter to use. - - * `:keyslot` - Defines the module implementing `Nebulex.Adapter.Keyslot` - behaviour. - - * `:join_timeout` - Interval time in milliseconds for joining the - running partitioned cache to the cluster. This is to ensure it is - always joined. Defaults to `:timer.seconds(180)`. 
- - ## Shared options - - Almost all of the cache functions outlined in `Nebulex.Cache` module - accept the following options: - - * `:timeout` - The time-out value in milliseconds for the command that - will be executed. If the timeout is exceeded, then the current process - will exit. For executing a command on remote nodes, this adapter uses - `Task.await/2` internally for receiving the result, so this option tells - how much time the adapter should wait for it. If the timeout is exceeded, - the task is shut down but the current process doesn't exit, only the - result associated with that task is skipped in the reduce phase. - - ## Telemetry events - - This adapter emits all recommended Telemetry events, and documented - in `Nebulex.Cache` module (see **"Adapter-specific events"** section). - - Since the partitioned adapter depends on the configured primary storage - adapter (local cache adapter), this one may also emit Telemetry events. - Therefore, there will be events emitted by the partitioned adapter as well - as the primary storage adapter. For example, for the cache defined before - `MyApp.PartitionedCache`, these would be the emitted events: - - * `[:my_app, :partitioned_cache, :command, :start]` - * `[:my_app, :partitioned_cache, :primary, :command, :start]` - * `[:my_app, :partitioned_cache, :command, :stop]` - * `[:my_app, :partitioned_cache, :primary, :command, :stop]` - * `[:my_app, :partitioned_cache, :command, :exception]` - * `[:my_app, :partitioned_cache, :primary, :command, :exception]` - - As you may notice, the telemetry prefix by default for the partitioned cache - is `[:my_app, :partitioned_cache]`, and the prefix for its primary storage - `[:my_app, :partitioned_cache, :primary]`. - - See also the [Telemetry guide](http://hexdocs.pm/nebulex/telemetry.html) - for more information and examples. 
- - ## Adapter-specific telemetry events - - This adapter exposes following Telemetry events: - - * `telemetry_prefix ++ [:bootstrap, :started]` - Dispatched by the adapter - when the bootstrap process is started. - - * Measurements: `%{system_time: non_neg_integer}` - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - cluster_nodes: [node] - } - ``` - - * `telemetry_prefix ++ [:bootstrap, :stopped]` - Dispatched by the adapter - when the bootstrap process is stopped. - - * Measurements: `%{system_time: non_neg_integer}` - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - cluster_nodes: [node], - reason: term - } - ``` - - * `telemetry_prefix ++ [:bootstrap, :exit]` - Dispatched by the adapter - when the bootstrap has received an exit signal. - - * Measurements: `%{system_time: non_neg_integer}` - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - cluster_nodes: [node], - reason: term - } - ``` - - * `telemetry_prefix ++ [:bootstrap, :joined]` - Dispatched by the adapter - when the bootstrap has joined the cache to the cluster. - - * Measurements: `%{system_time: non_neg_integer}` - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - cluster_nodes: [node] - } - ``` - - ## Stats - - This adapter depends on the primary storage adapter for the stats support. - Therefore, it is important to ensure the underlying primary storage adapter - does support stats, otherwise, you may get unexpected errors. - - ## Extended API - - This adapter provides some additional convenience functions to the - `Nebulex.Cache` API. 
- - Retrieving the primary storage or local cache module: - - MyCache.__primary__() - - Retrieving the cluster nodes associated with the given cache `name`: - - MyCache.nodes() - - Get a cluster node based on the given `key`: - - MyCache.get_node("mykey") - - Joining the cache to the cluster: - - MyCache.join_cluster() - - Leaving the cluster (removes the cache from the cluster): - - MyCache.leave_cluster() - - ## Caveats of partitioned adapter - - For `c:Nebulex.Cache.get_and_update/3` and `c:Nebulex.Cache.update/4`, - they both have a parameter that is the anonymous function, and it is compiled - into the module where it is created, which means it necessarily doesn't exists - on remote nodes. To ensure they work as expected, you must provide functions - from modules existing in all nodes of the group. - """ - - # Provide Cache Implementation - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - @behaviour Nebulex.Adapter.Stats - - # Inherit default transaction implementation - use Nebulex.Adapter.Transaction - - # Inherit default persistence implementation - use Nebulex.Adapter.Persistence - - # Inherit default keyslot implementation - use Nebulex.Adapter.Keyslot - - import Nebulex.Adapter - import Nebulex.Helpers - - alias Nebulex.Cache.Cluster - alias Nebulex.RPC - - ## Nebulex.Adapter - - @impl true - defmacro __before_compile__(env) do - otp_app = Module.get_attribute(env.module, :otp_app) - opts = Module.get_attribute(env.module, :opts) - primary = Keyword.get(opts, :primary_storage_adapter, Nebulex.Adapters.Local) - - quote do - defmodule Primary do - @moduledoc """ - This is the cache for the primary storage. - """ - use Nebulex.Cache, - otp_app: unquote(otp_app), - adapter: unquote(primary) - end - - @doc """ - A convenience function for getting the primary storage cache. - """ - def __primary__, do: Primary - - @doc """ - A convenience function for getting the cluster nodes. 
- """ - def nodes do - Cluster.get_nodes(get_dynamic_cache()) - end - - @doc """ - A convenience function to get the node of the given `key`. - """ - def get_node(key) do - with_meta(get_dynamic_cache(), fn %{name: name, keyslot: keyslot} -> - Cluster.get_node(name, key, keyslot) - end) - end - - @doc """ - A convenience function for joining the cache to the cluster. - """ - def join_cluster do - Cluster.join(get_dynamic_cache()) - end - - @doc """ - A convenience function for removing the cache from the cluster. - """ - def leave_cluster do - Cluster.leave(get_dynamic_cache()) - end - end - end - - @impl true - def init(opts) do - # Validate options - opts = __MODULE__.Options.validate!(opts) - - # Required options - telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) - telemetry = Keyword.fetch!(opts, :telemetry) - cache = Keyword.fetch!(opts, :cache) - name = opts[:name] || cache - - # Maybe use stats - stats = Keyword.fetch!(opts, :stats) - - # Primary cache options - primary_opts = - Keyword.merge( - [telemetry_prefix: telemetry_prefix ++ [:primary], telemetry: telemetry, stats: stats], - Keyword.get(opts, :primary, []) - ) - - # Maybe put a name to primary storage - primary_opts = - if opts[:name], - do: [name: normalize_module_name([name, Primary])] ++ primary_opts, - else: primary_opts - - # Keyslot module for selecting nodes - keyslot = - opts - |> Keyword.get(:keyslot, __MODULE__) - |> assert_behaviour(Nebulex.Adapter.Keyslot, "keyslot") - - # Prepare metadata - adapter_meta = %{ - telemetry_prefix: telemetry_prefix, - telemetry: telemetry, - name: name, - primary_name: primary_opts[:name], - keyslot: keyslot, - stats: stats - } - - # Prepare child_spec - child_spec = - Nebulex.Adapters.Supervisor.child_spec( - name: normalize_module_name([name, Supervisor]), - strategy: :rest_for_one, - children: [ - {cache.__primary__, primary_opts}, - {__MODULE__.Bootstrap, {Map.put(adapter_meta, :cache, cache), opts}} - ] - ) - - {:ok, child_spec, adapter_meta} 
- end - - ## Nebulex.Adapter.Entry - - @impl true - defspan fetch(adapter_meta, key, opts) do - adapter_meta - |> call(key, :fetch, [key, opts], opts) - |> handle_key_error(adapter_meta.name) - end - - @impl true - defspan get_all(adapter_meta, keys, opts) do - case map_reduce(keys, adapter_meta, :get_all, [opts], Keyword.get(opts, :timeout)) do - {res, []} -> - {:ok, Enum.reduce(res, %{}, &Map.merge(&2, &1))} - - {_ok, errors} -> - wrap_error Nebulex.Error, reason: {:rpc_multicall_error, errors}, module: RPC - end - end - - @impl true - defspan put(adapter_meta, key, value, _ttl, on_write, opts) do - case on_write do - :put -> - with :ok <- call(adapter_meta, key, :put, [key, value, opts], opts) do - {:ok, true} - end - - :put_new -> - call(adapter_meta, key, :put_new, [key, value, opts], opts) - - :replace -> - call(adapter_meta, key, :replace, [key, value, opts], opts) - end - end - - @impl true - defspan put_all(adapter_meta, entries, _ttl, on_write, opts) do - case on_write do - :put -> - do_put_all(:put_all, adapter_meta, entries, opts) - - :put_new -> - do_put_all(:put_new_all, adapter_meta, entries, opts) - end - end - - def do_put_all(action, adapter_meta, entries, opts) do - case {action, map_reduce(entries, adapter_meta, action, [opts], Keyword.get(opts, :timeout))} do - {:put_all, {_res, []}} -> - {:ok, true} - - {:put_new_all, {res, []}} -> - {:ok, Enum.reduce(res, true, &(&1 and &2))} - - {_, {_ok, errors}} -> - wrap_error Nebulex.Error, reason: {:rpc_multicall_error, errors}, module: RPC - end - end - - @impl true - defspan delete(adapter_meta, key, opts) do - call(adapter_meta, key, :delete, [key, opts], opts) - end - - @impl true - defspan take(adapter_meta, key, opts) do - adapter_meta - |> call(key, :take, [key, opts], opts) - |> handle_key_error(adapter_meta.name) - end - - @impl true - defspan has_key?(adapter_meta, key, opts) do - call(adapter_meta, key, :has_key?, [key, opts]) - end - - @impl true - defspan update_counter(adapter_meta, key, 
amount, _ttl, _default, opts) do - call(adapter_meta, key, :incr, [key, amount, opts], opts) - end - - @impl true - defspan ttl(adapter_meta, key, opts) do - adapter_meta - |> call(key, :ttl, [key, opts]) - |> handle_key_error(adapter_meta.name) - end - - @impl true - defspan expire(adapter_meta, key, ttl, opts) do - call(adapter_meta, key, :expire, [key, ttl, opts]) - end - - @impl true - defspan touch(adapter_meta, key, opts) do - call(adapter_meta, key, :touch, [key, opts]) - end - - ## Nebulex.Adapter.Queryable - - @impl true - defspan execute(adapter_meta, operation, query, opts) do - reducer = - case operation do - :all -> &List.flatten/1 - _ -> &Enum.sum/1 - end - - adapter_meta.name - |> Cluster.get_nodes() - |> RPC.multicall( - __MODULE__, - :with_dynamic_cache, - [adapter_meta, operation, [query, opts]], - opts - ) - |> handle_rpc_multicall(reducer) - end - - @impl true - defspan stream(adapter_meta, query, opts) do - timeout = opts[:timeout] || 5000 - - Stream.resource( - fn -> - Cluster.get_nodes(adapter_meta.name) - end, - fn - [] -> - {:halt, []} - - [node | nodes] -> - elements = - unwrap_or_raise RPC.call( - node, - __MODULE__, - :eval_stream, - [adapter_meta, query, opts], - timeout - ) - - {elements, nodes} - end, - & &1 - ) - |> wrap_ok() - end - - ## Nebulex.Adapter.Persistence - - @impl true - defspan dump(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - @impl true - defspan load(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - ## Nebulex.Adapter.Transaction - - @impl true - defspan transaction(adapter_meta, opts, fun) do - nodes = Keyword.put_new_lazy(opts, :nodes, fn -> Cluster.get_nodes(adapter_meta.name) end) - - super(adapter_meta, nodes, fun) - end - - @impl true - defspan in_transaction?(adapter_meta) do - super(adapter_meta) - end - - ## Nebulex.Adapter.Stats - - @impl true - defspan stats(adapter_meta) do - with_dynamic_cache(adapter_meta, :stats, []) - end - - ## Helpers - - @doc """ - 
Helper function to use dynamic cache for internal primary cache storage - when needed. - """ - def with_dynamic_cache(%{cache: cache, primary_name: nil}, action, args) do - apply(cache.__primary__, action, args) - end - - def with_dynamic_cache(%{cache: cache, primary_name: primary_name}, action, args) do - cache.__primary__.with_dynamic_cache(primary_name, fn -> - apply(cache.__primary__, action, args) - end) - end - - @doc """ - Helper to perform `stream/3` locally. - """ - def eval_stream(meta, query, opts) do - with {:ok, stream} <- with_dynamic_cache(meta, :stream, [query, opts]) do - {:ok, Enum.to_list(stream)} - end - end - - ## Private Functions - - defp handle_key_error({:error, %Nebulex.KeyError{} = e}, name) do - {:error, %{e | cache: name}} - end - - defp handle_key_error(other, _name) do - other - end - - defp get_node(%{name: name, keyslot: keyslot}, key) do - Cluster.get_node(name, key, keyslot) - end - - defp call(adapter_meta, key, action, args, opts \\ []) do - adapter_meta - |> get_node(key) - |> rpc_call(adapter_meta, action, args, opts) - end - - defp rpc_call(node, meta, fun, args, opts) do - RPC.call(node, __MODULE__, :with_dynamic_cache, [meta, fun, args], opts[:timeout] || 5000) - end - - defp group_keys_by_node(enum, adapter_meta) do - Enum.reduce(enum, %{}, fn - {key, _} = entry, acc -> - node = get_node(adapter_meta, key) - - Map.put(acc, node, [entry | Map.get(acc, node, [])]) - - key, acc -> - node = get_node(adapter_meta, key) - - Map.put(acc, node, [key | Map.get(acc, node, [])]) - end) - end - - defp map_reduce(enum, meta, action, args, timeout) do - enum - |> group_keys_by_node(meta) - |> Enum.map(fn {node, group} -> - {node, {__MODULE__, :with_dynamic_cache, [meta, action, [group | args]]}} - end) - |> RPC.multicall(timeout: timeout) - end - - defp handle_rpc_multicall({res, []}, fun) do - {:ok, fun.(res)} - end - - defp handle_rpc_multicall({_ok, errors}, _) do - wrap_error Nebulex.Error, reason: {:rpc_multicall_error, errors}, 
module: RPC - end -end - -defmodule Nebulex.Adapters.Partitioned.Bootstrap do - @moduledoc false - use GenServer - - import Nebulex.Helpers - - alias Nebulex.Cache.Cluster - alias Nebulex.Telemetry - - # State - defstruct [:adapter_meta, :join_timeout] - - ## API - - @doc false - def start_link({%{name: name}, _} = state) do - GenServer.start_link( - __MODULE__, - state, - name: normalize_module_name([name, Bootstrap]) - ) - end - - ## GenServer Callbacks - - @impl true - def init({adapter_meta, opts}) do - # Trap exit signals to run cleanup job - _ = Process.flag(:trap_exit, true) - - # Bootstrap started - :ok = dispatch_telemetry_event(:started, adapter_meta) - - # Ensure joining the cluster when the cache supervision tree is started - :ok = Cluster.join(adapter_meta.name) - - # Bootstrap joined the cache to the cluster - :ok = dispatch_telemetry_event(:joined, adapter_meta) - - # Build initial state - state = build_state(adapter_meta, opts) - - # Start bootstrap process - {:ok, state, state.join_timeout} - end - - @impl true - def handle_info(message, state) - - def handle_info(:timeout, %__MODULE__{adapter_meta: adapter_meta} = state) do - # Ensure it is always joined to the cluster - :ok = Cluster.join(adapter_meta.name) - - # Bootstrap joined the cache to the cluster - :ok = dispatch_telemetry_event(:joined, adapter_meta) - - {:noreply, state, state.join_timeout} - end - - def handle_info({:EXIT, _from, reason}, %__MODULE__{adapter_meta: adapter_meta} = state) do - # Bootstrap received exit signal - :ok = dispatch_telemetry_event(:exit, adapter_meta, %{reason: reason}) - - {:stop, reason, state} - end - - @impl true - def terminate(reason, %__MODULE__{adapter_meta: adapter_meta}) do - # Ensure leaving the cluster when the cache stops - :ok = Cluster.leave(adapter_meta.name) - - # Bootstrap stopped or terminated - :ok = dispatch_telemetry_event(:stopped, adapter_meta, %{reason: reason}) - end - - ## Private Functions - - defp build_state(adapter_meta, opts) do 
- # Join timeout to ensure it is always joined to the cluster - join_timeout = Keyword.fetch!(opts, :join_timeout) - - %__MODULE__{adapter_meta: adapter_meta, join_timeout: join_timeout} - end - - defp dispatch_telemetry_event(event, adapter_meta, meta \\ %{}) do - Telemetry.execute( - adapter_meta.telemetry_prefix ++ [:bootstrap, event], - %{system_time: System.system_time()}, - Map.merge(meta, %{ - adapter_meta: adapter_meta, - cluster_nodes: Cluster.get_nodes(adapter_meta.name) - }) - ) - end -end diff --git a/lib/nebulex/adapters/partitioned/options.ex b/lib/nebulex/adapters/partitioned/options.ex deleted file mode 100644 index eb81c59c..00000000 --- a/lib/nebulex/adapters/partitioned/options.ex +++ /dev/null @@ -1,39 +0,0 @@ -defmodule Nebulex.Adapters.Partitioned.Options do - @moduledoc """ - Option definitions for the partitioned adapter. - """ - use Nebulex.Cache.Options - - definition = - [ - primary: [ - required: false, - type: :keyword_list, - doc: """ - The options that will be passed to the adapter associated with the - local primary storage. - """ - ], - keyslot: [ - required: false, - type: :atom, - doc: """ - Defines the module implementing `Nebulex.Adapter.Keyslot` behaviour. - """ - ], - join_timeout: [ - required: false, - type: :pos_integer, - default: :timer.seconds(180), - doc: """ - Interval time in milliseconds for joining the running partitioned cache - to the cluster. This is to ensure it is always joined. - """ - ] - ] ++ base_definition() - - @definition NimbleOptions.new!(definition) - - @doc false - def definition, do: @definition -end diff --git a/lib/nebulex/adapters/replicated.ex b/lib/nebulex/adapters/replicated.ex deleted file mode 100644 index 55c93b9e..00000000 --- a/lib/nebulex/adapters/replicated.ex +++ /dev/null @@ -1,851 +0,0 @@ -defmodule Nebulex.Adapters.Replicated do - @moduledoc ~S""" - Built-in adapter for replicated cache topology. - - ## Overall features - - * Replicated cache topology. 
- * Configurable primary storage adapter. - * Cache-level locking when deleting all entries or adding new nodes. - * Key-level (or entry-level) locking for key-based write-like operations. - * Support for transactions via Erlang global name registration facility. - * Stats support rely on the primary storage adapter. - - ## Replicated Cache Topology - - A replicated cache is a clustered, fault tolerant cache where data is fully - replicated to every member in the cluster. This cache offers the fastest read - performance with linear performance scalability for reads but poor scalability - for writes (as writes must be processed by every member in the cluster). - Because data is replicated to all servers, adding servers does not increase - aggregate cache capacity. - - There are several challenges to building a reliably replicated cache. The - first is how to get it to scale and perform well. Updates to the cache have - to be sent to all cluster nodes, and all cluster nodes have to end up with - the same data, even if multiple updates to the same piece of data occur at - the same time. Also, if a cluster node requests a lock, ideally it should - not have to get all cluster nodes to agree on the lock or at least do it in - a very efficient way (`:global` is used here), otherwise it will scale - extremely poorly; yet in the case of a cluster node failure, all of the data - and lock information must be kept safely. - - The best part of a replicated cache is its access speed. Since the data is - replicated to each cluster node, it is available for use without any waiting. - This is referred to as "zero latency access," and is perfect for situations - in which an application requires the highest possible speed in its data - access. 
- - However, there are some limitations: - - * _**Cost Per Update**_ - Updating a replicated cache requires pushing - the new version of the data to all other cluster members, which will - limit scalability if there is a high frequency of updates per member. - - * _**Cost Per Entry**_ - The data is replicated to every cluster member, - so Memory Heap space is used on each member, which will impact - performance for large caches. - - > Based on **"Distributed Caching Essential Lessons"** by **Cameron Purdy**. - - ## Usage - - When used, the Cache expects the `:otp_app` and `:adapter` as options. - The `:otp_app` should point to an OTP application that has the cache - configuration. For example: - - defmodule MyApp.ReplicatedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Replicated - end - - Optionally, you can configure the desired primary storage adapter with the - option `:primary_storage_adapter`; defaults to `Nebulex.Adapters.Local`. - - defmodule MyApp.ReplicatedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Replicated, - primary_storage_adapter: Nebulex.Adapters.Local - end - - The configuration for the cache must be in your application environment, - usually defined in your `config/config.exs`: - - config :my_app, MyApp.ReplicatedCache, - primary: [ - gc_interval: 3_600_000, - backend: :shards - ] - - If your application was generated with a supervisor (by passing `--sup` - to `mix new`) you will have a `lib/my_app/application.ex` file containing - the application start callback that defines and starts your supervisor. - You just need to edit the `start/2` function to start the cache as a - supervisor on your application's supervisor: - - def start(_type, _args) do - children = [ - {MyApp.ReplicatedCache, []}, - ... - ] - - See `Nebulex.Cache` for more information. 
- - ## Options - - This adapter supports the following options and all of them can be given via - the cache configuration: - - * `:primary` - The options that will be passed to the adapter associated - with the local primary storage. These options will depend on the local - adapter to use. - - ## Shared options - - Almost all of the cache functions outlined in `Nebulex.Cache` module - accept the following options: - - * `:timeout` - The time-out value in milliseconds for the command that - will be executed. If the timeout is exceeded, then the current process - will exit. For executing a command on remote nodes, this adapter uses - `Task.await/2` internally for receiving the result, so this option tells - how much time the adapter should wait for it. If the timeout is exceeded, - the task is shut down but the current process doesn't exit, only the - result associated with that task is skipped in the reduce phase. - - ## Telemetry events - - This adapter emits all recommended Telemetry events, and documented - in `Nebulex.Cache` module (see **"Adapter-specific events"** section). - - Since the replicated adapter depends on the configured primary storage - adapter (local cache adapter), this one may also emit Telemetry events. - Therefore, there will be events emitted by the replicated adapter as well - as the primary storage adapter. 
For example, for the cache defined before - `MyApp.ReplicatedCache`, these would be the emitted events: - - * `[:my_app, :replicated_cache, :command, :start]` - * `[:my_app, :replicated_cache, :primary, :command, :start]` - * `[:my_app, :replicated_cache, :command, :stop]` - * `[:my_app, :replicated_cache, :primary, :command, :stop]` - * `[:my_app, :replicated_cache, :command, :exception]` - * `[:my_app, :replicated_cache, :primary, :command, :exception]` - - As you may notice, the telemetry prefix by default for the replicated cache - is `[:my_app, :replicated_cache]`, and the prefix for its primary storage - `[:my_app, :replicated_cache, :primary]`. - - See also the [Telemetry guide](http://hexdocs.pm/nebulex/telemetry.html) - for more information and examples. - - ## Stats - - This adapter depends on the primary storage adapter for the stats support. - Therefore, it is important to ensure the underlying primary storage adapter - does support stats, otherwise, you may get unexpected errors. - - ## Extended API - - This adapter provides some additional convenience functions to the - `Nebulex.Cache` API. - - Retrieving the primary storage or local cache module: - - MyCache.__primary__() - - Retrieving the cluster nodes associated with the given cache name: - - MyCache.nodes() - - Joining the cache to the cluster: - - MyCache.join_cluster() - - Leaving the cluster (removes the cache from the cluster): - - MyCache.leave_cluster() - - ## Adapter-specific telemetry events - - This adapter exposes following Telemetry events: - - * `telemetry_prefix ++ [:replication]` - Dispatched by the adapter - when a replication error occurs due to a write-like operation - under-the-hood. 
- - * Measurements: `%{rpc_errors: non_neg_integer}` - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - rpc_errors: [{node, error :: term}] - } - ``` - - * `telemetry_prefix ++ [:bootstrap]` - Dispatched by the adapter at start - time when there are errors while syncing up with the cluster nodes. - - * Measurements: - - ``` - %{ - failed_nodes: non_neg_integer, - remote_errors: non_neg_integer - } - ``` - - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - failed_nodes: [node], - remote_errors: [term] - } - ``` - - ## Caveats of replicated adapter - - As it is explained in the beginning, a replicated topology not only brings - with advantages (mostly for reads) but also with some limitations and - challenges. - - This adapter uses global locks (via `:global`) for all operation that modify - or alter the cache somehow to ensure as much consistency as possible across - all members of the cluster. These locks may be per key or for the entire cache - depending on the operation taking place. For that reason, it is very important - to be aware about those operation that can potentially lead to performance and - scalability issues, so that you can do a better usage of the replicated - adapter. The following is with the operations and aspects you should pay - attention to: - - * Starting and joining a new replicated node to the cluster is the most - expensive action, because all write-like operations across all members of - the cluster are blocked until the new node completes the synchronization - process, which involves copying cached data from any of the existing - cluster nodes into the new node, and this could be very expensive - depending on the number of caches entries. For that reason, adding new - nodes is considered an expensive operation that should happen only from - time to time. - - * Deleting all entries. 
When `c:Nebulex.Cache.delete_all/2` action is - executed, like in the previous case, all write-like operations in all - members of the cluster are blocked until the deletion action is completed - (this implies deleting all cached data from all cluster nodes). Therefore, - deleting all entries from cache is also considered an expensive operation - that should happen only from time to time. - - * Write-like operations based on a key only block operations related to - that key across all members of the cluster. This is not as critical as - the previous two cases but it is something to keep in mind anyway because - if there is a highly demanded key in terms of writes, that could be also - a potential bottleneck. - - Summing up, the replicated cache topology along with this adapter should - be used mainly when the the reads clearly dominate over the writes (e.g.: - Reads 80% and Writes 20% or less). Besides, operations like deleting all - entries from cache or adding new nodes must be executed only once in a while - to avoid performance issues, since they are very expensive. - """ - - # Provide Cache Implementation - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - @behaviour Nebulex.Adapter.Stats - - # Inherit default transaction implementation - use Nebulex.Adapter.Transaction - - # Inherit default persistence implementation - use Nebulex.Adapter.Persistence - - import Bitwise, only: [<<<: 2] - - import Nebulex.Adapter - import Nebulex.Helpers - - alias Nebulex.Cache.Cluster - alias Nebulex.{RPC, Telemetry} - - ## Nebulex.Adapter - - @impl true - defmacro __before_compile__(env) do - otp_app = Module.get_attribute(env.module, :otp_app) - opts = Module.get_attribute(env.module, :opts) - primary = Keyword.get(opts, :primary_storage_adapter, Nebulex.Adapters.Local) - - quote do - defmodule Primary do - @moduledoc """ - This is the cache for the primary storage. 
- """ - use Nebulex.Cache, - otp_app: unquote(otp_app), - adapter: unquote(primary) - end - - @doc """ - A convenience function for getting the primary storage cache. - """ - def __primary__, do: Primary - - @doc """ - A convenience function for getting the cluster nodes. - """ - def nodes do - Cluster.get_nodes(get_dynamic_cache()) - end - - @doc """ - A convenience function for joining the cache to the cluster. - """ - def join_cluster do - Cluster.join(get_dynamic_cache()) - end - - @doc """ - A convenience function for removing the cache from the cluster. - """ - def leave_cluster do - Cluster.leave(get_dynamic_cache()) - end - end - end - - @impl true - def init(opts) do - # Validate options - opts = __MODULE__.Options.validate!(opts) - - # Required options - telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) - telemetry = Keyword.fetch!(opts, :telemetry) - cache = Keyword.fetch!(opts, :cache) - name = opts[:name] || cache - - # Maybe use stats - stats = Keyword.fetch!(opts, :stats) - - # Primary cache options - primary_opts = - Keyword.merge( - [telemetry_prefix: telemetry_prefix ++ [:primary], telemetry: telemetry, stats: stats], - Keyword.get(opts, :primary, []) - ) - - # Maybe put a name to primary storage - primary_opts = - if opts[:name], - do: [name: normalize_module_name([name, Primary])] ++ primary_opts, - else: primary_opts - - # Prepare metadata - adapter_meta = %{ - telemetry_prefix: telemetry_prefix, - telemetry: telemetry, - name: name, - primary_name: primary_opts[:name], - stats: stats - } - - # Prepare child_spec - child_spec = - Nebulex.Adapters.Supervisor.child_spec( - name: normalize_module_name([name, Supervisor]), - strategy: :rest_for_one, - children: [ - {cache.__primary__, primary_opts}, - {__MODULE__.Bootstrap, Map.put(adapter_meta, :cache, cache)} - ] - ) - - {:ok, child_spec, adapter_meta} - end - - ## Nebulex.Adapter.Entry - - @impl true - defspan fetch(adapter_meta, key, opts) do - adapter_meta - |> 
with_dynamic_cache(:fetch, [key, opts]) - |> handle_key_error(adapter_meta.name) - end - - @impl true - defspan get_all(adapter_meta, keys, opts) do - with_dynamic_cache(adapter_meta, :get_all, [keys, opts]) - end - - @impl true - defspan put(adapter_meta, key, value, _ttl, on_write, opts) do - case with_transaction(adapter_meta, on_write, [key], [key, value, opts], opts) do - :ok -> {:ok, true} - other -> other - end - end - - @impl true - defspan put_all(adapter_meta, entries, _ttl, on_write, opts) do - action = if on_write == :put_new, do: :put_new_all, else: :put_all - keys = for {k, _} <- entries, do: k - - case with_transaction(adapter_meta, action, keys, [entries, opts], opts) do - :ok -> {:ok, true} - other -> other - end - end - - @impl true - defspan delete(adapter_meta, key, opts) do - with_transaction(adapter_meta, :delete, [key], [key, opts], opts) - end - - @impl true - defspan take(adapter_meta, key, opts) do - adapter_meta - |> with_transaction(:take, [key], [key, opts], opts) - |> handle_key_error(adapter_meta.name) - end - - @impl true - defspan update_counter(adapter_meta, key, amount, _ttl, _default, opts) do - with_transaction(adapter_meta, :incr, [key], [key, amount, opts], opts) - end - - @impl true - defspan has_key?(adapter_meta, key, opts) do - with_dynamic_cache(adapter_meta, :has_key?, [key, opts]) - end - - @impl true - defspan ttl(adapter_meta, key, opts) do - adapter_meta - |> with_dynamic_cache(:ttl, [key, opts]) - |> handle_key_error(adapter_meta.name) - end - - @impl true - defspan expire(adapter_meta, key, ttl, opts) do - with_transaction(adapter_meta, :expire, [key], [key, ttl, opts]) - end - - @impl true - defspan touch(adapter_meta, key, opts) do - with_transaction(adapter_meta, :touch, [key], [key, opts]) - end - - ## Nebulex.Adapter.Queryable - - @impl true - defspan execute(adapter_meta, operation, query, opts) do - do_execute(adapter_meta, operation, query, opts) - end - - defp do_execute(%{name: name} = adapter_meta, 
:delete_all, query, opts) do - nodes = Cluster.get_nodes(name) - - # It is blocked until ongoing write operations finish (if there is any). - # Similarly, while it is executed, all later write-like operations are - # blocked until it finishes. - :global.trans( - {name, self()}, - fn -> - multicall(adapter_meta, nodes, :delete_all, [query, opts], opts) - end, - nodes - ) - end - - defp do_execute(adapter_meta, operation, query, opts) do - with_dynamic_cache(adapter_meta, operation, [query, opts]) - end - - @impl true - defspan stream(adapter_meta, query, opts) do - with_dynamic_cache(adapter_meta, :stream, [query, opts]) - end - - ## Nebulex.Adapter.Persistence - - @impl true - defspan dump(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - @impl true - defspan load(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - ## Nebulex.Adapter.Transaction - - @impl true - defspan transaction(adapter_meta, opts, fun) do - nodes = Keyword.put_new_lazy(opts, :nodes, fn -> Cluster.get_nodes(adapter_meta.name) end) - - super(adapter_meta, nodes, fun) - end - - @impl true - defspan in_transaction?(adapter_meta) do - super(adapter_meta) - end - - ## Nebulex.Adapter.Stats - - @impl true - defspan stats(adapter_meta) do - with_dynamic_cache(adapter_meta, :stats, []) - end - - ## Helpers - - @doc """ - Helper function to use dynamic cache for internal primary cache storage - when needed. 
- """ - def with_dynamic_cache(%{cache: cache, primary_name: nil}, action, args) do - apply(cache.__primary__, action, args) - end - - def with_dynamic_cache(%{cache: cache, primary_name: primary_name}, action, args) do - cache.__primary__.with_dynamic_cache(primary_name, fn -> - apply(cache.__primary__, action, args) - end) - end - - ## Private Functions - - defp handle_key_error({:error, %Nebulex.KeyError{} = e}, name) do - {:error, %{e | cache: name}} - end - - defp handle_key_error(other, _name) do - other - end - - defp with_transaction(adapter_meta, action, keys, args, opts \\ []) do - do_with_transaction(adapter_meta, action, keys, args, opts, 1) - end - - defp do_with_transaction(%{name: name} = adapter_meta, action, keys, args, opts, times) do - # This is a bit hacky because the `:global_locks` table managed by - # `:global` is being accessed directly breaking the encapsulation. - # So far, this has been the simplest and fastest way to validate if - # the global sync lock `:"$sync_lock"` is set, so we block write-like - # operations until it finishes. The other option would be trying to - # lock the same key `:"$sync_lock"`, and then when the lock is acquired, - # delete it before processing the write operation. But this means another - # global lock across the cluster everytime there is a write. So for the - # time being, we just read the global table to validate it which is much - # faster; since it is a local read with the global ETS, there is no global - # locks across the cluster. 
- case :ets.lookup(:global_locks, :"$sync_lock") do - [_] -> - :ok = random_sleep(times) - - do_with_transaction(adapter_meta, action, keys, args, opts, times + 1) - - [] -> - nodes = Cluster.get_nodes(name) - - # Write-like operation must be wrapped within a transaction - # to ensure proper replication - with {:ok, res} <- - transaction(adapter_meta, [keys: keys, nodes: nodes], fn -> - multicall(adapter_meta, nodes, action, args, opts) - end) do - res - end - end - end - - defp multicall(meta, nodes, action, args, opts) do - # Run the command locally first and run replication only if success - case with_dynamic_cache(meta, action, args) do - {:error, _} = error -> - error - - local -> - # Run the command on the remote nodes - {ok_nodes, error_nodes} = - RPC.multicall( - nodes -- [node()], - __MODULE__, - :with_dynamic_cache, - [meta, action, args], - opts - ) - - # Process the responses adding the local one as main result - handle_rpc_multicall({[local | ok_nodes], error_nodes}, meta, action) - end - end - - defp handle_rpc_multicall({res, []}, _meta, _action) do - hd(res) - end - - defp handle_rpc_multicall({res, {[], ignored_errors}}, meta, action) do - _ = dispatch_replication_error(meta, action, ignored_errors) - - hd(res) - end - - defp handle_rpc_multicall({_responses, {filtered_errors, ignored_errors}}, meta, action) do - _ = dispatch_replication_error(meta, action, ignored_errors) - - wrap_error Nebulex.Error, reason: {:rpc_multicall_error, filtered_errors}, module: RPC - end - - defp handle_rpc_multicall({responses, errors}, meta, action) do - handle_rpc_multicall({responses, filter_errors(errors)}, meta, action) - end - - defp filter_errors(errors) do - Enum.reduce(errors, {[], []}, fn - {_node, {:error, %Nebulex.KeyError{}}} = error, {acc1, acc2} -> - # The key was not found on remote node, ignore the error - {acc1, [error | acc2]} - - {_node, {:error, %Nebulex.Error{reason: {:registry_lookup_error, _}}}} = error, - {acc1, acc2} -> - # The cache was not 
found in the remote node, maybe it was stopped and - # :pg ("Process Groups") is not updated yet, then ignore the error - {acc1, [error | acc2]} - - {_node, {:error, {:erpc, :noconnection}}} = error, {acc1, acc2} -> - # Remote node is down, maybe :pg ("Process Groups") is not updated yet - {acc1, [error | acc2]} - - error, {acc1, acc2} -> - {[error | acc1], acc2} - end) - end - - defp dispatch_replication_error(adapter_meta, action, rpc_errors) do - if adapter_meta.telemetry or Map.get(adapter_meta, :in_span?, false) do - Telemetry.execute( - adapter_meta.telemetry_prefix ++ [:replication], - %{rpc_errors: length(rpc_errors)}, - %{adapter_meta: adapter_meta, function_name: action, rpc_errors: rpc_errors} - ) - end - end - - # coveralls-ignore-start - - defp random_sleep(times) do - _ = - if rem(times, 10) == 0 do - _ = :rand.seed(:exsplus) - end - - # First time 1/4 seconds, then doubling each time up to 8 seconds max - tmax = - if times > 5 do - 8000 - else - div((1 <<< times) * 1000, 8) - end - - tmax - |> :rand.uniform() - |> Process.sleep() - end - - # coveralls-ignore-stop -end - -defmodule Nebulex.Adapters.Replicated.Bootstrap do - @moduledoc false - use GenServer - - import Nebulex.Helpers - - alias Nebulex.{Adapter, Entry, Telemetry} - alias Nebulex.Adapters.Replicated - alias Nebulex.Cache.Cluster - - # Max retries in intervals of 1 ms (5 seconds). - # If in 5 seconds the cache has not started, stop the server. 
- @max_retries 5000 - - ## API - - @doc false - def start_link(%{name: name} = adapter_meta) do - GenServer.start_link( - __MODULE__, - adapter_meta, - name: normalize_module_name([name, Bootstrap]) - ) - end - - ## GenServer Callbacks - - @impl true - def init(adapter_meta) do - # Trap exit signals to run cleanup job - _ = Process.flag(:trap_exit, true) - - # Ensure joining the cluster only when the cache supervision tree is started - :ok = Cluster.join(adapter_meta.name) - - # Set a global lock to stop any write operation - # until the synchronization process finishes - :ok = lock(adapter_meta.name) - - # Init retries - state = Map.put(adapter_meta, :retries, 0) - - # Start bootstrap process - {:ok, state, 1} - end - - @impl true - def handle_info(:timeout, %{pid: pid} = state) when is_pid(pid) do - # Start synchronization process - :ok = sync_data(state) - - # Delete global lock set when the server started - :ok = unlock(state.name) - - # Bootstrap process finished - {:noreply, state} - end - - def handle_info(:timeout, %{name: name, retries: retries} = state) - when retries < @max_retries do - with {:error, _} <- - Adapter.with_meta(name, fn adapter_meta -> - handle_info(:timeout, adapter_meta) - end) do - {:noreply, %{state | retries: retries + 1}, 1} - end - end - - def handle_info(:timeout, state) do - # coveralls-ignore-start - {:stop, :normal, state} - # coveralls-ignore-stop - end - - @impl true - def terminate(_reason, %{name: name}) do - # Delete global lock set when the server started - :ok = unlock(name) - - # Ensure leaving the cluster when the cache stops - :ok = Cluster.leave(name) - end - - ## Helpers - - defp lock(name) do - true = :global.set_lock({:"$sync_lock", self()}, Cluster.get_nodes(name)) - - :ok - end - - defp unlock(name) do - true = :global.del_lock({:"$sync_lock", self()}, Cluster.get_nodes(name)) - - :ok - end - - # FIXME: this is because coveralls does not mark this as covered - # coveralls-ignore-start - - defp sync_data(%{name: 
name} = adapter_meta) do - cluster_nodes = Cluster.get_nodes(name) - - case cluster_nodes -- [node()] do - [] -> - :ok - - nodes -> - # Sync process: - # 1. Push a new generation on all cluster nodes to make the newer one - # empty. - # 2. Copy cached data from one of the cluster nodes; entries will be - # stremed from the older generation since the newer one should be - # empty. - # 3. Push a new generation on the current/new node to make it a mirror - # of the other cluster nodes. - # 4. Reset GC timer for ell cluster nodes (making the generation timer - # gap among cluster nodes as small as possible). - with :ok <- maybe_run_on_nodes(adapter_meta, nodes, :new_generation), - :ok <- copy_entries_from_nodes(adapter_meta, nodes), - :ok <- maybe_run_on_nodes(adapter_meta, [node()], :new_generation) do - maybe_run_on_nodes(adapter_meta, nodes, :reset_generation_timer) - end - end - end - - defp maybe_run_on_nodes(%{cache: cache} = adapter_meta, nodes, fun) do - if cache.__primary__.__adapter__() == Nebulex.Adapters.Local do - nodes - |> :rpc.multicall(Replicated, :with_dynamic_cache, [adapter_meta, fun, []]) - |> handle_multicall(adapter_meta) - else - :ok - end - end - - defp handle_multicall({responses, failed_nodes}, adapter_meta) do - {_ok, errors} = Enum.split_with(responses, &(&1 == :ok)) - - dispatch_bootstrap_error( - adapter_meta, - %{failed_nodes: length(failed_nodes), remote_errors: length(errors)}, - %{failed_nodes: failed_nodes, remote_errors: errors} - ) - end - - defp copy_entries_from_nodes(adapter_meta, nodes) do - nodes - |> Enum.reduce_while([], &stream_entries(adapter_meta, &1, &2)) - |> Enum.each( - &Replicated.with_dynamic_cache( - adapter_meta, - :put, - [&1.key, &1.value, [ttl: Entry.ttl(&1)]] - ) - ) - end - - defp stream_entries(meta, node, acc) do - stream_fun = fn -> - with {:ok, stream} <- Replicated.stream(meta, nil, return: :entry, page_size: 100) do - stream - |> Stream.filter(&(not Entry.expired?(&1))) - |> Stream.map(& &1) - |> 
Enum.to_list() - |> wrap_ok() - end - end - - case :rpc.call(node, Kernel, :apply, [stream_fun, []]) do - {:ok, entries} -> {:halt, entries} - _error -> {:cont, acc} - end - end - - defp dispatch_bootstrap_error(adapter_meta, measurements, metadata) do - if adapter_meta.telemetry or Map.get(adapter_meta, :in_span?, false) do - Telemetry.execute( - adapter_meta.telemetry_prefix ++ [:bootstrap], - measurements, - Map.put(metadata, :adapter_meta, adapter_meta) - ) - end - end - - # coveralls-ignore-stop -end diff --git a/lib/nebulex/adapters/replicated/options.ex b/lib/nebulex/adapters/replicated/options.ex deleted file mode 100644 index ae00a74c..00000000 --- a/lib/nebulex/adapters/replicated/options.ex +++ /dev/null @@ -1,23 +0,0 @@ -defmodule Nebulex.Adapters.Replicated.Options do - @moduledoc """ - Option definitions for the replicated adapter. - """ - use Nebulex.Cache.Options - - definition = - [ - primary: [ - required: false, - type: :keyword_list, - doc: """ - The options that will be passed to the adapter associated with the - local primary storage. - """ - ] - ] ++ base_definition() - - @definition NimbleOptions.new!(definition) - - @doc false - def definition, do: @definition -end diff --git a/lib/nebulex/adapters/supervisor.ex b/lib/nebulex/adapters/supervisor.ex deleted file mode 100644 index 65edc0e4..00000000 --- a/lib/nebulex/adapters/supervisor.ex +++ /dev/null @@ -1,19 +0,0 @@ -defmodule Nebulex.Adapters.Supervisor do - # Utility module for building a supervisor to wrap up the adapter's children. - @moduledoc false - - @doc """ - Builds a supervisor spec with the given `options` for wrapping up the - adapter's children. 
- """ - @spec child_spec(keyword) :: Supervisor.child_spec() - def child_spec(options) do - {children, options} = Keyword.pop(options, :children, []) - - %{ - id: Keyword.fetch!(options, :name), - start: {Supervisor, :start_link, [children, options]}, - type: :supervisor - } - end -end diff --git a/lib/nebulex/cache.ex b/lib/nebulex/cache.ex index 45efe6dd..d7384893 100644 --- a/lib/nebulex/cache.ex +++ b/lib/nebulex/cache.ex @@ -58,10 +58,10 @@ defmodule Nebulex.Cache do Almost all of the cache functions outlined in this module accept the following options: - * `:dynamic_cache` - The name of the cache supervisor process. The name is - either an atom or a PID. There might be cases where we want to have - different cache instances but access them through the same cache module. - This option tells the executed cache command what cache instance to use + * `:dynamic_cache` - The name of the cache supervisor process. It can be + an atom or a PID. There might be cases where we want to have different + cache instances but access them through the same cache module. This + option tells the executed cache command what cache instance to use dynamically in runtime. ## Telemetry events @@ -350,13 +350,15 @@ defmodule Nebulex.Cache do quote do ## Helpers + import Nebulex.Helpers, only: [kw_pop_first_lazy: 3] + @doc """ Helper macro to resolve the dynamic cache. """ defmacro dynamic_cache(opts, do: block) do quote do {dynamic_cache, opts} = - __MODULE__.pop_first_lazy( + kw_pop_first_lazy( unquote(opts), :dynamic_cache, fn -> get_dynamic_cache() end @@ -366,16 +368,6 @@ defmodule Nebulex.Cache do end end - @doc """ - Custom convenience `pop_first_lazy/3` function. 
- """ - def pop_first_lazy(keyword, key, fun) do - case :lists.keytake(key, 1, keyword) do - {:value, {^key, value}, rest} -> {value, rest} - false -> {fun.(), keyword} - end - end - ## Config and metadata @impl true @@ -449,181 +441,181 @@ defmodule Nebulex.Cache do defp entry_defs do quote do - alias Nebulex.Cache.Entry + alias Nebulex.Cache.KV @impl true def fetch(key, opts \\ []) do - dynamic_cache opts, do: Entry.fetch(dynamic_cache, key, opts) + dynamic_cache opts, do: KV.fetch(dynamic_cache, key, opts) end @impl true def fetch!(key, opts \\ []) do - dynamic_cache opts, do: Entry.fetch!(dynamic_cache, key, opts) + dynamic_cache opts, do: KV.fetch!(dynamic_cache, key, opts) end @impl true def get(key, default \\ nil, opts \\ []) do - dynamic_cache opts, do: Entry.get(dynamic_cache, key, default, opts) + dynamic_cache opts, do: KV.get(dynamic_cache, key, default, opts) end @impl true def get!(key, default \\ nil, opts \\ []) do - dynamic_cache opts, do: Entry.get!(dynamic_cache, key, default, opts) + dynamic_cache opts, do: KV.get!(dynamic_cache, key, default, opts) end @impl true def get_all(keys, opts \\ []) do - dynamic_cache opts, do: Entry.get_all(dynamic_cache, keys, opts) + dynamic_cache opts, do: KV.get_all(dynamic_cache, keys, opts) end @impl true def get_all!(keys, opts \\ []) do - dynamic_cache opts, do: Entry.get_all!(dynamic_cache, keys, opts) + dynamic_cache opts, do: KV.get_all!(dynamic_cache, keys, opts) end @impl true def put(key, value, opts \\ []) do - dynamic_cache opts, do: Entry.put(dynamic_cache, key, value, opts) + dynamic_cache opts, do: KV.put(dynamic_cache, key, value, opts) end @impl true def put!(key, value, opts \\ []) do - dynamic_cache opts, do: Entry.put!(dynamic_cache, key, value, opts) + dynamic_cache opts, do: KV.put!(dynamic_cache, key, value, opts) end @impl true def put_new(key, value, opts \\ []) do - dynamic_cache opts, do: Entry.put_new(dynamic_cache, key, value, opts) + dynamic_cache opts, do: KV.put_new(dynamic_cache, 
key, value, opts) end @impl true def put_new!(key, value, opts \\ []) do - dynamic_cache opts, do: Entry.put_new!(dynamic_cache, key, value, opts) + dynamic_cache opts, do: KV.put_new!(dynamic_cache, key, value, opts) end @impl true def replace(key, value, opts \\ []) do - dynamic_cache opts, do: Entry.replace(dynamic_cache, key, value, opts) + dynamic_cache opts, do: KV.replace(dynamic_cache, key, value, opts) end @impl true def replace!(key, value, opts \\ []) do - dynamic_cache opts, do: Entry.replace!(dynamic_cache, key, value, opts) + dynamic_cache opts, do: KV.replace!(dynamic_cache, key, value, opts) end @impl true def put_all(entries, opts \\ []) do - dynamic_cache opts, do: Entry.put_all(dynamic_cache, entries, opts) + dynamic_cache opts, do: KV.put_all(dynamic_cache, entries, opts) end @impl true def put_all!(entries, opts \\ []) do - dynamic_cache opts, do: Entry.put_all!(dynamic_cache, entries, opts) + dynamic_cache opts, do: KV.put_all!(dynamic_cache, entries, opts) end @impl true def put_new_all(entries, opts \\ []) do - dynamic_cache opts, do: Entry.put_new_all(dynamic_cache, entries, opts) + dynamic_cache opts, do: KV.put_new_all(dynamic_cache, entries, opts) end @impl true def put_new_all!(entries, opts \\ []) do - dynamic_cache opts, do: Entry.put_new_all!(dynamic_cache, entries, opts) + dynamic_cache opts, do: KV.put_new_all!(dynamic_cache, entries, opts) end @impl true def delete(key, opts \\ []) do - dynamic_cache opts, do: Entry.delete(dynamic_cache, key, opts) + dynamic_cache opts, do: KV.delete(dynamic_cache, key, opts) end @impl true def delete!(key, opts \\ []) do - dynamic_cache opts, do: Entry.delete!(dynamic_cache, key, opts) + dynamic_cache opts, do: KV.delete!(dynamic_cache, key, opts) end @impl true def take(key, opts \\ []) do - dynamic_cache opts, do: Entry.take(dynamic_cache, key, opts) + dynamic_cache opts, do: KV.take(dynamic_cache, key, opts) end @impl true def take!(key, opts \\ []) do - dynamic_cache opts, do: 
Entry.take!(dynamic_cache, key, opts) + dynamic_cache opts, do: KV.take!(dynamic_cache, key, opts) end @impl true def has_key?(key, opts \\ []) do - dynamic_cache opts, do: Entry.has_key?(dynamic_cache, key, opts) + dynamic_cache opts, do: KV.has_key?(dynamic_cache, key, opts) end @impl true def get_and_update(key, fun, opts \\ []) do - dynamic_cache opts, do: Entry.get_and_update(dynamic_cache, key, fun, opts) + dynamic_cache opts, do: KV.get_and_update(dynamic_cache, key, fun, opts) end @impl true def get_and_update!(key, fun, opts \\ []) do - dynamic_cache opts, do: Entry.get_and_update!(dynamic_cache, key, fun, opts) + dynamic_cache opts, do: KV.get_and_update!(dynamic_cache, key, fun, opts) end @impl true def update(key, initial, fun, opts \\ []) do - dynamic_cache opts, do: Entry.update(dynamic_cache, key, initial, fun, opts) + dynamic_cache opts, do: KV.update(dynamic_cache, key, initial, fun, opts) end @impl true def update!(key, initial, fun, opts \\ []) do - dynamic_cache opts, do: Entry.update!(get_dynamic_cache(), key, initial, fun, opts) + dynamic_cache opts, do: KV.update!(get_dynamic_cache(), key, initial, fun, opts) end @impl true def incr(key, amount \\ 1, opts \\ []) do - dynamic_cache opts, do: Entry.incr(dynamic_cache, key, amount, opts) + dynamic_cache opts, do: KV.incr(dynamic_cache, key, amount, opts) end @impl true def incr!(key, amount \\ 1, opts \\ []) do - dynamic_cache opts, do: Entry.incr!(dynamic_cache, key, amount, opts) + dynamic_cache opts, do: KV.incr!(dynamic_cache, key, amount, opts) end @impl true def decr(key, amount \\ 1, opts \\ []) do - dynamic_cache opts, do: Entry.decr(dynamic_cache, key, amount, opts) + dynamic_cache opts, do: KV.decr(dynamic_cache, key, amount, opts) end @impl true def decr!(key, amount \\ 1, opts \\ []) do - dynamic_cache opts, do: Entry.decr!(dynamic_cache, key, amount, opts) + dynamic_cache opts, do: KV.decr!(dynamic_cache, key, amount, opts) end @impl true def ttl(key, opts \\ []) do - dynamic_cache 
opts, do: Entry.ttl(dynamic_cache, key, opts) + dynamic_cache opts, do: KV.ttl(dynamic_cache, key, opts) end @impl true def ttl!(key, opts \\ []) do - dynamic_cache opts, do: Entry.ttl!(dynamic_cache, key, opts) + dynamic_cache opts, do: KV.ttl!(dynamic_cache, key, opts) end @impl true def expire(key, ttl, opts \\ []) do - dynamic_cache opts, do: Entry.expire(dynamic_cache, key, ttl, opts) + dynamic_cache opts, do: KV.expire(dynamic_cache, key, ttl, opts) end @impl true def expire!(key, ttl, opts \\ []) do - dynamic_cache opts, do: Entry.expire!(dynamic_cache, key, ttl, opts) + dynamic_cache opts, do: KV.expire!(dynamic_cache, key, ttl, opts) end @impl true def touch(key, opts \\ []) do - dynamic_cache opts, do: Entry.touch(dynamic_cache, key, opts) + dynamic_cache opts, do: KV.touch(dynamic_cache, key, opts) end @impl true def touch!(key, opts \\ []) do - dynamic_cache opts, do: Entry.touch!(dynamic_cache, key, opts) + dynamic_cache opts, do: KV.touch!(dynamic_cache, key, opts) end end end @@ -807,7 +799,8 @@ defmodule Nebulex.Cache do for the cache supervisor process to terminate, or the atom `:infinity` to wait indefinitely. Defaults to `5000`. See `Supervisor.stop/3`. - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. """ @doc group: "Runtime API" @callback stop(opts) :: :ok @@ -892,7 +885,7 @@ defmodule Nebulex.Cache do args :: [term] ) :: term - ## Nebulex.Adapter.Entry + ## Nebulex.Adapter.KV @doc """ Fetches the value for a specific `key` in the cache. @@ -908,7 +901,8 @@ defmodule Nebulex.Cache do ## Options - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
## Example @@ -922,7 +916,7 @@ defmodule Nebulex.Cache do _error """ - @doc group: "Entry API" + @doc group: "KV API" @callback fetch(key, opts) :: ok_error_tuple(value, fetch_error_reason) @doc """ @@ -930,7 +924,7 @@ defmodule Nebulex.Cache do contain `key`, or `Nebulex.Error` if any other error occurs while executing the command. """ - @doc group: "Entry API" + @doc group: "KV API" @callback fetch!(key, opts) :: value @doc """ @@ -946,7 +940,8 @@ defmodule Nebulex.Cache do ## Options - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example @@ -963,13 +958,13 @@ defmodule Nebulex.Cache do {:ok, :default} """ - @doc group: "Entry API" + @doc group: "KV API" @callback get(key, default :: value, opts) :: ok_error_tuple(value) @doc """ Same as `c:get/3` but raises an exception if an error occurs. """ - @doc group: "Entry API" + @doc group: "KV API" @callback get!(key, default :: value, opts) :: value @doc """ @@ -981,7 +976,8 @@ defmodule Nebulex.Cache do ## Options - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example @@ -992,13 +988,13 @@ defmodule Nebulex.Cache do {:ok, %{a: 1, c: 3}} """ - @doc group: "Entry API" + @doc group: "KV API" @callback get_all(keys :: [key], opts) :: ok_error_tuple(map) @doc """ Same as `c:get_all/2` but raises an exception if an error occurs. """ - @doc group: "Entry API" + @doc group: "KV API" @callback get_all!(keys :: [key], opts) :: map @doc """ @@ -1016,7 +1012,8 @@ defmodule Nebulex.Cache do (or expiry time) for the given key in **milliseconds**. Defaults to `:infinity`. - See the "Shared options" section at the module documentation for more options. 
+ See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example @@ -1038,13 +1035,13 @@ defmodule Nebulex.Cache do :ok """ - @doc group: "Entry API" + @doc group: "KV API" @callback put(key, value, opts) :: :ok | error @doc """ Same as `c:put/3` but raises an exception if an error occurs. """ - @doc group: "Entry API" + @doc group: "KV API" @callback put!(key, value, opts) :: :ok @doc """ @@ -1059,7 +1056,8 @@ defmodule Nebulex.Cache do (or expiry time) for the given key in **milliseconds**. Defaults to `:infinity`. - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example @@ -1074,13 +1072,13 @@ defmodule Nebulex.Cache do backend used internally by the adapter. Hence, it is recommended to review the adapter's documentation. """ - @doc group: "Entry API" + @doc group: "KV API" @callback put_all(entries, opts) :: :ok | error @doc """ Same as `c:put_all/2` but raises an exception if an error occurs. """ - @doc group: "Entry API" + @doc group: "KV API" @callback put_all!(entries, opts) :: :ok @doc """ @@ -1098,7 +1096,8 @@ defmodule Nebulex.Cache do (or expiry time) for the given key in **milliseconds**. Defaults to `:infinity`. - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example @@ -1109,13 +1108,13 @@ defmodule Nebulex.Cache do {:ok, false} """ - @doc group: "Entry API" + @doc group: "KV API" @callback put_new(key, value, opts) :: ok_error_tuple(boolean) @doc """ Same as `c:put_new/3` but raises an exception if an error occurs. 
""" - @doc group: "Entry API" + @doc group: "KV API" @callback put_new!(key, value, opts) :: boolean @doc """ @@ -1133,7 +1132,8 @@ defmodule Nebulex.Cache do (or expiry time) for the given key in **milliseconds**. Defaults to `:infinity`. - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example @@ -1148,13 +1148,13 @@ defmodule Nebulex.Cache do backend used internally by the adapter. Hence, it is recommended to review the adapter's documentation. """ - @doc group: "Entry API" + @doc group: "KV API" @callback put_new_all(entries, opts) :: ok_error_tuple(boolean) @doc """ Same as `c:put_new_all/2` but raises an exception if an error occurs. """ - @doc group: "Entry API" + @doc group: "KV API" @callback put_new_all!(entries, opts) :: boolean @doc """ @@ -1172,7 +1172,8 @@ defmodule Nebulex.Cache do (or expiry time) for the given key in **milliseconds**. Defaults to `:infinity`. - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example @@ -1191,13 +1192,13 @@ defmodule Nebulex.Cache do {:ok, true} """ - @doc group: "Entry API" + @doc group: "KV API" @callback replace(key, value, opts) :: ok_error_tuple(boolean) @doc """ Same as `c:replace/3` but raises an exception if an error occurs. """ - @doc group: "Entry API" + @doc group: "KV API" @callback replace!(key, value, opts) :: boolean @doc """ @@ -1207,7 +1208,8 @@ defmodule Nebulex.Cache do ## Options - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
## Example @@ -1224,13 +1226,13 @@ defmodule Nebulex.Cache do :ok """ - @doc group: "Entry API" + @doc group: "KV API" @callback delete(key, opts) :: :ok | error @doc """ Same as `c:delete/2` but raises an exception if an error occurs. """ - @doc group: "Entry API" + @doc group: "KV API" @callback delete!(key, opts) :: :ok @doc """ @@ -1247,7 +1249,8 @@ defmodule Nebulex.Cache do ## Options - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples @@ -1261,13 +1264,13 @@ defmodule Nebulex.Cache do _error """ - @doc group: "Entry API" + @doc group: "KV API" @callback take(key, opts) :: ok_error_tuple(value, fetch_error_reason) @doc """ Same as `c:take/2` but raises an exception if an error occurs. """ - @doc group: "Entry API" + @doc group: "KV API" @callback take!(key, opts) :: value @doc """ @@ -1280,7 +1283,8 @@ defmodule Nebulex.Cache do ## Options - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples @@ -1294,7 +1298,7 @@ defmodule Nebulex.Cache do {:ok, false} """ - @doc group: "Entry API" + @doc group: "KV API" @callback has_key?(key, opts) :: ok_error_tuple(boolean) @doc """ @@ -1316,7 +1320,8 @@ defmodule Nebulex.Cache do inserted as initial value of key before the it is incremented. Defaults to `0`. - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples @@ -1333,13 +1338,13 @@ defmodule Nebulex.Cache do {:ok, 12} """ - @doc group: "Entry API" + @doc group: "KV API" @callback incr(key, amount :: integer, opts) :: ok_error_tuple(integer) @doc """ Same as `c:incr/3` but raises an exception if an error occurs. 
""" - @doc group: "Entry API" + @doc group: "KV API" @callback incr!(key, amount :: integer, opts) :: integer @doc """ @@ -1361,7 +1366,8 @@ defmodule Nebulex.Cache do inserted as initial value of key before the it is incremented. Defaults to `0`. - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples @@ -1378,13 +1384,13 @@ defmodule Nebulex.Cache do {:ok, 8} """ - @doc group: "Entry API" + @doc group: "KV API" @callback decr(key, amount :: integer, opts) :: ok_error_tuple(integer) @doc """ Same as `c:decr/3` but raises an exception if an error occurs. """ - @doc group: "Entry API" + @doc group: "KV API" @callback decr!(key, amount :: integer, opts) :: integer @doc """ @@ -1401,7 +1407,8 @@ defmodule Nebulex.Cache do ## Options - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples @@ -1421,13 +1428,13 @@ defmodule Nebulex.Cache do _error """ - @doc group: "Entry API" + @doc group: "KV API" @callback ttl(key, opts) :: ok_error_tuple(timeout, fetch_error_reason) @doc """ Same as `c:ttl/2` but raises an exception if an error occurs. """ - @doc group: "Entry API" + @doc group: "KV API" @callback ttl!(key, opts) :: timeout @doc """ @@ -1438,7 +1445,8 @@ defmodule Nebulex.Cache do ## Options - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples @@ -1455,13 +1463,13 @@ defmodule Nebulex.Cache do {:ok, false} """ - @doc group: "Entry API" + @doc group: "KV API" @callback expire(key, ttl :: timeout, opts) :: ok_error_tuple(boolean) @doc """ Same as `c:expire/3` but raises an exception if an error occurs. 
""" - @doc group: "Entry API" + @doc group: "KV API" @callback expire!(key, ttl :: timeout, opts) :: boolean @doc """ @@ -1472,7 +1480,8 @@ defmodule Nebulex.Cache do ## Options - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples @@ -1486,13 +1495,13 @@ defmodule Nebulex.Cache do {:ok, false} """ - @doc group: "Entry API" + @doc group: "KV API" @callback touch(key, opts) :: ok_error_tuple(boolean) @doc """ Same as `c:touch/2` but raises an exception if an error occurs. """ - @doc group: "Entry API" + @doc group: "KV API" @callback touch!(key, opts) :: boolean @doc """ @@ -1517,7 +1526,8 @@ defmodule Nebulex.Cache do (or expiry time) for the given key in **milliseconds**. Defaults to `:infinity`. - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples @@ -1546,7 +1556,7 @@ defmodule Nebulex.Cache do {:ok, {nil, nil}} """ - @doc group: "Entry API" + @doc group: "KV API" @callback get_and_update(key, (value -> {current_value, new_value} | :pop), opts) :: ok_error_tuple({current_value, new_value}) when current_value: value, new_value: value @@ -1554,7 +1564,7 @@ defmodule Nebulex.Cache do @doc """ Same as `c:get_and_update/3` but raises an exception if an error occurs. """ - @doc group: "Entry API" + @doc group: "KV API" @callback get_and_update!(key, (value -> {current_value, new_value} | :pop), opts) :: {current_value, new_value} when current_value: value, new_value: value @@ -1579,7 +1589,8 @@ defmodule Nebulex.Cache do (or expiry time) for the given key in **milliseconds**. Defaults to `:infinity`. - See the "Shared options" section at the module documentation for more options. 
+ See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples @@ -1590,13 +1601,13 @@ defmodule Nebulex.Cache do {:ok, 2} """ - @doc group: "Entry API" + @doc group: "KV API" @callback update(key, initial :: value, (value -> value), opts) :: ok_error_tuple(value) @doc """ Same as `c:update/4` but raises an exception if an error occurs. """ - @doc group: "Entry API" + @doc group: "KV API" @callback update!(key, initial :: value, (value -> value), opts) :: value ## Nebulex.Adapter.Queryable @@ -1652,7 +1663,8 @@ defmodule Nebulex.Cache do adapters, but it is recommended to see the adapter's documentation to confirm its compatibility with this option. - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Query return option @@ -1660,7 +1672,7 @@ defmodule Nebulex.Cache do * `:key` - Returns a list only with the keys. * `:value` - Returns a list only with the values. - * `:entry` - Returns a list of `t:Nebulex.Entry.t/0`. + * `:entry` - Returns a list of `t:Nebulex.KV.t/0`. * `{:key, :value}` - Returns a list of tuples in the form `{key, value}`. See adapters documentation to confirm what of these options are supported @@ -1772,7 +1784,8 @@ defmodule Nebulex.Cache do back from the cache's backend. Defaults to `20`; it's unlikely this will ever need changing. - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Query return option @@ -1780,7 +1793,7 @@ defmodule Nebulex.Cache do * `:key` - Returns a list only with the keys. * `:value` - Returns a list only with the values. - * `:entry` - Returns a list of `t:Nebulex.Entry.t/0`. + * `:entry` - Returns a list of `t:Nebulex.KV.t/0`. 
* `{:key, :value}` - Returns a list of tuples in the form `{key, value}`. See adapters documentation to confirm what of these options are supported @@ -1870,7 +1883,8 @@ defmodule Nebulex.Cache do ## Options - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example @@ -1921,7 +1935,8 @@ defmodule Nebulex.Cache do ## Options - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example @@ -1968,7 +1983,8 @@ defmodule Nebulex.Cache do the default implementation from `Nebulex.Adapter.Persistence`, hence, review the available options there. - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples @@ -2006,7 +2022,8 @@ defmodule Nebulex.Cache do default implementation from `Nebulex.Adapter.Persistence`, hence, review the available options there. - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples @@ -2057,7 +2074,8 @@ defmodule Nebulex.Cache do ## Options - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples @@ -2089,7 +2107,8 @@ defmodule Nebulex.Cache do ## Options - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
## Examples @@ -2120,7 +2139,8 @@ defmodule Nebulex.Cache do ## Options - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example @@ -2179,7 +2199,8 @@ defmodule Nebulex.Cache do * `:metadata` – A map with additional metadata fields. Defaults to `%{}`. - See the "Shared options" section at the module documentation for more options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples diff --git a/lib/nebulex/cache/cluster.ex b/lib/nebulex/cache/cluster.ex deleted file mode 100644 index b7833472..00000000 --- a/lib/nebulex/cache/cluster.ex +++ /dev/null @@ -1,102 +0,0 @@ -defmodule Nebulex.Cache.Cluster do - # The module used by cache adapters for - # distributed caching functionality. - @moduledoc false - - @doc """ - Joins the node where the cache `name`'s supervisor process is running to the - `name`'s node group. - """ - @spec join(name :: atom) :: :ok - def join(name) do - pid = Process.whereis(name) || self() - - if pid in pg_members(name) do - :ok - else - :ok = pg_join(name, pid) - end - end - - @doc """ - Makes the node where the cache `name`'s supervisor process is running, leave - the `name`'s node group. - """ - @spec leave(name :: atom) :: :ok - def leave(name) do - pg_leave(name, Process.whereis(name) || self()) - end - - @doc """ - Returns the list of nodes joined to given `name`'s node group. - """ - @spec get_nodes(name :: atom) :: [node] - def get_nodes(name) do - name - |> pg_members() - |> Enum.map(&node/1) - |> :lists.usort() - end - - @doc """ - Selects one node based on the computation of the `key` slot. 
- """ - @spec get_node(name_or_nodes :: atom | [node], Nebulex.Cache.key(), keyslot :: module) :: node - def get_node(name_or_nodes, key, keyslot) - - def get_node(name, key, keyslot) when is_atom(name) do - name - |> get_nodes() - |> get_node(key, keyslot) - end - - def get_node(nodes, key, keyslot) when is_list(nodes) do - Enum.at(nodes, keyslot.hash_slot(key, length(nodes))) - end - - ## PG - - if Code.ensure_loaded?(:pg) do - defp pg_join(name, pid) do - :ok = :pg.join(__MODULE__, name, pid) - end - - defp pg_leave(name, pid) do - _ = :pg.leave(__MODULE__, name, pid) - :ok - end - - defp pg_members(name) do - :pg.get_members(__MODULE__, name) - end - else - # Inline common instructions - @compile {:inline, pg2_namespace: 1} - - defp pg_join(name, pid) do - name - |> ensure_namespace() - |> :pg2.join(pid) - end - - defp pg_leave(name, pid) do - name - |> ensure_namespace() - |> :pg2.leave(pid) - end - - defp pg_members(name) do - name - |> ensure_namespace() - |> :pg2.get_members() - end - - defp ensure_namespace(name) do - namespace = pg2_namespace(name) - :ok = :pg2.create(namespace) - namespace - end - - defp pg2_namespace(name), do: {:nbx, name} - end -end diff --git a/lib/nebulex/cache/entry.ex b/lib/nebulex/cache/kv.ex similarity index 87% rename from lib/nebulex/cache/entry.ex rename to lib/nebulex/cache/kv.ex index 116e55e9..d33fe7c6 100644 --- a/lib/nebulex/cache/entry.ex +++ b/lib/nebulex/cache/kv.ex @@ -1,4 +1,4 @@ -defmodule Nebulex.Cache.Entry do +defmodule Nebulex.Cache.KV do @moduledoc false import Nebulex.Helpers @@ -6,7 +6,7 @@ defmodule Nebulex.Cache.Entry do alias Nebulex.{Adapter, Time} # Inline common instructions - @compile {:inline, get_ttl: 1} + @compile {:inline, pop_ttl: 1} @doc """ Implementation for `c:Nebulex.Cache.fetch/2`. 
@@ -109,7 +109,11 @@ defmodule Nebulex.Cache.Entry do end defp do_put(name, key, value, on_write, opts) do - Adapter.with_meta(name, & &1.adapter.put(&1, key, value, get_ttl(opts), on_write, opts)) + Adapter.with_meta(name, fn %{adapter: adapter} = adapter_meta -> + {ttl, opts} = pop_ttl(opts) + + adapter.put(adapter_meta, key, value, ttl, on_write, opts) + end) end @doc """ @@ -154,7 +158,11 @@ defmodule Nebulex.Cache.Entry do end def do_put_all(name, entries, on_write, opts) do - Adapter.with_meta(name, & &1.adapter.put_all(&1, entries, get_ttl(opts), on_write, opts)) + Adapter.with_meta(name, fn %{adapter: adapter} = adapter_meta -> + {ttl, opts} = pop_ttl(opts) + + adapter.put_all(adapter_meta, entries, ttl, on_write, opts) + end) end @doc """ @@ -212,7 +220,10 @@ defmodule Nebulex.Cache.Entry do {get, get} {get, update} -> - {:ok, true} = adapter.put(adapter_meta, key, update, get_ttl(opts), :put, opts) + {ttl, opts} = pop_ttl(opts) + + {:ok, true} = adapter.put(adapter_meta, key, update, ttl, :put, opts) + {get, update} :pop when is_nil(current) -> @@ -220,6 +231,7 @@ defmodule Nebulex.Cache.Entry do :pop -> :ok = adapter.delete(adapter_meta, key, opts) + {current, nil} other -> @@ -248,7 +260,9 @@ defmodule Nebulex.Cache.Entry do {:error, _} = error -> throw({:return, error}) end - with {:ok, true} <- adapter.put(adapter_meta, key, value, get_ttl(opts), :put, opts) do + {ttl, opts} = pop_ttl(opts) + + with {:ok, true} <- adapter.put(adapter_meta, key, value, ttl, :put, opts) do {:ok, value} end end) @@ -269,22 +283,23 @@ defmodule Nebulex.Cache.Entry do def incr(name, key, amount, opts) def incr(name, key, amount, opts) when is_integer(amount) do - default = - case Keyword.fetch(opts, :default) do - {:ok, value} when is_integer(value) -> - value + Adapter.with_meta(name, fn %{adapter: adapter} = adapter_meta -> + {default, opts} = + case Keyword.pop(opts, :default) do + {nil, opts} -> + {0, opts} - {:ok, value} -> - raise ArgumentError, "expected default: to 
be an integer, got: #{inspect(value)}" + {val, opts} when is_integer(val) -> + {val, opts} - :error -> - 0 - end + {val, _opts} -> + raise ArgumentError, "expected default: to be an integer, got: #{inspect(val)}" + end - Adapter.with_meta( - name, - & &1.adapter.update_counter(&1, key, amount, get_ttl(opts), default, opts) - ) + {ttl, opts} = pop_ttl(opts) + + adapter.update_counter(adapter_meta, key, amount, ttl, default, opts) + end) end def incr(_cache, _key, amount, _opts) do @@ -369,17 +384,17 @@ defmodule Nebulex.Cache.Entry do ## Helpers - defp get_ttl(opts) do - case Keyword.fetch(opts, :ttl) do - {:ok, ttl} -> + defp pop_ttl(opts) do + case Keyword.pop(opts, :ttl) do + {nil, opts} -> + {:infinity, opts} + + {ttl, opts} -> if not Time.timeout?(ttl) do raise ArgumentError, "expected ttl: to be a valid timeout, got: #{inspect(ttl)}" end - ttl - - :error -> - :infinity + {ttl, opts} end end end diff --git a/lib/nebulex/cache/registry.ex b/lib/nebulex/cache/registry.ex index da2f786d..2a4dab09 100644 --- a/lib/nebulex/cache/registry.ex +++ b/lib/nebulex/cache/registry.ex @@ -30,8 +30,11 @@ defmodule Nebulex.Cache.Registry do def lookup(pid) when is_pid(pid) do case :persistent_term.get({__MODULE__, pid}, nil) do - {_ref, _name, value} -> {:ok, value} - nil -> wrap_error Nebulex.Error, reason: {:registry_lookup_error, pid} + {_ref, _name, value} -> + {:ok, value} + + nil -> + wrap_error Nebulex.Error, reason: {:registry_lookup_error, pid} end end diff --git a/lib/nebulex/cache/stats.ex b/lib/nebulex/cache/stats.ex index a7b04eb6..23c77752 100644 --- a/lib/nebulex/cache/stats.ex +++ b/lib/nebulex/cache/stats.ex @@ -27,16 +27,12 @@ defmodule Nebulex.Cache.Stats do """ def dispatch_stats(name, opts \\ []) do Adapter.with_meta(name, fn %{adapter: adapter} = meta -> - with true <- is_list(meta.telemetry_prefix), - {:ok, %Nebulex.Stats{} = info} <- adapter.stats(meta) do + with {:ok, %Nebulex.Stats{} = info} <- adapter.stats(meta) do :telemetry.execute( 
meta.telemetry_prefix ++ [:stats], info.measurements, Map.merge(info.metadata, opts[:metadata] || %{}) ) - else - {:error, _} = error -> error - _ -> :ok end end) end diff --git a/lib/nebulex/cache/supervisor.ex b/lib/nebulex/cache/supervisor.ex index f63deff0..1a647856 100644 --- a/lib/nebulex/cache/supervisor.ex +++ b/lib/nebulex/cache/supervisor.ex @@ -94,18 +94,14 @@ defmodule Nebulex.Cache.Supervisor do @doc false def start_child({mod, fun, args}, name, cache, adapter, meta) do - case apply(mod, fun, args) do - {:ok, pid} -> - # Add the pid and the adapter to the meta - meta = Map.merge(meta, %{pid: pid, cache: cache, adapter: adapter}) + with {:ok, pid} <- apply(mod, fun, args) do + # Add to the metadata: pid, name, cache, and adapter + meta = Map.merge(meta, %{pid: pid, name: name, cache: cache, adapter: adapter}) - # Register the started cache's pid - :ok = Nebulex.Cache.Registry.register(self(), name, meta) + # Register the started cache's pid + :ok = Nebulex.Cache.Registry.register(self(), name, meta) - {:ok, pid} - - other -> - other + {:ok, pid} end end diff --git a/lib/nebulex/helpers.ex b/lib/nebulex/helpers.ex index 5891a774..be328a57 100644 --- a/lib/nebulex/helpers.ex +++ b/lib/nebulex/helpers.ex @@ -4,6 +4,39 @@ defmodule Nebulex.Helpers do ## API + @doc """ + A wrapper for `Keyword.get/3` but validates the returned value invoking + the function `valid?`. + + Raises an `ArgumentError` in case the validation fails. 
+ + ## Examples + + iex> Nebulex.Helpers.get_option( + ...> [keys: [1, 2, 3]], + ...> :keys, + ...> "a list with at least one element", + ...> &((is_list(&1) and length(&1) > 0) or is_nil(&1)) + ...> ) + [1, 2, 3] + + iex> Nebulex.Helpers.get_option( + ...> [], + ...> :keys, + ...> "a list with at least one element", + ...> &((is_list(&1) and length(&1) > 0) or is_nil(&1)) + ...> ) + nil + + iex> Nebulex.Helpers.get_option( + ...> [keys: 123], + ...> :keys, + ...> "a list with at least one element", + ...> &((is_list(&1) and length(&1) > 0) or is_nil(&1)) + ...> ) + ** (ArgumentError) expected keys: to be a list with at least one element, got: 123 + + """ @spec get_option(keyword, atom, binary, (any -> boolean), term) :: term def get_option(opts, key, expected, valid?, default \\ nil) when is_list(opts) and is_atom(key) do @@ -26,6 +59,12 @@ defmodule Nebulex.Helpers do end end + @doc """ + Returns the implemented behaviours for the given `module`. + + Raises an `ArgumentError ` in case the given `module` is not compiled + or incorrect. + """ @spec module_behaviours(module, binary) :: [module] def module_behaviours(module, msg) do if Code.ensure_compiled(module) != {:module, module} do @@ -39,13 +78,45 @@ defmodule Nebulex.Helpers do do: behaviour end - @spec normalize_module_name([atom | binary | number]) :: module - def normalize_module_name(list) when is_list(list) do + @doc """ + Concatenates a list of "camelized" aliases and returns a new alias. + + It handles binaries, atoms, and numbers. 
+ + ## Examples + + iex> Nebulex.Helpers.camelize_and_concat([Foo, :bar]) + Foo.Bar + + iex> Nebulex.Helpers.camelize_and_concat([Foo, "bar"]) + Foo.Bar + + iex> Nebulex.Helpers.camelize_and_concat([Foo, "Bar", 1]) + :"Elixir.Foo.Bar.1" + + """ + @spec camelize_and_concat([atom | binary | number]) :: module + def camelize_and_concat(list) when is_list(list) do list |> Enum.map(&Macro.camelize("#{&1}")) |> Module.concat() end + @doc """ + Similar to `Keyword.pop_first/3`, but lazily returns and removes the first + value associated with key in the keyword list. + """ + @spec kw_pop_first_lazy(keyword, term, (() -> term)) :: {term, keyword} + @compile {:inline, kw_pop_first_lazy: 3} + def kw_pop_first_lazy(keywords, key, fun) when is_list(keywords) and is_atom(key) do + case :lists.keytake(key, 1, keywords) do + {:value, {^key, value}, rest} -> {value, rest} + false -> {fun.(), keywords} + end + end + + ## Macros + @doc false defmacro unwrap_or_raise(call) do quote do diff --git a/lib/nebulex/rpc.ex b/lib/nebulex/rpc.ex deleted file mode 100644 index 6d744695..00000000 --- a/lib/nebulex/rpc.ex +++ /dev/null @@ -1,202 +0,0 @@ -defmodule Nebulex.RPC do - @moduledoc """ - RPC utilities for distributed task execution. - - This module uses supervised tasks underneath `Task.Supervisor`. - - > **NOTE:** The approach by using distributed tasks will be deprecated - in the future in favor of `:erpc`. 
- """ - - import Nebulex.Helpers - - @typedoc "Task callback" - @type callback :: {module, atom, [term]} - - @typedoc "Group entry: node -> callback" - @type node_callback :: {node, callback} - - @typedoc "Node group" - @type node_group :: %{optional(node) => callback} | [node_callback] - - @typedoc "Error kind" - @type error_kind :: :error | :exit | :throw - - @typedoc "Reducer function spec" - @type reducer_fun :: ({:ok, term} | {error_kind, term}, node_callback | node, term -> term) - - @typedoc "Reducer spec" - @type reducer :: {acc :: term, reducer_fun} - - ## API - - @doc """ - Evaluates `apply(mod, fun, args)` on node `node` and returns the corresponding - evaluation result, or `{:error, Nebulex.Error.t()}` if the call fails. - - A timeout, in milliseconds or `:infinity`, can be given with a default value - of `5000`. - - ## Example - - iex> Nebulex.RPC.call(:node1, Kernel, :to_string, [1]) - "1" - - """ - @spec call(node, module, atom, [term], timeout) :: term | {:error, Nebulex.Error.t()} - def call(node, mod, fun, args, timeout) - - def call(node, mod, fun, args, _timeout) when node == node() do - apply(mod, fun, args) - end - - def call(node, mod, fun, args, timeout) do - with {:badrpc, reason} <- :rpc.call(node, mod, fun, args, timeout) do - wrap_error Nebulex.Error, reason: {:rpc_error, {node, reason}}, module: __MODULE__ - end - end - - @doc """ - In contrast to a regular single-node RPC, a multicall is an RPC that is sent - concurrently from one client to multiple servers. The function evaluates - `apply(mod, fun, args)` on each `node_group` entry and collects the answers. - Then, evaluates the `reducer` function (set in the `opts`) on each answer. - - ## Options - - * `:timeout` - A timeout, in milliseconds or `:infinity`, can be given with - a default value of `5000`. It uses `Task.yield_many/2` internally. - - * `:reducer` - Reducer function to be executed on each collected result. - (check out `reducer` type). 
- - ## Example - - iex> Nebulex.RPC.multicall( - ...> %{ - ...> node1: {Kernel, :to_string, [1]}, - ...> node2: {Kernel, :to_string, [2]} - ...> }, - ...> timeout: 10_000, - ...> reducer: { - ...> [], - ...> fn - ...> {:ok, res}, _node_callback, acc -> - ...> [res | acc] - ...> - ...> {:error, _}, _node_callback, acc -> - ...> acc - ...> end - ...> } - ...> ) - ["1", "2"] - - """ - @spec multicall(node_group, keyword) :: term - def multicall(node_group, opts \\ []) do - {reducer_acc, reducer_fun} = opts[:reducer] || default_reducer() - timeout = opts[:timeout] || 5000 - - for {node, {mod, fun, args}} = group <- node_group do - {:erpc.send_request(node, mod, fun, args), group} - end - |> Enum.reduce(reducer_acc, fn {req_id, group}, acc -> - try do - res = :erpc.receive_response(req_id, timeout) - - reducer_fun.({:ok, res}, group, acc) - rescue - exception in ErlangError -> - reducer_fun.({:error, exception.original}, group, acc) - catch - :exit, reason -> - reducer_fun.({:exit, reason}, group, acc) - end - end) - end - - @doc """ - Similar to `multicall/3` but it uses `:erpc.multicall/5` under the hood. - - ## Options - - Same options as `multicall/3`. - - ## Example - - iex> Nebulex.RPC.multicall( - ...> [:node1, :node2], - ...> Kernel, - ...> :to_string, - ...> [1], - ...> timeout: 5000, - ...> reducer: { - ...> [], - ...> fn - ...> {:ok, res}, _node_callback, acc -> - ...> [res | acc] - ...> - ...> {:error, _}, _node_callback, acc -> - ...> acc - ...> end - ...> } - ...> ) - ["1", "1"] - - """ - @spec multicall([node], module, atom, [term], keyword) :: term - def multicall(nodes, mod, fun, args, opts \\ []) do - {reducer_acc, reducer_fun} = opts[:reducer] || default_reducer() - - nodes - |> :erpc.multicall(mod, fun, args, opts[:timeout] || 5000) - |> :lists.zip(nodes) - |> Enum.reduce(reducer_acc, fn {res, node}, acc -> - reducer_fun.(res, node, acc) - end) - end - - ## Helpers - - @doc """ - Helper for formatting RPC errors. 
- """ - @spec format_error(term) :: binary - def format_error(error) - - def format_error({:rpc_error, {node, reason}}) do - "RPC call failed on node #{inspect(node)} with reason: #{inspect(reason)}" - end - - def format_error({:rpc_multicall_error, errors}) when is_list(errors) do - """ - RPC multicall failed with errors ([{node, error}, ...]): - - #{inspect(errors, pretty: true)} - """ - end - - ## Private Functions - - defp default_reducer do - { - {[], []}, - fn - {:ok, {:ok, res}}, _node_callback, {ok, err} -> - {[res | ok], err} - - {:ok, {:error, _} = error}, node_callback, {ok, err} -> - {ok, [{node_callback, error} | err]} - - {:ok, res}, _node_callback, {ok, err} -> - {[res | ok], err} - - {kind, _} = error, {node, callback}, {ok, err} when kind in [:error, :exit, :throw] -> - {ok, [{node, {error, callback}} | err]} - - {kind, _} = error, node, {ok, err} when kind in [:error, :exit, :throw] -> - {ok, [{node, error} | err]} - end - } - end -end diff --git a/mix.exs b/mix.exs index 0ba50fec..f4a4e2e1 100644 --- a/mix.exs +++ b/mix.exs @@ -8,7 +8,7 @@ defmodule Nebulex.MixProject do [ app: :nebulex, version: @version, - elixir: "~> 1.9", + elixir: "~> 1.11", elixirc_paths: elixirc_paths(Mix.env()), aliases: aliases(), deps: deps(), @@ -36,7 +36,7 @@ defmodule Nebulex.MixProject do ] end - defp elixirc_paths(:test), do: ["lib", "test/support", "test/dialyzer"] + defp elixirc_paths(:test), do: ["lib", "test/dialyzer"] defp elixirc_paths(_), do: ["lib"] def application do @@ -48,18 +48,16 @@ defmodule Nebulex.MixProject do defp deps do [ - {:nimble_options, "~> 0.4"}, - {:shards, "~> 1.0", optional: true}, + {:nimble_options, "~> 0.5"}, {:decorator, "~> 1.4", optional: true}, - {:telemetry, "~> 0.4 or ~> 1.0", optional: true}, + {:telemetry, "~> 1.2", optional: true}, # Test & Code Analysis - {:excoveralls, "~> 0.14", only: :test}, - {:credo, "~> 1.6", only: [:dev, :test], runtime: false}, + {:excoveralls, "~> 0.16", only: :test}, + {:credo, "~> 1.7", only: 
[:dev, :test], runtime: false}, {:dialyxir, "~> 1.2", only: [:dev, :test], runtime: false}, {:sobelow, "~> 0.11", only: [:dev, :test], runtime: false}, {:stream_data, "~> 0.5", only: [:dev, :test]}, - {:ex2ms, "~> 1.6", only: :test}, {:mimic, "~> 1.7", only: :test}, # Benchmark Test @@ -67,7 +65,7 @@ defmodule Nebulex.MixProject do {:benchee_html, "~> 1.0", only: [:dev, :test]}, # Docs - {:ex_doc, "~> 0.28", only: [:dev, :test], runtime: false}, + {:ex_doc, "~> 0.29", only: [:dev, :test], runtime: false}, {:inch_ex, "~> 2.0", only: :docs} ] end @@ -117,7 +115,7 @@ defmodule Nebulex.MixProject do # Cache API group_for_function("User callbacks"), group_for_function("Runtime API"), - group_for_function("Entry API"), + group_for_function("KV API"), group_for_function("Query API"), group_for_function("Persistence API"), group_for_function("Transaction API"), @@ -130,7 +128,7 @@ defmodule Nebulex.MixProject do defp dialyzer do [ - plt_add_apps: [:shards, :mix, :telemetry, :ex_unit], + plt_add_apps: [:mix, :telemetry, :ex_unit], plt_file: {:no_warn, "priv/plts/" <> plt_file_name()}, flags: [ :unmatched_returns, diff --git a/mix.lock b/mix.lock index 7f9f0997..37c3109b 100644 --- a/mix.lock +++ b/mix.lock @@ -4,18 +4,17 @@ "benchee_json": {:hex, :benchee_json, "1.0.0", "cc661f4454d5995c08fe10dd1f2f72f229c8f0fb1c96f6b327a8c8fc96a91fe5", [:mix], [{:benchee, ">= 0.99.0 and < 2.0.0", [hex: :benchee, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "da05d813f9123505f870344d68fb7c86a4f0f9074df7d7b7e2bb011a63ec231c"}, "bunt": {:hex, :bunt, "0.2.1", "e2d4792f7bc0ced7583ab54922808919518d0e57ee162901a16a1b6664ef3b14", [:mix], [], "hexpm", "a330bfb4245239787b15005e66ae6845c9cd524a288f0d141c148b02603777a5"}, "certifi": {:hex, :certifi, "2.9.0", "6f2a475689dd47f19fb74334859d460a2dc4e3252a3324bd2111b8f0429e7e21", [:rebar3], [], "hexpm", "266da46bdb06d6c6d35fde799bcb28d36d985d424ad7c08b5bb48f5b5cdd4641"}, - "credo": {:hex, 
:credo, "1.6.7", "323f5734350fd23a456f2688b9430e7d517afb313fbd38671b8a4449798a7854", [:mix], [{:bunt, "~> 0.2.1", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2.8", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "41e110bfb007f7eda7f897c10bf019ceab9a0b269ce79f015d54b0dcf4fc7dd3"}, + "credo": {:hex, :credo, "1.7.0", "6119bee47272e85995598ee04f2ebbed3e947678dee048d10b5feca139435f75", [:mix], [{:bunt, "~> 0.2.1", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2.8", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "6839fcf63d1f0d1c0f450abc8564a57c43d644077ab96f2934563e68b8a769d7"}, "decorator": {:hex, :decorator, "1.4.0", "a57ac32c823ea7e4e67f5af56412d12b33274661bb7640ec7fc882f8d23ac419", [:mix], [], "hexpm", "0a07cedd9083da875c7418dea95b78361197cf2bf3211d743f6f7ce39656597f"}, "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, "dialyxir": {:hex, :dialyxir, "1.2.0", "58344b3e87c2e7095304c81a9ae65cb68b613e28340690dfe1a5597fd08dec37", [:mix], [{:erlex, ">= 0.2.6", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "61072136427a851674cab81762be4dbeae7679f85b1272b6d25c3a839aff8463"}, - "earmark_parser": {:hex, :earmark_parser, "1.4.29", "149d50dcb3a93d9f3d6f3ecf18c918fb5a2d3c001b5d3305c926cddfbd33355b", [:mix], [], "hexpm", "4902af1b3eb139016aed210888748db8070b8125c2342ce3dcae4f38dcc63503"}, + "earmark_parser": {:hex, :earmark_parser, "1.4.31", "a93921cdc6b9b869f519213d5bc79d9e218ba768d7270d46fdcf1c01bacff9e2", [:mix], [], "hexpm", "317d367ee0335ef037a87e46c91a2269fef6306413f731e8ec11fc45a7efd059"}, "erlex": {:hex, :erlex, "0.2.6", "c7987d15e899c7a2f34f5420d2a2ea0d659682c06ac607572df55a43753aa12e", [:mix], [], 
"hexpm", "2ed2e25711feb44d52b17d2780eabf998452f6efda104877a3881c2f8c0c0c75"}, - "ex2ms": {:hex, :ex2ms, "1.6.1", "66d472eb14da43087c156e0396bac3cc7176b4f24590a251db53f84e9a0f5f72", [:mix], [], "hexpm", "a7192899d84af03823a8ec2f306fa858cbcce2c2e7fd0f1c49e05168fb9c740e"}, - "ex_doc": {:hex, :ex_doc, "0.29.0", "4a1cb903ce746aceef9c1f9ae8a6c12b742a5461e6959b9d3b24d813ffbea146", [:mix], [{:earmark_parser, "~> 1.4.19", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1", [hex: :makeup_erlang, repo: "hexpm", optional: false]}], "hexpm", "f096adb8bbca677d35d278223361c7792d496b3fc0d0224c9d4bc2f651af5db1"}, - "excoveralls": {:hex, :excoveralls, "0.15.0", "ac941bf85f9f201a9626cc42b2232b251ad8738da993cf406a4290cacf562ea4", [:mix], [{:hackney, "~> 1.16", [hex: :hackney, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "9631912006b27eca30a2f3c93562bc7ae15980afb014ceb8147dc5cdd8f376f1"}, + "ex_doc": {:hex, :ex_doc, "0.29.3", "f07444bcafb302db86e4f02d8bbcd82f2e881a0dcf4f3e4740e4b8128b9353f7", [:mix], [{:earmark_parser, "~> 1.4.31", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1", [hex: :makeup_erlang, repo: "hexpm", optional: false]}], "hexpm", "3dc6787d7b08801ec3b51e9bd26be5e8826fbf1a17e92d1ebc252e1a1c75bfe1"}, + "excoveralls": {:hex, :excoveralls, "0.16.0", "41f4cfbf7caaa3bc2cf411db6f89c1f53afedf0f1fe8debac918be1afa19c668", [:mix], [{:hackney, "~> 1.16", [hex: :hackney, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "401205356482ab99fb44d9812cd14dd83b65de8e7ae454697f8b34ba02ecd916"}, "file_system": {:hex, :file_system, "0.2.10", "fb082005a9cd1711c05b5248710f8826b02d7d1784e7c3451f9c1231d4fc162d", [:mix], [], "hexpm", 
"41195edbfb562a593726eda3b3e8b103a309b733ad25f3d642ba49696bf715dc"}, - "hackney": {:hex, :hackney, "1.18.1", "f48bf88f521f2a229fc7bae88cf4f85adc9cd9bcf23b5dc8eb6a1788c662c4f6", [:rebar3], [{:certifi, "~>2.9.0", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~>6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~>1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~>1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.3.1", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~>1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~>0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "a4ecdaff44297e9b5894ae499e9a070ea1888c84afdd1fd9b7b2bc384950128e"}, - "idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~>0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"}, + "hackney": {:hex, :hackney, "1.18.1", "f48bf88f521f2a229fc7bae88cf4f85adc9cd9bcf23b5dc8eb6a1788c662c4f6", [:rebar3], [{:certifi, "~> 2.9.0", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~> 6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~> 1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~> 1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.3.1", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~> 1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "a4ecdaff44297e9b5894ae499e9a070ea1888c84afdd1fd9b7b2bc384950128e"}, + "idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~> 0.7.0", [hex: 
:unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"}, "inch_ex": {:hex, :inch_ex, "2.0.0", "24268a9284a1751f2ceda569cd978e1fa394c977c45c331bb52a405de544f4de", [:mix], [{:bunt, "~> 0.2", [hex: :bunt, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "96d0ec5ecac8cf63142d02f16b7ab7152cf0f0f1a185a80161b758383c9399a8"}, "jason": {:hex, :jason, "1.4.0", "e855647bc964a44e2f67df589ccf49105ae039d4179db7f6271dfd3843dc27e6", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "79a3791085b2a0f743ca04cec0f7be26443738779d09302e01318f97bdb82121"}, "makeup": {:hex, :makeup, "1.1.0", "6b67c8bc2882a6b6a445859952a602afc1a41c2e08379ca057c0f525366fc3ca", [:mix], [{:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "0a45ed501f4a8897f580eabf99a2e5234ea3e75a4373c8a52824f6e873be57a6"}, @@ -24,14 +23,13 @@ "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"}, "mimerl": {:hex, :mimerl, "1.2.0", "67e2d3f571088d5cfd3e550c383094b47159f3eee8ffa08e64106cdf5e981be3", [:rebar3], [], "hexpm", "f278585650aa581986264638ebf698f8bb19df297f66ad91b18910dfc6e19323"}, "mimic": {:hex, :mimic, "1.7.4", "cd2772ffbc9edefe964bc668bfd4059487fa639a5b7f1cbdf4fd22946505aa4f", [:mix], [], "hexpm", "437c61041ecf8a7fae35763ce89859e4973bb0666e6ce76d75efc789204447c3"}, - "nimble_options": {:hex, :nimble_options, "0.5.1", "5c166f7669e40333191bea38e3bd3811cc13f459f1e4be49e89128a21b5d8c4d", [:mix], [], "hexpm", "d176cf7baa4fef0ceb301ca3eb8b55bd7de3e45f489c4f8b4f2849f1f114ef3e"}, + "nimble_options": {:hex, :nimble_options, "0.5.2", "42703307b924880f8c08d97719da7472673391905f528259915782bb346e0a1b", [:mix], [], "hexpm", 
"4da7f904b915fd71db549bcdc25f8d56f378ef7ae07dc1d372cbe72ba950dce0"}, "nimble_parsec": {:hex, :nimble_parsec, "1.2.3", "244836e6e3f1200c7f30cb56733fd808744eca61fd182f731eac4af635cc6d0b", [:mix], [], "hexpm", "c8d789e39b9131acf7b99291e93dae60ab48ef14a7ee9d58c6964f59efb570b0"}, "parse_trans": {:hex, :parse_trans, "3.3.1", "16328ab840cc09919bd10dab29e431da3af9e9e7e7e6f0089dd5a2d2820011d8", [:rebar3], [], "hexpm", "07cd9577885f56362d414e8c4c4e6bdf10d43a8767abb92d24cbe8b24c54888b"}, - "shards": {:hex, :shards, "1.0.1", "1bdbbf047db27f3c3eb800a829d4a47062c84d5543cbfebcfc4c14d038bf9220", [:make, :rebar3], [], "hexpm", "2c57788afbf053c4024366772892beee89b8b72e884e764fb0a075dfa7442041"}, "sobelow": {:hex, :sobelow, "0.11.1", "23438964486f8112b41e743bbfd402da3e5b296fdc9eacab29914b79c48916dd", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "9897363a7eff96f4809304a90aad819e2ad5e5d24db547af502885146746a53c"}, "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.6", "cf344f5692c82d2cd7554f5ec8fd961548d4fd09e7d22f5b62482e5aeaebd4b0", [:make, :mix, :rebar3], [], "hexpm", "bdb0d2471f453c88ff3908e7686f86f9be327d065cc1ec16fa4540197ea04680"}, "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"}, "stream_data": {:hex, :stream_data, "0.5.0", "b27641e58941685c75b353577dc602c9d2c12292dd84babf506c2033cd97893e", [:mix], [], "hexpm", "012bd2eec069ada4db3411f9115ccafa38540a3c78c4c0349f151fc761b9e271"}, - "telemetry": {:hex, :telemetry, "1.1.0", "a589817034a27eab11144ad24d5c0f9fab1f58173274b1e9bae7074af9cbee51", [:rebar3], [], "hexpm", "b727b2a1f75614774cff2d7565b64d0dfa5bd52ba517f16543e6fc7efcc0df48"}, + "telemetry": {:hex, :telemetry, "1.2.1", "68fdfe8d8f05a8428483a97d7aab2f268aaff24b49e0f599faa091f1d4e7f61c", [:rebar3], [], "hexpm", "dad9ce9d8effc621708f99eac538ef1cbe05d6a874dd741de2e689c47feafed5"}, 
"unicode_util_compat": {:hex, :unicode_util_compat, "0.7.0", "bc84380c9ab48177092f43ac89e4dfa2c6d62b40b8bd132b1059ecc7232f9a78", [:rebar3], [], "hexpm", "25eee6d67df61960cf6a794239566599b09e17e668d3700247bc498638152521"}, } diff --git a/test/nebulex/adapters/local/generation_test.exs b/test/nebulex/adapters/local/generation_test.exs deleted file mode 100644 index a5216fe6..00000000 --- a/test/nebulex/adapters/local/generation_test.exs +++ /dev/null @@ -1,379 +0,0 @@ -defmodule Nebulex.Adapters.Local.GenerationTest do - use ExUnit.Case, async: true - - defmodule LocalWithSizeLimit do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local, - gc_interval: :timer.hours(1) - end - - import Nebulex.CacheCase - - alias Nebulex.Adapters.Local.Generation - alias Nebulex.Adapters.Local.GenerationTest.LocalWithSizeLimit - alias Nebulex.TestCache.Cache - - describe "init" do - test "ok: with default options" do - assert {:ok, _pid} = LocalWithSizeLimit.start_link() - - assert %Nebulex.Adapters.Local.Generation{ - allocated_memory: nil, - backend: :ets, - backend_opts: [ - :set, - :public, - {:keypos, 2}, - {:read_concurrency, true}, - {:write_concurrency, true} - ], - gc_cleanup_max_timeout: 600_000, - gc_cleanup_min_timeout: 10_000, - gc_cleanup_ref: nil, - gc_heartbeat_ref: nil, - gc_interval: nil, - max_size: nil, - meta_tab: meta_tab, - stats_counter: nil - } = Generation.get_state(LocalWithSizeLimit) - - assert is_reference(meta_tab) - - :ok = LocalWithSizeLimit.stop() - end - - test "ok: with custom options" do - assert {:ok, _pid} = - LocalWithSizeLimit.start_link( - gc_interval: 10, - max_size: 10, - allocated_memory: 1000 - ) - - assert %Nebulex.Adapters.Local.Generation{ - allocated_memory: 1000, - backend: :ets, - backend_opts: [ - :set, - :public, - {:keypos, 2}, - {:read_concurrency, true}, - {:write_concurrency, true} - ], - gc_cleanup_max_timeout: 600_000, - gc_cleanup_min_timeout: 10_000, - gc_cleanup_ref: gc_cleanup_ref, 
- gc_heartbeat_ref: gc_heartbeat_ref, - gc_interval: 10, - max_size: 10, - meta_tab: meta_tab, - stats_counter: nil - } = Generation.get_state(LocalWithSizeLimit) - - assert is_reference(gc_cleanup_ref) - assert is_reference(gc_heartbeat_ref) - assert is_reference(meta_tab) - - :ok = LocalWithSizeLimit.stop() - end - - test "error: invalid gc_cleanup_min_timeout" do - _ = Process.flag(:trap_exit, true) - - assert {:error, {%ArgumentError{message: err}, _}} = - LocalWithSizeLimit.start_link( - gc_interval: 3600, - gc_cleanup_min_timeout: -1, - gc_cleanup_max_timeout: -1 - ) - - assert Regex.match?(~r/invalid value for :gc_cleanup_min_timeout/, err) - end - end - - describe "gc" do - setup_with_dynamic_cache Cache, - :gc_test, - backend: :shards, - gc_interval: 1000, - compressed: true - - test "create generations", %{cache: cache, name: name} do - assert generations_len(name) == 1 - - :ok = Process.sleep(1020) - assert generations_len(name) == 2 - - assert cache.delete_all!() == 0 - - :ok = Process.sleep(1020) - assert generations_len(name) == 2 - end - - test "create new generation and reset timeout", %{cache: cache, name: name} do - assert generations_len(name) == 1 - - :ok = Process.sleep(800) - - cache.with_dynamic_cache(name, fn -> - cache.new_generation() - end) - - assert generations_len(name) == 2 - - :ok = Process.sleep(500) - assert generations_len(name) == 2 - - :ok = Process.sleep(520) - assert generations_len(name) == 2 - end - - test "create new generation without reset timeout", %{cache: cache, name: name} do - assert generations_len(name) == 1 - - :ok = Process.sleep(800) - - cache.with_dynamic_cache(name, fn -> - cache.new_generation(reset_timer: false) - end) - - assert generations_len(name) == 2 - - :ok = Process.sleep(500) - assert generations_len(name) == 2 - end - - test "reset timer", %{cache: cache, name: name} do - assert generations_len(name) == 1 - - :ok = Process.sleep(800) - - cache.with_dynamic_cache(name, fn -> - 
cache.reset_generation_timer() - end) - - :ok = Process.sleep(220) - assert generations_len(name) == 1 - - :ok = Process.sleep(1000) - assert generations_len(name) == 2 - end - end - - describe "allocated memory" do - test "cleanup is triggered when max generation size is reached" do - {:ok, _pid} = - LocalWithSizeLimit.start_link( - gc_interval: 3_600_000, - allocated_memory: 100_000, - gc_cleanup_min_timeout: 1000, - gc_cleanup_max_timeout: 3000 - ) - - assert generations_len(LocalWithSizeLimit) == 1 - - {mem_size, _} = Generation.memory_info(LocalWithSizeLimit) - :ok = Generation.realloc(LocalWithSizeLimit, mem_size * 2) - - # Trigger the cleanup event - :ok = check_cache_size(LocalWithSizeLimit) - - :ok = flood_cache(mem_size, mem_size * 2) - - assert generations_len(LocalWithSizeLimit) == 1 - assert_mem_size(:>) - - # Wait until the cleanup event is triggered - :ok = Process.sleep(3100) - - wait_until(fn -> - assert generations_len(LocalWithSizeLimit) == 2 - assert_mem_size(:<=) - end) - - :ok = flood_cache(mem_size, mem_size * 2) - - wait_until(fn -> - assert generations_len(LocalWithSizeLimit) == 2 - assert_mem_size(:>) - end) - - :ok = flood_cache(mem_size, mem_size * 2) - - wait_until(fn -> - assert generations_len(LocalWithSizeLimit) == 2 - assert_mem_size(:>) - end) - - # triggers the cleanup event - :ok = check_cache_size(LocalWithSizeLimit) - - assert generations_len(LocalWithSizeLimit) == 2 - - :ok = LocalWithSizeLimit.stop() - end - - test "cleanup while cache is being used" do - {:ok, _pid} = - LocalWithSizeLimit.start_link( - gc_interval: 3_600_000, - allocated_memory: 100, - gc_cleanup_min_timeout: 1000, - gc_cleanup_max_timeout: 3000 - ) - - assert generations_len(LocalWithSizeLimit) == 1 - - tasks = for i <- 1..3, do: Task.async(fn -> task_fun(LocalWithSizeLimit, i) end) - - for _ <- 1..100 do - :ok = Process.sleep(10) - - LocalWithSizeLimit - |> Generation.server() - |> send(:cleanup) - end - - :ok = Enum.each(tasks, &Task.shutdown/1) - - :ok = 
LocalWithSizeLimit.stop() - end - end - - describe "max size" do - test "cleanup is triggered when size limit is reached" do - {:ok, _pid} = - LocalWithSizeLimit.start_link( - gc_interval: 3_600_000, - max_size: 3, - gc_cleanup_min_timeout: 1000, - gc_cleanup_max_timeout: 1500 - ) - - # Initially there should be only 1 generation and no entries - assert generations_len(LocalWithSizeLimit) == 1 - assert LocalWithSizeLimit.count_all!() == 0 - - # Put some entries to exceed the max size - _ = cache_put(LocalWithSizeLimit, 1..4) - - # Validate current size - assert LocalWithSizeLimit.count_all!() == 4 - - # Wait the max cleanup timeout - :ok = Process.sleep(1600) - - # There should be 2 generation now - assert generations_len(LocalWithSizeLimit) == 2 - - # The entries should be now in the older generation - assert LocalWithSizeLimit.count_all!() == 4 - - # Wait the min cleanup timeout since max size is exceeded - :ok = Process.sleep(1100) - - # Cache should be empty now - assert LocalWithSizeLimit.count_all!() == 0 - - # Put some entries without exceeding the max size - _ = cache_put(LocalWithSizeLimit, 5..6) - - # Validate current size - assert LocalWithSizeLimit.count_all!() == 2 - - # Wait the max cleanup timeout (timeout should be relative to the size) - :ok = Process.sleep(1600) - - # The entries should be in the newer generation yet - assert LocalWithSizeLimit.count_all!() == 2 - - # Put some entries to exceed the max size - _ = cache_put(LocalWithSizeLimit, 7..8) - - # Wait the max cleanup timeout - :ok = Process.sleep(1600) - - # The entries should be in the newer generation yet - assert LocalWithSizeLimit.count_all!() == 4 - - # Wait the min cleanup timeout since max size is exceeded - :ok = Process.sleep(1100) - - # Cache should be empty now - assert LocalWithSizeLimit.count_all!() == 0 - - # Stop the cache - :ok = LocalWithSizeLimit.stop() - end - - test "cleanup works ok when gc_interval not set or is nil" do - {:ok, _pid} = - LocalWithSizeLimit.start_link( 
- max_size: 3, - gc_cleanup_min_timeout: 1000, - gc_cleanup_max_timeout: 1500 - ) - - # Put some entries to exceed the max size - _ = cache_put(LocalWithSizeLimit, 1..4) - - # Wait the max cleanup timeout - :ok = Process.sleep(1600) - - # Assert not crashed - assert LocalWithSizeLimit.count_all!() == 4 - - # Stop the cache - :ok = LocalWithSizeLimit.stop() - end - end - - ## Private Functions - - defp check_cache_size(cache) do - :cleanup = - cache - |> Generation.server() - |> send(:cleanup) - - :ok = Process.sleep(1000) - end - - defp flood_cache(mem_size, max_size) when mem_size > max_size do - :ok - end - - defp flood_cache(mem_size, max_size) when mem_size <= max_size do - :ok = - 100_000 - |> :rand.uniform() - |> LocalWithSizeLimit.put(generate_value(1000)) - - :ok = Process.sleep(500) - {mem_size, _} = Generation.memory_info(LocalWithSizeLimit) - flood_cache(mem_size, max_size) - end - - defp assert_mem_size(greater_or_less) do - {mem_size, max_size} = Generation.memory_info(LocalWithSizeLimit) - assert apply(Kernel, greater_or_less, [mem_size, max_size]) - end - - defp generate_value(n) do - for(_ <- 1..n, do: "a") - end - - defp generations_len(name) do - name - |> Generation.list() - |> length() - end - - defp task_fun(cache, i) do - :ok = cache.put("#{inspect(self())}.#{i}", i) - :ok = Process.sleep(1) - task_fun(cache, i + 1) - end -end diff --git a/test/nebulex/adapters/local_duplicate_keys_test.exs b/test/nebulex/adapters/local_duplicate_keys_test.exs deleted file mode 100644 index 4875d7dc..00000000 --- a/test/nebulex/adapters/local_duplicate_keys_test.exs +++ /dev/null @@ -1,180 +0,0 @@ -defmodule Nebulex.Adapters.LocalDuplicateKeysTest do - use ExUnit.Case, async: true - - defmodule ETS do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule Shards do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - import Ex2ms - - alias Nebulex.Adapters.LocalDuplicateKeysTest.{ETS, 
Shards} - - setup do - {:ok, ets} = ETS.start_link(backend_type: :duplicate_bag) - {:ok, shards} = Shards.start_link(backend: :shards, backend_type: :duplicate_bag) - - on_exit(fn -> - :ok = Process.sleep(100) - if Process.alive?(ets), do: ETS.stop() - if Process.alive?(shards), do: Shards.stop() - end) - - {:ok, caches: [ETS, Shards]} - end - - describe "duplicate keys" do - test "get and get_all", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: 1, a: 2, a: 2, b: 1, b: 2, c: 1) - - assert cache.get!(:a) == [1, 2, 2] - assert cache.get!(:b) == [1, 2] - assert cache.get!(:c) == 1 - - assert cache.get_all!([:a, :b, :c]) == %{a: [1, 2, 2], b: [1, 2], c: 1} - end) - end - - test "take!", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: 1, a: 2, a: 2, b: 1, b: 2, c: 1) - - assert cache.take!(:a) == [1, 2, 2] - assert cache.take!(:b) == [1, 2] - assert cache.take!(:c) == 1 - - assert cache.get_all!([:a, :b, :c]) == %{} - end) - end - - test "delete", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, 1) - :ok = cache.put(:a, 2) - :ok = cache.put(:a, 2) - - assert cache.get!(:a) == [1, 2, 2] - assert cache.delete!(:a) == :ok - refute cache.get!(:a) - end) - end - - test "put_new", %{caches: caches} do - for_all_caches(caches, fn cache -> - assert cache.put_new(:a, 1) == {:ok, true} - :ok = cache.put(:a, 2) - assert cache.put_new(:a, 3) == {:ok, false} - - assert cache.get!(:a) == [1, 2] - end) - end - - test "has_key?", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, 1) - :ok = cache.put(:a, 2) - - assert cache.has_key?(:a) == {:ok, true} - assert cache.has_key?(:b) == {:ok, false} - end) - end - - test "ttl", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, 1, ttl: 5000) - :ok = cache.put(:a, 2, ttl: 10_000) - :ok = cache.put(:a, 3) - - {:ok, [ttl1, ttl2, ttl3]} = cache.ttl(:a) - assert ttl1 > 1000 - assert ttl2 
> 6000 - assert ttl3 == :infinity - - assert {:error, %Nebulex.KeyError{key: :b}} = cache.ttl(:b) - end) - end - - test "count_all and delete_all", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: 1, a: 2, a: 2, b: 1, b: 2, c: 1) - - assert cache.count_all!() == 6 - assert cache.delete_all!() == 6 - assert cache.count_all!() == 0 - end) - end - - test "all and stream using match_spec queries", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: 1, a: 2, a: 2, b: 1, b: 2, c: 1) - - test_ms = - fun do - {_, key, value, _, _} when value == 2 -> key - end - - res_stream = test_ms |> cache.stream!() |> Enum.to_list() |> Enum.sort() - res_query = test_ms |> cache.all!() |> Enum.sort() - - assert res_stream == [:a, :a, :b] - assert res_query == res_stream - end) - end - end - - describe "unsupported commands" do - test "replace", %{caches: caches} do - for_all_caches(caches, fn cache -> - assert_raise ArgumentError, fn -> - cache.replace(:a, 1) - end - end) - end - - test "incr", %{caches: caches} do - for_all_caches(caches, fn cache -> - assert_raise ArgumentError, fn -> - cache.incr(:a) - end - end) - end - - test "expire", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, 1) - :ok = cache.put(:a, 2) - - assert_raise ArgumentError, fn -> - cache.expire(:a, 5000) - end - end) - end - - test "touch", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, 1) - :ok = cache.put(:a, 2) - - assert_raise ArgumentError, fn -> - cache.touch(:a) - end - end) - end - end - - ## Helpers - - defp for_all_caches(caches, fun) do - Enum.each(caches, fn cache -> - fun.(cache) - end) - end -end diff --git a/test/nebulex/adapters/local_ets_test.exs b/test/nebulex/adapters/local_ets_test.exs deleted file mode 100644 index fc8da49b..00000000 --- a/test/nebulex/adapters/local_ets_test.exs +++ /dev/null @@ -1,22 +0,0 @@ -defmodule Nebulex.Adapters.LocalEtsTest do - use 
ExUnit.Case, async: true - - # Inherit tests - use Nebulex.LocalTest - use Nebulex.CacheTest - - import Nebulex.CacheCase, only: [setup_with_dynamic_cache: 3] - - alias Nebulex.Adapter - alias Nebulex.TestCache.Cache - - setup_with_dynamic_cache Cache, :local_with_ets, purge_chunk_size: 10 - - describe "ets" do - test "backend", %{name: name} do - Adapter.with_meta(name, fn meta -> - assert meta.backend == :ets - end) - end - end -end diff --git a/test/nebulex/adapters/local_shards_test.exs b/test/nebulex/adapters/local_shards_test.exs deleted file mode 100644 index e34a9cec..00000000 --- a/test/nebulex/adapters/local_shards_test.exs +++ /dev/null @@ -1,39 +0,0 @@ -defmodule Nebulex.Adapters.LocalWithShardsTest do - use ExUnit.Case, async: true - - # Inherit tests - use Nebulex.LocalTest - use Nebulex.CacheTest - - import Nebulex.CacheCase, only: [setup_with_dynamic_cache: 3] - - alias Nebulex.Adapter - alias Nebulex.TestCache.Cache - - setup_with_dynamic_cache Cache, :local_with_shards, backend: :shards - - describe "shards" do - test "backend", %{name: name} do - Adapter.with_meta(name, fn meta -> - assert meta.backend == :shards - end) - end - - test "custom partitions" do - defmodule CustomPartitions do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - :ok = Application.put_env(:nebulex, CustomPartitions, backend: :shards, partitions: 2) - {:ok, _pid} = CustomPartitions.start_link() - - assert CustomPartitions.newer_generation() - |> :shards.meta() - |> :shards_meta.partitions() == 2 - - :ok = CustomPartitions.stop() - end - end -end diff --git a/test/nebulex/adapters/multilevel_concurrency_test.exs b/test/nebulex/adapters/multilevel_concurrency_test.exs deleted file mode 100644 index bfa80875..00000000 --- a/test/nebulex/adapters/multilevel_concurrency_test.exs +++ /dev/null @@ -1,178 +0,0 @@ -defmodule Nebulex.Adapters.MultilevelConcurrencyTest do - use ExUnit.Case, async: true - - import Nebulex.CacheCase - - defmodule 
SleeperMock do - @moduledoc false - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - - alias Nebulex.Adapters.Local - - ## Callbacks - - @impl true - defmacro __before_compile__(_), do: :ok - - @impl true - defdelegate init(opts), to: Local - - @impl true - defdelegate fetch(meta, key, opts), to: Local - - @impl true - defdelegate put(meta, key, value, ttl, on_write, opts), to: Local - - @impl true - def delete(meta, key, opts) do - result = Local.delete(meta, key, opts) - - post(opts) - - result - end - - @impl true - defdelegate take(meta, key, opts), to: Local - - @impl true - defdelegate has_key?(meta, key, opts), to: Local - - @impl true - defdelegate ttl(meta, key, opts), to: Local - - @impl true - defdelegate expire(meta, key, ttl, opts), to: Local - - @impl true - defdelegate touch(meta, key, opts), to: Local - - @impl true - defdelegate update_counter(meta, key, amount, ttl, default, opts), to: Local - - @impl true - defdelegate get_all(meta, keys, opts), to: Local - - @impl true - defdelegate put_all(meta, entries, ttl, on_write, opts), to: Local - - @impl true - def execute(meta, operation, query, opts) do - result = Local.execute(meta, operation, query, opts) - - post(opts) - - result - end - - @impl true - defdelegate stream(meta, query, opts), to: Local - - ## Helpers - - def post(opts) do - with f when is_function(f) <- opts[:post] do - f.() - end - end - end - - defmodule L1 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: SleeperMock - end - - defmodule L2 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated - end - - defmodule Multilevel do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Multilevel - end - - @levels [ - {L1, name: :multilevel_concurrency_l1}, - {L2, name: :multilevel_concurrency_l2} - ] - - setup_with_cache(Multilevel, - model: :inclusive, - levels: @levels - ) - - describe "delete" do - test "deletes in reverse 
order", %{cache: cache} do - test_pid = self() - - :ok = cache.put!("foo", "stale") - - task = - Task.async(fn -> - cache.delete!("foo", - post: fn -> - send(test_pid, :deleted_in_l1) - - receive do - :continue -> :ok - after - 5000 -> - raise "Did not receive continue message" - end - end - ) - end) - - assert_receive :deleted_in_l1 - refute cache.get!("foo") - - _ = send(task.pid, :continue) - - assert Task.await(task) == :ok - - assert cache.get!("foo", nil, level: 1) == nil - assert cache.get!("foo", nil, level: 2) == nil - end - end - - describe "delete_all" do - test "deletes in reverse order", %{cache: cache} do - test_pid = self() - - :ok = cache.put_all!(%{a: "stale", b: "stale"}) - - task = - Task.async(fn -> - cache.delete_all!(nil, - post: fn -> - send(test_pid, :deleted_in_l1) - - receive do - :continue -> :ok - after - 5000 -> - raise "Did not receive continue message" - end - end - ) - end) - - assert_receive :deleted_in_l1 - - refute cache.get!(:a) - refute cache.get!(:b) - - _ = send(task.pid, :continue) - - assert Task.await(task) == 4 - - assert cache.get_all!([:a, :b]) == %{} - end - end -end diff --git a/test/nebulex/adapters/multilevel_error_test.exs b/test/nebulex/adapters/multilevel_error_test.exs deleted file mode 100644 index 80b65fb7..00000000 --- a/test/nebulex/adapters/multilevel_error_test.exs +++ /dev/null @@ -1,84 +0,0 @@ -defmodule Nebulex.Adapters.MultilevelErrorTest do - use ExUnit.Case, async: false - use Mimic - - import Nebulex.CacheCase, only: [setup_with_dynamic_cache: 3] - - alias Nebulex.TestCache.Multilevel - alias Nebulex.TestCache.Multilevel.{L1, L2, L3} - - @gc_interval :timer.hours(1) - - @levels [ - {L1, name: :multilevel_error_cache_l1, gc_interval: @gc_interval}, - {L2, name: :multilevel_error_cache_l2, primary: [gc_interval: @gc_interval]}, - {L3, name: :multilevel_error_cache_l3, primary: [gc_interval: @gc_interval]} - ] - - setup_with_dynamic_cache Multilevel, :multilevel_error_cache, levels: @levels - - describe 
"cache level error" do - test "fetch/2", %{cache: cache} do - L1 - |> expect(:fetch, fn _, _ -> {:error, %Nebulex.Error{reason: :error}} end) - - assert cache.fetch(1) == {:error, %Nebulex.Error{module: Nebulex.Error, reason: :error}} - end - - test "get_all/2", %{cache: cache} do - L1 - |> expect(:get_all, fn _, _ -> {:error, %Nebulex.Error{reason: :error}} end) - - assert cache.get_all(1) == {:error, %Nebulex.Error{module: Nebulex.Error, reason: :error}} - end - - test "put/3", %{cache: cache} do - L1 - |> expect(:put, fn _, _, _ -> {:error, %Nebulex.Error{reason: :error}} end) - - assert cache.put("hello", "world") == - {:error, %Nebulex.Error{module: Nebulex.Error, reason: :error}} - end - - test "put_all/2", %{cache: cache} do - L1 - |> expect(:put_all, fn _, _ -> {:error, %Nebulex.Error{reason: :error}} end) - - assert cache.put_all(%{"apples" => 1, "bananas" => 3}) == - {:error, %Nebulex.Error{module: Nebulex.Error, reason: :error}} - end - - test "has_key?/1", %{cache: cache} do - L1 - |> expect(:has_key?, fn _, _ -> {:error, %Nebulex.Error{reason: :error}} end) - - assert cache.has_key?("error") == - {:error, %Nebulex.Error{module: Nebulex.Error, reason: :error}} - end - - test "expire!/2", %{cache: cache} do - L1 - |> expect(:expire, fn _, _, _ -> {:error, %Nebulex.Error{reason: :error}} end) - - assert_raise Nebulex.Error, ~r"Nebulex error:\n\n:error", fn -> - cache.expire!(:raise, 100) - end - end - - test "touch!/1", %{cache: cache} do - L1 - |> expect(:touch, fn _, _ -> {:error, %Nebulex.Error{reason: :error}} end) - - assert_raise Nebulex.Error, ~r"Nebulex error:\n\n:error", fn -> - cache.touch!(:raise) - end - end - - test "ttl/1", %{cache: cache} do - L1 - |> expect(:ttl, fn _, _ -> {:error, %Nebulex.Error{reason: :error}} end) - - assert cache.ttl(1) == {:error, %Nebulex.Error{module: Nebulex.Error, reason: :error}} - end - end -end diff --git a/test/nebulex/adapters/multilevel_exclusive_test.exs 
b/test/nebulex/adapters/multilevel_exclusive_test.exs deleted file mode 100644 index ab07aa44..00000000 --- a/test/nebulex/adapters/multilevel_exclusive_test.exs +++ /dev/null @@ -1,95 +0,0 @@ -defmodule Nebulex.Adapters.MultilevelExclusiveTest do - use Nebulex.NodeCase - - # Inherit tests - use Nebulex.MultilevelTest - use Nebulex.Cache.QueryableTest - use Nebulex.Cache.TransactionTest - - import Nebulex.CacheCase, only: [setup_with_dynamic_cache: 3] - - alias Nebulex.Adapters.Local.Generation - alias Nebulex.Cache.Cluster - alias Nebulex.TestCache.Multilevel - alias Nebulex.TestCache.Multilevel.{L1, L2, L3} - - @gc_interval :timer.hours(1) - - @levels [ - { - L1, - name: :multilevel_exclusive_l1, gc_interval: @gc_interval, backend: :shards, partitions: 2 - }, - { - L2, - name: :multilevel_exclusive_l2, primary: [gc_interval: @gc_interval] - }, - { - L3, - name: :multilevel_exclusive_l3, - primary: [gc_interval: @gc_interval, backend: :shards, partitions: 2] - } - ] - - setup_with_dynamic_cache Multilevel, - :multilevel_exclusive, - model: :exclusive, - levels: @levels - - describe "multilevel exclusive" do - test "returns partitions for L1 with shards backend", %{name: name} do - assert :"#{name}_l1" - |> Generation.newer() - |> :shards.meta() - |> :shards_meta.partitions() == 2 - end - - test "get" do - :ok = Multilevel.put(1, 1, level: 1) - :ok = Multilevel.put(2, 2, level: 2) - :ok = Multilevel.put(3, 3, level: 3) - - assert Multilevel.get!(1) == 1 - assert Multilevel.get!(2, return: :key) == 2 - assert Multilevel.get!(3) == 3 - refute Multilevel.get!(2, nil, level: 1) - refute Multilevel.get!(3, nil, level: 1) - refute Multilevel.get!(1, nil, level: 2) - refute Multilevel.get!(3, nil, level: 2) - refute Multilevel.get!(1, nil, level: 3) - refute Multilevel.get!(2, nil, level: 3) - end - end - - describe "partitioned level" do - test "returns cluster nodes" do - assert Cluster.get_nodes(:multilevel_exclusive_l3) == [node()] - end - - test "joining new node" do 
- node = :"node1@127.0.0.1" - - {:ok, pid} = - start_cache(node, Multilevel, - name: :multilevel_exclusive, - model: :exclusive, - levels: @levels - ) - - # check cluster nodes - assert Cluster.get_nodes(:multilevel_exclusive_l3) == [node, node()] - - kv_pairs = for k <- 1..100, do: {k, k} - - Multilevel.transaction(fn -> - assert Multilevel.put_all(kv_pairs) == :ok - - for k <- 1..100 do - assert Multilevel.get!(k) == k - end - end) - - :ok = stop_cache(:"node1@127.0.0.1", pid) - end - end -end diff --git a/test/nebulex/adapters/multilevel_inclusive_test.exs b/test/nebulex/adapters/multilevel_inclusive_test.exs deleted file mode 100644 index 18f4e5ac..00000000 --- a/test/nebulex/adapters/multilevel_inclusive_test.exs +++ /dev/null @@ -1,133 +0,0 @@ -defmodule Nebulex.Adapters.MultilevelInclusiveTest do - use Nebulex.NodeCase - - # Inherit tests - use Nebulex.MultilevelTest - use Nebulex.Cache.QueryableTest - use Nebulex.Cache.TransactionTest - - import Nebulex.CacheCase, only: [setup_with_dynamic_cache: 3] - - alias Nebulex.Adapters.Local.Generation - alias Nebulex.Cache.Cluster - alias Nebulex.TestCache.Multilevel - alias Nebulex.TestCache.Multilevel.{L1, L2, L3} - - @gc_interval :timer.hours(1) - - @levels [ - { - L1, - name: :multilevel_inclusive_l1, gc_interval: @gc_interval, backend: :shards, partitions: 2 - }, - { - L2, - name: :multilevel_inclusive_l2, primary: [gc_interval: @gc_interval] - }, - { - L3, - name: :multilevel_inclusive_l3, - primary: [gc_interval: @gc_interval, backend: :shards, partitions: 2] - } - ] - - setup_with_dynamic_cache Multilevel, - :multilevel_inclusive, - model: :inclusive, - levels: @levels - - describe "multilevel inclusive" do - test "returns partitions for L1 with shards backend", %{name: name} do - assert :"#{name}_l1" - |> Generation.newer() - |> :shards.meta() - |> :shards_meta.partitions() == 2 - end - - test "get" do - :ok = Process.sleep(2000) - :ok = Multilevel.put(1, 1, level: 1) - :ok = Multilevel.put(2, 2, level: 2) 
- :ok = Multilevel.put(3, 3, level: 3) - - assert Multilevel.get!(1) == 1 - refute Multilevel.get!(1, nil, level: 2) - refute Multilevel.get!(1, nil, level: 3) - - assert Multilevel.get!(2) == 2 - assert Multilevel.get!(2, nil, level: 1) == 2 - assert Multilevel.get!(2, nil, level: 2) == 2 - refute Multilevel.get!(2, nil, level: 3) - - assert Multilevel.get!(3, nil, level: 3) == 3 - refute Multilevel.get!(3, nil, level: 1) - refute Multilevel.get!(3, nil, level: 2) - - assert Multilevel.get!(3) == 3 - assert Multilevel.get!(3, nil, level: 1) == 3 - assert Multilevel.get!(3, nil, level: 2) == 3 - assert Multilevel.get!(3, nil, level: 2) == 3 - end - - test "fetched value is replicated with TTL on previous levels" do - assert Multilevel.put(:a, 1, ttl: 1000) == :ok - assert Multilevel.ttl(:a) > 0 - - :ok = Process.sleep(1100) - refute Multilevel.get!(:a, nil, level: 1) - refute Multilevel.get!(:a, nil, level: 2) - refute Multilevel.get!(:a, nil, level: 3) - - assert Multilevel.put(:b, 1, level: 3) == :ok - assert Multilevel.ttl!(:b) == :infinity - assert Multilevel.expire!(:b, 1000) - assert Multilevel.ttl!(:b) > 0 - refute Multilevel.get!(:b, nil, level: 1) - refute Multilevel.get!(:b, nil, level: 2) - assert Multilevel.get!(:b, nil, level: 3) == 1 - - assert Multilevel.get!(:b) == 1 - assert Multilevel.get!(:b, nil, level: 1) == 1 - assert Multilevel.get!(:b, nil, level: 2) == 1 - assert Multilevel.get!(:b, nil, level: 3) == 1 - - :ok = Process.sleep(1100) - refute Multilevel.get!(:b, nil, level: 1) - refute Multilevel.get!(:b, nil, level: 2) - refute Multilevel.get!(:b, nil, level: 3) - end - end - - describe "distributed levels" do - test "return cluster nodes" do - assert Cluster.get_nodes(:multilevel_inclusive_l2) == [node()] - assert Cluster.get_nodes(:multilevel_inclusive_l3) == [node()] - end - - test "joining new node" do - node = :"node1@127.0.0.1" - - {:ok, pid} = - start_cache(node, Multilevel, - name: :multilevel_inclusive, - model: :inclusive, - 
levels: @levels - ) - - # check cluster nodes - assert Cluster.get_nodes(:multilevel_inclusive_l3) == [node, node()] - - kv_pairs = for k <- 1..100, do: {k, k} - - Multilevel.transaction(fn -> - assert Multilevel.put_all(kv_pairs) == :ok - - for k <- 1..100 do - assert Multilevel.get!(k) == k - end - end) - - :ok = stop_cache(:"node1@127.0.0.1", pid) - end - end -end diff --git a/test/nebulex/adapters/nil_test.exs b/test/nebulex/adapters/nil_test.exs index 5d88be8e..4fe80a4e 100644 --- a/test/nebulex/adapters/nil_test.exs +++ b/test/nebulex/adapters/nil_test.exs @@ -36,7 +36,7 @@ defmodule Nebulex.Adapters.NilTest do end test "get_all", %{cache: cache} do - assert cache.get_all("foo") == {:ok, %{}} + assert cache.get_all(["foo"]) == {:ok, %{}} end test "delete", %{cache: cache} do diff --git a/test/nebulex/adapters/partitioned_error_test.exs b/test/nebulex/adapters/partitioned_error_test.exs deleted file mode 100644 index 8ba1abfd..00000000 --- a/test/nebulex/adapters/partitioned_error_test.exs +++ /dev/null @@ -1,27 +0,0 @@ -defmodule Nebulex.Adapters.PartitionedErrorTest do - use ExUnit.Case, async: true - use Mimic - - # Inherit error tests - use Nebulex.Cache.EntryErrorTest - use Nebulex.Cache.EntryExpirationErrorTest - - import Nebulex.CacheCase, only: [setup_with_dynamic_cache: 2] - - setup_with_dynamic_cache Nebulex.TestCache.Partitioned, :partitioned_error_cache - - setup do - Nebulex.RPC - |> stub(:call, fn _, _, _, _, _ -> {:error, %Nebulex.Error{reason: :error}} end) - |> stub(:multicall, fn _ -> {[], [:error]} end) - |> stub(:multicall, fn _, _ -> {[], [:error]} end) - |> stub(:multicall, fn _, _, _, _ -> {[], [:error]} end) - |> stub(:multicall, fn _, _, _, _, _ -> {[], [:error]} end) - - {:ok, - %{ - error_module: &(&1 in [Nebulex.Error, Nebulex.RPC]), - error_reason: &(&1 in [:error, {:rpc_multicall_error, [:error]}]) - }} - end -end diff --git a/test/nebulex/adapters/partitioned_test.exs b/test/nebulex/adapters/partitioned_test.exs deleted file mode 
100644 index e5766641..00000000 --- a/test/nebulex/adapters/partitioned_test.exs +++ /dev/null @@ -1,278 +0,0 @@ -defmodule Nebulex.Adapters.PartitionedTest do - use Nebulex.NodeCase - - # Inherit tests - use Nebulex.CacheTest - - import Nebulex.Helpers - - alias Nebulex.Adapter - alias Nebulex.TestCache.{Partitioned, PartitionedMock} - - @primary :"primary@127.0.0.1" - @cache_name :partitioned_cache - - setup_all do - # Set config - :ok = Application.put_env(:nebulex, Partitioned, primary: [backend: :shards]) - end - - setup do - cluster = :lists.usort([@primary | Application.get_env(:nebulex, :nodes, [])]) - - node_pid_list = - start_caches( - [node() | Node.list()], - [ - {Partitioned, [name: @cache_name, join_timeout: 2000]}, - {PartitionedMock, []} - ] - ) - - default_dynamic_cache = Partitioned.get_dynamic_cache() - _ = Partitioned.put_dynamic_cache(@cache_name) - - on_exit(fn -> - _ = Partitioned.put_dynamic_cache(default_dynamic_cache) - - :ok = Process.sleep(100) - - stop_caches(node_pid_list) - end) - - {:ok, cache: Partitioned, name: @cache_name, cluster: cluster, on_error: &assert_query_error/1} - end - - defp assert_query_error(%Nebulex.Error{reason: {:rpc_multicall_error, errors}}) do - for {_node, {:error, reason}} <- errors do - assert %Nebulex.QueryError{} = reason - end - end - - describe "c:init/1" do - test "initializes the primary store metadata" do - Adapter.with_meta(PartitionedCache.Primary, fn meta -> - assert meta.adapter == Nebulex.Adapters.Local - assert meta.backend == :shards - end) - end - - test "raises an exception because invalid primary store" do - assert_raise ArgumentError, ~r"adapter Invalid was not compiled", fn -> - defmodule Demo do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned, - primary_storage_adapter: Invalid - end - end - end - - test "fails because unloaded keyslot module" do - assert {:error, {%ArgumentError{message: msg}, _}} = - Partitioned.start_link( - name: :unloaded_keyslot, - 
keyslot: UnloadedKeyslot - ) - - assert Regex.match?(~r"keyslot UnloadedKeyslot was not compiled", msg) - end - - test "fails because keyslot module does not implement expected behaviour" do - assert {:error, {%ArgumentError{message: msg}, _}} = - Partitioned.start_link( - name: :invalid_keyslot, - keyslot: __MODULE__ - ) - - mod = inspect(__MODULE__) - behaviour = "Nebulex.Adapter.Keyslot" - assert Regex.match?(~r"expected #{mod} to implement the behaviour #{behaviour}", msg) - end - - test "fails because invalid keyslot option" do - assert {:error, {%ArgumentError{message: msg}, _}} = - Partitioned.start_link( - name: :invalid_keyslot, - keyslot: "invalid" - ) - - assert Regex.match?(~r/invalid value for :keyslot/, msg) - end - end - - describe "partitioned cache" do - test "custom keyslot" do - defmodule Keyslot do - @behaviour Nebulex.Adapter.Keyslot - - @impl true - def hash_slot(key, range) do - key - |> :erlang.phash2() - |> rem(range) - end - end - - test_with_dynamic_cache(Partitioned, [name: :custom_keyslot, keyslot: Keyslot], fn -> - refute Partitioned.get!("foo") - assert Partitioned.put("foo", "bar") == :ok - assert Partitioned.get!("foo") == "bar" - end) - end - - test "get_and_update" do - assert Partitioned.get_and_update!(1, &Partitioned.get_and_update_fun/1) == {nil, 1} - assert Partitioned.get_and_update!(1, &Partitioned.get_and_update_fun/1) == {1, 2} - assert Partitioned.get_and_update!(1, &Partitioned.get_and_update_fun/1) == {2, 4} - - assert_raise ArgumentError, fn -> - Partitioned.get_and_update!(1, &Partitioned.get_and_update_bad_fun/1) - end - end - - test "incr raises when the counter is not an integer" do - :ok = Partitioned.put(:counter, "string") - - assert_raise Nebulex.Error, ~r"RPC call failed on node", fn -> - Partitioned.incr!(:counter, 10) - end - end - end - - describe "cluster scenario:" do - test "node leaves and then rejoins", %{name: name, cluster: cluster} do - assert node() == @primary - assert :lists.usort(Node.list()) 
== cluster -- [node()] - assert Partitioned.nodes() == cluster - - Partitioned.with_dynamic_cache(name, fn -> - :ok = Partitioned.leave_cluster() - - assert Partitioned.nodes() == cluster -- [node()] - end) - - Partitioned.with_dynamic_cache(name, fn -> - :ok = Partitioned.join_cluster() - - assert Partitioned.nodes() == cluster - end) - end - - test "teardown cache node", %{cluster: cluster} do - assert Partitioned.nodes() == cluster - - assert Partitioned.put(1, 1) == :ok - assert Partitioned.get!(1) == 1 - - node = teardown_cache(1) - - wait_until(fn -> - assert Partitioned.nodes() == cluster -- [node] - end) - - refute Partitioned.get!(1) - - assert :ok == Partitioned.put_all([{4, 44}, {2, 2}, {1, 1}]) - - assert Partitioned.get!(4) == 44 - assert Partitioned.get!(2) == 2 - assert Partitioned.get!(1) == 1 - end - - test "cache leaves the cluster when terminated and then rejoins when restarted", %{ - name: name - } do - prefix = [:nebulex, :test_cache, :partitioned, :bootstrap] - started = prefix ++ [:started] - stopped = prefix ++ [:stopped] - joined = prefix ++ [:joined] - exit_sig = prefix ++ [:exit] - - with_telemetry_handler(__MODULE__, [started, stopped, joined, exit_sig], fn -> - assert node() in Partitioned.nodes() - - true = - [name, Bootstrap] - |> normalize_module_name() - |> Process.whereis() - |> Process.exit(:stop) - - assert_receive {^exit_sig, %{system_time: _}, %{reason: :stop}}, 5000 - assert_receive {^stopped, %{system_time: _}, %{reason: :stop, cluster_nodes: nodes}}, 5000 - - refute node() in nodes - - assert_receive {^started, %{system_time: _}, %{}}, 5000 - assert_receive {^joined, %{system_time: _}, %{cluster_nodes: nodes}}, 5000 - - assert node() in nodes - assert nodes -- Partitioned.nodes() == [] - - :ok = Process.sleep(2100) - - assert_receive {^joined, %{system_time: _}, %{cluster_nodes: nodes}}, 5000 - assert node() in nodes - end) - end - end - - describe "rpc" do - test "timeout error" do - assert Partitioned.put_all(for(x <- 
1..100_000, do: {x, x}), timeout: 60_000) == :ok - assert Partitioned.get!(1, timeout: 1000) == 1 - - assert_raise Nebulex.Error, ~r"RPC multicall failed with errors", fn -> - Partitioned.all!(nil, timeout: 0) - end - - assert {:error, %Nebulex.Error{reason: {:rpc_multicall_error, errors}}} = - Partitioned.all(nil, timeout: 0) - - for {_node, error} <- errors do - assert error == {:error, {:erpc, :timeout}} - end - end - - test "runtime error" do - _ = Process.flag(:trap_exit, true) - - assert {:error, %Nebulex.Error{reason: {:rpc_multicall_error, errors}}} = - PartitionedMock.get_all([1, 2], timeout: 10) - - for {_node, {error, _call}} <- errors do - assert error == {:error, {:erpc, :timeout}} - end - - assert {:error, %Nebulex.Error{reason: {:rpc_multicall_error, errors}}} = - PartitionedMock.put_all(a: 1, b: 2) - - for {_node, {error, _call}} <- errors do - assert error == {:exit, {:signal, :normal}} - end - - assert {:error, %Nebulex.Error{reason: {:rpc_error, {node, {:EXIT, {reason, _}}}}}} = - PartitionedMock.get(1) - - assert node == :"node3@127.0.0.1" - assert reason == %ArgumentError{message: "Error"} - - assert {:error, %Nebulex.Error{reason: {:rpc_multicall_error, errors}}} = - PartitionedMock.count_all() - - for {_node, error} <- errors do - assert error == {:exit, {:signal, :normal}} - end - end - end - - ## Private Functions - - defp teardown_cache(key) do - node = Partitioned.get_node(key) - remote_pid = :rpc.call(node, Process, :whereis, [@cache_name]) - :ok = :rpc.call(node, Supervisor, :stop, [remote_pid]) - - node - end -end diff --git a/test/nebulex/adapters/replicated_test.exs b/test/nebulex/adapters/replicated_test.exs deleted file mode 100644 index 398e7fd7..00000000 --- a/test/nebulex/adapters/replicated_test.exs +++ /dev/null @@ -1,281 +0,0 @@ -defmodule Nebulex.Adapters.ReplicatedTest do - use Nebulex.NodeCase - use Mimic - - # Inherit tests - use Nebulex.CacheTest - - import Nebulex.Helpers - - alias Nebulex.TestCache.{Replicated, 
ReplicatedMock} - - @cache_name :replicated_cache - - setup do - node_pid_list = start_caches(cluster_nodes(), [{Replicated, [name: @cache_name]}]) - - default_dynamic_cache = Replicated.get_dynamic_cache() - _ = Replicated.put_dynamic_cache(@cache_name) - - on_exit(fn -> - _ = Replicated.put_dynamic_cache(default_dynamic_cache) - - :ok = Process.sleep(100) - - stop_caches(node_pid_list) - end) - - {:ok, cache: Replicated, name: @cache_name} - end - - describe "c:init/1" do - test "raises an exception because invalid primary store" do - assert_raise ArgumentError, ~r"adapter Invalid was not compiled", fn -> - defmodule Demo do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated, - primary_storage_adapter: Invalid - end - end - end - end - - describe "replicated cache:" do - test "put/3" do - assert Replicated.put(1, 1) == :ok - assert Replicated.get!(1) == 1 - - assert_for_all_replicas(Replicated, :get!, [1], 1) - - assert Replicated.put_all(a: 1, b: 2, c: 3) == :ok - - assert_for_all_replicas(Replicated, :get_all!, [[:a, :b, :c]], %{a: 1, b: 2, c: 3}) - end - - test "delete/2" do - assert Replicated.put("foo", "bar") == :ok - assert Replicated.get!("foo") == "bar" - - assert_for_all_replicas(Replicated, :get!, ["foo"], "bar") - - assert Replicated.delete("foo") == :ok - refute Replicated.get!("foo") - - assert_for_all_replicas(Replicated, :get!, ["foo"], nil) - end - - test "take/2" do - assert Replicated.put("foo", "bar") == :ok - assert Replicated.get!("foo") == "bar" - - assert_for_all_replicas(Replicated, :get!, ["foo"], "bar") - - assert Replicated.take!("foo") == "bar" - refute Replicated.get!("foo") - - assert_for_all_replicas(Replicated, :get!, ["foo"], nil) - end - - test "take/2 (Nebulex.KeyError on remote nodes)" do - Replicated.__primary__().with_dynamic_cache( - normalize_module_name([@cache_name, Primary]), - fn -> - :ok = Replicated.__primary__().put("foo", "bar") - end - ) - - assert Replicated.take!("foo") == "bar" - 
refute Replicated.get!("foo") - - assert_for_all_replicas(Replicated, :get!, ["foo"], nil) - end - - test "incr/3" do - assert Replicated.incr!(:counter, 3) == 3 - assert Replicated.incr!(:counter) == 4 - - assert_for_all_replicas(Replicated, :get!, [:counter], 4) - end - - test "incr/3 raises when the counter is not an integer" do - :ok = Replicated.put(:counter, "string") - - assert_raise ArgumentError, fn -> - Replicated.incr(:counter, 10) - end - end - - test "delete_all/2" do - assert Replicated.put_all(a: 1, b: 2, c: 3) == :ok - - assert_for_all_replicas(Replicated, :get_all!, [[:a, :b, :c]], %{a: 1, b: 2, c: 3}) - - assert Replicated.delete_all!() == 3 - assert Replicated.count_all!() == 0 - - assert_for_all_replicas(Replicated, :get_all!, [[:a, :b, :c]], %{}) - end - end - - describe "cluster" do - test "node leaves and then rejoins", %{name: name} do - cluster = :lists.usort(cluster_nodes()) - - wait_until(fn -> - assert Replicated.nodes() == cluster - end) - - Replicated.with_dynamic_cache(name, fn -> - :ok = Replicated.leave_cluster() - end) - - wait_until(fn -> - assert Replicated.nodes() == cluster -- [node()] - end) - - Replicated.with_dynamic_cache(name, fn -> - :ok = Replicated.join_cluster() - end) - - wait_until(fn -> - assert Replicated.nodes() == cluster - end) - end - - test "error: rpc error" do - node_pid_list = start_caches(cluster_nodes(), [{ReplicatedMock, []}]) - - try do - _ = Process.flag(:trap_exit, true) - - assert {:error, %Nebulex.Error{reason: {:rpc_multicall_error, errors}}} = - ReplicatedMock.put_new_all(a: 1, b: 2) - - for {_node, error} <- errors do - assert error == {:exit, {:signal, :normal}} - end - after - stop_caches(node_pid_list) - end - end - - test "ok: start/stop cache nodes" do - event = [:nebulex, :test_cache, :replicated, :replication] - - with_telemetry_handler(__MODULE__, [event], fn -> - assert Replicated.nodes() |> :lists.usort() == :lists.usort(cluster_nodes()) - - assert Replicated.put_all(a: 1, b: 2) == :ok 
- assert Replicated.put(:c, 3, ttl: 5000) == :ok - - assert_for_all_replicas( - Replicated, - :get_all!, - [[:a, :b, :c]], - %{a: 1, b: 2, c: 3} - ) - - # start new cache nodes - nodes = [:"node3@127.0.0.1", :"node4@127.0.0.1"] - node_pid_list = start_caches(nodes, [{Replicated, [name: @cache_name]}]) - - wait_until(fn -> - assert Replicated.nodes() |> :lists.usort() == :lists.usort(nodes ++ cluster_nodes()) - end) - - wait_until(10, 1000, fn -> - assert_for_all_replicas( - Replicated, - :get_all!, - [[:a, :b, :c]], - %{a: 1, b: 2, c: 3} - ) - end) - - # stop cache node - :ok = node_pid_list |> hd() |> List.wrap() |> stop_caches() - - if Code.ensure_loaded?(:pg) do - # errors on failed nodes should be ignored - Nebulex.Cache.Cluster - |> expect(:get_nodes, fn _ -> [:"node5@127.0.0.1"] ++ nodes end) - - assert Replicated.put(:foo, :bar) == :ok - - assert_receive {^event, %{rpc_errors: 2}, meta} - assert meta[:adapter_meta][:cache] == Replicated - assert meta[:adapter_meta][:name] == :replicated_cache - assert meta[:function_name] == :put - - assert [ - "node5@127.0.0.1": {:error, {:erpc, :noconnection}}, - "node3@127.0.0.1": {:error, %Nebulex.Error{reason: {:registry_lookup_error, _}}} - ] = meta[:rpc_errors] - end - - wait_until(10, 1000, fn -> - assert Replicated.nodes() |> :lists.usort() == - :lists.usort(cluster_nodes() ++ [:"node4@127.0.0.1"]) - end) - - assert_for_all_replicas( - Replicated, - :get_all!, - [[:a, :b, :c]], - %{a: 1, b: 2, c: 3} - ) - - :ok = stop_caches(node_pid_list) - end) - end - end - - describe "write-like operations locked" do - test "when a delete_all command is ongoing" do - test_with_dynamic_cache(ReplicatedMock, [name: :replicated_global_mock], fn -> - true = Process.register(self(), __MODULE__) - _ = Process.flag(:trap_exit, true) - - task1 = - Task.async(fn -> - _ = ReplicatedMock.put_dynamic_cache(:replicated_global_mock) - _ = ReplicatedMock.delete_all() - send(__MODULE__, :delete_all) - end) - - task2 = - Task.async(fn -> - :ok = 
Process.sleep(1000) - _ = ReplicatedMock.put_dynamic_cache(:replicated_global_mock) - :ok = ReplicatedMock.put("foo", "bar") - :ok = Process.sleep(100) - send(__MODULE__, :put) - end) - - assert_receive :delete_all, 5000 - assert_receive :put, 5000 - - [_, _] = Task.yield_many([task1, task2]) - end) - end - end - - ## Helpers - - defp assert_for_all_replicas(cache, action, args, expected) do - assert {res_lst, []} = - :rpc.multicall( - cache.nodes(), - cache, - :with_dynamic_cache, - [@cache_name, cache, action, args] - ) - - Enum.each(res_lst, fn res -> assert res == expected end) - end - - defp cluster_nodes do - [node() | Node.list()] -- [:"node3@127.0.0.1", :"node4@127.0.0.1"] - end -end diff --git a/test/nebulex/adapters/stats_test.exs b/test/nebulex/adapters/stats_test.exs deleted file mode 100644 index 833112e0..00000000 --- a/test/nebulex/adapters/stats_test.exs +++ /dev/null @@ -1,350 +0,0 @@ -defmodule Nebulex.Adapters.StatsTest do - use ExUnit.Case, asyc: true - use Mimic - - import Nebulex.CacheCase - - alias Nebulex.TestCache.StatsCache, as: Cache - - ## Shared constants - - @config [ - model: :inclusive, - levels: [ - {Cache.L1, gc_interval: :timer.hours(1), backend: :shards}, - {Cache.L2, primary: [gc_interval: :timer.hours(1)]}, - {Cache.L3, primary: [gc_interval: :timer.hours(1)]} - ] - ] - - @event [:nebulex, :test_cache, :stats_cache, :stats] - - ## Tests - - describe "(multilevel) stats/0" do - setup_with_cache Cache, [stats: true] ++ @config - - test "returns an error" do - Cache.L1 - |> Mimic.expect(:stats, fn -> {:error, %Nebulex.Error{reason: :error}} end) - - assert Cache.stats() == {:error, %Nebulex.Error{module: Nebulex.Error, reason: :error}} - end - - test "hits and misses" do - :ok = Cache.put_all!(a: 1, b: 2) - - assert Cache.get!(:a) == 1 - assert Cache.has_key?(:a) - assert Cache.ttl!(:b) == :infinity - refute Cache.get!(:c) - refute Cache.get!(:d) - - assert Cache.get_all!([:a, :b, :c, :d]) == %{a: 1, b: 2} - - 
assert_stats_measurements(Cache, - l1: [hits: 5, misses: 4, writes: 2], - l2: [hits: 0, misses: 4, writes: 2], - l3: [hits: 0, misses: 4, writes: 2] - ) - end - - test "writes and updates" do - assert Cache.put_all!(a: 1, b: 2) == :ok - assert Cache.put_all(%{a: 1, b: 2}) == :ok - refute Cache.put_new_all!(a: 1, b: 2) - assert Cache.put_new_all!(c: 3, d: 4, e: 3) - assert Cache.put!(1, 1) == :ok - refute Cache.put_new!(1, 2) - refute Cache.replace!(2, 2) - assert Cache.put_new!(2, 2) - assert Cache.replace!(2, 22) - assert Cache.incr!(:counter) == 1 - assert Cache.incr!(:counter) == 2 - refute Cache.expire!(:f, 1000) - assert Cache.expire!(:a, 1000) - refute Cache.touch!(:f) - assert Cache.touch!(:b) - - :ok = Process.sleep(1100) - refute Cache.get!(:a) - - wait_until(fn -> - assert_stats_measurements(Cache, - l1: [expirations: 1, misses: 1, writes: 10, updates: 4], - l2: [expirations: 1, misses: 1, writes: 10, updates: 4], - l3: [expirations: 1, misses: 1, writes: 10, updates: 4] - ) - end) - end - - test "evictions" do - entries = for x <- 1..10, do: {x, x} - :ok = Cache.put_all!(entries) - - assert Cache.delete!(1) == :ok - assert Cache.take!(2) == 2 - - assert_raise Nebulex.KeyError, fn -> - Cache.take!(20) - end - - assert_stats_measurements(Cache, - l1: [evictions: 2, misses: 1, writes: 10], - l2: [evictions: 2, misses: 1, writes: 10], - l3: [evictions: 2, misses: 1, writes: 10] - ) - - assert Cache.delete_all!() == 24 - - assert_stats_measurements(Cache, - l1: [evictions: 10, misses: 1, writes: 10], - l2: [evictions: 10, misses: 1, writes: 10], - l3: [evictions: 10, misses: 1, writes: 10] - ) - end - - test "expirations" do - :ok = Cache.put_all!(a: 1, b: 2) - :ok = Cache.put_all!([c: 3, d: 4], ttl: 1000) - - assert Cache.get_all!([:a, :b, :c, :d]) == %{a: 1, b: 2, c: 3, d: 4} - - :ok = Process.sleep(1100) - assert Cache.get_all!([:a, :b, :c, :d]) == %{a: 1, b: 2} - - wait_until(fn -> - assert_stats_measurements(Cache, - l1: [evictions: 2, expirations: 2, 
hits: 6, misses: 2, writes: 4], - l2: [evictions: 2, expirations: 2, hits: 0, misses: 2, writes: 4], - l3: [evictions: 2, expirations: 2, hits: 0, misses: 2, writes: 4] - ) - end) - end - end - - describe "(replicated) stats/0" do - alias Cache.L2, as: Replicated - - setup_with_cache Replicated, stats: true - - test "hits and misses" do - :ok = Replicated.put_all!(a: 1, b: 2) - - assert Replicated.get!(:a) == 1 - assert Replicated.get_all!([:a, :b, :c, :d]) == %{a: 1, b: 2} - - assert %Nebulex.Stats{measurements: measurements} = Replicated.stats!() - assert measurements.hits == 3 - assert measurements.misses == 2 - end - end - - describe "(partitioned) stats/0" do - alias Cache.L3, as: Partitioned - - setup_with_cache Partitioned, stats: true - - test "hits and misses" do - :ok = Partitioned.put_all!(a: 1, b: 2) - - assert Partitioned.get!(:a) == 1 - assert Partitioned.get_all!([:a, :b, :c, :d]) == %{a: 1, b: 2} - - assert %Nebulex.Stats{measurements: measurements} = Partitioned.stats!() - assert measurements.hits == 3 - assert measurements.misses == 2 - end - end - - describe "disabled stats in a cache level" do - @updated_config Keyword.update!( - @config, - :levels, - &(&1 ++ [{Cache.L4, gc_interval: :timer.hours(1), stats: false}]) - ) - - setup_with_cache Cache, [stats: true] ++ @updated_config - - test "ignored when returning stats" do - measurements = Cache.stats!().measurements - assert Map.get(measurements, :l1) - assert Map.get(measurements, :l2) - assert Map.get(measurements, :l3) - refute Map.get(measurements, :l4) - end - end - - describe "cache init error" do - test "because invalid stats option" do - _ = Process.flag(:trap_exit, true) - - {:error, {%ArgumentError{message: msg}, _}} = - Cache.start_link(stats: 123, levels: [{Cache.L1, []}]) - - assert Regex.match?(~r/invalid value/, msg) - end - - test "L1: invalid stats option" do - _ = Process.flag(:trap_exit, true) - - {:error, {:shutdown, {_, _, {:shutdown, {_, Cache.L1, {%ArgumentError{message: 
msg}, _}}}}}} = - Cache.start_link(stats: true, levels: [{Cache.L1, [stats: 123]}]) - - assert Regex.match?(~r/invalid value/, msg) - end - - test "L2: invalid stats option" do - _ = Process.flag(:trap_exit, true) - - {:error, {:shutdown, {_, _, {:shutdown, {_, Cache.L2, {%ArgumentError{message: msg}, _}}}}}} = - Cache.start_link(stats: true, levels: [{Cache.L1, []}, {Cache.L2, [stats: 123]}]) - - assert Regex.match?(~r/invalid value/, msg) - end - - test "L3: invalid stats option" do - _ = Process.flag(:trap_exit, true) - - {:error, {:shutdown, {_, _, {:shutdown, {_, Cache.L3, {%ArgumentError{message: msg}, _}}}}}} = - Cache.start_link( - stats: true, - levels: [{Cache.L1, []}, {Cache.L2, []}, {Cache.L3, [stats: 123]}] - ) - - assert Regex.match?(~r/invalid value/, msg) - end - end - - describe "new generation" do - alias Cache.L1 - alias Cache.L2.Primary, as: L2Primary - alias Cache.L3.Primary, as: L3Primary - - setup_with_cache Cache, [stats: true] ++ @config - - test "updates evictions" do - :ok = Cache.put_all!(a: 1, b: 2, c: 3) - assert Cache.count_all!() == 9 - - assert_stats_measurements(Cache, - l1: [evictions: 0, writes: 3], - l2: [evictions: 0, writes: 3], - l3: [evictions: 0, writes: 3] - ) - - _ = L1.new_generation() - assert Cache.count_all!() == 9 - - assert_stats_measurements(Cache, - l1: [evictions: 0, writes: 3], - l2: [evictions: 0, writes: 3], - l3: [evictions: 0, writes: 3] - ) - - _ = L1.new_generation() - assert Cache.count_all!() == 6 - - assert_stats_measurements(Cache, - l1: [evictions: 3, writes: 3], - l2: [evictions: 0, writes: 3], - l3: [evictions: 0, writes: 3] - ) - - _ = L2Primary.new_generation() - _ = L2Primary.new_generation() - assert Cache.count_all!() == 3 - - assert_stats_measurements(Cache, - l1: [evictions: 3, writes: 3], - l2: [evictions: 3, writes: 3], - l3: [evictions: 0, writes: 3] - ) - - _ = L3Primary.new_generation() - _ = L3Primary.new_generation() - assert Cache.count_all!() == 0 - - assert_stats_measurements(Cache, 
- l1: [evictions: 3, writes: 3], - l2: [evictions: 3, writes: 3], - l3: [evictions: 3, writes: 3] - ) - end - end - - describe "disabled stats:" do - setup_with_cache Cache, @config - - test "stats/0 returns nil" do - assert_raise Nebulex.Error, ~r"stats disabled or not supported by the cache", fn -> - Cache.stats!() - end - end - - test "dispatch_stats/1 is skipped" do - with_telemetry_handler(__MODULE__, [@event], fn -> - assert {:error, %Nebulex.Error{reason: {:stats_error, _}}} = Cache.dispatch_stats() - end) - end - end - - describe "dispatch_stats/1" do - setup_with_cache Cache, [stats: true] ++ @config - - test "emits a telemetry event when called" do - with_telemetry_handler(__MODULE__, [@event], fn -> - :ok = Cache.dispatch_stats(metadata: %{node: node()}) - - node = node() - - assert_receive {@event, measurements, %{cache: Cache, node: ^node}} - - assert measurements == %{ - l1: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 0}, - l2: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 0}, - l3: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 0} - } - end) - end - - test "returns an error" do - Cache.L1 - |> Mimic.expect(:stats, fn -> {:error, %Nebulex.Error{reason: :error}} end) - - assert Cache.dispatch_stats() == - {:error, %Nebulex.Error{module: Nebulex.Error, reason: :error}} - end - end - - describe "dispatch_stats/1 with dynamic cache" do - setup_with_dynamic_cache Cache, - :stats_with_dispatch, - [telemetry_prefix: [:my_event], stats: true] ++ @config - - test "emits a telemetry event with custom telemetry_prefix when called" do - with_telemetry_handler(__MODULE__, [[:my_event, :stats]], fn -> - :ok = Cache.dispatch_stats(metadata: %{foo: :bar}) - - assert_receive {[:my_event, :stats], measurements, - %{cache: :stats_with_dispatch, foo: :bar}} - - assert measurements == %{ - l1: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 0}, - l2: %{hits: 0, misses: 
0, writes: 0, evictions: 0, expirations: 0, updates: 0}, - l3: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 0} - } - end) - end - end - - ## Helpers - - defp assert_stats_measurements(cache, levels) do - measurements = cache.stats!().measurements - - for {level, stats} <- levels, {stat, expected} <- stats do - assert get_in(measurements, [level, stat]) == expected - end - end -end diff --git a/test/nebulex/cache/supervisor_test.exs b/test/nebulex/cache/supervisor_test.exs index 2de67b53..974f5f42 100644 --- a/test/nebulex/cache/supervisor_test.exs +++ b/test/nebulex/cache/supervisor_test.exs @@ -4,7 +4,7 @@ defmodule Nebulex.Cache.SupervisorTest do defmodule MyCache do use Nebulex.Cache, otp_app: :nebulex, - adapter: Nebulex.Adapters.Local + adapter: Nebulex.TestAdapter @impl true def init(opts) do @@ -68,6 +68,7 @@ defmodule Nebulex.Cache.SupervisorTest do end assert {:ok, _pid} = CustomCache.start_link(child_name: :custom_cache) + _ = Process.flag(:trap_exit, true) assert {:error, error} = diff --git a/test/nebulex/adapters/local_error_test.exs b/test/nebulex/cache_error_test.exs similarity index 66% rename from test/nebulex/adapters/local_error_test.exs rename to test/nebulex/cache_error_test.exs index ad68f648..1099e5ee 100644 --- a/test/nebulex/adapters/local_error_test.exs +++ b/test/nebulex/cache_error_test.exs @@ -1,16 +1,16 @@ -defmodule Nebulex.Adapters.LocalErrorTest do +defmodule Nebulex.CacheErrorTest do use ExUnit.Case, async: true use Mimic # Inherit error tests - use Nebulex.Cache.EntryErrorTest - use Nebulex.Cache.EntryExpirationErrorTest + use Nebulex.Cache.KVErrorTest + use Nebulex.Cache.KVExpirationErrorTest setup do Nebulex.Cache.Registry |> expect(:lookup, fn _ -> {:ok, %{adapter: Nebulex.FakeAdapter}} end) - {:ok, cache: Nebulex.TestCache.Cache, name: :local_error_cache} + {:ok, cache: Nebulex.TestCache.Cache, name: :test_cache_local_error} end describe "put!/3" do diff --git a/test/nebulex/cache_test.exs 
b/test/nebulex/cache_test.exs new file mode 100644 index 00000000..cc71728f --- /dev/null +++ b/test/nebulex/cache_test.exs @@ -0,0 +1,126 @@ +defmodule Nebulex.Adapters.CacheTest do + use ExUnit.Case, async: true + + # Cache API test cases + use Nebulex.CacheTestCase + + import Nebulex.CacheCase, only: [setup_with_dynamic_cache: 2] + + setup_with_dynamic_cache Nebulex.TestCache.Cache, :test_cache_local + + describe "entry:" do + test "get_and_update", %{cache: cache} do + fun = fn + nil -> {nil, 1} + val -> {val, val * 2} + end + + assert cache.get_and_update!(1, fun) == {nil, 1} + assert cache.get_and_update!(1, &{&1, &1 * 2}) == {1, 2} + assert cache.get_and_update!(1, &{&1, &1 * 3}) == {2, 6} + assert cache.get_and_update!(1, &{&1, nil}) == {6, 6} + assert cache.get!(1) == 6 + assert cache.get_and_update!(1, fn _ -> :pop end) == {6, nil} + assert cache.get_and_update!(1, fn _ -> :pop end) == {nil, nil} + assert cache.get_and_update!(3, &{&1, 3}) == {nil, 3} + end + + test "get_and_update fails because function returns invalid value", %{cache: cache} do + assert_raise ArgumentError, fn -> + cache.get_and_update(1, fn _ -> :other end) + end + end + + test "get_and_update fails because cache is not started", %{cache: cache} do + :ok = cache.stop() + + assert_raise Nebulex.Error, fn -> + assert cache.get_and_update!(1, fn _ -> :pop end) + end + end + + test "incr and update", %{cache: cache} do + assert cache.incr!(:counter) == 1 + assert cache.incr!(:counter) == 2 + + assert cache.get_and_update!(:counter, &{&1, &1 * 2}) == {2, 4} + assert cache.incr!(:counter) == 5 + + assert cache.update!(:counter, 1, &(&1 * 2)) == 10 + assert cache.incr!(:counter, -10) == 0 + + assert cache.put("foo", "bar") == :ok + + assert_raise Nebulex.Error, fn -> + cache.incr!("foo") + end + end + + test "incr with ttl", %{cache: cache} do + assert cache.incr!(:counter_with_ttl, 1, ttl: 1000) == 1 + assert cache.incr!(:counter_with_ttl) == 2 + assert cache.fetch!(:counter_with_ttl) == 2 + 
+ :ok = Process.sleep(1010) + + assert {:error, %Nebulex.KeyError{key: :counter_with_ttl}} = cache.fetch(:counter_with_ttl) + + assert cache.incr!(:counter_with_ttl, 1, ttl: 5000) == 1 + assert {:ok, ttl} = cache.ttl(:counter_with_ttl) + assert ttl > 1000 + + assert cache.expire(:counter_with_ttl, 500) == {:ok, true} + + :ok = Process.sleep(600) + + assert {:error, %Nebulex.KeyError{key: :counter_with_ttl}} = cache.fetch(:counter_with_ttl) + end + + test "incr existing entry", %{cache: cache} do + assert cache.put(:counter, 0) == :ok + assert cache.incr!(:counter) == 1 + assert cache.incr!(:counter, 2) == 3 + end + end + + describe "queryable:" do + test "error because invalid query", %{cache: cache} do + for action <- [:all, :stream] do + assert {:error, %Nebulex.QueryError{}} = apply(cache, action, [:invalid]) + end + end + + test "raise exception because invalid query", %{cache: cache} do + for action <- [:all!, :stream!] do + assert_raise Nebulex.QueryError, ~r"invalid query", fn -> + apply(cache, action, [:invalid]) + end + end + end + + test "default query error message" do + assert_raise Nebulex.QueryError, "invalid query :invalid", fn -> + raise Nebulex.QueryError, query: :invalid + end + end + end + + describe "error" do + test "because cache is stopped", %{cache: cache, name: name} do + :ok = cache.stop() + + assert cache.put(1, 13) == + {:error, + %Nebulex.Error{ + module: Nebulex.Error, + reason: {:registry_lookup_error, name} + }} + + msg = ~r"could not lookup Nebulex cache" + + assert_raise Nebulex.Error, msg, fn -> cache.put!(1, 13) end + assert_raise Nebulex.Error, msg, fn -> cache.get!(1) end + assert_raise Nebulex.Error, msg, fn -> cache.delete!(1) end + end + end +end diff --git a/test/nebulex/caching_test.exs b/test/nebulex/caching_test.exs index 4bb02f9c..0870c8ba 100644 --- a/test/nebulex/caching_test.exs +++ b/test/nebulex/caching_test.exs @@ -12,14 +12,14 @@ defmodule Nebulex.CachingTest do @moduledoc false use Nebulex.Cache, otp_app: 
:nebulex, - adapter: Nebulex.Adapters.Local + adapter: Nebulex.TestAdapter end defmodule CacheWithDefaultKeyGenerator do @moduledoc false use Nebulex.Cache, otp_app: :nebulex, - adapter: Nebulex.Adapters.Local, + adapter: Nebulex.TestAdapter, default_key_generator: __MODULE__ @behaviour Nebulex.Caching.KeyGenerator @@ -32,13 +32,15 @@ defmodule Nebulex.CachingTest do @moduledoc false use Nebulex.Cache, otp_app: :nebulex, - adapter: Nebulex.Adapters.Local + adapter: Nebulex.TestAdapter end defmodule Meta do @moduledoc false - defstruct [:id, :count] + @type t :: %__MODULE__{} + + defstruct [:id, :count] end defmodule TestKeyGenerator do diff --git a/test/nebulex/helpers_test.exs b/test/nebulex/helpers_test.exs new file mode 100644 index 00000000..abacd3a7 --- /dev/null +++ b/test/nebulex/helpers_test.exs @@ -0,0 +1,38 @@ +defmodule Nebulex.HelpersTest do + use ExUnit.Case, async: true + doctest Nebulex.Helpers + + alias Nebulex.Helpers + + describe "module_behaviours/2" do + test "ok: returns implemented modules" do + assert Helpers.module_behaviours(Nebulex.TestAdapter, "module") == [ + Nebulex.Adapter, + Nebulex.Adapter.KV, + Nebulex.Adapter.Queryable, + Nebulex.Adapter.Transaction, + Nebulex.Adapter.Persistence, + Nebulex.Adapter.Stats, + Nebulex.Cache.Options + ] + end + + test "error: invalid module" do + assert_raise ArgumentError, fn -> + Helpers.module_behaviours(InvalidModule, "module") + end + end + end + + describe "assert_behaviour/3" do + test "ok: returns implemented modules" do + assert Helpers.assert_behaviour(Nebulex.TestAdapter, Nebulex.Adapter) == Nebulex.TestAdapter + end + end + + test "error: behaviour not implemented" do + assert_raise ArgumentError, fn -> + Helpers.assert_behaviour(Nebulex.TestAdapter, XYZ) + end + end +end diff --git a/test/nebulex/stats_test.exs b/test/nebulex/stats_test.exs new file mode 100644 index 00000000..268255c6 --- /dev/null +++ b/test/nebulex/stats_test.exs @@ -0,0 +1,219 @@ +defmodule Nebulex.StatsTest do + use 
ExUnit.Case, async: true + use Mimic + + import Nebulex.CacheCase + + alias Nebulex.Adapter.Stats + alias Nebulex.TestCache.StatsCache, as: Cache + + ## Shared constants + + @event [:nebulex, :test_cache, :stats_cache, :stats] + + ## Tests + + describe "stats/0" do + setup_with_cache Cache, stats: true + + test "returns an error" do + Nebulex.Cache.Registry + |> expect(:lookup, fn _ -> {:ok, %{adapter: Nebulex.FakeAdapter}} end) + + assert Cache.stats() == {:error, %Nebulex.Error{module: Nebulex.Error, reason: :error}} + end + + test "hits and misses" do + :ok = Cache.put_all!(a: 1, b: 2) + + assert Cache.get!(:a) == 1 + assert Cache.has_key?(:a) + assert Cache.ttl!(:b) == :infinity + refute Cache.get!(:c) + refute Cache.get!(:d) + + assert Cache.get_all!([:a, :b, :c, :d]) == %{a: 1, b: 2} + + assert Cache.stats!().measurements == %{ + hits: 5, + misses: 4, + writes: 2, + evictions: 0, + expirations: 0, + updates: 0 + } + end + + test "writes and updates" do + assert Cache.put_all!(a: 1, b: 2) == :ok + assert Cache.put_all(%{a: 1, b: 2}) == :ok + refute Cache.put_new_all!(a: 1, b: 2) + assert Cache.put_new_all!(c: 3, d: 4, e: 3) + assert Cache.put!(1, 1) == :ok + refute Cache.put_new!(1, 2) + refute Cache.replace!(2, 2) + assert Cache.put_new!(2, 2) + assert Cache.replace!(2, 22) + assert Cache.incr!(:counter) == 1 + assert Cache.incr!(:counter) == 2 + refute Cache.expire!(:f, 1000) + assert Cache.expire!(:a, 1000) + refute Cache.touch!(:f) + assert Cache.touch!(:b) + + :ok = Process.sleep(1100) + + refute Cache.get!(:a) + + wait_until(fn -> + assert Cache.stats!().measurements == %{ + hits: 0, + misses: 1, + writes: 10, + evictions: 1, + expirations: 1, + updates: 4 + } + end) + end + + test "evictions" do + entries = for x <- 1..10, do: {x, x} + :ok = Cache.put_all!(entries) + + assert Cache.delete!(1) == :ok + assert Cache.take!(2) == 2 + + assert_raise Nebulex.KeyError, fn -> + Cache.take!(20) + end + + assert Cache.stats!().measurements == %{ + hits: 1, + 
misses: 1, + writes: 10, + evictions: 2, + expirations: 0, + updates: 0 + } + + assert Cache.delete_all!() == 8 + + assert Cache.stats!().measurements == %{ + hits: 1, + misses: 1, + writes: 10, + evictions: 10, + expirations: 0, + updates: 0 + } + end + + test "expirations" do + :ok = Cache.put_all!(a: 1, b: 2) + :ok = Cache.put_all!([c: 3, d: 4], ttl: 1000) + + assert Cache.get_all!([:a, :b, :c, :d]) == %{a: 1, b: 2, c: 3, d: 4} + + :ok = Process.sleep(1100) + assert Cache.get_all!([:a, :b, :c, :d]) == %{a: 1, b: 2} + + wait_until(fn -> + assert Cache.stats!().measurements == %{ + hits: 6, + misses: 2, + writes: 4, + evictions: 2, + expirations: 2, + updates: 0 + } + end) + end + end + + describe "cache init error" do + test "because invalid stats option" do + _ = Process.flag(:trap_exit, true) + + {:error, {%ArgumentError{message: msg}, _}} = Cache.start_link(stats: 123) + + assert Regex.match?(~r/invalid value/, msg) + end + end + + describe "disabled stats:" do + setup_with_cache Cache, stats: false + + test "stats/0 returns nil" do + assert_raise Nebulex.Error, ~r"stats disabled or not supported by the cache", fn -> + Cache.stats!() + end + end + + test "dispatch_stats/1 is skipped" do + with_telemetry_handler(__MODULE__, [@event], fn -> + assert {:error, %Nebulex.Error{reason: {:stats_error, _}}} = Cache.dispatch_stats() + end) + end + + test "Nebulex.Adapter.Stats.incr/3 does nothing when counter is nil" do + assert Stats.incr(nil, :test) == :ok + end + end + + describe "dispatch_stats/1" do + setup_with_cache Cache, stats: true + + test "emits a telemetry event when called" do + with_telemetry_handler(__MODULE__, [@event], fn -> + :ok = Cache.dispatch_stats(metadata: %{node: node()}) + + node = node() + + assert_receive {@event, measurements, %{cache: Cache, node: ^node}} + + assert measurements == %{ + hits: 0, + misses: 0, + writes: 0, + evictions: 0, + expirations: 0, + updates: 0 + } + end) + end + + test "returns an error" do + Nebulex.Cache.Registry 
+ |> expect(:lookup, fn _ -> {:ok, %{adapter: Nebulex.FakeAdapter}} end) + + assert Cache.dispatch_stats() == + {:error, %Nebulex.Error{module: Nebulex.Error, reason: :error}} + end + end + + describe "dispatch_stats/1 with dynamic cache" do + setup_with_dynamic_cache Cache, + :stats_with_dispatch, + telemetry_prefix: [:my_event], + stats: true + + test "emits a telemetry event with custom telemetry_prefix when called" do + with_telemetry_handler(__MODULE__, [[:my_event, :stats]], fn -> + :ok = Cache.dispatch_stats(metadata: %{foo: :bar}) + + assert_receive {[:my_event, :stats], measurements, + %{cache: :stats_with_dispatch, foo: :bar}} + + assert measurements == %{ + hits: 0, + misses: 0, + writes: 0, + evictions: 0, + expirations: 0, + updates: 0 + } + end) + end + end +end diff --git a/test/nebulex/telemetry_test.exs b/test/nebulex/telemetry_test.exs index 0b999d10..8ef606ec 100644 --- a/test/nebulex/telemetry_test.exs +++ b/test/nebulex/telemetry_test.exs @@ -10,126 +10,58 @@ defmodule Nebulex.TelemetryTest do defmodule Cache do use Nebulex.Cache, otp_app: :nebulex, - adapter: Nebulex.Adapters.Multilevel - - defmodule L1 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule L2 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated - end - - defmodule L3 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - end + adapter: Nebulex.TestAdapter end ## Shared constants @prefix [:nebulex, :telemetry_test, :cache] - @start @prefix ++ [:command, :start] @stop @prefix ++ [:command, :stop] - - @start_events [ - @prefix ++ [:command, :start], - @prefix ++ [:l1, :command, :start], - @prefix ++ [:l2, :command, :start], - @prefix ++ [:l2, :primary, :command, :start], - @prefix ++ [:l3, :command, :start], - @prefix ++ [:l3, :primary, :command, :start] - ] - - @stop_events [ - @prefix ++ [:command, :stop], - @prefix ++ [:l1, :command, :stop], - @prefix ++ [:l2, :command, 
:stop], - @prefix ++ [:l2, :primary, :command, :stop], - @prefix ++ [:l3, :command, :stop], - @prefix ++ [:l3, :primary, :command, :stop] - ] - - @exception_events [ - @prefix ++ [:command, :exception], - @prefix ++ [:l1, :command, :exception], - @prefix ++ [:l2, :command, :exception], - @prefix ++ [:l2, :primary, :command, :exception], - @prefix ++ [:l3, :command, :stop], - @prefix ++ [:l3, :primary, :command, :exception] - ] - - @caches [Cache, Cache.L1, Cache.L2, Cache.L2.Primary, Cache.L3, Cache.L3.Primary] - - @events Enum.zip([@caches, @start_events, @stop_events]) - - @config [ - model: :inclusive, - levels: [ - {Cache.L1, gc_interval: :timer.hours(1)}, - {Cache.L2, primary: [gc_interval: :timer.hours(1)]}, - {Cache.L3, primary: [gc_interval: :timer.hours(1)]} - ] - ] + @exception @prefix ++ [:command, :exception] + @test_adapter_start [:nebulex, :test_adapter, :start] + @events [@start, @stop, @exception, @test_adapter_start] ## Tests describe "span/3" do - setup_with_cache Cache, @config + setup_with_cache Cache test "ok: emits start and stop events" do - with_telemetry_handler(__MODULE__, @start_events ++ @stop_events, fn -> + with_telemetry_handler(__MODULE__, @events, fn -> assert Cache.put("foo", "bar") == :ok - for {cache, start, stop} <- @events do - assert_receive {^start, measurements, %{function_name: :put} = metadata} - assert measurements[:system_time] |> DateTime.from_unix!(:native) - assert metadata[:adapter_meta][:cache] == cache - assert metadata[:args] == ["foo", "bar", :infinity, :put, []] - assert metadata[:telemetry_span_context] |> is_reference() - - assert_receive {^stop, measurements, %{function_name: :put} = metadata} - assert measurements[:duration] > 0 - assert metadata[:adapter_meta][:cache] == cache - assert metadata[:args] == ["foo", "bar", :infinity, :put, []] - assert metadata[:result] == {:ok, true} - assert metadata[:telemetry_span_context] |> is_reference() - end + assert_receive {@start, measurements, %{function_name: 
:put} = metadata} + assert measurements[:system_time] |> DateTime.from_unix!(:native) + assert metadata[:adapter_meta][:cache] == Cache + assert metadata[:args] == ["foo", "bar", :infinity, :put, []] + assert metadata[:telemetry_span_context] |> is_reference() + + assert_receive {@stop, measurements, %{function_name: :put} = metadata} + assert measurements[:duration] > 0 + assert metadata[:adapter_meta][:cache] == Cache + assert metadata[:args] == ["foo", "bar", :infinity, :put, []] + assert metadata[:result] == {:ok, true} + assert metadata[:telemetry_span_context] |> is_reference() end) end test "raise: emits start and exception events" do - with_telemetry_handler(__MODULE__, @exception_events, fn -> - Adapter.with_meta(Cache.L3.Primary, fn meta -> - true = :ets.delete(meta.meta_tab) - end) + with_telemetry_handler(__MODULE__, @events, fn -> + key = {:eval, fn -> raise ArgumentError, "error" end} assert_raise ArgumentError, fn -> - Cache.get("foo") + Cache.fetch(key) end - ex_events = [ - @prefix ++ [:command, :exception], - @prefix ++ [:l3, :command, :exception], - @prefix ++ [:l3, :primary, :command, :exception] - ] - - for {cache, exception} <- ex_events do - assert_receive {^exception, measurements, %{function_name: :get} = metadata} - assert measurements[:duration] > 0 - assert metadata[:adapter_meta][:cache] == cache - assert metadata[:args] == ["foo", []] - assert metadata[:kind] == :error - assert metadata[:reason] == :badarg - assert metadata[:stacktrace] - assert metadata[:telemetry_span_context] |> is_reference() - end + assert_receive {@exception, measurements, %{function_name: :fetch} = metadata} + assert measurements[:duration] > 0 + assert metadata[:adapter_meta][:cache] == Cache + assert metadata[:args] == [key, []] + assert metadata[:kind] == :error + assert metadata[:reason] == %ArgumentError{message: "error"} + assert metadata[:stacktrace] + assert metadata[:telemetry_span_context] |> is_reference() end) end @@ -153,18 +85,16 @@ defmodule 
Nebulex.TelemetryTest do end describe "span/3 bypassed" do - setup_with_cache Cache, Keyword.put(@config, :telemetry, false) + setup_with_cache Cache, telemetry: false test "telemetry set to false" do - for cache <- @caches do - Adapter.with_meta(cache, fn meta -> - assert meta.telemetry == false - end) - end + Adapter.with_meta(Cache, fn meta -> + assert meta.telemetry == false + end) end test "ok: does not emit start and stop events" do - with_telemetry_handler(__MODULE__, @start_events ++ @stop_events, fn -> + with_telemetry_handler(__MODULE__, @events, fn -> commands = [ put: ["foo", "bar"], put_all: [%{"foo foo" => "bar bar"}], @@ -187,23 +117,17 @@ defmodule Nebulex.TelemetryTest do ] for {command, args} <- commands do - _ = apply(Cache.L1, command, args) - _ = apply(Cache.L2, command, args) - _ = apply(Cache.L3, command, args) - - for {_cache, start, stop} <- @events do - refute_received {^start, _, %{function_name: :command}} - refute_received {^stop, _, %{function_name: :command}} - end + _ = apply(Cache, command, args) + + refute_received {@start, _, %{function_name: :command}} + refute_received {@stop, _, %{function_name: :command}} end for {command, args} <- Keyword.drop(commands, [:dump, :load]) do _ = apply(Cache, command, args) - for {_cache, start, stop} <- @events do - refute_received {^start, _, %{function_name: :command}} - refute_received {^stop, _, %{function_name: :command}} - end + refute_received {@start, _, %{function_name: :command}} + refute_received {@stop, _, %{function_name: :command}} end end) end diff --git a/test/shared/cache/entry_boolean_values_test.exs b/test/shared/cache/entry_boolean_values_test.exs deleted file mode 100644 index 3f5068f2..00000000 --- a/test/shared/cache/entry_boolean_values_test.exs +++ /dev/null @@ -1,57 +0,0 @@ -defmodule Nebulex.Cache.EntryBooleanValuesTest do - import Nebulex.CacheCase - - deftests do - describe "boolean values:" do - test "get and get_all", %{cache: cache} do - :ok = cache.put_all!(a: 
true, b: false) - - assert cache.get!(:a) == true - assert cache.get!(:b) == false - - assert cache.get_all!([:a, :b]) == %{a: true, b: false} - end - - test "take", %{cache: cache} do - :ok = cache.put_all!(a: true, b: false) - - assert cache.take!(:a) == true - assert cache.take!(:b) == false - - assert cache.get_all!([:a, :b]) == %{} - end - - test "delete true value", %{cache: cache} do - :ok = cache.put!(:a, true) - - assert cache.get!(:a) == true - assert cache.delete!(:a) == :ok - assert cache.get!(:a) == nil - end - - test "delete false value", %{cache: cache} do - :ok = cache.put!(:a, false) - - assert cache.get!(:a) == false - assert cache.delete!(:a) == :ok - assert cache.get!(:a) == nil - end - - test "put_new", %{cache: cache} do - assert cache.put_new!(:a, true) - - :ok = cache.put!(:a, false) - - refute cache.put_new!(:a, false) - assert cache.get!(:a) == false - end - - test "has_key?", %{cache: cache} do - :ok = cache.put!(:a, true) - - assert cache.has_key?(:a) == {:ok, true} - assert cache.has_key?(:b) == {:ok, false} - end - end - end -end diff --git a/test/shared/cache/entry_error_test.exs b/test/shared/cache/kv_error_test.exs similarity index 99% rename from test/shared/cache/entry_error_test.exs rename to test/shared/cache/kv_error_test.exs index 46354ec2..b0b2ef52 100644 --- a/test/shared/cache/entry_error_test.exs +++ b/test/shared/cache/kv_error_test.exs @@ -1,4 +1,4 @@ -defmodule Nebulex.Cache.EntryErrorTest do +defmodule Nebulex.Cache.KVErrorTest do import Nebulex.CacheCase deftests do diff --git a/test/shared/cache/entry_expiration_error_test.exs b/test/shared/cache/kv_expiration_error_test.exs similarity index 88% rename from test/shared/cache/entry_expiration_error_test.exs rename to test/shared/cache/kv_expiration_error_test.exs index 97dd81dc..9c9e042d 100644 --- a/test/shared/cache/entry_expiration_error_test.exs +++ b/test/shared/cache/kv_expiration_error_test.exs @@ -1,4 +1,4 @@ -defmodule Nebulex.Cache.EntryExpirationErrorTest 
do +defmodule Nebulex.Cache.KVExpirationErrorTest do import Nebulex.CacheCase deftests do diff --git a/test/shared/cache/entry_expiration_test.exs b/test/shared/cache/kv_expiration_test.exs similarity index 99% rename from test/shared/cache/entry_expiration_test.exs rename to test/shared/cache/kv_expiration_test.exs index d72960bd..77b9cced 100644 --- a/test/shared/cache/entry_expiration_test.exs +++ b/test/shared/cache/kv_expiration_test.exs @@ -1,4 +1,4 @@ -defmodule Nebulex.Cache.EntryExpirationTest do +defmodule Nebulex.Cache.KVExpirationTest do import Nebulex.CacheCase deftests do diff --git a/test/shared/cache/entry_prop_test.exs b/test/shared/cache/kv_prop_test.exs similarity index 95% rename from test/shared/cache/entry_prop_test.exs rename to test/shared/cache/kv_prop_test.exs index f5547519..1b25031c 100644 --- a/test/shared/cache/entry_prop_test.exs +++ b/test/shared/cache/kv_prop_test.exs @@ -1,4 +1,4 @@ -defmodule Nebulex.Cache.EntryPropTest do +defmodule Nebulex.Cache.KVPropTest do import Nebulex.CacheCase deftests do diff --git a/test/shared/cache/entry_test.exs b/test/shared/cache/kv_test.exs similarity index 88% rename from test/shared/cache/entry_test.exs rename to test/shared/cache/kv_test.exs index 5879da9e..a6f660d8 100644 --- a/test/shared/cache/entry_test.exs +++ b/test/shared/cache/kv_test.exs @@ -1,4 +1,4 @@ -defmodule Nebulex.Cache.EntryTest do +defmodule Nebulex.Cache.KVTest do import Nebulex.CacheCase deftests do @@ -20,6 +20,14 @@ defmodule Nebulex.Cache.EntryTest do assert cache.fetch("foo") == {:ok, nil} end + test "puts a boolean value", %{cache: cache} do + assert cache.put(:boolean, true) == :ok + assert cache.fetch(:boolean) == {:ok, true} + + assert cache.put(:boolean, false) == :ok + assert cache.fetch(:boolean) == {:ok, false} + end + test "raises when invalid option is given", %{cache: cache} do assert_raise ArgumentError, ~r"expected ttl: to be a valid timeout", fn -> cache.put("hello", "world", ttl: "1") @@ -73,6 +81,14 @@ 
defmodule Nebulex.Cache.EntryTest do assert cache.fetch(:mykey) == {:ok, nil} end + test "puts a boolean value", %{cache: cache} do + assert cache.put_new(true, true) == {:ok, true} + assert cache.fetch(true) == {:ok, true} + + assert cache.put_new(false, false) == {:ok, true} + assert cache.fetch(false) == {:ok, false} + end + test "raises when invalid option is given", %{cache: cache} do assert_raise ArgumentError, ~r"expected ttl: to be a valid timeout", fn -> cache.put_new("hello", "world", ttl: "1") @@ -110,6 +126,16 @@ defmodule Nebulex.Cache.EntryTest do assert cache.fetch("hello") == {:ok, nil} end + test "existing boolean value", %{cache: cache} do + :ok = cache.put(:boolean, true) + + assert cache.replace(:boolean, false) == {:ok, true} + assert cache.fetch(:boolean) == {:ok, false} + + assert cache.replace(:boolean, true) == {:ok, true} + assert cache.fetch(:boolean) == {:ok, true} + end + test "raises when invalid option is given", %{cache: cache} do assert_raise ArgumentError, ~r"expected ttl: to be a valid timeout", fn -> cache.replace("hello", "world", ttl: "1") @@ -142,6 +168,7 @@ defmodule Nebulex.Cache.EntryTest do test "empty list or map has not any effect", %{cache: cache} do assert cache.put_all([]) == :ok assert cache.put_all(%{}) == :ok + assert count = cache.count_all() assert cache.delete_all() == count end @@ -155,7 +182,9 @@ defmodule Nebulex.Cache.EntryTest do "#{elem}" => elem, {:tuple, elem} => elem, <<100, elem>> => elem, - [elem] => elem + [elem] => elem, + true => true, + false => false } Map.merge(acc, sample) @@ -196,6 +225,12 @@ defmodule Nebulex.Cache.EntryTest do refute cache.get!("oranges") end + test "puts a boolean values", %{cache: cache} do + assert cache.put_new_all(%{true => true, false => false}) == {:ok, true} + assert cache.fetch!(true) == true + assert cache.fetch!(false) == false + end + test "raises when invalid option is given", %{cache: cache} do assert_raise ArgumentError, ~r"expected ttl: to be a valid 
timeout", fn -> cache.put_new_all(%{"apples" => 1, "bananas" => 3}, ttl: "1") @@ -324,6 +359,22 @@ defmodule Nebulex.Cache.EntryTest do assert cache.delete(:non_existent) == :ok refute cache.get!(:non_existent) end + + test "deletes boolean and nil values", %{cache: cache} do + :ok = cache.put_all(true: true, false: false, nil: nil) + + assert cache.fetch!(true) == true + assert cache.fetch!(false) == false + assert cache.fetch!(nil) == nil + + assert cache.delete(true) == :ok + assert cache.delete(false) == :ok + assert cache.delete(nil) == :ok + + refute cache.get!(true) + refute cache.get!(false) + refute cache.get!(nil) + end end describe "delete!/2" do @@ -346,6 +397,18 @@ defmodule Nebulex.Cache.EntryTest do end end + test "returns boolean and nil values", %{cache: cache} do + :ok = cache.put_all(true: true, false: false, nil: nil) + + assert cache.take(true) == {:ok, true} + assert cache.take(false) == {:ok, false} + assert cache.take(nil) == {:ok, nil} + + refute cache.get!(true) + refute cache.get!(false) + refute cache.get!(nil) + end + test "returns nil if the key does not exist in cache", %{cache: cache} do assert {:error, %Nebulex.KeyError{key: :non_existent}} = cache.take(:non_existent) assert {:error, %Nebulex.KeyError{key: nil}} = cache.take(nil) @@ -377,6 +440,14 @@ defmodule Nebulex.Cache.EntryTest do end end + test "returns boolean and nil values", %{cache: cache} do + :ok = cache.put_all(true: true, false: false, nil: nil) + + assert cache.has_key?(true) == {:ok, true} + assert cache.has_key?(false) == {:ok, true} + assert cache.has_key?(nil) == {:ok, true} + end + test "returns false if key does not exist in cache", %{cache: cache} do assert cache.has_key?(:non_existent) == {:ok, false} assert cache.has_key?(nil) == {:ok, false} diff --git a/test/shared/cache_test.exs b/test/shared/cache_test_case.exs similarity index 57% rename from test/shared/cache_test.exs rename to test/shared/cache_test_case.exs index aa697f59..09130d58 100644 --- 
a/test/shared/cache_test.exs +++ b/test/shared/cache_test_case.exs @@ -1,14 +1,13 @@ -defmodule Nebulex.CacheTest do +defmodule Nebulex.CacheTestCase do @moduledoc """ Shared Tests """ defmacro __using__(_opts) do quote do - use Nebulex.Cache.EntryTest - use Nebulex.Cache.EntryBooleanValuesTest - use Nebulex.Cache.EntryExpirationTest - use Nebulex.Cache.EntryPropTest + use Nebulex.Cache.KVTest + use Nebulex.Cache.KVExpirationTest + use Nebulex.Cache.KVPropTest use Nebulex.Cache.QueryableTest use Nebulex.Cache.TransactionTest use Nebulex.Cache.PersistenceTest diff --git a/test/shared/local_test.exs b/test/shared/local_test.exs deleted file mode 100644 index 47f597f9..00000000 --- a/test/shared/local_test.exs +++ /dev/null @@ -1,541 +0,0 @@ -defmodule Nebulex.LocalTest do - import Nebulex.CacheCase - - deftests do - import Ex2ms - import Nebulex.CacheCase, only: [cache_put: 2, cache_put: 3, cache_put: 4] - - alias Nebulex.{Adapter, Entry} - - describe "error" do - test "on init because invalid backend", %{cache: cache} do - assert {:error, {%ArgumentError{message: msg}, _}} = - cache.start_link(name: :invalid_backend, backend: :xyz) - - assert Regex.match?(~r/invalid value for :backend/, msg) - end - - test "because cache is stopped", %{cache: cache, name: name} do - :ok = cache.stop() - - assert cache.put(1, 13) == - {:error, - %Nebulex.Error{ - module: Nebulex.Error, - reason: {:registry_lookup_error, name} - }} - - msg = ~r"could not lookup Nebulex cache" - - assert_raise Nebulex.Error, msg, fn -> cache.put!(1, 13) end - assert_raise Nebulex.Error, msg, fn -> cache.get!(1) end - assert_raise Nebulex.Error, msg, fn -> cache.delete!(1) end - end - end - - describe "entry:" do - test "get_and_update", %{cache: cache} do - fun = fn - nil -> {nil, 1} - val -> {val, val * 2} - end - - assert cache.get_and_update!(1, fun) == {nil, 1} - assert cache.get_and_update!(1, &{&1, &1 * 2}) == {1, 2} - assert cache.get_and_update!(1, &{&1, &1 * 3}) == {2, 6} - assert 
cache.get_and_update!(1, &{&1, nil}) == {6, 6} - assert cache.get!(1) == 6 - assert cache.get_and_update!(1, fn _ -> :pop end) == {6, nil} - assert cache.get_and_update!(1, fn _ -> :pop end) == {nil, nil} - assert cache.get_and_update!(3, &{&1, 3}) == {nil, 3} - end - - test "get_and_update fails because function returns invalid value", %{cache: cache} do - assert_raise ArgumentError, fn -> - cache.get_and_update(1, fn _ -> :other end) - end - end - - test "get_and_update fails because cache is not started", %{cache: cache} do - :ok = cache.stop() - - assert_raise Nebulex.Error, fn -> - assert cache.get_and_update!(1, fn _ -> :pop end) - end - end - - test "incr and update", %{cache: cache} do - assert cache.incr!(:counter) == 1 - assert cache.incr!(:counter) == 2 - - assert cache.get_and_update!(:counter, &{&1, &1 * 2}) == {2, 4} - assert cache.incr!(:counter) == 5 - - assert cache.update!(:counter, 1, &(&1 * 2)) == 10 - assert cache.incr!(:counter, -10) == 0 - - assert cache.put("foo", "bar") == :ok - - assert_raise ArgumentError, fn -> - cache.incr!("foo") - end - end - - test "incr with ttl", %{cache: cache} do - assert cache.incr!(:counter_with_ttl, 1, ttl: 1000) == 1 - assert cache.incr!(:counter_with_ttl) == 2 - assert cache.fetch!(:counter_with_ttl) == 2 - - :ok = Process.sleep(1010) - - assert {:error, %Nebulex.KeyError{key: :counter_with_ttl}} = cache.fetch(:counter_with_ttl) - - assert cache.incr!(:counter_with_ttl, 1, ttl: 5000) == 1 - assert {:ok, ttl} = cache.ttl(:counter_with_ttl) - assert ttl > 1000 - - assert cache.expire(:counter_with_ttl, 500) == {:ok, true} - - :ok = Process.sleep(600) - - assert {:error, %Nebulex.KeyError{key: :counter_with_ttl}} = cache.fetch(:counter_with_ttl) - end - - test "incr existing entry", %{cache: cache} do - assert cache.put(:counter, 0) == :ok - assert cache.incr!(:counter) == 1 - assert cache.incr!(:counter, 2) == 3 - end - end - - describe "queryable:" do - test "error because invalid query", %{cache: cache} do - 
for action <- [:all, :stream] do - assert {:error, %Nebulex.QueryError{}} = apply(cache, action, [:invalid]) - end - end - - test "raise exception because invalid query", %{cache: cache} do - for action <- [:all!, :stream!] do - assert_raise Nebulex.QueryError, ~r"expected query to be one of", fn -> - all_or_stream(cache, action, :invalid) - end - end - end - - test "default query error message" do - assert_raise Nebulex.QueryError, "invalid query :invalid", fn -> - raise Nebulex.QueryError, query: :invalid - end - end - - test "ETS match_spec queries", %{cache: cache, name: name} do - values = cache_put(cache, 1..5, &(&1 * 2)) - _ = new_generation(cache, name) - values = values ++ cache_put(cache, 6..10, &(&1 * 2)) - - assert nil - |> cache.stream!(page_size: 3, return: :value) - |> Enum.to_list() - |> :lists.usort() == values - - {_, expected} = Enum.split(values, 5) - - test_ms = - fun do - {_, _, value, _, _} when value > 10 -> value - end - - for action <- [:all!, :stream!] do - assert all_or_stream(cache, action, test_ms, page_size: 3, return: :value) == expected - end - end - - test "expired and unexpired queries", %{cache: cache} do - for action <- [:all!, :stream!] 
do - expired = cache_put(cache, 1..5, &(&1 * 2), ttl: 1000) - unexpired = cache_put(cache, 6..10, &(&1 * 2)) - - all = expired ++ unexpired - - opts = [page_size: 3, return: :value] - - assert all_or_stream(cache, action, nil, opts) == all - assert all_or_stream(cache, action, :unexpired, opts) == all - assert all_or_stream(cache, action, :expired, opts) == [] - - :ok = Process.sleep(1100) - - assert all_or_stream(cache, action, :unexpired, opts) == unexpired - assert all_or_stream(cache, action, :expired, opts) == expired - end - end - - test "all entries", %{cache: cache} do - assert cache.put_all([a: 1, b: 2, c: 3], ttl: 5000) == :ok - - assert all = cache.all!(:unexpired, return: :entry) - assert length(all) == 3 - - for %Entry{} = entry <- all do - assert Entry.ttl(entry) > 0 - end - end - - test "delete all expired and unexpired entries", %{cache: cache} do - _ = cache_put(cache, 1..5, & &1, ttl: 1500) - _ = cache_put(cache, 6..10) - - assert cache.delete_all!(:expired) == 0 - assert cache.count_all!(:expired) == 0 - - :ok = Process.sleep(1600) - - assert cache.delete_all!(:expired) == 5 - assert cache.count_all!(:expired) == 0 - assert cache.count_all!(:unexpired) == 5 - - assert cache.delete_all!(:unexpired) == 5 - assert cache.count_all!(:unexpired) == 0 - assert cache.count_all!() == 0 - end - - test "delete all matched entries", %{cache: cache, name: name} do - values = cache_put(cache, 1..5) - - _ = new_generation(cache, name) - - values = values ++ cache_put(cache, 6..10) - - assert cache.count_all!() == 10 - - test_ms = - fun do - {_, _, value, _, _} when rem(value, 2) == 0 -> value - end - - {expected, rem} = Enum.split_with(values, &(rem(&1, 2) == 0)) - - assert cache.count_all!(test_ms) == 5 - assert cache.all!(test_ms) |> Enum.sort() == Enum.sort(expected) - - assert cache.delete_all!(test_ms) == 5 - assert cache.count_all!(test_ms) == 0 - assert cache.all!() |> Enum.sort() == Enum.sort(rem) - end - - test "delete all entries given by a list of 
keys", %{cache: cache} do - entries = for x <- 1..10, into: %{}, do: {x, x} - - :ok = cache.put_all(entries) - - assert cache.count_all!() == 10 - - assert cache.delete_all!({:in, [2, 4, 6, 8, 10, 12]}) == 5 - - assert cache.count_all!() == 5 - assert cache.all!() |> Enum.sort() == [1, 3, 5, 7, 9] - end - end - - describe "older generation hitted on" do - test "put/3 (key is removed from older generation)", %{cache: cache, name: name} do - :ok = cache.put("foo", "bar") - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, "foo") - assert get_from_old(cache, name, "foo") == "bar" - - :ok = cache.put("foo", "bar bar") - - assert get_from_new(cache, name, "foo") == "bar bar" - refute get_from_old(cache, name, "foo") - end - - test "put_new/3 (fallback to older generation)", %{cache: cache, name: name} do - assert cache.put_new!("foo", "bar") == true - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, "foo") - assert get_from_old(cache, name, "foo") == "bar" - - assert cache.put_new!("foo", "bar") == false - - refute get_from_new(cache, name, "foo") - assert get_from_old(cache, name, "foo") == "bar" - - _ = new_generation(cache, name) - - assert cache.put_new!("foo", "bar") == true - - assert get_from_new(cache, name, "foo") == "bar" - refute get_from_old(cache, name, "foo") - end - - test "replace/3 (fallback to older generation)", %{cache: cache, name: name} do - assert cache.replace!("foo", "bar bar") == false - - :ok = cache.put("foo", "bar") - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, "foo") - assert get_from_old(cache, name, "foo") == "bar" - - assert cache.replace!("foo", "bar bar") == true - - assert get_from_new(cache, name, "foo") == "bar bar" - refute get_from_old(cache, name, "foo") - - _ = new_generation(cache, name) - _ = new_generation(cache, name) - - assert cache.replace!("foo", "bar bar") == false - end - - test "put_all/2 (keys are removed from older generation)", %{cache: cache, 
name: name} do - entries = Enum.map(1..100, &{{:key, &1}, &1}) - - :ok = cache.put_all(entries) - - _ = new_generation(cache, name) - - Enum.each(entries, fn {k, v} -> - refute get_from_new(cache, name, k) - assert get_from_old(cache, name, k) == v - end) - - :ok = cache.put_all(entries) - - Enum.each(entries, fn {k, v} -> - assert get_from_new(cache, name, k) == v - refute get_from_old(cache, name, k) - end) - end - - test "put_new_all/2 (fallback to older generation)", %{cache: cache, name: name} do - entries = Enum.map(1..100, &{&1, &1}) - - assert cache.put_new_all!(entries) == true - - _ = new_generation(cache, name) - - Enum.each(entries, fn {k, v} -> - refute get_from_new(cache, name, k) - assert get_from_old(cache, name, k) == v - end) - - assert cache.put_new_all!(entries) == false - - Enum.each(entries, fn {k, v} -> - refute get_from_new(cache, name, k) - assert get_from_old(cache, name, k) == v - end) - - _ = new_generation(cache, name) - - assert cache.put_new_all!(entries) == true - - Enum.each(entries, fn {k, v} -> - assert get_from_new(cache, name, k) == v - refute get_from_old(cache, name, k) - end) - end - - test "expire/3 (fallback to older generation)", %{cache: cache, name: name} do - assert cache.put("foo", "bar") == :ok - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, "foo") - assert get_from_old(cache, name, "foo") == "bar" - - assert cache.expire!("foo", 200) == true - - assert get_from_new(cache, name, "foo") == "bar" - refute get_from_old(cache, name, "foo") - - :ok = Process.sleep(210) - - refute cache.get!("foo") - end - - test "incr/3 (fallback to older generation)", %{cache: cache, name: name} do - assert cache.put(:counter, 0, ttl: 200) == :ok - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, :counter) - assert get_from_old(cache, name, :counter) == 0 - - assert cache.incr!(:counter) == 1 - assert cache.incr!(:counter) == 2 - - assert get_from_new(cache, name, :counter) == 2 - refute 
get_from_old(cache, name, :counter) - - :ok = Process.sleep(210) - - assert cache.incr!(:counter) == 1 - end - - test "all/2 (no duplicates)", %{cache: cache, name: name} do - entries = for x <- 1..20, into: %{}, do: {x, x} - keys = Map.keys(entries) |> Enum.sort() - - :ok = cache.put_all(entries) - - assert cache.count_all!() == 20 - assert cache.all!() |> Enum.sort() == keys - - _ = new_generation(cache, name) - - :ok = cache.put_all(entries) - - assert cache.count_all!() == 20 - assert cache.all!() |> Enum.sort() == keys - - _ = new_generation(cache, name) - - more_entries = for x <- 10..30, into: %{}, do: {x, x} - more_keys = Map.keys(more_entries) |> Enum.sort() - - :ok = cache.put_all(more_entries) - - assert cache.count_all!() == 30 - assert cache.all!() |> Enum.sort() == (keys ++ more_keys) |> Enum.uniq() - - _ = new_generation(cache, name) - - assert cache.count_all!() == 21 - assert cache.all!() |> Enum.sort() == more_keys - end - end - - describe "generation" do - test "created with unexpired entries", %{cache: cache, name: name} do - assert cache.put("foo", "bar") == :ok - assert cache.fetch!("foo") == "bar" - assert cache.ttl("foo") == {:ok, :infinity} - - _ = new_generation(cache, name) - - assert cache.fetch!("foo") == "bar" - end - - test "lifecycle", %{cache: cache, name: name} do - # should be empty - assert {:error, %Nebulex.KeyError{key: 1}} = cache.fetch(1) - - # set some entries - for x <- 1..2, do: cache.put(x, x) - - # fetch one entry from new generation - assert cache.fetch!(1) == 1 - - # fetch non-existent entries - assert {:error, %Nebulex.KeyError{key: 3}} = cache.fetch(3) - assert {:error, %Nebulex.KeyError{key: :non_existent}} = cache.fetch(:non_existent) - - # create a new generation - _ = new_generation(cache, name) - - # both entries should be in the old generation - refute get_from_new(cache, name, 1) - refute get_from_new(cache, name, 2) - assert get_from_old(cache, name, 1) == 1 - assert get_from_old(cache, name, 2) == 2 - - # 
fetch entry 1 and put it into the new generation - assert cache.fetch!(1) == 1 - assert get_from_new(cache, name, 1) == 1 - refute get_from_new(cache, name, 2) - refute get_from_old(cache, name, 1) - assert get_from_old(cache, name, 2) == 2 - - # create a new generation, the old generation should be deleted - _ = new_generation(cache, name) - - # entry 1 should be into the old generation and entry 2 deleted - refute get_from_new(cache, name, 1) - refute get_from_new(cache, name, 2) - assert get_from_old(cache, name, 1) == 1 - refute get_from_old(cache, name, 2) - end - - test "creation with ttl", %{cache: cache, name: name} do - assert cache.put(1, 1, ttl: 1000) == :ok - assert cache.fetch!(1) == 1 - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, 1) - assert get_from_old(cache, name, 1) == 1 - assert cache.fetch!(1) == 1 - - :ok = Process.sleep(1100) - - assert {:error, %Nebulex.KeyError{key: 1}} = cache.fetch(1) - refute get_from_new(cache, name, 1) - refute get_from_old(cache, name, 1) - end - end - - ## Helpers - - defp new_generation(cache, name) do - cache.with_dynamic_cache(name, fn -> - cache.new_generation() - end) - end - - defp get_from_new(cache, name, key) do - cache.with_dynamic_cache(name, fn -> - get_from(cache.newer_generation(), name, key) - end) - end - - defp get_from_old(cache, name, key) do - cache.with_dynamic_cache(name, fn -> - cache.generations() - |> List.last() - |> get_from(name, key) - end) - end - - defp get_from(gen, name, key) do - Adapter.with_meta(name, fn %{backend: backend} -> - case backend.lookup(gen, key) do - [] -> nil - [{_, ^key, val, _, _}] -> val - end - end) - end - - defp all_or_stream(cache, action, ms, opts \\ []) - - defp all_or_stream(cache, :all!, ms, opts) do - ms - |> cache.all!(opts) - |> handle_query_result() - end - - defp all_or_stream(cache, :stream!, ms, opts) do - ms - |> cache.stream!(opts) - |> handle_query_result() - end - - defp handle_query_result(list) when is_list(list) do - 
:lists.usort(list) - end - - defp handle_query_result(stream) do - stream - |> Enum.to_list() - |> :lists.usort() - end - end -end diff --git a/test/shared/multilevel_test.exs b/test/shared/multilevel_test.exs deleted file mode 100644 index 7ca82f2f..00000000 --- a/test/shared/multilevel_test.exs +++ /dev/null @@ -1,328 +0,0 @@ -defmodule Nebulex.MultilevelTest do - import Nebulex.CacheCase - - deftests do - describe "c:init/1" do - test "fails because missing levels config", %{cache: cache} do - assert {:error, {%ArgumentError{message: msg}, _}} = cache.start_link(name: :missing_levels) - - assert Regex.match?(~r"required :levels option not found", msg) - end - end - - describe "put/3" do - test "ok", %{cache: cache} do - assert cache.put(1, 1) == :ok - assert cache.get!(1, nil, level: 1) == 1 - assert cache.get!(1, nil, level: 2) == 1 - assert cache.get!(1, nil, level: 3) == 1 - - assert cache.put(2, 2, level: 2) == :ok - assert cache.get!(2, nil, level: 2) == 2 - refute cache.get!(2, nil, level: 1) - refute cache.get!(2, nil, level: 3) - - assert cache.put("foo", nil) == :ok - refute cache.get!("foo") - end - end - - describe "put_new/3" do - test "ok", %{cache: cache} do - assert cache.put_new!(1, 1) - refute cache.put_new!(1, 2) - assert cache.get!(1, nil, level: 1) == 1 - assert cache.get!(1, nil, level: 2) == 1 - assert cache.get!(1, nil, level: 3) == 1 - - assert cache.put_new!(2, 2, level: 2) - assert cache.get!(2, nil, level: 2) == 2 - refute cache.get!(2, nil, level: 1) - refute cache.get!(2, nil, level: 3) - - assert cache.put_new!("foo", nil) - refute cache.get!("foo") - end - end - - describe "put_all/2" do - test "ok", %{cache: cache} do - assert cache.put_all( - for x <- 1..3 do - {x, x} - end, - ttl: 1000 - ) == :ok - - for x <- 1..3, do: assert(cache.get!(x) == x) - :ok = Process.sleep(1100) - for x <- 1..3, do: refute(cache.get!(x)) - - assert cache.put_all(%{"apples" => 1, "bananas" => 3}) == :ok - assert cache.put_all(blueberries: 2, 
strawberries: 5) == :ok - assert cache.get!("apples") == 1 - assert cache.get!("bananas") == 3 - assert cache.get!(:blueberries) == 2 - assert cache.get!(:strawberries) == 5 - - assert cache.put_all([]) == :ok - assert cache.put_all(%{}) == :ok - - refute cache.put_new_all!(%{"apples" => 100}) - assert cache.get!("apples") == 1 - end - end - - describe "get_all/2" do - test "ok", %{cache: cache} do - assert cache.put_all(a: 1, c: 3) == :ok - assert cache.get_all!([:a, :b, :c]) == %{a: 1, c: 3} - end - end - - describe "delete/2" do - test "ok", %{cache: cache} do - assert cache.put(1, 1) == :ok - assert cache.put(2, 2, level: 2) == :ok - - assert cache.delete(1) == :ok - refute cache.get!(1, nil, level: 1) - refute cache.get!(1, nil, level: 2) - refute cache.get!(1, nil, level: 3) - - assert cache.delete(2, level: 2) == :ok - refute cache.get!(2, nil, level: 1) - refute cache.get!(2, nil, level: 2) - refute cache.get!(2, nil, level: 3) - end - end - - describe "take/2" do - test "ok", %{cache: cache} do - assert cache.put(1, 1) == :ok - assert cache.put(2, 2, level: 2) == :ok - assert cache.put(3, 3, level: 3) == :ok - - assert cache.take!(1) == 1 - assert cache.take!(2) == 2 - assert cache.take!(3) == 3 - - refute cache.get!(1, nil, level: 1) - refute cache.get!(1, nil, level: 2) - refute cache.get!(1, nil, level: 3) - refute cache.get!(2, nil, level: 2) - refute cache.get!(3, nil, level: 3) - end - end - - describe "has_key?/1" do - test "ok", %{cache: cache} do - assert cache.put(1, 1) == :ok - assert cache.put(2, 2, level: 2) == :ok - assert cache.put(3, 3, level: 3) == :ok - - assert cache.has_key?(1) == {:ok, true} - assert cache.has_key?(2) == {:ok, true} - assert cache.has_key?(3) == {:ok, true} - assert cache.has_key?(4) == {:ok, false} - end - end - - describe "ttl/1" do - test "ok", %{cache: cache} do - assert cache.put(:a, 1, ttl: 1000) == :ok - assert cache.ttl!(:a) > 0 - assert cache.put(:b, 2) == :ok - - :ok = Process.sleep(10) - assert 
cache.ttl!(:a) > 0 - assert cache.ttl!(:b) == :infinity - - assert_raise Nebulex.KeyError, fn -> - cache.ttl!(:c) - end - - :ok = Process.sleep(1100) - - assert_raise Nebulex.KeyError, fn -> - cache.ttl!(:a) - end - end - - test "raises Nebulex.KeyError if key does not exist", %{cache: cache, name: name} do - msg = ~r"key :non_existent not found in cache: #{inspect(name)}" - - assert_raise Nebulex.KeyError, msg, fn -> - cache.ttl!(:non_existent) - end - end - end - - describe "expire/2" do - test "ok", %{cache: cache} do - assert cache.put(:a, 1) == :ok - assert cache.ttl!(:a) == :infinity - - assert cache.expire!(:a, 1000) - ttl = cache.ttl!(:a) - assert ttl > 0 and ttl <= 1000 - - assert cache.get!(:a, nil, level: 1) == 1 - assert cache.get!(:a, nil, level: 2) == 1 - assert cache.get!(:a, nil, level: 3) == 1 - - :ok = Process.sleep(1100) - refute cache.get!(:a) - refute cache.get!(:a, nil, level: 1) - refute cache.get!(:a, nil, level: 2) - refute cache.get!(:a, nil, level: 3) - end - - test "raises when ttl is invalid", %{cache: cache} do - assert_raise ArgumentError, ~r"expected ttl to be a valid timeout", fn -> - cache.expire!(:a, "hello") - end - end - end - - describe "touch/1" do - test "ok", %{cache: cache} do - assert cache.put(:touch, 1, ttl: 1000, level: 2) == :ok - - :ok = Process.sleep(10) - assert cache.touch!(:touch) - - :ok = Process.sleep(200) - assert cache.touch!(:touch) - assert cache.get!(:touch) == 1 - - :ok = Process.sleep(1100) - refute cache.get!(:touch) - - refute cache.touch!(:non_existent) - end - end - - describe "get_and_update/3" do - test "ok", %{cache: cache} do - assert cache.put(1, 1, level: 1) == :ok - assert cache.put(2, 2) == :ok - - assert cache.get_and_update!(1, &{&1, &1 * 2}, level: 1) == {1, 2} - assert cache.get!(1, nil, level: 1) == 2 - refute cache.get!(1, nil, level: 3) - refute cache.get!(1, nil, level: 3) - - assert cache.get_and_update!(2, &{&1, &1 * 2}) == {2, 4} - assert cache.get!(2, nil, level: 1) == 4 - assert 
cache.get!(2, nil, level: 2) == 4 - assert cache.get!(2, nil, level: 3) == 4 - - assert cache.get_and_update!(1, fn _ -> :pop end, level: 1) == {2, nil} - refute cache.get!(1, nil, level: 1) - - assert cache.get_and_update!(2, fn _ -> :pop end) == {4, nil} - refute cache.get!(2, nil, level: 1) - refute cache.get!(2, nil, level: 2) - refute cache.get!(2, nil, level: 3) - end - end - - describe "update/4" do - test "ok", %{cache: cache} do - assert cache.put(1, 1, level: 1) == :ok - assert cache.put(2, 2) == :ok - - assert cache.update!(1, 1, &(&1 * 2), level: 1) == 2 - assert cache.get!(1, nil, level: 1) == 2 - refute cache.get!(1, nil, level: 2) - refute cache.get!(1, nil, level: 3) - - assert cache.update!(2, 1, &(&1 * 2)) == 4 - assert cache.get!(2, nil, level: 1) == 4 - assert cache.get!(2, nil, level: 2) == 4 - assert cache.get!(2, nil, level: 3) == 4 - end - end - - describe "incr/3" do - test "ok", %{cache: cache} do - assert cache.incr!(1) == 1 - assert cache.get!(1, nil, level: 1) == 1 - assert cache.get!(1, nil, level: 2) == 1 - assert cache.get!(1, nil, level: 3) == 1 - - assert cache.incr!(2, 2, level: 2) == 2 - assert cache.get!(2, nil, level: 2) == 2 - refute cache.get!(2, nil, level: 1) - refute cache.get!(2, nil, level: 3) - - assert cache.incr!(3, 3) == 3 - assert cache.get!(3, nil, level: 1) == 3 - assert cache.get!(3, nil, level: 2) == 3 - assert cache.get!(3, nil, level: 3) == 3 - - assert cache.incr!(4, 5) == 5 - assert cache.incr!(4, -5) == 0 - assert cache.get!(4, nil, level: 1) == 0 - assert cache.get!(4, nil, level: 2) == 0 - assert cache.get!(4, nil, level: 3) == 0 - end - end - - describe "queryable:" do - test "all/2 and stream/2", %{cache: cache} do - for x <- 1..30, do: cache.put(x, x, level: 1) - for x <- 20..60, do: cache.put(x, x, level: 2) - for x <- 50..100, do: cache.put(x, x, level: 3) - - expected = :lists.usort(for x <- 1..100, do: x) - assert cache.all!() |> :lists.usort() == expected - - stream = cache.stream!() - - assert 
stream - |> Enum.to_list() - |> :lists.usort() == expected - - del = - for x <- 20..60 do - assert cache.delete(x) == :ok - x - end - - expected = :lists.usort(expected -- del) - assert cache.all!() |> :lists.usort() == expected - end - - test "delete_all/2", %{cache: cache} do - for x <- 1..30, do: cache.put(x, x, level: 1) - for x <- 21..60, do: cache.put(x, x, level: 2) - for x <- 51..100, do: cache.put(x, x, level: 3) - - assert count = cache.count_all!() - assert cache.delete_all!() == count - assert cache.all!() == [] - end - - test "count_all/2", %{cache: cache} do - assert cache.count_all!() == 0 - - for x <- 1..10, do: cache.put(x, x, level: 1) - for x <- 11..20, do: cache.put(x, x, level: 2) - for x <- 21..30, do: cache.put(x, x, level: 3) - - assert cache.count_all!() == 30 - - for x <- [1, 11, 21], do: cache.delete(x, level: 1) - - assert cache.count_all!() == 29 - - assert cache.delete(1, level: 1) == :ok - assert cache.delete(11, level: 2) == :ok - assert cache.delete(21, level: 3) == :ok - assert cache.count_all!() == 27 - end - end - end -end diff --git a/test/support/cache_case.ex b/test/support/cache_case.exs similarity index 100% rename from test/support/cache_case.ex rename to test/support/cache_case.exs diff --git a/test/support/cluster.ex b/test/support/cluster.ex deleted file mode 100644 index b9fe6dfb..00000000 --- a/test/support/cluster.ex +++ /dev/null @@ -1,89 +0,0 @@ -defmodule Nebulex.Cluster do - @moduledoc """ - Taken from `Phoenix.PubSub.Cluster`. 
- Copyright (c) 2014 Chris McCord - """ - - def spawn(nodes) do - # Turn node into a distributed node with the given long name - _ = :net_kernel.start([:"primary@127.0.0.1"]) - - # Allow spawned nodes to fetch all code from this node - _ = :erl_boot_server.start([]) - _ = allow_boot(to_charlist("127.0.0.1")) - - nodes - |> Enum.map(&Task.async(fn -> spawn_node(&1) end)) - |> Enum.map(&Task.await(&1, 30_000)) - end - - def spawn_node(node_host) do - {:ok, node} = start_peer(node_host) - - _ = add_code_paths(node) - _ = transfer_configuration(node) - _ = ensure_applications_started(node) - - {:ok, node} - end - - if Code.ensure_loaded?(:peer) do - defp start_peer(node_host) do - {:ok, _pid, node} = - :peer.start(%{ - name: node_name(node_host), - host: to_charlist("127.0.0.1"), - args: [inet_loader_args()] - }) - - {:ok, node} - end - else - defp start_peer(node_host) do - :slave.start(to_charlist("127.0.0.1"), node_name(node_host), inet_loader_args()) - end - end - - defp rpc(node, module, function, args) do - :rpc.block_call(node, module, function, args) - end - - defp inet_loader_args do - to_charlist("-loader inet -hosts 127.0.0.1 -setcookie #{:erlang.get_cookie()}") - end - - defp allow_boot(host) do - {:ok, ipv4} = :inet.parse_ipv4_address(host) - - :erl_boot_server.add_slave(ipv4) - end - - defp add_code_paths(node) do - rpc(node, :code, :add_paths, [:code.get_path()]) - end - - defp transfer_configuration(node) do - for {app_name, _, _} <- Application.loaded_applications() do - for {key, val} <- Application.get_all_env(app_name) do - rpc(node, Application, :put_env, [app_name, key, val]) - end - end - end - - defp ensure_applications_started(node) do - rpc(node, Application, :ensure_all_started, [:mix]) - rpc(node, Mix, :env, [Mix.env()]) - - for {app_name, _, _} <- Application.loaded_applications(), app_name not in [:dialyxir] do - rpc(node, Application, :ensure_all_started, [app_name]) - end - end - - defp node_name(node_host) do - node_host - |> 
to_string() - |> String.split("@") - |> Enum.at(0) - |> String.to_atom() - end -end diff --git a/test/support/fake_adapter.ex b/test/support/fake_adapter.exs similarity index 97% rename from test/support/fake_adapter.ex rename to test/support/fake_adapter.exs index 0adb9596..849f6cd8 100644 --- a/test/support/fake_adapter.ex +++ b/test/support/fake_adapter.exs @@ -8,12 +8,12 @@ defmodule Nebulex.FakeAdapter do @doc false def init(_opts) do - child_spec = Supervisor.child_spec({Agent, fn -> :ok end}, id: {Agent, 1}) + child_spec = Supervisor.child_spec({Agent, fn -> :ok end}, id: Agent) {:ok, child_spec, %{}} end - ## Nebulex.Adapter.Entry + ## Nebulex.Adapter.KV @doc false def fetch(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} diff --git a/test/support/node_case.ex b/test/support/node_case.ex deleted file mode 100644 index 7a1644dd..00000000 --- a/test/support/node_case.ex +++ /dev/null @@ -1,45 +0,0 @@ -defmodule Nebulex.NodeCase do - @moduledoc """ - Based on `Phoenix.PubSub.NodeCase`. 
- Copyright (c) 2014 Chris McCord - """ - - @timeout 5000 - - defmacro __using__(_opts) do - quote do - use ExUnit.Case, async: true - import unquote(__MODULE__) - - @moduletag :clustered - - @timeout unquote(@timeout) - end - end - - def start_caches(nodes, caches) do - for node <- nodes, {cache, opts} <- caches do - {:ok, pid} = start_cache(node, cache, opts) - - {node, cache, pid} - end - end - - def start_cache(node, cache, opts \\ []) do - rpc(node, cache, :start_link, [opts]) - end - - def stop_caches(node_pid_list) do - Enum.each(node_pid_list, fn {node, _cache, pid} -> - stop_cache(node, pid) - end) - end - - def stop_cache(node, pid) do - rpc(node, Supervisor, :stop, [pid, :normal, @timeout]) - end - - def rpc(node, module, function, args) do - :rpc.block_call(node, module, function, args) - end -end diff --git a/test/support/test_adapter.exs b/test/support/test_adapter.exs new file mode 100644 index 00000000..d495dbfe --- /dev/null +++ b/test/support/test_adapter.exs @@ -0,0 +1,465 @@ +defmodule Nebulex.TestAdapter do + @moduledoc """ + Adapter for testing purposes. 
+ """ + + defmodule Entry do + @moduledoc false + + defstruct value: nil, touched: nil, exp: nil + + alias Nebulex.Time + + @doc false + def new(value, ttl \\ :infinity, touched \\ Time.now()) do + %__MODULE__{ + value: value, + touched: touched, + exp: exp(ttl) + } + end + + @doc false + def exp(now \\ Time.now(), ttl) + + def exp(_now, :infinity), do: :infinity + def exp(now, ttl), do: now + ttl + end + + # Provide Cache Implementation + @behaviour Nebulex.Adapter + @behaviour Nebulex.Adapter.KV + @behaviour Nebulex.Adapter.Queryable + + # Inherit default transaction implementation + use Nebulex.Adapter.Transaction + + # Inherit default persistence implementation + use Nebulex.Adapter.Persistence + + # Inherit default stats implementation + use Nebulex.Adapter.Stats + + use Nebulex.Cache.Options + + import Nebulex.Adapter, only: [defspan: 2] + import Nebulex.Helpers + + alias Nebulex.Adapter.Stats + alias __MODULE__.{Entry, KV} + alias Nebulex.Time + + ## Nebulex.Adapter + + @impl true + defmacro __before_compile__(_env), do: :ok + + @impl true + def init(opts) do + # Validate options + opts = validate!(opts) + + # Required options + telemetry = Keyword.fetch!(opts, :telemetry) + telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) + + # Init stats_counter + stats_counter = Stats.init(opts) + + # Adapter meta + metadata = %{ + telemetry: telemetry, + telemetry_prefix: telemetry_prefix, + stats_counter: stats_counter, + started_at: DateTime.utc_now() + } + + # KV server + child_spec = Supervisor.child_spec({KV, [adapter_meta: metadata] ++ opts}, id: KV) + + {:ok, child_spec, metadata} + end + + ## Nebulex.Adapter.KV + + @impl true + defspan fetch(adapter_meta, key, _opts) do + with {:ok, %Entry{value: value}} <- do_fetch(adapter_meta, key) do + {:ok, value} + end + end + + defp do_fetch(_adapter_meta, {:eval, fun}) do + fun.() + end + + defp do_fetch(adapter_meta, key) do + adapter_meta.pid + |> GenServer.call({:fetch, key}) + |> validate_ttl(key, 
adapter_meta) + end + + defp validate_ttl({:ok, %Entry{exp: :infinity} = entry}, _key, _adapter_meta) do + {:ok, entry} + end + + defp validate_ttl( + {:ok, %Entry{exp: exp} = entry}, + key, + %{ + name: name, + cache: cache, + pid: pid + } = adapter_meta + ) + when is_integer(exp) do + if Time.now() >= exp do + :ok = delete(adapter_meta, key, []) + + wrap_error Nebulex.KeyError, key: key, cache: name || {cache, pid}, reason: :expired + else + {:ok, entry} + end + end + + defp validate_ttl(:error, key, %{name: name, cache: cache, pid: pid}) do + wrap_error Nebulex.KeyError, key: key, cache: name || {cache, pid}, reason: :not_found + end + + @impl true + defspan get_all(adapter_meta, keys, opts) do + adapter_meta = %{adapter_meta | telemetry: Map.get(adapter_meta, :in_span?, false)} + + keys + |> Enum.reduce(%{}, fn key, acc -> + case fetch(adapter_meta, key, opts) do + {:ok, val} -> Map.put(acc, key, val) + {:error, _} -> acc + end + end) + |> wrap_ok() + end + + @impl true + defspan put(adapter_meta, key, value, ttl, on_write, _opts) do + do_put(adapter_meta.pid, key, Entry.new(value, ttl), on_write) + end + + defp do_put(pid, key, entry, :put) do + GenServer.call(pid, {:put, key, entry}) + end + + defp do_put(pid, key, entry, :put_new) do + GenServer.call(pid, {:put_new, key, entry}) + end + + defp do_put(pid, key, entry, :replace) do + GenServer.call(pid, {:replace, key, entry}) + end + + @impl true + defspan put_all(adapter_meta, entries, ttl, on_write, _opts) do + entries = for {key, value} <- entries, into: %{}, do: {key, Entry.new(value, ttl)} + + do_put_all(adapter_meta.pid, entries, on_write) + end + + defp do_put_all(pid, entries, :put) do + GenServer.call(pid, {:put_all, entries}) + end + + defp do_put_all(pid, entries, :put_new) do + GenServer.call(pid, {:put_new_all, entries}) + end + + @impl true + defspan delete(adapter_meta, key, _opts) do + GenServer.call(adapter_meta.pid, {:delete, key}) + end + + @impl true + defspan take(adapter_meta, key, 
_opts) do + with {:ok, %Entry{value: value}} <- + adapter_meta.pid + |> GenServer.call({:pop, key}) + |> validate_ttl(key, adapter_meta) do + {:ok, value} + end + end + + @impl true + defspan update_counter(adapter_meta, key, amount, ttl, default, _opts) do + _ = do_fetch(adapter_meta, key) + + GenServer.call( + adapter_meta.pid, + {:update_counter, key, amount, Entry.new(default + amount, ttl)} + ) + end + + @impl true + def has_key?(adapter_meta, key, _opts) do + case fetch(adapter_meta, key, []) do + {:ok, _} -> {:ok, true} + {:error, _} -> {:ok, false} + end + end + + @impl true + defspan ttl(adapter_meta, key, _opts) do + with {:ok, entry} <- do_fetch(adapter_meta, key) do + {:ok, entry_ttl(entry)} + end + end + + @impl true + defspan expire(adapter_meta, key, ttl, _opts) do + GenServer.call(adapter_meta.pid, {:expire, key, ttl}) + end + + @impl true + defspan touch(adapter_meta, key, _opts) do + GenServer.call(adapter_meta.pid, {:touch, key}) + end + + ## Nebulex.Adapter.Queryable + + @impl true + defspan execute(adapter_meta, operation, query, opts) do + GenServer.call(adapter_meta.pid, {:q, operation, query, opts}) + end + + @impl true + defspan stream(adapter_meta, query, opts) do + GenServer.call(adapter_meta.pid, {:q, :stream, query, opts}) + end + + ## Nebulex.Adapter.Persistence + + @impl true + defspan dump(adapter_meta, path, opts) do + super(adapter_meta, path, opts) + end + + @impl true + defspan load(adapter_meta, path, opts) do + super(adapter_meta, path, opts) + end + + ## Nebulex.Adapter.Transaction + + @impl true + defspan transaction(adapter_meta, opts, fun) do + super(adapter_meta, opts, fun) + end + + @impl true + defspan in_transaction?(adapter_meta) do + super(adapter_meta) + end + + ## Nebulex.Adapter.Stats + + @impl true + defspan stats(adapter_meta) do + with {:ok, %Nebulex.Stats{} = stats} <- super(adapter_meta) do + {:ok, %{stats | metadata: Map.put(stats.metadata, :started_at, adapter_meta.started_at)}} + end + end + + ## Helpers + 
+ defp entry_ttl(%Entry{exp: :infinity}), do: :infinity + defp entry_ttl(%Entry{exp: exp}), do: exp - Time.now() +end + +defmodule Nebulex.TestAdapter.KV do + @moduledoc false + + use GenServer + + import Nebulex.Helpers, only: [wrap_error: 2] + + alias Nebulex.Telemetry + alias Nebulex.Telemetry.StatsHandler + alias Nebulex.TestAdapter.Entry + alias Nebulex.Time + + ## Internals + + # Internal state + defstruct map: nil, telemetry_prefix: nil, stats_counter: nil + + ## API + + @spec start_link(keyword) :: GenServer.on_start() + def start_link(opts) do + GenServer.start_link(__MODULE__, opts) + end + + ## GenServer callbacks + + @impl true + def init(opts) do + state = struct(__MODULE__, Keyword.get(opts, :adapter_meta, %{})) + + {:ok, %{state | map: %{}}, {:continue, :attach_stats_handler}} + end + + @impl true + def handle_continue(message, state) + + def handle_continue(:attach_stats_handler, %__MODULE__{stats_counter: nil} = state) do + {:noreply, state} + end + + def handle_continue(:attach_stats_handler, %__MODULE__{stats_counter: stats_counter} = state) do + _ = + Telemetry.attach_many( + stats_counter, + [state.telemetry_prefix ++ [:command, :stop]], + &StatsHandler.handle_event/4, + stats_counter + ) + + {:noreply, state} + end + + @impl true + def handle_call(request, from, state) + + def handle_call({:fetch, key}, _from, %__MODULE__{map: map} = state) do + {:reply, Map.fetch(map, key), state} + end + + def handle_call({:put, key, value}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, true}, %{state | map: Map.put(map, key, value)}} + end + + def handle_call({:put_new, key, value}, _from, %__MODULE__{map: map} = state) do + case Map.has_key?(map, key) do + true -> + {:reply, {:ok, false}, state} + + false -> + {:reply, {:ok, true}, %{state | map: Map.put_new(map, key, value)}} + end + end + + def handle_call({:replace, key, value}, _from, %__MODULE__{map: map} = state) do + case Map.has_key?(map, key) do + true -> + {:reply, {:ok, true}, 
%{state | map: Map.replace(map, key, value)}} + + false -> + {:reply, {:ok, false}, state} + end + end + + def handle_call({:put_all, entries}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, true}, %{state | map: Map.merge(map, entries)}} + end + + def handle_call({:put_new_all, entries}, _from, %__MODULE__{map: map} = state) do + case Enum.any?(map, fn {k, _} -> Map.has_key?(entries, k) end) do + true -> + {:reply, {:ok, false}, state} + + false -> + {:reply, {:ok, true}, %{state | map: Map.merge(map, entries)}} + end + end + + def handle_call({:delete, key}, _from, %__MODULE__{map: map} = state) do + {:reply, :ok, %{state | map: Map.delete(map, key)}} + end + + def handle_call({:pop, key}, _from, %__MODULE__{map: map} = state) do + ref = make_ref() + + case Map.pop(map, key, ref) do + {^ref, _map} -> + {:reply, :error, state} + + {value, map} -> + {:reply, {:ok, value}, %{state | map: map}} + end + end + + def handle_call({:update_counter, key, amount, default}, _from, %__MODULE__{map: map} = state) do + case Map.fetch(map, key) do + {:ok, %{value: value}} when not is_integer(value) -> + error = wrap_error Nebulex.Error, reason: :badarith + + {:reply, error, state} + + _other -> + map = Map.update(map, key, default, &%{&1 | value: &1.value + amount}) + counter = Map.fetch!(map, key) + + {:reply, {:ok, counter.value}, %{state | map: map}} + end + end + + def handle_call({:expire, key, ttl}, _from, %__MODULE__{map: map} = state) do + case Map.has_key?(map, key) do + true -> + {:reply, {:ok, true}, %{state | map: Map.update!(map, key, &%{&1 | exp: Entry.exp(ttl)})}} + + false -> + {:reply, {:ok, false}, state} + end + end + + def handle_call({:touch, key}, _from, %__MODULE__{map: map} = state) do + case Map.has_key?(map, key) do + true -> + {:reply, {:ok, true}, %{state | map: Map.update!(map, key, &%{&1 | touched: Time.now()})}} + + false -> + {:reply, {:ok, false}, state} + end + end + + def handle_call({:q, :all, nil, opts}, _from, %__MODULE__{map: map}
= state) do + {:reply, {:ok, return(Enum, map, opts)}, state} + end + + def handle_call({:q, :count_all, nil, _opts}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, map_size(map)}, state} + end + + def handle_call({:q, :delete_all, nil, _opts}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, map_size(map)}, %{state | map: %{}}} + end + + def handle_call({:q, :stream, nil, opts}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, return(Stream, map, opts)}, state} + end + + def handle_call({:q, _op, query, _opts}, _from, %__MODULE__{} = state) do + error = wrap_error Nebulex.QueryError, message: "invalid query", query: query + + {:reply, error, state} + end + + ## Private Functions + + defp return(module, map, opts) do + case Keyword.get(opts, :return, :key) do + :key -> + module.map(map, fn {k, _e} -> k end) + + :value -> + module.map(map, fn {_k, e} -> e.value end) + + {:key, :value} -> + module.map(map, fn {k, e} -> {k, e.value} end) + + :entry -> + module.map(map, fn {k, e} -> + %Nebulex.Entry{key: k, value: e.value, touched: e.touched, exp: e.exp} + end) + end + end +end diff --git a/test/support/test_cache.ex b/test/support/test_cache.exs similarity index 51% rename from test/support/test_cache.ex rename to test/support/test_cache.exs index 61c23e87..fb342142 100644 --- a/test/support/test_cache.ex +++ b/test/support/test_cache.exs @@ -18,90 +18,16 @@ defmodule Nebulex.TestCache do @moduledoc false use Nebulex.Cache, otp_app: :nebulex, - adapter: Nebulex.Adapters.Local + adapter: Nebulex.TestAdapter use Nebulex.TestCache.Common end - defmodule Partitioned do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - - use Nebulex.TestCache.Common - end - - defmodule Replicated do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated - - use Nebulex.TestCache.Common - end - - defmodule Multilevel do - @moduledoc false - use Nebulex.Cache, - 
otp_app: :nebulex, - adapter: Nebulex.Adapters.Multilevel - - defmodule L1 do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule L2 do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - end - - defmodule L3 do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated - end - end - defmodule StatsCache do @moduledoc false use Nebulex.Cache, otp_app: :nebulex, - adapter: Nebulex.Adapters.Multilevel - - defmodule L1 do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule L2 do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated - end - - defmodule L3 do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - end - - defmodule L4 do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end + adapter: Nebulex.TestAdapter end ## Mocks @@ -109,7 +35,7 @@ defmodule Nebulex.TestCache do defmodule AdapterMock do @moduledoc false @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry + @behaviour Nebulex.Adapter.KV @behaviour Nebulex.Adapter.Queryable @impl true @@ -194,20 +120,4 @@ defmodule Nebulex.TestCache do @impl true def stream(_, _, _), do: {:ok, 1..10} end - - defmodule PartitionedMock do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned, - primary_storage_adapter: Nebulex.TestCache.AdapterMock - end - - defmodule ReplicatedMock do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated, - primary_storage_adapter: Nebulex.TestCache.AdapterMock - end end diff --git a/test/test_helper.exs b/test/test_helper.exs index 46ce2590..2adc0d1f 100644 --- a/test/test_helper.exs +++ b/test/test_helper.exs @@ -1,36 +1,31 @@ -# 
Mocks -[ - Nebulex.TestCache.Multilevel.L1, - Nebulex.TestCache.StatsCache.L1, - Nebulex.Cache.Registry, - Nebulex.Cache.Cluster, - Nebulex.RPC, - Mix.Project -] -|> Enum.each(&Mimic.copy/1) - -# Start Telemetry -_ = Application.start(:telemetry) - -# Set nodes -nodes = [:"node1@127.0.0.1", :"node2@127.0.0.1", :"node3@127.0.0.1", :"node4@127.0.0.1"] -:ok = Application.put_env(:nebulex, :nodes, nodes) +# Load support modules +Code.require_file("support/test_adapter.exs", __DIR__) +Code.require_file("support/fake_adapter.exs", __DIR__) +Code.require_file("support/test_cache.exs", __DIR__) +Code.require_file("support/cache_case.exs", __DIR__) -# Load shared tests +# Load shared test cases for file <- File.ls!("test/shared/cache") do Code.require_file("./shared/cache/" <> file, __DIR__) end +# Load shared test cases for file <- File.ls!("test/shared"), not File.dir?("test/shared/" <> file) do Code.require_file("./shared/" <> file, __DIR__) end -# Spawn remote nodes -unless :clustered in Keyword.get(ExUnit.configuration(), :exclude, []) do - Nebulex.Cluster.spawn(nodes) -end +# Mocks +[ + Mix.Project, + Nebulex.Cache.Registry +] +|> Enum.each(&Mimic.copy/1) + +# Start Telemetry +_ = Application.start(:telemetry) -# For mix tests +# For tasks/generators testing +Mix.start() Mix.shell(Mix.Shell.Process) # Start ExUnit