diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9d74dff42da..ed6335b7b9f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -31,6 +31,9 @@
- `InMemoryCache` now _guarantees_ that any two result objects returned by the cache (from `readQuery`, `readFragment`, etc.) will be referentially equal (`===`) if they are deeply equal. Previously, `===` equality was often achievable for results for the same query, on a best-effort basis. Now, equivalent result objects will be automatically shared among the result trees of completely different queries. This guarantee is important for taking full advantage of optimistic updates that correctly guess the final data, and for "pure" UI components that can skip re-rendering when their input data are unchanged.
[@benjamn](https://github.com/benjamn) in [#7439](https://github.com/apollographql/apollo-client/pull/7439)
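
  For example (a quick illustration of the guarantee):

  ```ts
  import { InMemoryCache, gql } from "@apollo/client";

  const cache = new InMemoryCache();
  const query = gql`query { __typename }`;

  cache.writeQuery({ query, data: { __typename: "Query" } });

  // Deeply equal results are now guaranteed to be referentially equal:
  console.log(cache.readQuery({ query }) === cache.readQuery({ query })); // true
  ```
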
+- In addition to `read` and `merge` functions, `InMemoryCache` field policies may now configure a `drop` function, which is called just before the field in question is removed from the cache, allowing any final cleanup of field state (such as `options.storage`) used by the field's `read` and `merge` functions.
+ [@benjamn](https://github.com/benjamn) in [#8078](https://github.com/apollographql/apollo-client/pull/8078)
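+
+  A minimal sketch of a field policy using `drop` (adapted from this PR's tests; the `Parent.deleteMe` field is illustrative):
+
+  ```ts
+  import { InMemoryCache } from "@apollo/client";
+
+  const cache = new InMemoryCache({
+    typePolicies: {
+      Parent: {
+        fields: {
+          deleteMe: {
+            merge(existing, incoming, { storage }) {
+              // Remember auxiliary state while the field is cached.
+              return storage.cached = incoming;
+            },
+            drop(existing, { storage }) {
+              // Called just before the field is removed, whether by
+              // eviction, cache.modify, or a merge returning undefined.
+              delete storage.cached;
+            },
+          },
+        },
+      },
+    },
+  });
+  ```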
+
- `InMemoryCache` supports a new method called `batch`, which is similar to `performTransaction` but takes named options rather than positional parameters. One of these named options is an `onDirty(watch, diff)` callback, which can be used to determine which watched queries were invalidated by the `batch` operation.
[@benjamn](https://github.com/benjamn) in [#7819](https://github.com/apollographql/apollo-client/pull/7819)
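
  A rough sketch of calling `batch` (only `onDirty(watch, diff)` is named in this entry; the `transaction` option name below is an assumption, and `query`/`data` are placeholders):

  ```ts
  cache.batch({
    // Assumed name for the transaction callback option (this entry does not
    // specify it; only onDirty is documented above).
    transaction(c) {
      c.writeQuery({ query, data });
    },
    onDirty(watch, diff) {
      // Called for each watched query invalidated by this batch operation.
      console.log("invalidated:", watch.query, diff.complete);
    },
  });
  ```
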
diff --git a/src/cache/inmemory/__tests__/__snapshots__/policies.ts.snap b/src/cache/inmemory/__tests__/__snapshots__/policies.ts.snap
index 296372a0cd1..77a4add9107 100644
--- a/src/cache/inmemory/__tests__/__snapshots__/policies.ts.snap
+++ b/src/cache/inmemory/__tests__/__snapshots__/policies.ts.snap
@@ -1211,6 +1211,86 @@ Object {
}
`;
+exports[`type policies field policies custom field policy drop functions are called if merge function returns undefined 1`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "todoList": Object {
+ "__ref": "ToDoList:{}",
+ },
+ },
+ "Task:{\\"taskID\\":1}": Object {
+ "__typename": "Task",
+ "taskID": 1,
+ "text": "task #1",
+ },
+ "Task:{\\"taskID\\":2}": Object {
+ "__typename": "Task",
+ "taskID": 2,
+ "text": "task #2",
+ },
+ "ToDoList:{}": Object {
+ "__typename": "ToDoList",
+ "tasks": Array [
+ Object {
+ "__ref": "Task:{\\"taskID\\":1}",
+ },
+ Object {
+ "__ref": "Task:{\\"taskID\\":2}",
+ },
+ ],
+ },
+}
+`;
+
+exports[`type policies field policies custom field policy drop functions are called if merge function returns undefined 2`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "todoList": Object {
+ "__ref": "ToDoList:{}",
+ },
+ },
+ "Task:{\\"taskID\\":1}": Object {
+ "__typename": "Task",
+ "taskID": 1,
+ "text": "task #1",
+ },
+ "Task:{\\"taskID\\":2}": Object {
+ "__typename": "Task",
+ "taskID": 2,
+ "text": "task #2",
+ },
+ "Task:{\\"taskID\\":3}": Object {
+ "__typename": "Task",
+ "taskID": 3,
+ "text": "task #3",
+ },
+ "Task:{\\"taskID\\":4}": Object {
+ "__typename": "Task",
+ "taskID": 4,
+ "text": "task #4",
+ },
+ "ToDoList:{}": Object {
+ "__typename": "ToDoList",
+ "tasks": Array [
+ Object {
+ "__ref": "Task:{\\"taskID\\":1}",
+ },
+ Object {
+ "__ref": "Task:{\\"taskID\\":2}",
+ },
+ Object {
+ "__ref": "Task:{\\"taskID\\":3}",
+ },
+ Object {
+ "__ref": "Task:{\\"taskID\\":4}",
+ },
+ ],
+ },
+}
+`;
+
exports[`type policies field policies read, merge, and modify functions can access options.storage 1`] = `
Object {
"ROOT_QUERY": Object {
diff --git a/src/cache/inmemory/__tests__/policies.ts b/src/cache/inmemory/__tests__/policies.ts
index 0c7267553a2..665f1ec337a 100644
--- a/src/cache/inmemory/__tests__/policies.ts
+++ b/src/cache/inmemory/__tests__/policies.ts
@@ -1705,6 +1705,308 @@ describe("type policies", function () {
expect(cache.extract()).toMatchSnapshot();
});
+ describe("custom field policy drop functions", function () {
+ const makeCache = (resolve: () => void) => new InMemoryCache({
+ typePolicies: {
+ Parent: {
+ keyFields: false,
+ fields: {
+ deleteMe: {
+ read(existing, { storage }) {
+ expect(existing).toBe("merged value");
+ expect(storage.cached).toBe(existing);
+ return "read value";
+ },
+ merge(existing, incoming, { storage }) {
+ expect(existing).toBeUndefined();
+ expect(incoming).toBe("initial value");
+ return storage.cached = "merged value";
+ },
+ drop(existing, { storage }) {
+ expect(existing).toBe("merged value");
+ expect(storage.cached).toBe(existing);
+ delete storage.cached;
+ // Finish the test (success).
+ resolve();
+ },
+ },
+ },
+ },
+ },
+ });
+
+ const query = gql`
+ query {
+ parent {
+ deleteMe @client
+ }
+ }
+ `;
+
+ function testWriteAndRead(cache: InMemoryCache) {
+ cache.writeQuery({
+ query,
+ data: {
+ parent: {
+ __typename: "Parent",
+ deleteMe: "initial value",
+ },
+ },
+ });
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ parent: {
+ __typename: "Parent",
+ deleteMe: "merged value",
+ },
+ },
+ });
+
+ expect(cache.readQuery({ query })).toEqual({
+ parent: {
+ __typename: "Parent",
+ deleteMe: "read value",
+ },
+ });
+ }
+
+ itAsync("are called when a parent object is evicted from the cache", resolve => {
+ const cache = makeCache(resolve);
+ testWriteAndRead(cache);
+
+ const evicted = cache.evict({
+ // Note that we're removing Query.parent, not directly removing
+ // Parent.deleteMe, but we still expect the Parent.deleteMe drop
+ // function to be called.
+ fieldName: "parent",
+ });
+ expect(evicted).toBe(true);
+ });
+
+ itAsync("are called when cache.modify causes the parent object to lose fields", resolve => {
+ const cache = makeCache(resolve);
+ testWriteAndRead(cache);
+
+ const modified = cache.modify({
+ fields: {
+ parent(value: StoreObject) {
+ const { deleteMe, ...rest } = value;
+ expect(rest).toEqual({
+ __typename: "Parent",
+ });
+ return rest;
+ },
+ },
+ });
+ expect(modified).toBe(true);
+ });
+
+ itAsync("are called even if cache is cleared/restored", resolve => {
+ const cache = makeCache(resolve);
+ testWriteAndRead(cache);
+
+ const snapshot = cache.extract();
+ cache.reset();
+ expect(cache.extract()).toEqual({});
+ cache.restore(snapshot);
+ expect(cache.extract()).toEqual(snapshot);
+
+ cache.writeQuery({
+ query,
+ overwrite: true,
+ data: {
+ parent: {
+ __typename: "Parent",
+ deleteMe: void 0,
+ },
+ },
+ });
+ });
+
+ itAsync("are called if merge function returns undefined", resolve => {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ ToDoList: {
+ keyFields: [],
+ fields: {
+ tasks: {
+ keyArgs: false,
+
+ merge(existing: number[] | undefined, incoming: number[], { args }) {
+ if (args && args.deleteOnMerge) return;
+ return existing ? [
+ ...existing,
+ ...incoming,
+ ] : incoming;
+ },
+
+ drop(existing) {
+ expect(existing).toEqual([
+ { __ref: 'Task:{"taskID":1}' },
+ { __ref: 'Task:{"taskID":2}' },
+ { __ref: 'Task:{"taskID":3}' },
+ { __ref: 'Task:{"taskID":4}' },
+ ]);
+ // Finish the test (success).
+ resolve();
+ },
+ },
+ },
+ },
+
+ Task: {
+ keyFields: ["taskID"],
+ },
+ },
+ });
+
+ const query = gql`
+ query {
+ todoList {
+ tasks {
+ taskID
+ text
+ }
+ }
+ }
+ `;
+
+ const deleteQuery = gql`
+ query {
+ todoList {
+ tasks(deleteOnMerge: true) {
+ taskID
+ text
+ }
+ }
+ }
+ `;
+
+ const deleteData = {
+ todoList: {
+ __typename: "ToDoList",
+ tasks: [],
+ },
+ };
+
+ // This write will cause the merge function to return undefined, but
+ // since the field is already undefined, the undefined return from the
+ // merge function should not trigger the drop function.
+ cache.writeQuery({
+ query: deleteQuery,
+ data: deleteData,
+ });
+
+ cache.writeQuery({
+ query,
+ data: {
+ todoList: {
+ __typename: "ToDoList",
+ tasks: [
+ { __typename: "Task", taskID: 1, text: "task #1" },
+ { __typename: "Task", taskID: 2, text: "task #2" },
+ ],
+ },
+ },
+ });
+
+ expect(cache.extract()).toMatchSnapshot();
+
+ cache.writeQuery({
+ query,
+ data: {
+ todoList: {
+ __typename: "ToDoList",
+ tasks: [
+ { __typename: "Task", taskID: 3, text: "task #3" },
+ { __typename: "Task", taskID: 4, text: "task #4" },
+ ],
+ },
+ },
+ });
+
+ expect(cache.extract()).toMatchSnapshot();
+
+ // Since the ToDoList.tasks field has data now, this deletion should
+ // trigger the drop function, unlike the last time we used deleteQuery.
+ cache.writeQuery({
+ query: deleteQuery,
+ data: deleteData,
+ });
+ });
+
+ itAsync("are called for fields within garbage collected objects", (resolve, reject) => {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Garbage: {
+ keyFields: ["gid"],
+ fields: {
+ isToxic: {
+ drop(isToxic: boolean, { readField }) {
+ const gid = readField("gid")!;
+ if (expectedToxicities.has(gid)) {
+ expect(expectedToxicities.get(gid)).toBe(isToxic);
+ if (expectedToxicities.delete(gid) &&
+ expectedToxicities.size === 0) {
+ resolve();
+ }
+ } else {
+ reject(`unexpectedly dropped garbage ${gid}`);
+ }
+ },
+ },
+ },
+ },
+ },
+ });
+
+ const expectedToxicities = new Map();
+ expectedToxicities.set(234, true);
+ expectedToxicities.set(456, false);
+
+ const query = gql`
+ query {
+ garbages {
+ gid
+ isToxic
+ }
+ }
+ `;
+
+ cache.writeQuery({
+ query,
+ data: {
+ garbages: [
+ { __typename: "Garbage", gid: 123, isToxic: false },
+ { __typename: "Garbage", gid: 234, isToxic: true },
+ { __typename: "Garbage", gid: 345, isToxic: true },
+ { __typename: "Garbage", gid: 456, isToxic: false },
+ ],
+ },
+ });
+
+ expect(cache.gc()).toEqual([]);
+
+ cache.writeQuery({
+ query,
+ overwrite: true,
+ data: {
+ garbages: [
+ { __typename: "Garbage", gid: 123, isToxic: false },
+ { __typename: "Garbage", gid: 345, isToxic: true },
+ ],
+ },
+ });
+
+ expect(cache.gc().sort()).toEqual([
+ 'Garbage:{"gid":234}',
+ 'Garbage:{"gid":456}',
+ ]);
+ });
+ });
+
it("merge functions can deduplicate items using readField", function () {
const cache = new InMemoryCache({
typePolicies: {
diff --git a/src/cache/inmemory/entityStore.ts b/src/cache/inmemory/entityStore.ts
index 894069fe75a..558a41d7351 100644
--- a/src/cache/inmemory/entityStore.ts
+++ b/src/cache/inmemory/entityStore.ts
@@ -13,8 +13,8 @@ import {
maybeDeepFreeze,
canUseWeakMap,
} from '../../utilities';
-import { NormalizedCache, NormalizedCacheObject } from './types';
-import { hasOwn, fieldNameFromStoreName } from './helpers';
+import { NormalizedCache, NormalizedCacheObject, ReadMergeModifyContext } from './types';
+import { hasOwn, fieldNameFromStoreName, storeValueIsStoreObject } from './helpers';
import { Policies, StorageType } from './policies';
import { Cache } from '../core/types/Cache';
import {
@@ -99,7 +99,7 @@ export abstract class EntityStore implements NormalizedCache {
older: string | StoreObject,
newer: StoreObject | string,
): void {
- let dataId: string | undefined;
+ let dataId: string;
const existing: StoreObject | undefined =
typeof older === "string"
@@ -116,6 +116,7 @@ export abstract class EntityStore implements NormalizedCache {
if (!incoming) return;
invariant(
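+ // dataId is declared as string (above), but TypeScript cannot prove it
+ // is assigned on every code path, so the use-before-assignment error is
+ // suppressed here; this invariant enforces the same check at runtime.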
+ // @ts-ignore
typeof dataId === "string",
"store.merge expects a string ID",
);
@@ -123,10 +124,6 @@ export abstract class EntityStore implements NormalizedCache {
const merged: StoreObject =
new DeepMerger(storeObjectReconciler).merge(existing, incoming);
- // Even if merged === existing, existing may have come from a lower
- // layer, so we always need to set this.data[dataId] on this level.
- this.data[dataId] = merged;
-
if (merged !== existing) {
delete this.refs[dataId];
if (this.group.caching) {
@@ -157,13 +154,6 @@ export abstract class EntityStore implements NormalizedCache {
!this.policies.hasKeyArgs(merged.__typename, fieldName)) {
fieldsToDirty[fieldName] = 1;
}
-
- // If merged[storeFieldName] has become undefined, and this is the
- // Root layer, actually delete the property from the merged object,
- // which is guaranteed to have been created fresh in this method.
- if (merged[storeFieldName] === void 0 && !(this instanceof Layer)) {
- delete merged[storeFieldName];
- }
}
});
@@ -178,9 +168,110 @@ export abstract class EntityStore implements NormalizedCache {
}
Object.keys(fieldsToDirty).forEach(
- fieldName => this.group.dirty(dataId as string, fieldName));
+ fieldName => this.group.dirty(dataId, fieldName));
+ }
+
+ // Make sure we have a (string | number)[] path for every object in the
+ // merged object tree, including non-normalized non-Reference objects that
+ // are embedded/nested within normalized parent objects. The path of such
+ // objects will be an array starting with the string ID of the closest
+ // enclosing entity object, followed by the string and number properties
+ // that lead from the entity to the nested object within it.
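+ // For example, a non-normalized object stored as the value of the parent
+ // field on the ROOT_QUERY entity would receive the path
+ // ["ROOT_QUERY", "parent"] (illustrative).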
+ this.group.assignPaths(dataId, merged);
+
+ if (existing) {
+ // Collect objects and field names removed by this merge, so we can run
+ // drop functions configured for the fields that are about to be removed
+ // (before we finally set this.data[dataId] = merged, below).
+ const drops: [StoreObject, string][] = [];
+ const empty: StoreObject | any[] = Object.create(null);
+ const isLayer = this instanceof Layer;
+ const haveAnyDropFunctions = this.policies.dropCount > 0;
+
+ // This function object is created only if we have any drop functions.
+ const scanOldDataForFieldsToDrop = haveAnyDropFunctions ? (
+ oldVal: StoreValue,
+ newVal: StoreValue | undefined,
+ ) => {
+ if (oldVal === newVal) return;
+
+ if (Array.isArray(oldVal)) {
+ const newArray: any[] =
+ Array.isArray(newVal) ? newVal : empty as any[];
+
+ (oldVal as StoreValue[]).forEach((oldChild, i) => {
+ scanOldDataForFieldsToDrop!(oldChild, newArray[i]);
+ });
+
+ } else if (storeValueIsStoreObject(oldVal)) {
+ const newObject: StoreObject =
+ storeValueIsStoreObject(newVal) ? newVal : empty as StoreObject;
+
+ Object.keys(oldVal).forEach(storeFieldName => {
+ const oldChild = oldVal[storeFieldName];
+ const newChild = newObject[storeFieldName];
+
+ // Visit children before running dropField for oldChild.
+ scanOldDataForFieldsToDrop!(oldChild, newChild);
+
+ if (newChild === void 0) {
+ drops.push([oldVal, storeFieldName]);
+ }
+ });
+ }
+ } : void 0;
+
+ // To detect field removals (in order to run drop functions), we can
+ // restrict our attention to the incoming fields, since those are the
+ // top-level fields that might have changed.
+ Object.keys(incoming).forEach(storeFieldName => {
+ // Although we're using the keys from incoming, we want to compare
+ // existing data to merged data, since the merged data have a much
+ // better chance of being partly === to the existing data, whereas
+ // incoming tends to be all fresh objects.
+ const newFieldValue = merged[storeFieldName];
+
+ // No point scanning the existing data for fields with drop functions
+ // if we happen to know the Policies object has no drop functions.
+ if (haveAnyDropFunctions) {
+ scanOldDataForFieldsToDrop!(
+ existing[storeFieldName],
+ newFieldValue,
+ );
+ }
+
+ if (newFieldValue === void 0) {
+ drops.push([existing, storeFieldName]);
+
+ // If merged[storeFieldName] has become undefined, and this is the
+ // Root layer, actually delete the property from the merged object,
+ // which is guaranteed to have been created fresh in store.merge.
+ if (hasOwn.call(merged, storeFieldName) &&
+ merged[storeFieldName] === void 0 &&
+ !isLayer) {
+ delete merged[storeFieldName];
+ }
+ }
+ });
+
+ if (haveAnyDropFunctions && drops.length) {
+ const context: ReadMergeModifyContext = { store: this };
+
+ drops.forEach(([storeObject, storeFieldName]) => {
+ this.policies.dropField(
+ storeObject.__typename,
+ storeObject,
+ storeFieldName,
+ context,
+ );
+ });
+ }
}
}
+
+ // Even if merged === existing, existing may have come from a lower
+ // layer, so we always need to set this.data[dataId] on this level.
+ this.data[dataId] = merged;
}
public modify(
@@ -225,7 +316,10 @@ export abstract class EntityStore implements NormalizedCache {
...sharedDetails,
fieldName,
storeFieldName,
- storage: this.getStorage(dataId, storeFieldName),
+ storage: this.group.getStorage(
+ makeReference(dataId),
+ storeFieldName,
+ ),
});
if (newValue === INVALIDATE) {
this.group.dirty(dataId, storeFieldName);
@@ -352,11 +446,6 @@ export abstract class EntityStore implements NormalizedCache {
return this;
}
- public abstract getStorage(
- idOrObj: string | StoreObject,
- ...storeFieldNames: (string | number)[]
- ): StorageType;
-
// Maps root entity IDs to the number of times they have been retained, minus
// the number of times they have been released. Retained entities keep other
// entities they reference (even indirectly) from being garbage collected.
@@ -449,7 +538,8 @@ export abstract class EntityStore implements NormalizedCache {
// Used to compute cache keys specific to this.group.
public makeCacheKey(...args: any[]): object;
public makeCacheKey() {
- return this.group.keyMaker.lookupArray(arguments);
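+ // lookupArray returns the trie node shared by every equivalent list of
+ // arguments, so lazily attaching a cacheKey object to that node gives
+ // all equivalent argument lists the same (===) cache key.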
+ const found = this.group.keyMaker.lookupArray(arguments);
+ return found.cacheKey || (found.cacheKey = Object.create(null));
}
// Bound function that can be passed around to provide easy access to fields
@@ -549,9 +639,98 @@ class CacheGroup {
}
}
+ // This WeakMap maps every non-normalized object reference contained by the
+ // store to the path of that object within the enclosing entity object. This
+ // information is collected by the assignPaths method after every store.merge,
+ // so store.data should never contain any un-pathed objects. Because these
+ // objects are treated as immutable from here on, they should never move
+ // around in a way that invalidates their paths. This path
+ // information is useful in the getStorage method, below.
+ private paths = new WeakMap