diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9d74dff42da..ed6335b7b9f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -31,6 +31,9 @@
 - `InMemoryCache` now _guarantees_ that any two result objects returned by the cache (from `readQuery`, `readFragment`, etc.) will be referentially equal (`===`) if they are deeply equal. Previously, `===` equality was often achievable for results for the same query, on a best-effort basis. Now, equivalent result objects will be automatically shared among the result trees of completely different queries. This guarantee is important for taking full advantage of optimistic updates that correctly guess the final data, and for "pure" UI components that can skip re-rendering when their input data are unchanged.
   [@benjamn](https://github.com/benjamn) in [#7439](https://github.com/apollographql/apollo-client/pull/7439)
+- In addition to `read` and `merge` functions, `InMemoryCache` field policies may now configure a `drop` function, which will be called just before the field in question is removed from the cache, facilitating any final field cleanup that may be needed by `read` and `merge`.
+  [@benjamn](https://github.com/benjamn) in [#8078](https://github.com/apollographql/apollo-client/pull/8078)
+
 - `InMemoryCache` supports a new method called `batch`, which is similar to `performTransaction` but takes named options rather than positional parameters. One of these named options is an `onDirty(watch, diff)` callback, which can be used to determine which watched queries were invalidated by the `batch` operation.
   [@benjamn](https://github.com/benjamn) in [#7819](https://github.com/apollographql/apollo-client/pull/7819)
diff --git a/src/cache/inmemory/__tests__/__snapshots__/policies.ts.snap b/src/cache/inmemory/__tests__/__snapshots__/policies.ts.snap
index 296372a0cd1..77a4add9107 100644
--- a/src/cache/inmemory/__tests__/__snapshots__/policies.ts.snap
+++ b/src/cache/inmemory/__tests__/__snapshots__/policies.ts.snap
@@ -1211,6 +1211,86 @@ Object {
 }
 `;
 
+exports[`type policies field policies custom field policy drop functions are called if merge function returns undefined 1`] = `
+Object {
+  "ROOT_QUERY": Object {
+    "__typename": "Query",
+    "todoList": Object {
+      "__ref": "ToDoList:{}",
+    },
+  },
+  "Task:{\\"taskID\\":1}": Object {
+    "__typename": "Task",
+    "taskID": 1,
+    "text": "task #1",
+  },
+  "Task:{\\"taskID\\":2}": Object {
+    "__typename": "Task",
+    "taskID": 2,
+    "text": "task #2",
+  },
+  "ToDoList:{}": Object {
+    "__typename": "ToDoList",
+    "tasks": Array [
+      Object {
+        "__ref": "Task:{\\"taskID\\":1}",
+      },
+      Object {
+        "__ref": "Task:{\\"taskID\\":2}",
+      },
+    ],
+  },
+}
+`;
+
+exports[`type policies field policies custom field policy drop functions are called if merge function returns undefined 2`] = `
+Object {
+  "ROOT_QUERY": Object {
+    "__typename": "Query",
+    "todoList": Object {
+      "__ref": "ToDoList:{}",
+    },
+  },
+  "Task:{\\"taskID\\":1}": Object {
+    "__typename": "Task",
+    "taskID": 1,
+    "text": "task #1",
+  },
+  "Task:{\\"taskID\\":2}": Object {
+    "__typename": "Task",
+    "taskID": 2,
+    "text": "task #2",
+  },
+  "Task:{\\"taskID\\":3}": Object {
+    "__typename": "Task",
+    "taskID": 3,
+    "text": "task #3",
+  },
+  "Task:{\\"taskID\\":4}": Object {
+    "__typename": "Task",
+    "taskID": 4,
+    "text": "task #4",
+  },
+  "ToDoList:{}": Object {
+    "__typename": "ToDoList",
+    "tasks": Array [
+      Object {
+        "__ref": "Task:{\\"taskID\\":1}",
+      },
+      Object {
+        "__ref": "Task:{\\"taskID\\":2}",
+      },
+      Object {
+        "__ref": "Task:{\\"taskID\\":3}",
+      },
+      Object {
+        "__ref": "Task:{\\"taskID\\":4}",
+      },
+    ],
+  },
+}
+`;
+
 exports[`type policies field policies read, merge, and modify functions can access options.storage 1`] = `
 Object {
   "ROOT_QUERY": Object {
diff --git a/src/cache/inmemory/__tests__/policies.ts b/src/cache/inmemory/__tests__/policies.ts
index 0c7267553a2..665f1ec337a 100644
--- a/src/cache/inmemory/__tests__/policies.ts
+++ b/src/cache/inmemory/__tests__/policies.ts
@@ -1705,6 +1705,308 @@ describe("type policies", function () {
     expect(cache.extract()).toMatchSnapshot();
   });
 
+  describe("custom field policy drop functions", function () {
+    const makeCache = (resolve: () => void) => new InMemoryCache({
+      typePolicies: {
+        Parent: {
+          keyFields: false,
+          fields: {
+            deleteMe: {
+              read(existing, { storage }) {
+                expect(existing).toBe("merged value");
+                expect(storage.cached).toBe(existing);
+                return "read value";
+              },
+              merge(existing, incoming, { storage }) {
+                expect(existing).toBeUndefined();
+                expect(incoming).toBe("initial value");
+                return storage.cached = "merged value";
+              },
+              drop(existing, { storage }) {
+                expect(existing).toBe("merged value");
+                expect(storage.cached).toBe(existing);
+                delete storage.cached;
+                // Finish the test (success).
+                resolve();
+              },
+            },
+          },
+        },
+      },
+    });
+
+    const query = gql`
+      query {
+        parent {
+          deleteMe @client
+        }
+      }
+    `;
+
+    function testWriteAndRead(cache: InMemoryCache) {
+      cache.writeQuery({
+        query,
+        data: {
+          parent: {
+            __typename: "Parent",
+            deleteMe: "initial value",
+          },
+        },
+      });
+
+      expect(cache.extract()).toEqual({
+        ROOT_QUERY: {
+          __typename: "Query",
+          parent: {
+            __typename: "Parent",
+            deleteMe: "merged value",
+          },
+        },
+      });
+
+      expect(cache.readQuery({ query })).toEqual({
+        parent: {
+          __typename: "Parent",
+          deleteMe: "read value",
+        },
+      });
+    }
+
+    itAsync("are called when a parent object is evicted from the cache", resolve => {
+      const cache = makeCache(resolve);
+      testWriteAndRead(cache);
+
+      const evicted = cache.evict({
+        // Note that we're removing Query.parent, not directly removing
+        // Parent.deleteMe, but we still expect the Parent.deleteMe drop
+        // function to be called.
+        fieldName: "parent",
+      });
+      expect(evicted).toBe(true);
+    });
+
+    itAsync("are called when cache.modify causes the parent object to lose fields", resolve => {
+      const cache = makeCache(resolve);
+      testWriteAndRead(cache);
+
+      const modified = cache.modify({
+        fields: {
+          parent(value: StoreObject) {
+            const { deleteMe, ...rest } = value;
+            expect(rest).toEqual({
+              __typename: "Parent",
+            });
+            return rest;
+          },
+        },
+      });
+      expect(modified).toBe(true);
+    });
+
+    itAsync("are called even if cache is cleared/restored", resolve => {
+      const cache = makeCache(resolve);
+      testWriteAndRead(cache);
+
+      const snapshot = cache.extract();
+      cache.reset();
+      expect(cache.extract()).toEqual({});
+      cache.restore(snapshot);
+      expect(cache.extract()).toEqual(snapshot);
+
+      cache.writeQuery({
+        query,
+        overwrite: true,
+        data: {
+          parent: {
+            __typename: "Parent",
+            deleteMe: void 0,
+          },
+        },
+      });
+    });
+
+    itAsync("are called if merge function returns undefined", resolve => {
+      const cache = new InMemoryCache({
+        typePolicies: {
+          ToDoList: {
+            keyFields: [],
+            fields: {
+              tasks: {
+                keyArgs: false,
+
+                merge(existing: number[] | undefined, incoming: number[], { args }) {
+                  if (args && args.deleteOnMerge) return;
+                  return existing ? [
+                    ...existing,
+                    ...incoming,
+                  ] : incoming;
+                },
+
+                drop(existing) {
+                  expect(existing).toEqual([
+                    { __ref: 'Task:{"taskID":1}' },
+                    { __ref: 'Task:{"taskID":2}' },
+                    { __ref: 'Task:{"taskID":3}' },
+                    { __ref: 'Task:{"taskID":4}' },
+                  ]);
+                  // Finish the test (success).
+                  resolve();
+                },
+              },
+            },
+          },
+
+          Task: {
+            keyFields: ["taskID"],
+          },
+        },
+      });
+
+      const query = gql`
+        query {
+          todoList {
+            tasks {
+              taskID
+              text
+            }
+          }
+        }
+      `;
+
+      const deleteQuery = gql`
+        query {
+          todoList {
+            tasks(deleteOnMerge: true) {
+              taskID
+              text
+            }
+          }
+        }
+      `;
+
+      const deleteData = {
+        todoList: {
+          __typename: "ToDoList",
+          tasks: [],
+        },
+      };
+
+      // This write will cause the merge function to return undefined, but
+      // since the field is already undefined, the undefined return from the
+      // merge function should not trigger the drop function.
+      cache.writeQuery({
+        query: deleteQuery,
+        data: deleteData,
+      });
+
+      cache.writeQuery({
+        query,
+        data: {
+          todoList: {
+            __typename: "ToDoList",
+            tasks: [
+              { __typename: "Task", taskID: 1, text: "task #1" },
+              { __typename: "Task", taskID: 2, text: "task #2" },
+            ],
+          },
+        },
+      });
+
+      expect(cache.extract()).toMatchSnapshot();
+
+      cache.writeQuery({
+        query,
+        data: {
+          todoList: {
+            __typename: "ToDoList",
+            tasks: [
+              { __typename: "Task", taskID: 3, text: "task #3" },
+              { __typename: "Task", taskID: 4, text: "task #4" },
+            ],
+          },
+        },
+      });
+
+      expect(cache.extract()).toMatchSnapshot();
+
+      // Since the ToDoList.tasks field has data now, this deletion should
+      // trigger the drop function, unlike the last time we used deleteQuery.
+      cache.writeQuery({
+        query: deleteQuery,
+        data: deleteData,
+      });
+    });
+
+    itAsync("are called for fields within garbage collected objects", (resolve, reject) => {
+      const cache = new InMemoryCache({
+        typePolicies: {
+          Garbage: {
+            keyFields: ["gid"],
+            fields: {
+              isToxic: {
+                drop(isToxic: boolean, { readField }) {
+                  const gid = readField("gid")!;
+                  if (expectedToxicities.has(gid)) {
+                    expect(expectedToxicities.get(gid)).toBe(isToxic);
+                    if (expectedToxicities.delete(gid) &&
+                        expectedToxicities.size === 0) {
+                      resolve();
+                    }
+                  } else {
+                    reject(`unexpectedly dropped garbage ${gid}`);
+                  }
+                },
+              },
+            },
+          },
+        },
+      });
+
+      const expectedToxicities = new Map();
+      expectedToxicities.set(234, true);
+      expectedToxicities.set(456, false);
+
+      const query = gql`
+        query {
+          garbages {
+            gid
+            isToxic
+          }
+        }
+      `;
+
+      cache.writeQuery({
+        query,
+        data: {
+          garbages: [
+            { __typename: "Garbage", gid: 123, isToxic: false },
+            { __typename: "Garbage", gid: 234, isToxic: true },
+            { __typename: "Garbage", gid: 345, isToxic: true },
+            { __typename: "Garbage", gid: 456, isToxic: false },
+          ],
+        },
+      });
+
+      expect(cache.gc()).toEqual([]);
+
+      cache.writeQuery({
+        query,
+        overwrite: true,
+        data: {
+          garbages: [
+            { __typename: "Garbage", gid: 123, isToxic: false },
+            { __typename: "Garbage", gid: 345, isToxic: true },
+          ],
+        },
+      });
+
+      expect(cache.gc().sort()).toEqual([
+        'Garbage:{"gid":234}',
+        'Garbage:{"gid":456}',
+      ]);
+    });
+  });
+
   it("merge functions can deduplicate items using readField", function () {
     const cache = new InMemoryCache({
       typePolicies: {
diff --git a/src/cache/inmemory/entityStore.ts b/src/cache/inmemory/entityStore.ts
index 894069fe75a..558a41d7351 100644
--- a/src/cache/inmemory/entityStore.ts
+++ b/src/cache/inmemory/entityStore.ts
@@ -13,8 +13,8 @@ import {
   maybeDeepFreeze,
   canUseWeakMap,
 } from '../../utilities';
-import { NormalizedCache, NormalizedCacheObject } from './types';
-import { hasOwn, fieldNameFromStoreName } from './helpers';
+import { NormalizedCache, NormalizedCacheObject, ReadMergeModifyContext } from './types';
+import { hasOwn, fieldNameFromStoreName, storeValueIsStoreObject } from './helpers';
 import { Policies, StorageType } from './policies';
 import { Cache } from '../core/types/Cache';
 import {
@@ -99,7 +99,7 @@
     older: string | StoreObject,
     newer: StoreObject | string,
   ): void {
-    let dataId: string | undefined;
+    let dataId: string;
 
     const existing: StoreObject | undefined =
       typeof older === "string"
@@ -116,6 +116,7 @@
     if (!incoming) return;
 
     invariant(
+      // @ts-ignore
       typeof dataId === "string",
       "store.merge expects a string ID",
     );
@@ -123,10 +124,6 @@ export abstract class EntityStore implements NormalizedCache {
     const merged: StoreObject =
      new DeepMerger(storeObjectReconciler).merge(existing, incoming);
 
-    // Even if merged === existing, existing may have come from a lower
-    // layer, so we always need to set this.data[dataId] on this level.
-    this.data[dataId] = merged;
-
     if (merged !== existing) {
       delete this.refs[dataId];
       if (this.group.caching) {
@@ -157,13 +154,6 @@ export abstract class EntityStore implements NormalizedCache {
             !this.policies.hasKeyArgs(merged.__typename, fieldName)) {
           fieldsToDirty[fieldName] = 1;
         }
-
-        // If merged[storeFieldName] has become undefined, and this is the
-        // Root layer, actually delete the property from the merged object,
-        // which is guaranteed to have been created fresh in this method.
-        if (merged[storeFieldName] === void 0 && !(this instanceof Layer)) {
-          delete merged[storeFieldName];
-        }
       }
     });
@@ -178,9 +168,110 @@ export abstract class EntityStore implements NormalizedCache {
       }
 
       Object.keys(fieldsToDirty).forEach(
-        fieldName => this.group.dirty(dataId as string, fieldName));
+        fieldName => this.group.dirty(dataId, fieldName));
+      }
+
+      // Make sure we have a (string | number)[] path for every object in the
+      // merged object tree, including non-normalized non-Reference objects that
+      // are embedded/nested within normalized parent objects. The path of such
+      // objects will be an array starting with the string ID of the closest
+      // enclosing entity object, followed by the string and number properties
+      // that lead from the entity to the nested object within it.
+      this.group.assignPaths(dataId, merged);
+
+      if (existing) {
+        // Collect objects and field names removed by this merge, so we can run
+        // drop functions configured for the fields that are about to be removed
+        // (before we finally set this.data[dataId] = merged, below).
+        const drops: [StoreObject, string][] = [];
+        const empty: StoreObject | any[] = Object.create(null);
+        const isLayer = this instanceof Layer;
+        const haveAnyDropFunctions = this.policies.dropCount > 0;
+
+        // This function object is created only if we have any drop functions.
+        const scanOldDataForFieldsToDrop = haveAnyDropFunctions ? (
+          oldVal: StoreValue,
+          newVal: StoreValue | undefined,
+        ) => {
+          if (oldVal === newVal) return;
+
+          if (Array.isArray(oldVal)) {
+            const newArray: any[] =
+              Array.isArray(newVal) ? newVal : empty as any[];
+
+            (oldVal as StoreValue[]).forEach((oldChild, i) => {
+              scanOldDataForFieldsToDrop!(oldChild, newArray[i]);
+            });
+
+          } else if (storeValueIsStoreObject(oldVal)) {
+            const newObject: StoreObject =
+              storeValueIsStoreObject(newVal) ? newVal : empty as StoreObject;
+
+            Object.keys(oldVal).forEach(storeFieldName => {
+              const oldChild = oldVal[storeFieldName];
+              const newChild = newObject[storeFieldName];
+
+              // Visit children before running dropField for oldChild.
+              scanOldDataForFieldsToDrop!(oldChild, newChild);
+
+              if (newChild === void 0) {
+                drops.push([oldVal, storeFieldName]);
+              }
+            });
+          }
+        } : void 0;
+
+        // To detect field removals (in order to run drop functions), we can
+        // restrict our attention to the incoming fields, since those are the
+        // top-level fields that might have changed.
+        Object.keys(incoming).forEach(storeFieldName => {
+          // Although we're using the keys from incoming, we want to compare
+          // existing data to merged data, since the merged data have a much
+          // better chance of being partly === to the existing data, whereas
+          // incoming tends to be all fresh objects.
+          const newFieldValue = merged[storeFieldName];
+
+          // No point scanning the existing data for fields with drop functions
+          // if we happen to know the Policies object has no drop functions.
+          if (haveAnyDropFunctions) {
+            scanOldDataForFieldsToDrop!(
+              existing[storeFieldName],
+              newFieldValue,
+            );
+          }
+
+          if (newFieldValue === void 0) {
+            drops.push([existing, storeFieldName]);
+
+            // If merged[storeFieldName] has become undefined, and this is the
+            // Root layer, actually delete the property from the merged object,
+            // which is guaranteed to have been created fresh in store.merge.
+            if (hasOwn.call(merged, storeFieldName) &&
+                merged[storeFieldName] === void 0 &&
+                !isLayer) {
+              delete merged[storeFieldName];
+            }
+          }
+        });
+
+        if (haveAnyDropFunctions && drops.length) {
+          const context: ReadMergeModifyContext = { store: this };
+
+          drops.forEach(([storeObject, storeFieldName]) => {
+            this.policies.dropField(
+              storeObject.__typename,
+              storeObject,
+              storeFieldName,
+              context,
+            );
+          });
+        }
+      }
     }
+
+    // Even if merged === existing, existing may have come from a lower
+    // layer, so we always need to set this.data[dataId] on this level.
+    this.data[dataId] = merged;
   }
 
   public modify(
@@ -225,7 +316,10 @@ export abstract class EntityStore implements NormalizedCache {
             ...sharedDetails,
             fieldName,
             storeFieldName,
-            storage: this.getStorage(dataId, storeFieldName),
+            storage: this.group.getStorage(
+              makeReference(dataId),
+              storeFieldName,
+            ),
           });
           if (newValue === INVALIDATE) {
             this.group.dirty(dataId, storeFieldName);
@@ -352,11 +446,6 @@ export abstract class EntityStore implements NormalizedCache {
     return this;
   }
 
-  public abstract getStorage(
-    idOrObj: string | StoreObject,
-    ...storeFieldNames: (string | number)[]
-  ): StorageType;
-
   // Maps root entity IDs to the number of times they have been retained, minus
   // the number of times they have been released. Retained entities keep other
   // entities they reference (even indirectly) from being garbage collected.
@@ -449,7 +538,8 @@
   // Used to compute cache keys specific to this.group.
   public makeCacheKey(...args: any[]): object;
   public makeCacheKey() {
-    return this.group.keyMaker.lookupArray(arguments);
+    const found = this.group.keyMaker.lookupArray(arguments);
+    return found.cacheKey || (found.cacheKey = Object.create(null));
   }
 
   // Bound function that can be passed around to provide easy access to fields
@@ -549,9 +639,98 @@
     }
   }
 
+  // This WeakMap maps every non-normalized object reference contained by the
+  // store to the path of that object within the enclosing entity object. This
+  // information is collected by the assignPaths method after every store.merge,
+  // so store.data should never contain any un-pathed objects. As a reminder,
+  // these object references are handled immutably from here on, so the objects
+  // should not move around in a way that invalidates these paths. This path
+  // information is useful in the getStorage method, below.
+  private paths = new WeakMap();
+  private pending = new Map();
+
+  public assignPaths(dataId: string, merged: StoreObject) {
+    // Since path assignment can be expensive, this method merely registers the
+    // pending assignment and returns immediately. The private assign method
+    // will be called with these arguments only if needed by getStorage.
+    this.pending.set(dataId, merged);
+  }
+
+  private assign(dataId: string, merged: StoreObject) {
+    const paths = this.paths;
+    const path: (string | number)[] = [dataId];
+
+    function assign(this: void, obj: StoreValue) {
+      if (Array.isArray(obj)) {
+        obj.forEach(handleChild);
+      } else if (storeValueIsStoreObject(obj) && !paths.has(obj)) {
+        Object.keys(obj).forEach(handleObjectProperty, obj);
+      }
+    }
+
+    function handleObjectProperty(this: StoreObject, storeFieldName: string) {
+      const child = this[storeFieldName];
+      handleChild(child, storeFieldName);
+    }
+
+    function handleChild(child: StoreValue, key: string | number) {
+      if (storeValueIsStoreObject(child)) {
+        if (paths.has(child)) return;
+        paths.set(child, path.concat(key));
+      }
+      try {
+        path.push(key);
+        assign(child);
+      } finally {
+        invariant(path.pop() === key);
+      }
+    }
+
+    assign(merged);
+  }
+
+  public getStorage(
+    parentObjOrRef: StoreObject | Reference,
+    ...pathSuffix: (string | number)[]
+  ) {
+    const path: any[] = [];
+    const push = (key: StoreObject | string | number) => path.push(key);
+
+    if (isReference(parentObjOrRef)) {
+      push(parentObjOrRef.__ref);
+    } else while (true) {
+      // See assignPaths to understand how this map is populated.
+      const assignedPath = this.paths.get(parentObjOrRef);
+      if (assignedPath) {
+        assignedPath.forEach(push);
+        break;
+      }
+      if (this.pending.size) {
+        // If we didn't find a path for this StoreObject, but we have some
+        // pending path assignments to do, do those assignments and try again.
+        this.pending.forEach((object, dataId) => this.assign(dataId, object));
+        this.pending.clear();
+      } else {
+        // If we can't find a path for this object, use the object reference
+        // itself as a key.
+        push(parentObjOrRef);
+        break;
+      }
+    }
+
+    // Append the provided suffix to the path array.
+    pathSuffix.forEach(push);
+
+    const found = this.keyMaker.lookupArray(path);
+    return found.storage || (found.storage = Object.create(null));
+  }
+
   // Used by the EntityStore#makeCacheKey method to compute cache keys
   // specific to this CacheGroup.
-  public readonly keyMaker = new Trie(canUseWeakMap);
+  public readonly keyMaker = new Trie<{
+    cacheKey?: object;
+    storage?: StorageType;
+  }>(canUseWeakMap);
 }
 
 function makeDepKey(dataId: string, storeFieldName: string) {
@@ -593,11 +772,6 @@ export namespace EntityStore {
       // Never remove the root layer.
       return this;
     }
-
-    public readonly storageTrie = new Trie(canUseWeakMap);
-    public getStorage(): StorageType {
-      return this.storageTrie.lookupArray(arguments);
-    }
   }
 }
@@ -662,12 +836,6 @@ class Layer extends EntityStore {
       ...super.findChildRefIds(dataId),
     } : fromParent;
   }
-
-  public getStorage(): StorageType {
-    let p: EntityStore = this.parent;
-    while ((p as Layer).parent) p = (p as Layer).parent;
-    return p.getStorage.apply(p, arguments);
-  }
 }
 
 // Represents a Layer permanently installed just above the Root, which allows
@@ -702,10 +870,11 @@ class Stump extends Layer {
 function storeObjectReconciler(
   existingObject: StoreObject,
   incomingObject: StoreObject,
-  property: string,
+  storeFieldName: string,
 ): StoreValue {
-  const existingValue = existingObject[property];
-  const incomingValue = incomingObject[property];
+  const existingValue = existingObject[storeFieldName];
+  const incomingValue = incomingObject[storeFieldName];
+
   // Wherever there is a key collision, prefer the incoming value, unless
   // it is deeply equal to the existing value. It's worth checking deep
   // equality here (even though blindly returning incoming would be
diff --git a/src/cache/inmemory/policies.ts b/src/cache/inmemory/policies.ts
index f0a92829522..3df85b6b94f 100644
--- a/src/cache/inmemory/policies.ts
+++ b/src/cache/inmemory/policies.ts
@@ -47,6 +47,7 @@ import {
   CanReadFunction,
 } from '../core/types/common';
 import { WriteContext } from './writeToStore';
+import { EntityStore } from './entityStore';
 
 export type TypePolicies = {
   [__typename: string]: TypePolicy;
@@ -131,6 +132,7 @@ export type FieldPolicy<
   keyArgs?: KeySpecifier | KeyArgsFunction | false;
   read?: FieldReadFunction<TExisting, TReadResult>;
   merge?: FieldMergeFunction<TExisting, TIncoming> | boolean;
+  drop?: FieldDropFunction<TExisting>;
 };
 
 export type StorageType = Record<string, any>;
@@ -221,6 +223,11 @@ export type FieldMergeFunction<
   options: FieldFunctionOptions,
 ) => SafeReadonly<TExisting>;
 
+export type FieldDropFunction<TExisting = any> = (
+  existing: SafeReadonly<TExisting> | undefined,
+  options: FieldFunctionOptions,
+) => void;
+
 export const defaultDataIdFromObject = (
   { __typename, id, _id }: Readonly<StoreObject>,
   context?: KeyFieldsContext,
@@ -256,6 +263,9 @@
 export type PossibleTypesMap = {
   [supertype: string]: string[];
 };
 
+type InternalTypePolicy = Policies["typePolicies"][string];
+type InternalFieldPolicy = InternalTypePolicy["fields"][string];
+
 export class Policies {
   private typePolicies: {
     [__typename: string]: {
@@ -266,6 +276,7 @@ export class Policies {
         keyFn?: KeyArgsFunction;
         read?: FieldReadFunction<any>;
         merge?: FieldMergeFunction<any>;
+        drop?: FieldDropFunction<any>;
       };
     };
   };
@@ -441,7 +452,7 @@ export class Policies {
       if (typeof incoming === "function") {
        existing.read = incoming;
       } else {
-        const { keyArgs, read, merge } = incoming;
+        const { keyArgs, read, merge, drop } = incoming;
 
         existing.keyFn =
           // Pass false to disable argument-based differentiation of
@@ -460,6 +471,11 @@ export class Policies {
         }
 
         setMerge(existing, merge);
+
+        if (typeof drop === "function") {
+          existing.drop = drop;
+          ++(this as { dropCount: number }).dropCount;
+        }
       }
 
       if (existing.read && existing.merge) {
@@ -511,7 +527,7 @@ export class Policies {
     });
   }
 
-  private getTypePolicy(typename: string): Policies["typePolicies"][string] {
+  private getTypePolicy(typename: string): InternalTypePolicy {
     if (!hasOwn.call(this.typePolicies, typename)) {
       const policy: Policies["typePolicies"][string] =
         this.typePolicies[typename] = Object.create(null);
@@ -560,11 +576,7 @@ export class Policies {
     typename: string | undefined,
     fieldName: string,
     createIfMissing: boolean,
-  ): {
-    keyFn?: KeyArgsFunction;
-    read?: FieldReadFunction<any>;
-    merge?: FieldMergeFunction<any>;
-  } | undefined {
+  ): InternalFieldPolicy | undefined {
     if (typename) {
       const fieldPolicies = this.getTypePolicy(typename).fields;
       return fieldPolicies[fieldName] || (
@@ -753,12 +765,8 @@ export class Policies {
           objectOrReference,
           options,
           context,
-          context.store.getStorage(
-            isReference(objectOrReference)
-              ? objectOrReference.__ref
-              : objectOrReference,
-            storeFieldName,
-          ),
+          (context.store as EntityStore).group
+            .getStorage(objectOrReference, storeFieldName),
         );
 
         // Call read(existing, readOptions) with cacheSlot holding this.cache.
@@ -772,20 +780,45 @@
     return existing;
   }
 
+  public readonly dropCount = 0;
+
+  public dropField(
+    typename: string | undefined,
+    objectOrReference: StoreObject | Reference,
+    storeFieldName: string,
+    context: ReadMergeModifyContext,
+  ) {
+    if (this.dropCount) {
+      const fieldName = fieldNameFromStoreName(storeFieldName);
+      const policy = this.getFieldPolicy(typename, fieldName, false);
+      const drop = policy && policy.drop;
+      if (drop) {
+        drop(
+          context.store.getFieldValue(objectOrReference, storeFieldName),
+          // TODO Consolidate this code with similar code in readField?
+          makeFieldFunctionOptions(
+            this,
+            objectOrReference,
+            { typename, fieldName },
+            context,
+            (context.store as EntityStore).group
+              .getStorage(objectOrReference, storeFieldName),
+          ),
+        );
+      }
+    }
+  }
+
   public getMergeFunction(
     parentTypename: string | undefined,
     fieldName: string,
     childTypename: string | undefined,
   ): FieldMergeFunction | undefined {
-    let policy:
-      | Policies["typePolicies"][string]
-      | Policies["typePolicies"][string]["fields"][string]
-      | undefined =
-      this.getFieldPolicy(parentTypename, fieldName, false);
-    let merge = policy && policy.merge;
+    const fieldPolicy = this.getFieldPolicy(parentTypename, fieldName, false);
+    let merge = fieldPolicy && fieldPolicy.merge;
     if (!merge && childTypename) {
-      policy = this.getTypePolicy(childTypename);
-      merge = policy && policy.merge;
+      const typePolicy = this.getTypePolicy(childTypename);
+      merge = typePolicy && typePolicy.merge;
     }
     return merge;
   }
diff --git a/src/cache/inmemory/types.ts b/src/cache/inmemory/types.ts
index 13959a0c3c2..e1584ac2b79 100644
--- a/src/cache/inmemory/types.ts
+++ b/src/cache/inmemory/types.ts
@@ -7,7 +7,7 @@ import {
   Reference,
 } from '../../utilities';
 import { FieldValueGetter } from './entityStore';
-import { KeyFieldsFunction, StorageType, FieldMergeFunction } from './policies';
+import { KeyFieldsFunction, FieldMergeFunction } from './policies';
 import {
   Modifier,
   Modifiers,
@@ -68,11 +68,6 @@ export interface NormalizedCache {
   getFieldValue: FieldValueGetter;
   toReference: ToReferenceFunction;
   canRead: CanReadFunction;
-
-  getStorage(
-    idOrObj: string | StoreObject,
-    ...storeFieldNames: (string | number)[]
-  ): StorageType;
 }
 
 /**
diff --git a/src/cache/inmemory/writeToStore.ts b/src/cache/inmemory/writeToStore.ts
index 091e47884db..22dbe9e55d9 100644
--- a/src/cache/inmemory/writeToStore.ts
+++ b/src/cache/inmemory/writeToStore.ts
@@ -24,11 +24,13 @@ import {
 
 import { NormalizedCache, ReadMergeModifyContext, MergeTree } from './types';
 import { makeProcessedFieldsMerger, fieldNameFromStoreName, storeValueIsStoreObject } from './helpers';
+import { EntityStore } from './entityStore';
 import { StoreReader } from './readFromStore';
 import { InMemoryCache } from './inMemoryCache';
-import { EntityStore } from './entityStore';
 import { Cache } from '../../core';
 
+type GetStorageParams = Parameters<EntityStore["group"]["getStorage"]>;
+
 export interface WriteContext extends ReadMergeModifyContext {
   readonly written: {
     [dataId: string]: SelectionSetNode[];
@@ -343,7 +345,7 @@
     existing: StoreValue,
     incoming: T,
     context: WriteContext,
-    getStorageArgs?: Parameters<EntityStore["getStorage"]>,
+    getStorageArgs?: GetStorageParams,
   ): T {
     if (mergeTree.map.size && !isReference(incoming)) {
       const e: StoreObject | Reference | undefined = (
@@ -367,7 +369,7 @@
       // sequence of storeFieldName strings/numbers identifying the nested
       // field name path of each field value to be merged.
       if (e && !getStorageArgs) {
-        getStorageArgs = [isReference(e) ? e.__ref : e];
+        getStorageArgs = [e];
       }
 
       // It's possible that applying merge functions to this subtree will
@@ -423,7 +425,9 @@
         incoming,
         mergeTree.info,
         context,
-        getStorageArgs && context.store.getStorage(...getStorageArgs),
+        getStorageArgs && (
+          context.store as EntityStore
+        ).group.getStorage(...getStorageArgs),
       );
     }
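A few usage sketches follow, to make the behavior changes in this patch concrete. First, the result-identity guarantee from the opening CHANGELOG entry: deeply-equal results read from the cache should now be shared by reference, even across different queries. The Book/Author schema and queries here are hypothetical:

```ts
import { gql, InMemoryCache } from "@apollo/client";

const cache = new InMemoryCache();

// Two different queries that overlap on the book field.
const BOOK_QUERY = gql`query { book { __typename isbn title } }`;
const BOOK_AND_AUTHOR_QUERY = gql`
  query {
    book { __typename isbn title }
    author { __typename name }
  }
`;

cache.writeQuery({
  query: BOOK_AND_AUTHOR_QUERY,
  data: {
    book: { __typename: "Book", isbn: "9781", title: "Patches" },
    author: { __typename: "Author", name: "Ben" },
  },
});

const r1: any = cache.readQuery({ query: BOOK_QUERY });
const r2: any = cache.readQuery({ query: BOOK_AND_AUTHOR_QUERY });

// Deeply equal subtrees are shared by reference across the result trees of
// completely different queries, per the guarantee described above.
console.log(r1.book === r2.book); // true
```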
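Next, the new `drop` function itself. Mirroring the `storage.cached` pattern in the tests above, a field policy can stash derived state in `options.storage` and rely on `drop` for final cleanup when the field leaves the cache. This is a minimal sketch; the `Ruler.centimeters` field and its derived `storage.inches` value are made up for illustration:

```ts
import { InMemoryCache } from "@apollo/client";

const cache = new InMemoryCache({
  typePolicies: {
    Ruler: {
      fields: {
        centimeters: {
          read(existing: number | undefined, { storage }) {
            // Derived state lives in options.storage, shared with merge/drop.
            if (existing !== void 0 && storage.inches === void 0) {
              storage.inches = existing / 2.54;
            }
            return existing;
          },
          merge(_existing, incoming: number, { storage }) {
            delete storage.inches; // derived value is stale after any write
            return incoming;
          },
          drop(_existing, { storage }) {
            // Called just before Ruler.centimeters is removed from the cache
            // (eviction, a modify that deletes the field, or a merge that
            // returns undefined), so nothing lingers in storage.
            delete storage.inches;
          },
        },
      },
    },
  },
});
```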
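The `batch` method mentioned in the CHANGELOG can be sketched the same way. The `onDirty(watch, diff)` callback comes from that entry; the name of the transaction callback used here (`transaction`) is an assumption, since the entry does not name it:

```ts
import { gql, InMemoryCache } from "@apollo/client";

const cache = new InMemoryCache();
const query = gql`query { count }`;

cache.batch({
  // Assumed option name for the callback that performs the writes (the entry
  // only says batch takes named options rather than positional parameters).
  transaction(c: InMemoryCache) {
    c.writeQuery({ query, data: { count: 1 } });
  },
  // Called for each watched query invalidated by this batch, per the entry.
  onDirty(watch: any, diff: any) {
    console.log("dirty:", watch.query === query, "complete:", diff.complete);
  },
});
```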
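Finally, the path bookkeeping behind `CacheGroup.assignPaths` and `CacheGroup.getStorage` can be pictured in isolation. This is a simplified sketch of the idea rather than the implementation above: nested objects are keyed by the property path leading to them from their enclosing entity. The real code then uses such paths (plus the field name) as keys into a `Trie` of storage records, so the same per-field storage is found again even as the surrounding objects are replaced immutably:

```ts
type StorePath = (string | number)[];

// Maps each nested object to the path that leads to it from its enclosing
// entity, mirroring (in simplified form) what CacheGroup.assignPaths records.
const paths = new WeakMap<object, StorePath>();

function assignPaths(dataId: string, entity: Record<string, any>): void {
  const walk = (value: any, path: StorePath): void => {
    if (Array.isArray(value)) {
      // Arrays themselves are not registered, only their object elements.
      value.forEach((child, i) => walk(child, path.concat(i)));
    } else if (value && typeof value === "object") {
      if (paths.has(value)) return; // first assigned path wins
      paths.set(value, path); // unlike the real code, the entity itself is registered too
      Object.keys(value).forEach(key => walk(value[key], path.concat(key)));
    }
  };
  walk(entity, [dataId]);
}

// Example: nested objects become addressable by entity ID plus property path.
const user = { profile: { avatar: { url: "https://example.com/a.png" } } };
assignPaths("User:1", user);
console.log(paths.get(user.profile));        // ["User:1", "profile"]
console.log(paths.get(user.profile.avatar)); // ["User:1", "profile", "avatar"]
```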