import type {
  Exchange,
  Operation,
  OperationResult,
  RequestPolicy,
  CacheOutcome,
} from '@urql/core';
import { formatDocument, makeOperation } from '@urql/core';

import type { Source } from 'wonka';
import {
  filter,
  map,
  merge,
  pipe,
  share,
  fromArray,
  mergeMap,
  empty,
} from 'wonka';

import { _query } from './operations/query';
import { _write } from './operations/write';
import { addMetadata, toRequestPolicy } from './helpers/operation';
import { filterVariables, getMainOperation } from './ast';
import { Store } from './store/store';
import type { Data, Dependencies, CacheExchangeOpts } from './types';

import {
  initDataState,
  clearDataState,
  noopDataState,
  hydrateData,
  reserveLayer,
  hasLayer,
} from './store/data';

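/** Internal result of a cache read, annotated with the read's {@link CacheOutcome}
 * and the set of cache keys (dependencies) that the read touched. */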
interface OperationResultWithMeta extends Partial<OperationResult> {
  operation: Operation;
  outcome: CacheOutcome;
  dependencies: Dependencies;
  hasNext: boolean;
}

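// Internal bookkeeping: operations and results are tracked by operation key,
// while dependent operations are tracked per cache dependency key.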
type Operations = Set<number>;
type OperationMap = Map<number, Operation>;
type ResultMap = Map<number, Data | null>;
type OptimisticDependencies = Map<number, Dependencies>;
type DependentOperations = Map<string, Operations>;

/** Exchange factory that creates a normalized cache exchange.
 *
 * @param opts - A {@link CacheExchangeOpts} configuration object.
 * @returns the created normalized cache {@link Exchange}.
 *
 * @remarks
 * Graphcache is a normalized cache, enabled by using the `cacheExchange`
 * in place of `@urql/core`’s. A normalized GraphQL cache uses typenames
 * and key fields in the result to share a single copy for each unique
 * entity across all queries.
 *
 * The `cacheExchange` may be passed a {@link CacheExchangeOpts} object
 * to define custom resolvers, custom updates for mutations,
 * optimistic updates, or to add custom key fields per type.
 *
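 * @example
 * An illustrative sketch of using this exchange in place of `@urql/core`’s
 * default cache; the endpoint URL and the `Image` key function are
 * placeholders rather than part of this module:
 *
 * ```ts
 * import { Client, fetchExchange } from '@urql/core';
 * import { cacheExchange } from '@urql/exchange-graphcache';
 *
 * const client = new Client({
 *   url: 'https://example.com/graphql',
 *   exchanges: [
 *     cacheExchange({
 *       keys: {
 *         // Hypothetical type without an `id` field, keyed by its `url` instead
 *         Image: data => data.url as string,
 *       },
 *     }),
 *     fetchExchange,
 *   ],
 * });
 * ```
 *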
 * @see {@link https://urql.dev/goto/docs/graphcache} for the full Graphcache docs.
 */
export const cacheExchange =
  <C extends Partial<CacheExchangeOpts>>(opts?: C): Exchange =>
  ({ forward, client, dispatchDebug }) => {
    const store = new Store<C>(opts);

    if (opts && opts.storage) {
      store.data.hydrating = true;
      opts.storage.readData().then(entries => {
        hydrateData(store.data, opts!.storage!, entries);
        if (opts.storage!.onCacheHydrated) opts.storage!.onCacheHydrated();
      });
    }

    const optimisticKeysToDependencies: OptimisticDependencies = new Map();
    const mutationResultBuffer: OperationResult[] = [];
    const operations: OperationMap = new Map();
    const results: ResultMap = new Map();
    const blockedDependencies: Dependencies = new Set();
    const requestedRefetch: Operations = new Set();
    const deps: DependentOperations = new Map();

    let reexecutingOperations: Operations = new Set();
    let dependentOperations: Operations = new Set();

    const isBlockedByOptimisticUpdate = (
      dependencies: Dependencies
    ): boolean => {
      for (const dep of dependencies.values())
        if (blockedDependencies.has(dep)) return true;
      return false;
    };

    const collectPendingOperations = (
      pendingOperations: Operations,
      dependencies: undefined | Dependencies
    ) => {
      if (dependencies) {
        // Collect operations that will be updated due to cache changes
        for (const dep of dependencies.values()) {
          const keys = deps.get(dep);
          if (keys) for (const key of keys.values()) pendingOperations.add(key);
        }
      }
    };

    const executePendingOperations = (
      operation: Operation,
      pendingOperations: Operations,
      isOptimistic: boolean
    ) => {
      // Reexecute collected operations and delete them from the mapping
      for (const key of pendingOperations.values()) {
        if (key !== operation.key) {
          const op = operations.get(key);
          if (op) {
            // Collect all dependent operations if the reexecuting operation is a query
            if (operation.kind === 'query') dependentOperations.add(key);
            let policy: RequestPolicy = 'cache-first';
            if (requestedRefetch.has(key)) {
              requestedRefetch.delete(key);
              policy = 'cache-and-network';
            }
            client.reexecuteOperation(toRequestPolicy(op, policy));
          }
        }
      }

      if (!isOptimistic) {
        // Upon completion, all dependent operations become reexecuting operations, which
        // prevents them from reexecuting prior operations again and causing infinite loops
        const _reexecutingOperations = reexecutingOperations;
        reexecutingOperations = dependentOperations;
        if (operation.kind === 'query') {
          reexecutingOperations.add(operation.key);
        }
        (dependentOperations = _reexecutingOperations).clear();
      }
    };

    // This registers queries with the data layer to ensure commutativity
    const prepareForwardedOperation = (operation: Operation) => {
      let optimistic = false;
      if (operation.kind === 'query') {
        // Pre-reserve the position of the result layer
        reserveLayer(store.data, operation.key);
        operations.set(operation.key, operation);
      } else if (operation.kind === 'teardown') {
        // Delete reference to operation if any exists to release it
        operations.delete(operation.key);
        results.delete(operation.key);
        reexecutingOperations.delete(operation.key);
        // Mark operation layer as done
        noopDataState(store.data, operation.key);
        return operation;
      } else if (
        operation.kind === 'mutation' &&
        operation.context.requestPolicy !== 'network-only'
      ) {
        operations.set(operation.key, operation);
        // This executes an optimistic update for mutations and registers it if necessary
        initDataState('write', store.data, operation.key, true, false);
        const { dependencies } = _write(
          store,
          operation as any,
          undefined,
          undefined
        );
        clearDataState();
        if (dependencies.size) {
          // Update blocked optimistic dependencies
          for (const dep of dependencies.values()) blockedDependencies.add(dep);
          // Store optimistic dependencies for update
          optimisticKeysToDependencies.set(operation.key, dependencies);
          // Update related queries
          const pendingOperations: Operations = new Set();
          collectPendingOperations(pendingOperations, dependencies);
          executePendingOperations(operation, pendingOperations, true);
          // Mark operation as optimistic
          optimistic = true;
        }
      }

      return makeOperation(
        operation.kind,
        {
          key: operation.key,
          query: formatDocument(operation.query),
          variables: operation.variables
            ? filterVariables(
                getMainOperation(operation.query),
                operation.variables
              )
            : operation.variables,
        },
        { ...operation.context, optimistic }
      );
    };

    // This updates the known dependencies for the passed operation
    const updateDependencies = (op: Operation, dependencies: Dependencies) => {
      for (const dep of dependencies.values()) {
        let depOps = deps.get(dep);
        if (!depOps) deps.set(dep, (depOps = new Set()));
        depOps.add(op.key);
      }
    };

    // Retrieves a query result from the cache and adds a `CacheOutcome` hint,
    // indicating whether the cached result was a 'hit', 'partial', or 'miss'
    const operationResultFromCache = (
      operation: Operation
    ): OperationResultWithMeta => {
      initDataState('read', store.data, undefined, false, false);
      const result = _query(
        store,
        operation,
        results.get(operation.key),
        undefined
      );
      clearDataState();
      const cacheOutcome: CacheOutcome = result.data
        ? !result.partial && !result.hasNext
          ? 'hit'
          : 'partial'
        : 'miss';

      results.set(operation.key, result.data);
      operations.set(operation.key, operation);
      updateDependencies(operation, result.dependencies);

      return {
        outcome: cacheOutcome,
        operation,
        data: result.data,
        dependencies: result.dependencies,
        hasNext: result.hasNext,
      };
    };

    // Take any OperationResult and update the cache with it
    const updateCacheWithResult = (
      result: OperationResult,
      pendingOperations: Operations
    ): OperationResult => {
      // Retrieve the original operation to get unfiltered variables
      const operation =
        operations.get(result.operation.key) || result.operation;
      if (operation.kind === 'mutation') {
        // Collect previous dependencies that have been written for optimistic updates
        const dependencies = optimisticKeysToDependencies.get(operation.key);
        collectPendingOperations(pendingOperations, dependencies);
        optimisticKeysToDependencies.delete(operation.key);
      }

      if (operation.kind === 'subscription' || result.hasNext)
        reserveLayer(store.data, operation.key, true);

      let queryDependencies: undefined | Dependencies;
      let data: Data | null = result.data;
      if (data) {
        // Write the result to cache and collect all dependencies that need to be
        // updated
        initDataState('write', store.data, operation.key, false, false);
        const writeDependencies = _write(
          store,
          operation,
          data,
          result.error
        ).dependencies;
        clearDataState();
        collectPendingOperations(pendingOperations, writeDependencies);
        const prevData =
          operation.kind === 'query' ? results.get(operation.key) : null;
        initDataState(
          'read',
          store.data,
          operation.key,
          false,
          prevData !== data
        );
        const queryResult = _query(
          store,
          operation,
          prevData || data,
          result.error
        );
        clearDataState();
        data = queryResult.data;
        if (operation.kind === 'query') {
          // Collect the query's dependencies for future pending operation updates
          queryDependencies = queryResult.dependencies;
          collectPendingOperations(pendingOperations, queryDependencies);
          results.set(operation.key, data);
        }
      } else {
        noopDataState(store.data, operation.key);
      }

      // Update this operation's dependencies if it's a query
      if (queryDependencies) {
        updateDependencies(result.operation, queryDependencies);
      }

      return {
        operation,
        data,
        error: result.error,
        extensions: result.extensions,
        hasNext: result.hasNext,
        stale: result.stale,
      };
    };

    return operations$ => {
      // Filter by operations that are cacheable and attempt to query them from the cache
      const cacheOps$ = pipe(
        operations$,
        filter(
          op =>
            op.kind === 'query' && op.context.requestPolicy !== 'network-only'
        ),
        map(operationResultFromCache),
        share
      );

      const nonCacheOps$ = pipe(
        operations$,
        filter(
          op =>
            op.kind !== 'query' || op.context.requestPolicy === 'network-only'
        )
      );

      // Rebound operations that are incomplete, i.e. couldn't be queried just from the cache
      const cacheMissOps$ = pipe(
        cacheOps$,
        filter(
          res =>
            res.outcome === 'miss' &&
            res.operation.context.requestPolicy !== 'cache-only' &&
            !isBlockedByOptimisticUpdate(res.dependencies) &&
            !reexecutingOperations.has(res.operation.key)
        ),
        map(res => {
          dispatchDebug({
            type: 'cacheMiss',
            message: 'The result could not be retrieved from the cache',
            operation: res.operation,
          });
          return addMetadata(res.operation, { cacheOutcome: 'miss' });
        })
      );

      // Resolve OperationResults that the cache was able to assemble completely and trigger
      // a network request if the current operation's policy is cache-and-network
      const cacheResult$ = pipe(
        cacheOps$,
        filter(
          res =>
            res.outcome !== 'miss' ||
            res.operation.context.requestPolicy === 'cache-only'
        ),
        map((res: OperationResultWithMeta): OperationResult => {
          const { requestPolicy } = res.operation.context;

          // We reexecute requests marked as `cache-and-network`, and partial responses,
          // if we wouldn't cause a request loop
          const shouldReexecute =
            requestPolicy !== 'cache-only' &&
            (res.hasNext ||
              requestPolicy === 'cache-and-network' ||
              (requestPolicy === 'cache-first' &&
                res.outcome === 'partial' &&
                !reexecutingOperations.has(res.operation.key)));
          // Set stale to true anyway, even if the reexecute will be blocked, if the operation
          // is in progress. We can be reasonably sure of that if a layer has been reserved for it.
          const stale =
            requestPolicy !== 'cache-only' &&
            (shouldReexecute ||
              (res.outcome === 'partial' &&
                reexecutingOperations.has(res.operation.key) &&
                hasLayer(store.data, res.operation.key)));

          const result: OperationResult = {
            operation: addMetadata(res.operation, {
              cacheOutcome: res.outcome,
            }),
            data: res.data,
            error: res.error,
            extensions: res.extensions,
            stale: stale && !res.hasNext,
            hasNext: shouldReexecute && res.hasNext,
          };

          if (!shouldReexecute) {
            /*noop*/
          } else if (!isBlockedByOptimisticUpdate(res.dependencies)) {
            client.reexecuteOperation(
              toRequestPolicy(
                operations.get(res.operation.key) || res.operation,
                'network-only'
              )
            );
          } else if (requestPolicy === 'cache-and-network') {
            requestedRefetch.add(res.operation.key);
          }

          dispatchDebug({
            type: 'cacheHit',
            message: `A requested operation was found and returned from the cache.`,
            operation: res.operation,
            data: {
              value: result,
            },
          });

          return result;
        })
      );

      // Forward operations that aren't cacheable and rebound operations
      // Also update the cache with any network results
      const result$ = pipe(
        merge([nonCacheOps$, cacheMissOps$]),
        map(prepareForwardedOperation),
        forward
      );

      // Results that can immediately be resolved
      const nonOptimisticResults$ = pipe(
        result$,
        filter(
          result => !optimisticKeysToDependencies.has(result.operation.key)
        ),
        map(result => {
          const pendingOperations: Operations = new Set();
          // Update the cache with the incoming API result
          const cacheResult = updateCacheWithResult(result, pendingOperations);
          // Execute all dependent queries
          executePendingOperations(result.operation, pendingOperations, false);
          return cacheResult;
        })
      );

      // Hold back results for mutations that had optimistic updates, buffering them
      // until all pending optimistic mutations have completed, then flush them as one batch
      const optimisticMutationCompletion$ = pipe(
        result$,
        filter(result =>
          optimisticKeysToDependencies.has(result.operation.key)
        ),
        mergeMap((result: OperationResult): Source<OperationResult> => {
          const length = mutationResultBuffer.push(result);
          if (length < optimisticKeysToDependencies.size) {
            return empty;
          }

          for (let i = 0; i < mutationResultBuffer.length; i++) {
            reserveLayer(store.data, mutationResultBuffer[i].operation.key);
          }

          blockedDependencies.clear();

          const results: OperationResult[] = [];
          const pendingOperations: Operations = new Set();

          let bufferedResult: OperationResult | void;
          while ((bufferedResult = mutationResultBuffer.shift()))
            results.push(
              updateCacheWithResult(bufferedResult, pendingOperations)
            );

          // Execute all dependent queries as a single batch
          executePendingOperations(result.operation, pendingOperations, false);

          return fromArray(results);
        })
      );

      return merge([
        nonOptimisticResults$,
        optimisticMutationCompletion$,
        cacheResult$,
      ]);
    };
  };