initial update
1 node_modules/.bin/acorn generated vendored Symbolic link
@@ -0,0 +1 @@
../acorn/bin/acorn
1 node_modules/.bin/apollo-pbjs generated vendored Symbolic link
@@ -0,0 +1 @@
../@apollo/protobufjs/bin/pbjs
1 node_modules/.bin/apollo-pbts generated vendored Symbolic link
@@ -0,0 +1 @@
../@apollo/protobufjs/bin/pbts
1 node_modules/.bin/loose-envify generated vendored Symbolic link
@@ -0,0 +1 @@
../loose-envify/cli.js
1 node_modules/.bin/mime generated vendored Symbolic link
@@ -0,0 +1 @@
../mime/cli.js
1 node_modules/.bin/sha.js generated vendored Symbolic link
@@ -0,0 +1 @@
../sha.js/bin.js
1 node_modules/.bin/ts-node generated vendored Symbolic link
@@ -0,0 +1 @@
../ts-node/dist/bin.js
1 node_modules/.bin/ts-node-cwd generated vendored Symbolic link
@@ -0,0 +1 @@
../ts-node/dist/bin-cwd.js
1 node_modules/.bin/ts-node-esm generated vendored Symbolic link
@@ -0,0 +1 @@
../ts-node/dist/bin-esm.js
1 node_modules/.bin/ts-node-script generated vendored Symbolic link
@@ -0,0 +1 @@
../ts-node/dist/bin-script.js
1 node_modules/.bin/ts-node-transpile-only generated vendored Symbolic link
@@ -0,0 +1 @@
../ts-node/dist/bin-transpile.js
1 node_modules/.bin/ts-script generated vendored Symbolic link
@@ -0,0 +1 @@
../ts-node/dist/bin-script-deprecated.js
1 node_modules/.bin/tsc generated vendored Symbolic link
@@ -0,0 +1 @@
../typescript/bin/tsc
1 node_modules/.bin/tsserver generated vendored Symbolic link
@@ -0,0 +1 @@
../typescript/bin/tsserver
1 node_modules/.bin/uuid generated vendored Symbolic link
@@ -0,0 +1 @@
../uuid/dist/bin/uuid
1996 node_modules/.package-lock.json generated vendored Normal file
File diff suppressed because it is too large
21 node_modules/@apollo/cache-control-types/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2022 Apollo Graph, Inc. (Formerly Meteor Development Group, Inc.)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
6 node_modules/@apollo/cache-control-types/README.md generated vendored Normal file
@@ -0,0 +1,6 @@
# Cache Control types

This package exports various TypeScript types related to Apollo Server's cache
policy calculation.

Specifically, it gives a type-safe way to get the `info.cacheControl` field in resolvers. Either declare your resolver's `info` argument to be of type `GraphQLResolveInfoWithCacheControl` (perhaps with the graphql-code-generator typescript-resolvers customResolveInfo option), or use the `maybeCacheControlFromInfo` or `cacheControlFromInfo` functions to extract `info.cacheControl` in a type-safe way.
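As a minimal sketch of the second approach this README describes, a resolver can pull `info.cacheControl` through `cacheControlFromInfo` (the `Query.book` resolver, its schema, and its return value are illustrative assumptions, not part of the package):

```typescript
import { cacheControlFromInfo } from '@apollo/cache-control-types';
import type { GraphQLResolveInfo } from 'graphql';

const resolvers = {
  Query: {
    // Hypothetical resolver, shown only to illustrate the API.
    book(_parent: unknown, _args: unknown, _ctx: unknown, info: GraphQLResolveInfo) {
      // Throws if `info.cacheControl` is missing or not from Apollo Server 3+.
      const cacheControl = cacheControlFromInfo(info);
      // Replace the hint: at most 60 seconds, session-specific.
      cacheControl.setCacheHint({ maxAge: 60, scope: 'PRIVATE' });
      return { title: 'Example Book' };
    },
  },
};
```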
22 node_modules/@apollo/cache-control-types/dist/cjs/index.d.ts generated vendored Normal file
@@ -0,0 +1,22 @@
import type { GraphQLCompositeType, GraphQLResolveInfo } from 'graphql';
export type CacheScope = 'PUBLIC' | 'PRIVATE';
export interface CacheHint {
    maxAge?: number;
    scope?: CacheScope;
}
export interface CachePolicy extends CacheHint {
    replace(hint: CacheHint): void;
    restrict(hint: CacheHint): void;
    policyIfCacheable(): Required<CacheHint> | null;
}
export interface ResolveInfoCacheControl {
    cacheHint: CachePolicy;
    setCacheHint(hint: CacheHint): void;
    cacheHintFromType(t: GraphQLCompositeType): CacheHint | undefined;
}
export interface GraphQLResolveInfoWithCacheControl extends Omit<GraphQLResolveInfo, 'cacheControl'> {
    cacheControl: ResolveInfoCacheControl;
}
export declare function maybeCacheControlFromInfo(info: GraphQLResolveInfo): ResolveInfoCacheControl | null;
export declare function cacheControlFromInfo(info: GraphQLResolveInfo): ResolveInfoCacheControl;
//# sourceMappingURL=index.d.ts.map
1 node_modules/@apollo/cache-control-types/dist/cjs/index.d.ts.map generated vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAKA,OAAO,KAAK,EAAE,oBAAoB,EAAE,kBAAkB,EAAE,MAAM,SAAS,CAAC;AAMxE,MAAM,MAAM,UAAU,GAAG,QAAQ,GAAG,SAAS,CAAC;AAM9C,MAAM,WAAW,SAAS;IACxB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,KAAK,CAAC,EAAE,UAAU,CAAC;CACpB;AAMD,MAAM,WAAW,WAAY,SAAQ,SAAS;IAK5C,OAAO,CAAC,IAAI,EAAE,SAAS,GAAG,IAAI,CAAC;IAO/B,QAAQ,CAAC,IAAI,EAAE,SAAS,GAAG,IAAI,CAAC;IAMhC,iBAAiB,IAAI,QAAQ,CAAC,SAAS,CAAC,GAAG,IAAI,CAAC;CACjD;AAMD,MAAM,WAAW,uBAAuB;IACtC,SAAS,EAAE,WAAW,CAAC;IAGvB,YAAY,CAAC,IAAI,EAAE,SAAS,GAAG,IAAI,CAAC;IAEpC,iBAAiB,CAAC,CAAC,EAAE,oBAAoB,GAAG,SAAS,GAAG,SAAS,CAAC;CACnE;AAMD,MAAM,WAAW,kCACf,SAAQ,IAAI,CAAC,kBAAkB,EAAE,cAAc,CAAC;IAYhD,YAAY,EAAE,uBAAuB,CAAC;CACvC;AAKD,wBAAgB,yBAAyB,CACvC,IAAI,EAAE,kBAAkB,GACvB,uBAAuB,GAAG,IAAI,CAKhC;AAKD,wBAAgB,oBAAoB,CAClC,IAAI,EAAE,kBAAkB,GACvB,uBAAuB,CAgBzB"}
25 node_modules/@apollo/cache-control-types/dist/cjs/index.js generated vendored Normal file
@@ -0,0 +1,25 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.cacheControlFromInfo = exports.maybeCacheControlFromInfo = void 0;
function maybeCacheControlFromInfo(info) {
    if (info.cacheControl?.cacheHint?.restrict) {
        return info.cacheControl;
    }
    return null;
}
exports.maybeCacheControlFromInfo = maybeCacheControlFromInfo;
function cacheControlFromInfo(info) {
    if (!('cacheControl' in info)) {
        throw new Error('The `info` argument does not appear to have a cacheControl field. ' +
            "Check that you are using Apollo Server 3 or newer and that you aren't using " +
            'ApolloServerPluginCacheControlDisabled.');
    }
    if (!info.cacheControl?.cacheHint?.restrict) {
        throw new Error('The `info` argument has a cacheControl field but it does not appear to be from Apollo' +
            "Server 3 or newer. Check that you are using Apollo Server 3 or newer and that you aren't using " +
            'ApolloServerPluginCacheControlDisabled.');
    }
    return info.cacheControl;
}
exports.cacheControlFromInfo = cacheControlFromInfo;
//# sourceMappingURL=index.js.map
1 node_modules/@apollo/cache-control-types/dist/cjs/index.js.map generated vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":";;;AAmFA,SAAgB,yBAAyB,CACvC,IAAwB;IAExB,IAAK,IAAY,CAAC,YAAY,EAAE,SAAS,EAAE,QAAQ,EAAE;QACnD,OAAQ,IAAY,CAAC,YAAY,CAAC;KACnC;IACD,OAAO,IAAI,CAAC;AACd,CAAC;AAPD,8DAOC;AAKD,SAAgB,oBAAoB,CAClC,IAAwB;IAExB,IAAI,CAAC,CAAC,cAAc,IAAI,IAAI,CAAC,EAAE;QAC7B,MAAM,IAAI,KAAK,CACb,oEAAoE;YAClE,8EAA8E;YAC9E,yCAAyC,CAC5C,CAAC;KACH;IACD,IAAI,CAAE,IAAY,CAAC,YAAY,EAAE,SAAS,EAAE,QAAQ,EAAE;QACpD,MAAM,IAAI,KAAK,CACb,uFAAuF;YACrF,iGAAiG;YACjG,yCAAyC,CAC5C,CAAC;KACH;IACD,OAAQ,IAAY,CAAC,YAAY,CAAC;AACpC,CAAC;AAlBD,oDAkBC"}
1 node_modules/@apollo/cache-control-types/dist/cjs/package.json generated vendored Normal file
@@ -0,0 +1 @@
{"type":"commonjs"}
22 node_modules/@apollo/cache-control-types/dist/esm/index.d.ts generated vendored Normal file
@@ -0,0 +1,22 @@
import type { GraphQLCompositeType, GraphQLResolveInfo } from 'graphql';
export type CacheScope = 'PUBLIC' | 'PRIVATE';
export interface CacheHint {
    maxAge?: number;
    scope?: CacheScope;
}
export interface CachePolicy extends CacheHint {
    replace(hint: CacheHint): void;
    restrict(hint: CacheHint): void;
    policyIfCacheable(): Required<CacheHint> | null;
}
export interface ResolveInfoCacheControl {
    cacheHint: CachePolicy;
    setCacheHint(hint: CacheHint): void;
    cacheHintFromType(t: GraphQLCompositeType): CacheHint | undefined;
}
export interface GraphQLResolveInfoWithCacheControl extends Omit<GraphQLResolveInfo, 'cacheControl'> {
    cacheControl: ResolveInfoCacheControl;
}
export declare function maybeCacheControlFromInfo(info: GraphQLResolveInfo): ResolveInfoCacheControl | null;
export declare function cacheControlFromInfo(info: GraphQLResolveInfo): ResolveInfoCacheControl;
//# sourceMappingURL=index.d.ts.map
1 node_modules/@apollo/cache-control-types/dist/esm/index.d.ts.map generated vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAKA,OAAO,KAAK,EAAE,oBAAoB,EAAE,kBAAkB,EAAE,MAAM,SAAS,CAAC;AAMxE,MAAM,MAAM,UAAU,GAAG,QAAQ,GAAG,SAAS,CAAC;AAM9C,MAAM,WAAW,SAAS;IACxB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,KAAK,CAAC,EAAE,UAAU,CAAC;CACpB;AAMD,MAAM,WAAW,WAAY,SAAQ,SAAS;IAK5C,OAAO,CAAC,IAAI,EAAE,SAAS,GAAG,IAAI,CAAC;IAO/B,QAAQ,CAAC,IAAI,EAAE,SAAS,GAAG,IAAI,CAAC;IAMhC,iBAAiB,IAAI,QAAQ,CAAC,SAAS,CAAC,GAAG,IAAI,CAAC;CACjD;AAMD,MAAM,WAAW,uBAAuB;IACtC,SAAS,EAAE,WAAW,CAAC;IAGvB,YAAY,CAAC,IAAI,EAAE,SAAS,GAAG,IAAI,CAAC;IAEpC,iBAAiB,CAAC,CAAC,EAAE,oBAAoB,GAAG,SAAS,GAAG,SAAS,CAAC;CACnE;AAMD,MAAM,WAAW,kCACf,SAAQ,IAAI,CAAC,kBAAkB,EAAE,cAAc,CAAC;IAYhD,YAAY,EAAE,uBAAuB,CAAC;CACvC;AAKD,wBAAgB,yBAAyB,CACvC,IAAI,EAAE,kBAAkB,GACvB,uBAAuB,GAAG,IAAI,CAKhC;AAKD,wBAAgB,oBAAoB,CAClC,IAAI,EAAE,kBAAkB,GACvB,uBAAuB,CAgBzB"}
20 node_modules/@apollo/cache-control-types/dist/esm/index.js generated vendored Normal file
@@ -0,0 +1,20 @@
export function maybeCacheControlFromInfo(info) {
    if (info.cacheControl?.cacheHint?.restrict) {
        return info.cacheControl;
    }
    return null;
}
export function cacheControlFromInfo(info) {
    if (!('cacheControl' in info)) {
        throw new Error('The `info` argument does not appear to have a cacheControl field. ' +
            "Check that you are using Apollo Server 3 or newer and that you aren't using " +
            'ApolloServerPluginCacheControlDisabled.');
    }
    if (!info.cacheControl?.cacheHint?.restrict) {
        throw new Error('The `info` argument has a cacheControl field but it does not appear to be from Apollo' +
            "Server 3 or newer. Check that you are using Apollo Server 3 or newer and that you aren't using " +
            'ApolloServerPluginCacheControlDisabled.');
    }
    return info.cacheControl;
}
//# sourceMappingURL=index.js.map
1 node_modules/@apollo/cache-control-types/dist/esm/index.js.map generated vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAmFA,MAAM,UAAU,yBAAyB,CACvC,IAAwB;IAExB,IAAK,IAAY,CAAC,YAAY,EAAE,SAAS,EAAE,QAAQ,EAAE;QACnD,OAAQ,IAAY,CAAC,YAAY,CAAC;KACnC;IACD,OAAO,IAAI,CAAC;AACd,CAAC;AAKD,MAAM,UAAU,oBAAoB,CAClC,IAAwB;IAExB,IAAI,CAAC,CAAC,cAAc,IAAI,IAAI,CAAC,EAAE;QAC7B,MAAM,IAAI,KAAK,CACb,oEAAoE;YAClE,8EAA8E;YAC9E,yCAAyC,CAC5C,CAAC;KACH;IACD,IAAI,CAAE,IAAY,CAAC,YAAY,EAAE,SAAS,EAAE,QAAQ,EAAE;QACpD,MAAM,IAAI,KAAK,CACb,uFAAuF;YACrF,iGAAiG;YACjG,yCAAyC,CAC5C,CAAC;KACH;IACD,OAAQ,IAAY,CAAC,YAAY,CAAC;AACpC,CAAC"}
1 node_modules/@apollo/cache-control-types/dist/esm/package.json generated vendored Normal file
@@ -0,0 +1 @@
{"type":"module"}
1 node_modules/@apollo/cache-control-types/dist/tsconfig.cjs.tsbuildinfo generated vendored Normal file
File diff suppressed because one or more lines are too long
1 node_modules/@apollo/cache-control-types/dist/tsconfig.tsbuildinfo generated vendored Normal file
File diff suppressed because one or more lines are too long
42 node_modules/@apollo/cache-control-types/package.json generated vendored Normal file
@@ -0,0 +1,42 @@
{
  "name": "@apollo/cache-control-types",
  "version": "1.0.3",
  "description": "TypeScript types for Apollo Server info.cacheControl",
  "type": "module",
  "main": "dist/cjs/index.js",
  "module": "dist/esm/index.js",
  "types": "dist/esm/index.d.ts",
  "exports": {
    ".": {
      "types": {
        "require": "./dist/cjs/index.d.ts",
        "default": "./dist/esm/index.d.ts"
      },
      "import": "./dist/esm/index.js",
      "require": "./dist/cjs/index.js"
    }
  },
  "repository": {
    "type": "git",
    "url": "git+https://github.com/apollographql/apollo-server.git",
    "directory": "packages/cache-control-types/"
  },
  "keywords": [
    "apollo",
    "graphql",
    "typescript",
    "node"
  ],
  "author": "Apollo <packages@apollographql.com>",
  "license": "MIT",
  "bugs": {
    "url": "https://github.com/apollographql/apollo-server/issues"
  },
  "homepage": "https://github.com/apollographql/apollo-server#readme",
  "peerDependencies": {
    "graphql": "14.x || 15.x || 16.x"
  },
  "volta": {
    "extends": "../../package.json"
  }
}
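The `exports` map above routes `require()` to the CJS build and `import` to the ESM build, each with matching type declarations. A minimal sketch of the two consumption styles (hypothetical consumer files, not shipped with the package):

```typescript
// consumer.mts — ESM: Node resolves this to ./dist/esm/index.js
// via the "import" condition of the exports map.
import { cacheControlFromInfo } from '@apollo/cache-control-types';

// consumer.cts — CommonJS: the equivalent require() call resolves to
// ./dist/cjs/index.js via the "require" condition:
// const { cacheControlFromInfo } = require('@apollo/cache-control-types');
```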
114 node_modules/@apollo/cache-control-types/src/index.ts generated vendored Normal file
@@ -0,0 +1,114 @@
// NOTE: Once Apollo Server 4 is released, move this package into the
// apollo-server repo. We're placing it in the apollo-utils repo for now to
// enable us to make non-alpha releases that can be used on the apollo-server
// version-4 branch.

import type { GraphQLCompositeType, GraphQLResolveInfo } from 'graphql';

/**
 * CacheScope represents whether cacheable data should be shared across sessions
 * (PUBLIC) or considered session-specific (PRIVATE).
 */
export type CacheScope = 'PUBLIC' | 'PRIVATE';

/**
 * CacheHint represents a contribution to an overall cache policy. It can
 * specify a maxAge and/or a scope.
 */
export interface CacheHint {
  maxAge?: number;
  scope?: CacheScope;
}

/**
 * CachePolicy is a mutable CacheHint with helpful methods for updating its
 * fields.
 */
export interface CachePolicy extends CacheHint {
  /**
   * Mutate this CachePolicy by replacing each field defined in `hint`. This can
   * make the policy more restrictive or less restrictive.
   */
  replace(hint: CacheHint): void;

  /**
   * Mutate this CachePolicy by restricting each field defined in `hint`. This
   * can only make the policy more restrictive: a previously defined `maxAge`
   * can only be reduced, and a previously Private scope cannot be made Public.
   */
  restrict(hint: CacheHint): void;

  /**
   * If this policy has a positive `maxAge`, then return a copy of itself as a
   * `CacheHint` with both fields defined. Otherwise return null.
   */
  policyIfCacheable(): Required<CacheHint> | null;
}

/**
 * When using Apollo Server with the cache control plugin (on by default), an
 * object of this kind is available to resolvers on `info.cacheControl`.
 */
export interface ResolveInfoCacheControl {
  cacheHint: CachePolicy;
  // Shorthand for `cacheHint.replace(hint)`; also for compatibility with
  // the Apollo Server 2.x API.
  setCacheHint(hint: CacheHint): void;

  cacheHintFromType(t: GraphQLCompositeType): CacheHint | undefined;
}

/** When using Apollo Server with the cache control plugin (on by default), the
 * `info` argument to resolvers can be considered to be of this type. (You can
 * use this type with the customResolveInfo option to the graphql-code-generator
 * typescript-resolvers plugin, for example.) */
export interface GraphQLResolveInfoWithCacheControl
  extends Omit<GraphQLResolveInfo, 'cacheControl'> {
  // Why the Omit above? If you happen to have AS2 `apollo-cache-control` or AS3
  // `apollo-server-core` in your TypeScript build, then there's an ambient
  // `declare module` floating around that monkey-patches GraphQLResolveInfo to
  // have a cacheControl field. This led to lots of problems, which is why in
  // AS4 we're moving towards the approach in this file where don't assume every
  // GraphQLResolveInfo is a GraphQLResolveInfoWithCacheControl. The AS3 type is
  // very slightly incompatible with the type in the file, since we changed
  // CacheScope to be a union of strings rather than an enum. They have the same
  // runtime representation so it's safe to ignore, but in order for the
  // `extends` to not error out if you're building with the old ambient
  // definition floating around too, we need the Omit.
  cacheControl: ResolveInfoCacheControl;
}

/** Given an `info` resolver argument, returns the cacheControl field if it
 * exists and appears to be from Apollo Server 3 or newer; returns null
 * otherwise.*/
export function maybeCacheControlFromInfo(
  info: GraphQLResolveInfo,
): ResolveInfoCacheControl | null {
  if ((info as any).cacheControl?.cacheHint?.restrict) {
    return (info as any).cacheControl;
  }
  return null;
}

/** Given an `info` resolver argument, returns the cacheControl field if it
 * exists and appears to be from Apollo Server 3 or newer; throws
 * otherwise.*/
export function cacheControlFromInfo(
  info: GraphQLResolveInfo,
): ResolveInfoCacheControl {
  if (!('cacheControl' in info)) {
    throw new Error(
      'The `info` argument does not appear to have a cacheControl field. ' +
        "Check that you are using Apollo Server 3 or newer and that you aren't using " +
        'ApolloServerPluginCacheControlDisabled.',
    );
  }
  if (!(info as any).cacheControl?.cacheHint?.restrict) {
    throw new Error(
      'The `info` argument has a cacheControl field but it does not appear to be from Apollo' +
        "Server 3 or newer. Check that you are using Apollo Server 3 or newer and that you aren't using " +
        'ApolloServerPluginCacheControlDisabled.',
    );
  }
  return (info as any).cacheControl;
}
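A short sketch of the `restrict` semantics documented in the source above, assuming a resolver that already has a cache-control-capable `info` (the hint values are arbitrary):

```typescript
import { cacheControlFromInfo } from '@apollo/cache-control-types';
import type { GraphQLResolveInfo } from 'graphql';

// Hypothetical resolver body, for illustration only.
function exampleResolver(info: GraphQLResolveInfo) {
  const { cacheHint } = cacheControlFromInfo(info);
  cacheHint.restrict({ maxAge: 120 }); // maxAge becomes at most 120
  cacheHint.restrict({ maxAge: 30, scope: 'PRIVATE' }); // maxAge drops to 30; scope narrows to PRIVATE
  cacheHint.restrict({ maxAge: 300 }); // no effect: 30 is already more restrictive
}
```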
8 node_modules/@apollo/client/.changeset/README.md generated vendored Normal file
@@ -0,0 +1,8 @@
# Changesets

Hello and welcome! This folder has been automatically generated by `@changesets/cli`, a build tool that works
with multi-package repos, or single-package repos to help you version and publish your code. You can
find the full documentation for it [in our repository](https://github.com/changesets/changesets)

We have a quick list of common questions to get you started engaging with this project in
[our documentation](https://github.com/changesets/changesets/blob/main/docs/common-questions.md)
14 node_modules/@apollo/client/.changeset/config.json generated vendored Normal file
@@ -0,0 +1,14 @@
{
  "$schema": "https://unpkg.com/@changesets/config@2.2.0/schema.json",
  "changelog": [
    "@changesets/changelog-github",
    { "repo": "apollographql/apollo-client" }
  ],
  "commit": false,
  "fixed": [],
  "linked": [],
  "access": "public",
  "baseBranch": "main",
  "updateInternalDependencies": "patch",
  "ignore": []
}
4719 node_modules/@apollo/client/CHANGELOG.md generated vendored Normal file
File diff suppressed because it is too large
22 node_modules/@apollo/client/LICENSE generated vendored Normal file
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2022 Apollo Graph, Inc. (Formerly Meteor Development Group, Inc.)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

57 node_modules/@apollo/client/README.md generated vendored Normal file
@@ -0,0 +1,57 @@
<div align="center">

<p>
  <a href="https://www.apollographql.com/"><img src="https://raw.githubusercontent.com/apollographql/apollo-client-devtools/main/assets/apollo-wordmark.svg" height="100" alt="Apollo Client"></a>
</p>
<h1>Apollo Client</h1>

[](https://badge.fury.io/js/%40apollo%2Fclient) [](https://circleci.com/gh/apollographql/apollo-client) [](https://community.apollographql.com)

</div>

---

**Announcement:**
Join 1000+ engineers at GraphQL Summit 2025 by Apollo for talks, workshops, and office hours. Oct 6-8, 2025 in San Francisco. [Get your pass here ->](https://www.apollographql.com/graphql-summit-2025?utm_campaign=2025-03-04_graphql-summit-github-announcement&utm_medium=github&utm_source=apollo-server)

---

Apollo Client is a fully-featured caching GraphQL client with integrations for React, Angular, and more. It allows you to easily build UI components that fetch data via GraphQL.

Apollo Client aims to comply with the [Working Draft of the GraphQL specification](https://spec.graphql.org/draft/).

| ☑️ Apollo Client User Survey |
| :----- |
| What do you like best about Apollo Client? What needs to be improved? Please tell us by taking a [one-minute survey](https://docs.google.com/forms/d/e/1FAIpQLSczNDXfJne3ZUOXjk9Ursm9JYvhTh1_nFTDfdq3XBAFWCzplQ/viewform?usp=pp_url&entry.1170701325=Apollo+Client&entry.204965213=Readme). Your responses will help us understand Apollo Client usage and allow us to serve you better. |

## Documentation

All Apollo Client documentation, including React integration articles and helpful recipes, can be found at: <br/>
[https://www.apollographql.com/docs/react/](https://www.apollographql.com/docs/react/)

The Apollo Client API reference can be found at: <br/>
[https://www.apollographql.com/docs/react/api/apollo-client/](https://www.apollographql.com/docs/react/api/apollo-client/)

Learn how to use Apollo Client with self-paced hands-on training on Odyssey, Apollo's official learning platform: <br/>
[https://odyssey.apollographql.com/](https://odyssey.apollographql.com/)

## Maintainers

|Name|Username|
|---|---|
|Jeff Auriemma|[@bignimbus](https://github.com/bignimbus)|
|Jerel Miller|[@jerelmiller](https://github.com/jerelmiller)|
|Lenz Weber-Tronic|[@phryneas](https://github.com/phryneas)|

## Who is Apollo?

[Apollo](https://apollographql.com/) builds open-source tools and commercial services to make application development easier, better, and accessible to more people. We help you ship faster with:

- [GraphOS](https://www.apollographql.com/graphos) - The platform for building, managing, and scaling a supergraph: a unified network of your organization's microservices and their data sources—all composed into a single distributed API.
- [Apollo Federation](https://www.apollographql.com/federation) – The industry-standard open architecture for building a distributed graph. Use Apollo’s gateway to compose a unified graph from multiple subgraphs, determine a query plan, and route requests across your services.
- [Apollo Client](https://github.com/apollographql/apollo-client) – The most popular GraphQL client for the web. Apollo also builds and maintains [Apollo iOS](https://github.com/apollographql/apollo-ios) and [Apollo Kotlin](https://github.com/apollographql/apollo-kotlin).
- [Apollo Server](https://github.com/apollographql/apollo-server) – A production-ready JavaScript GraphQL server that connects to any microservice, API, or database. Compatible with all popular JavaScript frameworks and deployable in serverless environments.

## Learn how to build with Apollo

Check out the [Odyssey](https://odyssey.apollographql.com/) learning platform, the perfect place to start your GraphQL journey with videos and interactive code challenges. Join the [Apollo Community](https://community.apollographql.com/) to interact with and get technical help from the GraphQL community.
9474 node_modules/@apollo/client/apollo-client.cjs generated vendored Normal file
File diff suppressed because it is too large
1 node_modules/@apollo/client/apollo-client.cjs.map generated vendored Normal file
File diff suppressed because one or more lines are too long
1 node_modules/@apollo/client/apollo-client.min.cjs generated vendored Normal file
File diff suppressed because one or more lines are too long
2691 node_modules/@apollo/client/cache/cache.cjs generated vendored Normal file
File diff suppressed because it is too large
1 node_modules/@apollo/client/cache/cache.cjs.map generated vendored Normal file
File diff suppressed because one or more lines are too long
2691 node_modules/@apollo/client/cache/cache.cjs.native.js generated vendored Normal file
File diff suppressed because it is too large
1 node_modules/@apollo/client/cache/cache.d.cts generated vendored Normal file
@@ -0,0 +1 @@
export * from "./index.d.ts";
126 node_modules/@apollo/client/cache/core/cache.d.ts generated vendored Normal file
@@ -0,0 +1,126 @@
import type { DocumentNode, FragmentDefinitionNode, InlineFragmentNode } from "graphql";
import type { StoreObject, Reference, DeepPartial, NoInfer } from "../../utilities/index.js";
import { Observable } from "../../utilities/index.js";
import type { DataProxy } from "./types/DataProxy.js";
import type { Cache } from "./types/Cache.js";
import { getApolloCacheMemoryInternals } from "../../utilities/caching/getMemoryInternals.js";
import type { OperationVariables, TypedDocumentNode } from "../../core/types.js";
import type { MissingTree } from "./types/common.js";
import type { FragmentType, MaybeMasked, Unmasked } from "../../masking/index.js";
export type Transaction<T> = (c: ApolloCache<T>) => void;
/**
 * Watched fragment options.
 */
export interface WatchFragmentOptions<TData, TVars> {
    /**
     * A GraphQL fragment document parsed into an AST with the `gql`
     * template literal.
     *
     * @docGroup 1. Required options
     */
    fragment: DocumentNode | TypedDocumentNode<TData, TVars>;
    /**
     * An object containing a `__typename` and primary key fields
     * (such as `id`) identifying the entity object from which the fragment will
     * be retrieved, or a `{ __ref: "..." }` reference, or a `string` ID
     * (uncommon).
     *
     * @docGroup 1. Required options
     */
    from: StoreObject | Reference | FragmentType<NoInfer<TData>> | string;
    /**
     * Any variables that the GraphQL fragment may depend on.
     *
     * @docGroup 2. Cache options
     */
    variables?: TVars;
    /**
     * The name of the fragment defined in the fragment document.
     *
     * Required if the fragment document includes more than one fragment,
     * optional otherwise.
     *
     * @docGroup 2. Cache options
     */
    fragmentName?: string;
    /**
     * If `true`, `watchFragment` returns optimistic results.
     *
     * The default value is `true`.
     *
     * @docGroup 2. Cache options
     */
    optimistic?: boolean;
}
/**
 * Watched fragment results.
 */
export type WatchFragmentResult<TData> = {
    data: MaybeMasked<TData>;
    complete: true;
    missing?: never;
} | {
    data: DeepPartial<MaybeMasked<TData>>;
    complete: false;
    missing: MissingTree;
};
export declare abstract class ApolloCache<TSerialized> implements DataProxy {
    readonly assumeImmutableResults: boolean;
    abstract read<TData = any, TVariables = any>(query: Cache.ReadOptions<TVariables, TData>): Unmasked<TData> | null;
    abstract write<TData = any, TVariables = any>(write: Cache.WriteOptions<TData, TVariables>): Reference | undefined;
    abstract diff<T>(query: Cache.DiffOptions): Cache.DiffResult<T>;
    abstract watch<TData = any, TVariables = any>(watch: Cache.WatchOptions<TData, TVariables>): () => void;
    abstract reset(options?: Cache.ResetOptions): Promise<void>;
    abstract evict(options: Cache.EvictOptions): boolean;
    /**
     * Replaces existing state in the cache (if any) with the values expressed by
     * `serializedState`.
     *
     * Called when hydrating a cache (server side rendering, or offline storage),
     * and also (potentially) during hot reloads.
     */
    abstract restore(serializedState: TSerialized): ApolloCache<TSerialized>;
    /**
     * Exposes the cache's complete state, in a serializable format for later restoration.
     */
    abstract extract(optimistic?: boolean): TSerialized;
    abstract removeOptimistic(id: string): void;
    fragmentMatches?(fragment: InlineFragmentNode, typename: string): boolean;
    lookupFragment(fragmentName: string): FragmentDefinitionNode | null;
    batch<U>(options: Cache.BatchOptions<this, U>): U;
    abstract performTransaction(transaction: Transaction<TSerialized>, optimisticId?: string | null): void;
    recordOptimisticTransaction(transaction: Transaction<TSerialized>, optimisticId: string): void;
    transformDocument(document: DocumentNode): DocumentNode;
    transformForLink(document: DocumentNode): DocumentNode;
    identify(object: StoreObject | Reference): string | undefined;
    gc(): string[];
    modify<Entity extends Record<string, any> = Record<string, any>>(options: Cache.ModifyOptions<Entity>): boolean;
    readQuery<QueryType, TVariables = any>(options: Cache.ReadQueryOptions<QueryType, TVariables>, optimistic?: boolean): Unmasked<QueryType> | null;
    /**
     * Watches the cache store of the fragment according to the options specified and returns an `Observable`. We can subscribe to this `Observable` and receive updated results through an observer when the cache store changes.
     *
     * You must pass in a GraphQL document with a single fragment or a document with multiple fragments that represent what you are reading. If you pass in a document with multiple fragments then you must also specify a `fragmentName`.
     *
     * @param options - An object of type `WatchFragmentOptions` that allows the cache to identify the fragment and optionally specify whether to react to optimistic updates.
     *
     * @since
     *
     * 3.10.0
     */
    watchFragment<TData = any, TVars = OperationVariables>(options: WatchFragmentOptions<TData, TVars>): Observable<WatchFragmentResult<TData>>;
    private getFragmentDoc;
    readFragment<FragmentType, TVariables = any>(options: Cache.ReadFragmentOptions<FragmentType, TVariables>, optimistic?: boolean): Unmasked<FragmentType> | null;
    writeQuery<TData = any, TVariables = any>({ id, data, ...options }: Cache.WriteQueryOptions<TData, TVariables>): Reference | undefined;
    writeFragment<TData = any, TVariables = any>({ id, data, fragment, fragmentName, ...options }: Cache.WriteFragmentOptions<TData, TVariables>): Reference | undefined;
    updateQuery<TData = any, TVariables = any>(options: Cache.UpdateQueryOptions<TData, TVariables>, update: (data: Unmasked<TData> | null) => Unmasked<TData> | null | void): Unmasked<TData> | null;
    updateFragment<TData = any, TVariables = any>(options: Cache.UpdateFragmentOptions<TData, TVariables>, update: (data: Unmasked<TData> | null) => Unmasked<TData> | null | void): Unmasked<TData> | null;
    /**
     * @experimental
     * @internal
     * This is not a stable API - it is used in development builds to expose
     * information to the DevTools.
     * Use at your own risk!
     */
    getMemoryInternals?: typeof getApolloCacheMemoryInternals;
}
//# sourceMappingURL=cache.d.ts.map
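A minimal sketch of `watchFragment` as described in its doc comment above, assuming an `InMemoryCache` and an illustrative `ItemFragment` (neither the fragment nor the entity is part of the library):

```typescript
import { InMemoryCache, gql } from '@apollo/client';

const cache = new InMemoryCache();

// Illustrative fragment and entity, for demonstration only.
const ItemFragment = gql`
  fragment ItemFragment on Item {
    id
    text
  }
`;

cache.writeFragment({
  id: cache.identify({ __typename: 'Item', id: 5 }),
  fragment: ItemFragment,
  data: { __typename: 'Item', id: 5, text: 'initial' },
});

const observable = cache.watchFragment({
  fragment: ItemFragment,
  from: { __typename: 'Item', id: 5 }, // also accepts a cache ID string
});

const subscription = observable.subscribe((result) => {
  // `complete` is true when every fragment field is present in the cache.
  console.log(result.complete, result.data);
});

// Later: subscription.unsubscribe();
```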
167 node_modules/@apollo/client/cache/core/cache.js generated vendored Normal file
@@ -0,0 +1,167 @@
import { __assign, __rest } from "tslib";
import { wrap } from "optimism";
import { Observable, cacheSizes, getFragmentDefinition, getFragmentQueryDocument, mergeDeepArray, } from "../../utilities/index.js";
import { WeakCache } from "@wry/caches";
import { getApolloCacheMemoryInternals } from "../../utilities/caching/getMemoryInternals.js";
import { equalByQuery } from "../../core/equalByQuery.js";
import { invariant } from "../../utilities/globals/index.js";
import { maskFragment } from "../../masking/index.js";
var ApolloCache = /** @class */ (function () {
    function ApolloCache() {
        this.assumeImmutableResults = false;
        // Make sure we compute the same (===) fragment query document every
        // time we receive the same fragment in readFragment.
        this.getFragmentDoc = wrap(getFragmentQueryDocument, {
            max: cacheSizes["cache.fragmentQueryDocuments"] ||
                1000 /* defaultCacheSizes["cache.fragmentQueryDocuments"] */,
            cache: WeakCache,
        });
    }
    // Function used to lookup a fragment when a fragment definition is not part
    // of the GraphQL document. This is useful for caches, such as InMemoryCache,
    // that register fragments ahead of time so they can be referenced by name.
    ApolloCache.prototype.lookupFragment = function (fragmentName) {
        return null;
    };
    // Transactional API
    // The batch method is intended to replace/subsume both performTransaction
    // and recordOptimisticTransaction, but performTransaction came first, so we
    // provide a default batch implementation that's just another way of calling
    // performTransaction. Subclasses of ApolloCache (such as InMemoryCache) can
    // override the batch method to do more interesting things with its options.
    ApolloCache.prototype.batch = function (options) {
        var _this = this;
        var optimisticId = typeof options.optimistic === "string" ? options.optimistic
            : options.optimistic === false ? null
                : void 0;
        var updateResult;
        this.performTransaction(function () { return (updateResult = options.update(_this)); }, optimisticId);
        return updateResult;
    };
    ApolloCache.prototype.recordOptimisticTransaction = function (transaction, optimisticId) {
        this.performTransaction(transaction, optimisticId);
    };
    // Optional API
    // Called once per input document, allowing the cache to make static changes
    // to the query, such as adding __typename fields.
    ApolloCache.prototype.transformDocument = function (document) {
        return document;
    };
    // Called before each ApolloLink request, allowing the cache to make dynamic
    // changes to the query, such as filling in missing fragment definitions.
    ApolloCache.prototype.transformForLink = function (document) {
        return document;
    };
    ApolloCache.prototype.identify = function (object) {
        return;
    };
    ApolloCache.prototype.gc = function () {
        return [];
    };
    ApolloCache.prototype.modify = function (options) {
        return false;
    };
    // DataProxy API
    ApolloCache.prototype.readQuery = function (options, optimistic) {
        if (optimistic === void 0) { optimistic = !!options.optimistic; }
        return this.read(__assign(__assign({}, options), { rootId: options.id || "ROOT_QUERY", optimistic: optimistic }));
    };
    /** {@inheritDoc @apollo/client!ApolloClient#watchFragment:member(1)} */
    ApolloCache.prototype.watchFragment = function (options) {
        var _this = this;
        var fragment = options.fragment, fragmentName = options.fragmentName, from = options.from, _a = options.optimistic, optimistic = _a === void 0 ? true : _a, otherOptions = __rest(options, ["fragment", "fragmentName", "from", "optimistic"]);
        var query = this.getFragmentDoc(fragment, fragmentName);
        // While our TypeScript types do not allow for `undefined` as a valid
        // `from`, its possible `useFragment` gives us an `undefined` since it
        // calls` cache.identify` and provides that value to `from`. We are
        // adding this fix here however to ensure those using plain JavaScript
        // and using `cache.identify` themselves will avoid seeing the obscure
        // warning.
        var id = typeof from === "undefined" || typeof from === "string" ?
            from
            : this.identify(from);
        var dataMasking = !!options[Symbol.for("apollo.dataMasking")];
        if (globalThis.__DEV__ !== false) {
            var actualFragmentName = fragmentName || getFragmentDefinition(fragment).name.value;
            if (!id) {
                globalThis.__DEV__ !== false && invariant.warn(1, actualFragmentName);
            }
        }
        var diffOptions = __assign(__assign({}, otherOptions), { returnPartialData: true, id: id, query: query, optimistic: optimistic });
        var latestDiff;
        return new Observable(function (observer) {
            return _this.watch(__assign(__assign({}, diffOptions), { immediate: true, callback: function (diff) {
                    var data = dataMasking ?
                        maskFragment(diff.result, fragment, _this, fragmentName)
                        : diff.result;
                    if (
                    // Always ensure we deliver the first result
                    latestDiff &&
                        equalByQuery(query, { data: latestDiff.result }, { data: data },
                        // TODO: Fix the type on WatchFragmentOptions so that TVars
                        // extends OperationVariables
                        options.variables)) {
                        return;
                    }
                    var result = {
                        data: data,
                        complete: !!diff.complete,
                    };
                    if (diff.missing) {
                        result.missing = mergeDeepArray(diff.missing.map(function (error) { return error.missing; }));
                    }
                    latestDiff = __assign(__assign({}, diff), { result: data });
                    observer.next(result);
                } }));
        });
    };
    ApolloCache.prototype.readFragment = function (options, optimistic) {
        if (optimistic === void 0) { optimistic = !!options.optimistic; }
        return this.read(__assign(__assign({}, options), { query: this.getFragmentDoc(options.fragment, options.fragmentName), rootId: options.id, optimistic: optimistic }));
    };
    ApolloCache.prototype.writeQuery = function (_a) {
        var id = _a.id, data = _a.data, options = __rest(_a, ["id", "data"]);
        return this.write(Object.assign(options, {
            dataId: id || "ROOT_QUERY",
            result: data,
        }));
    };
    ApolloCache.prototype.writeFragment = function (_a) {
        var id = _a.id, data = _a.data, fragment = _a.fragment, fragmentName = _a.fragmentName, options = __rest(_a, ["id", "data", "fragment", "fragmentName"]);
        return this.write(Object.assign(options, {
            query: this.getFragmentDoc(fragment, fragmentName),
            dataId: id,
            result: data,
        }));
    };
    ApolloCache.prototype.updateQuery = function (options, update) {
        return this.batch({
            update: function (cache) {
                var value = cache.readQuery(options);
                var data = update(value);
                if (data === void 0 || data === null)
                    return value;
                cache.writeQuery(__assign(__assign({}, options), { data: data }));
                return data;
            },
        });
    };
    ApolloCache.prototype.updateFragment = function (options, update) {
        return this.batch({
            update: function (cache) {
                var value = cache.readFragment(options);
                var data = update(value);
                if (data === void 0 || data === null)
                    return value;
                cache.writeFragment(__assign(__assign({}, options), { data: data }));
                return data;
            },
        });
    };
    return ApolloCache;
}());
export { ApolloCache };
if (globalThis.__DEV__ !== false) {
    ApolloCache.prototype.getMemoryInternals = getApolloCacheMemoryInternals;
}
//# sourceMappingURL=cache.js.map
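As the implementation above shows, `updateQuery` reads the current value inside a single `batch`, applies your `update` callback, and writes the result back only when the callback returns something non-null. A minimal sketch (the `Counter` query is illustrative, not part of the library):

```typescript
import { InMemoryCache, gql } from '@apollo/client';

const cache = new InMemoryCache();

// Illustrative query document, for demonstration only.
const COUNTER = gql`
  query Counter {
    count
  }
`;

cache.writeQuery({ query: COUNTER, data: { count: 0 } });

// Read-modify-write in one batch; returning null/undefined skips the write.
cache.updateQuery({ query: COUNTER }, (data) =>
  data ? { count: data.count + 1 } : null
);
```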
1 node_modules/@apollo/client/cache/core/cache.js.map generated vendored Normal file
File diff suppressed because one or more lines are too long
63 node_modules/@apollo/client/cache/core/types/Cache.d.ts generated vendored Normal file
@@ -0,0 +1,63 @@
import { DataProxy } from "./DataProxy.js";
import type { AllFieldsModifier, Modifiers } from "./common.js";
import type { ApolloCache } from "../cache.js";
import type { Unmasked } from "../../../masking/index.js";
export declare namespace Cache {
    type WatchCallback<TData = any> = (diff: Cache.DiffResult<TData>, lastDiff?: Cache.DiffResult<TData>) => void;
    interface ReadOptions<TVariables = any, TData = any> extends DataProxy.Query<TVariables, TData> {
        rootId?: string;
        previousResult?: any;
        optimistic: boolean;
        returnPartialData?: boolean;
        /**
         * @deprecated
         * Using `canonizeResults` can result in memory leaks so we generally do not
         * recommend using this option anymore.
         * A future version of Apollo Client will contain a similar feature without
         * the risk of memory leaks.
         */
        canonizeResults?: boolean;
    }
    interface WriteOptions<TResult = any, TVariables = any> extends Omit<DataProxy.Query<TVariables, TResult>, "id">, Omit<DataProxy.WriteOptions<TResult>, "data"> {
        dataId?: string;
        result: Unmasked<TResult>;
    }
    interface DiffOptions<TData = any, TVariables = any> extends Omit<ReadOptions<TVariables, TData>, "rootId"> {
    }
    interface WatchOptions<TData = any, TVariables = any> extends DiffOptions<TData, TVariables> {
        watcher?: object;
        immediate?: boolean;
        callback: WatchCallback<TData>;
        lastDiff?: DiffResult<TData>;
    }
    interface EvictOptions {
        id?: string;
        fieldName?: string;
        args?: Record<string, any>;
        broadcast?: boolean;
    }
    interface ResetOptions {
        discardWatches?: boolean;
    }
    interface ModifyOptions<Entity extends Record<string, any> = Record<string, any>> {
        id?: string;
        fields: Modifiers<Entity> | AllFieldsModifier<Entity>;
        optimistic?: boolean;
        broadcast?: boolean;
    }
    interface BatchOptions<TCache extends ApolloCache<any>, TUpdateResult = void> {
        update(cache: TCache): TUpdateResult;
        optimistic?: string | boolean;
        removeOptimistic?: string;
        onWatchUpdated?: (this: TCache, watch: Cache.WatchOptions, diff: Cache.DiffResult<any>, lastDiff?: Cache.DiffResult<any> | undefined) => any;
    }
    export import DiffResult = DataProxy.DiffResult;
    export import ReadQueryOptions = DataProxy.ReadQueryOptions;
    export import ReadFragmentOptions = DataProxy.ReadFragmentOptions;
    export import WriteQueryOptions = DataProxy.WriteQueryOptions;
    export import WriteFragmentOptions = DataProxy.WriteFragmentOptions;
    export import UpdateQueryOptions = DataProxy.UpdateQueryOptions;
    export import UpdateFragmentOptions = DataProxy.UpdateFragmentOptions;
    export import Fragment = DataProxy.Fragment;
}
//# sourceMappingURL=Cache.d.ts.map
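A hedged sketch of `Cache.BatchOptions` in use, based on the option semantics recorded in the source comments below: a string `optimistic` creates a named optimistic layer, and returning `false` from `onWatchUpdated` suppresses the broadcast for that watcher (the `TODO` document and data are illustrative, not part of the library):

```typescript
import { InMemoryCache, gql } from '@apollo/client';

const cache = new InMemoryCache();

// Illustrative query document, for demonstration only.
const TODO = gql`
  query Todo {
    todo {
      id
      done
    }
  }
`;

cache.batch({
  // Run the update against a new optimistic layer named "toggle-todo".
  optimistic: 'toggle-todo',
  update(c) {
    c.writeQuery({
      query: TODO,
      data: { todo: { __typename: 'Todo', id: 1, done: true } },
    });
  },
  // Called for each watched query invalidated by this batch; returning
  // false prevents broadcasting that result.
  onWatchUpdated(watch, diff) {
    return diff.complete;
  },
});
```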
4 node_modules/@apollo/client/cache/core/types/Cache.js generated vendored Normal file
@@ -0,0 +1,4 @@
export var Cache;
(function (Cache) {
})(Cache || (Cache = {}));
//# sourceMappingURL=Cache.js.map
1 node_modules/@apollo/client/cache/core/types/Cache.js.map generated vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"Cache.js","sourceRoot":"","sources":["../../../../src/cache/core/types/Cache.ts"],"names":[],"mappings":"AAKA,MAAM,KAAW,KAAK,CA8GrB;AA9GD,WAAiB,KAAK;AA8GtB,CAAC,EA9GgB,KAAK,KAAL,KAAK,QA8GrB","sourcesContent":["import { DataProxy } from \"./DataProxy.js\";\nimport type { AllFieldsModifier, Modifiers } from \"./common.js\";\nimport type { ApolloCache } from \"../cache.js\";\nimport type { Unmasked } from \"../../../masking/index.js\";\n\nexport namespace Cache {\n export type WatchCallback<TData = any> = (\n diff: Cache.DiffResult<TData>,\n lastDiff?: Cache.DiffResult<TData>\n ) => void;\n\n export interface ReadOptions<TVariables = any, TData = any>\n extends DataProxy.Query<TVariables, TData> {\n rootId?: string;\n previousResult?: any;\n optimistic: boolean;\n returnPartialData?: boolean;\n /**\n * @deprecated\n * Using \`canonizeResults\` can result in memory leaks so we generally do not\n * recommend using this option anymore.\n * A future version of Apollo Client will contain a similar feature without\n * the risk of memory leaks.\n */\n canonizeResults?: boolean;\n }\n\n export interface WriteOptions<TResult = any, TVariables = any>\n extends Omit<DataProxy.Query<TVariables, TResult>, \"id\">,\n Omit<DataProxy.WriteOptions<TResult>, \"data\"> {\n dataId?: string;\n result: Unmasked<TResult>;\n }\n\n export interface DiffOptions<TData = any, TVariables = any>\n extends Omit<ReadOptions<TVariables, TData>, \"rootId\"> {\n // The DiffOptions interface is currently just an alias for\n // ReadOptions, though DiffOptions used to be responsible for\n // declaring the returnPartialData option.\n }\n\n export interface WatchOptions<TData = any, TVariables = any>\n extends DiffOptions<TData, TVariables> {\n watcher?: object;\n immediate?: boolean;\n callback: WatchCallback<TData>;\n lastDiff?: DiffResult<TData>;\n }\n\n export interface EvictOptions {\n id?: string;\n fieldName?: string;\n args?: Record<string, any>;\n broadcast?: boolean;\n }\n\n // Although you can call cache.reset() without options, its behavior can be\n // configured by passing a Cache.ResetOptions object.\n export interface ResetOptions {\n discardWatches?: boolean;\n }\n\n export interface ModifyOptions<\n Entity extends Record<string, any> = Record<string, any>,\n > {\n id?: string;\n fields: Modifiers<Entity> | AllFieldsModifier<Entity>;\n optimistic?: boolean;\n broadcast?: boolean;\n }\n\n export interface BatchOptions<\n TCache extends ApolloCache<any>,\n TUpdateResult = void,\n > {\n // Same as the first parameter of performTransaction, except the cache\n // argument will have the subclass type rather than ApolloCache.\n update(cache: TCache): TUpdateResult;\n\n // Passing a string for this option creates a new optimistic layer, with the\n // given string as its layer.id, just like passing a string for the\n // optimisticId parameter of performTransaction. Passing true is the same as\n // passing undefined to performTransaction (running the batch operation\n // against the current top layer of the cache), and passing false is the\n // same as passing null (running the operation against root/non-optimistic\n // cache data).\n optimistic?: string | boolean;\n\n // If you specify the ID of an optimistic layer using this option, that\n // layer will be removed as part of the batch transaction, triggering at\n // most one broadcast for both the transaction and the removal of the layer.\n // Note: this option is needed because calling cache.removeOptimistic during\n // the transaction function may not be not safe, since any modifications to\n // cache layers may be discarded after the transaction finishes.\n removeOptimistic?: string;\n\n // If you want to find out which watched queries were invalidated during\n // this batch operation, pass this optional callback function. Returning\n // false from the callback will prevent broadcasting this result.\n onWatchUpdated?: (\n this: TCache,\n watch: Cache.WatchOptions,\n diff: Cache.DiffResult<any>,\n lastDiff?: Cache.DiffResult<any> | undefined\n ) => any;\n }\n\n export import DiffResult = DataProxy.DiffResult;\n export import ReadQueryOptions = DataProxy.ReadQueryOptions;\n export import ReadFragmentOptions = DataProxy.ReadFragmentOptions;\n export import WriteQueryOptions = DataProxy.WriteQueryOptions;\n export import WriteFragmentOptions = DataProxy.WriteFragmentOptions;\n export import UpdateQueryOptions = DataProxy.UpdateQueryOptions;\n export import UpdateFragmentOptions = DataProxy.UpdateFragmentOptions;\n export import Fragment = DataProxy.Fragment;\n}\n"]}
150
node_modules/@apollo/client/cache/core/types/DataProxy.d.ts
generated
vendored
Normal file
150
node_modules/@apollo/client/cache/core/types/DataProxy.d.ts
generated
vendored
Normal file
@@ -0,0 +1,150 @@
|
||||
import type { DocumentNode } from "graphql";
import type { TypedDocumentNode } from "@graphql-typed-document-node/core";
import type { MissingFieldError } from "./common.js";
import type { Reference } from "../../../utilities/index.js";
import type { Unmasked } from "../../../masking/index.js";
export declare namespace DataProxy {
    interface Query<TVariables, TData> {
        /**
         * The GraphQL query shape to be used, constructed using the `gql` template
         * string tag from `graphql-tag`. The query will be used to determine the
         * shape of the data to be read.
         */
        query: DocumentNode | TypedDocumentNode<TData, TVariables>;
        /**
         * Any variables that the GraphQL query may depend on.
         */
        variables?: TVariables;
        /**
         * The root id to be used. Defaults to "ROOT_QUERY", which is the ID of the
         * root query object. This property makes writeQuery capable of writing data
         * to any object in the cache.
         */
        id?: string;
    }
    interface Fragment<TVariables, TData> {
        /**
         * The root id to be used. This id should take the same form as the
         * value returned by your `dataIdFromObject` function. If a value with your
         * id does not exist in the store, `null` will be returned.
         */
        id?: string;
        /**
         * A GraphQL document created using the `gql` template string tag from
         * `graphql-tag` with one or more fragments which will be used to determine
         * the shape of data to read. If you provide more than one fragment in this
         * document then you must also specify `fragmentName` to select a single fragment.
         */
        fragment: DocumentNode | TypedDocumentNode<TData, TVariables>;
        /**
         * The name of the fragment in your GraphQL document to be used. If you do
         * not provide a `fragmentName` and there is only one fragment in your
         * `fragment` document then that fragment will be used.
         */
        fragmentName?: string;
        /**
         * Any variables that your GraphQL fragments depend on.
         */
        variables?: TVariables;
    }
    interface ReadQueryOptions<TData, TVariables> extends Query<TVariables, TData> {
        /**
         * Whether to return incomplete data rather than null.
         * Defaults to false.
         */
        returnPartialData?: boolean;
        /**
         * Whether to read from optimistic or non-optimistic cache data. If
         * this named option is provided, the optimistic parameter of the
         * readQuery method can be omitted. Defaults to false.
         */
        optimistic?: boolean;
        /**
         * Whether to canonize cache results before returning them. Canonization takes some extra time, but it speeds up future deep equality comparisons. Defaults to false.
         *
         * @deprecated
         *
         * Using `canonizeResults` can result in memory leaks so we generally do not recommend using this option anymore. A future version of Apollo Client will contain a similar feature without the risk of memory leaks.
         */
        canonizeResults?: boolean;
    }
    interface ReadFragmentOptions<TData, TVariables> extends Fragment<TVariables, TData> {
        /**
         * Whether to return incomplete data rather than null.
         * Defaults to false.
         */
        returnPartialData?: boolean;
        /**
         * Whether to read from optimistic or non-optimistic cache data. If
         * this named option is provided, the optimistic parameter of the
         * readFragment method can be omitted. Defaults to false.
         */
        optimistic?: boolean;
        /**
         * Whether to canonize cache results before returning them. Canonization takes some extra time, but it speeds up future deep equality comparisons. Defaults to false.
         *
         * @deprecated
         *
         * Using `canonizeResults` can result in memory leaks so we generally do not recommend using this option anymore. A future version of Apollo Client will contain a similar feature without the risk of memory leaks.
         */
        canonizeResults?: boolean;
    }
    interface WriteOptions<TData> {
        /**
         * The data you will be writing to the store.
         */
        data: Unmasked<TData>;
        /**
         * Whether to notify query watchers (default: true).
         */
        broadcast?: boolean;
        /**
         * When true, ignore existing field data rather than merging it with
         * incoming data (default: false).
         */
        overwrite?: boolean;
    }
    interface WriteQueryOptions<TData, TVariables> extends Query<TVariables, TData>, WriteOptions<TData> {
    }
    interface WriteFragmentOptions<TData, TVariables> extends Fragment<TVariables, TData>, WriteOptions<TData> {
    }
    interface UpdateQueryOptions<TData, TVariables> extends Omit<ReadQueryOptions<TData, TVariables> & WriteQueryOptions<TData, TVariables>, "data"> {
    }
    interface UpdateFragmentOptions<TData, TVariables> extends Omit<ReadFragmentOptions<TData, TVariables> & WriteFragmentOptions<TData, TVariables>, "data"> {
    }
    type DiffResult<T> = {
        result?: T;
        complete?: boolean;
        missing?: MissingFieldError[];
        fromOptimisticTransaction?: boolean;
    };
}
/**
 * A proxy to the normalized data living in our store. This interface allows a
 * user to read and write denormalized data which feels natural to the user
 * whilst in the background this data is being converted into the normalized
 * store format.
 */
export interface DataProxy {
    /**
     * Reads a GraphQL query from the root query id.
     */
    readQuery<QueryType, TVariables = any>(options: DataProxy.ReadQueryOptions<QueryType, TVariables>, optimistic?: boolean): Unmasked<QueryType> | null;
    /**
     * Reads a GraphQL fragment from any arbitrary id. If there is more than
     * one fragment in the provided document then a `fragmentName` must be
     * provided to select the correct fragment.
     */
    readFragment<FragmentType, TVariables = any>(options: DataProxy.ReadFragmentOptions<FragmentType, TVariables>, optimistic?: boolean): Unmasked<FragmentType> | null;
    /**
     * Writes a GraphQL query to the root query id.
     */
    writeQuery<TData = any, TVariables = any>(options: DataProxy.WriteQueryOptions<TData, TVariables>): Reference | undefined;
    /**
     * Writes a GraphQL fragment to any arbitrary id. If there is more than
     * one fragment in the provided document then a `fragmentName` must be
     * provided to select the correct fragment.
     */
    writeFragment<TData = any, TVariables = any>(options: DataProxy.WriteFragmentOptions<TData, TVariables>): Reference | undefined;
}
//# sourceMappingURL=DataProxy.d.ts.map
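The DataProxy interface above is the cache's public read/write surface, implemented by InMemoryCache. A minimal usage sketch (the GetGreeting document and its data are hypothetical, invented for illustration, not taken from the vendored code):

import { InMemoryCache, gql } from "@apollo/client";

// Hypothetical query document, for illustration only.
const GET_GREETING = gql`
  query GetGreeting {
    greeting
  }
`;

const cache = new InMemoryCache();

// writeQuery stores data under ROOT_QUERY, the default root id.
cache.writeQuery({ query: GET_GREETING, data: { greeting: "hello" } });

// readQuery returns the denormalized result, or null if the data is incomplete.
const result = cache.readQuery<{ greeting: string }>({ query: GET_GREETING });
console.log(result?.greeting); // "hello"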
2
node_modules/@apollo/client/cache/core/types/DataProxy.js
generated
vendored
Normal file
@@ -0,0 +1,2 @@
export {};
//# sourceMappingURL=DataProxy.js.map
1
node_modules/@apollo/client/cache/core/types/DataProxy.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
64
node_modules/@apollo/client/cache/core/types/common.d.ts
generated
vendored
Normal file
@@ -0,0 +1,64 @@
import type { DocumentNode, FieldNode } from "graphql";
import type { Reference, StoreObject, StoreValue, isReference, AsStoreObject, DeepPartial } from "../../../utilities/index.js";
import type { StorageType } from "../../inmemory/policies.js";
export type SafeReadonly<T> = T extends object ? Readonly<T> : T;
export type MissingTree = string | {
    readonly [key: string]: MissingTree;
};
export declare class MissingFieldError extends Error {
    readonly message: string;
    readonly path: MissingTree | Array<string | number>;
    readonly query: DocumentNode;
    readonly variables?: Record<string, any> | undefined;
    constructor(message: string, path: MissingTree | Array<string | number>, query: DocumentNode, variables?: Record<string, any> | undefined);
    readonly missing: MissingTree;
}
export interface FieldSpecifier {
    typename?: string;
    fieldName: string;
    field?: FieldNode;
    args?: Record<string, any>;
    variables?: Record<string, any>;
}
export interface ReadFieldOptions extends FieldSpecifier {
    from?: StoreObject | Reference;
}
export interface ReadFieldFunction {
    <V = StoreValue>(options: ReadFieldOptions): SafeReadonly<V> | undefined;
    <V = StoreValue>(fieldName: string, from?: StoreObject | Reference): SafeReadonly<V> | undefined;
}
export type ToReferenceFunction = (objOrIdOrRef: StoreObject | string | Reference, mergeIntoStore?: boolean) => Reference | undefined;
export type CanReadFunction = (value: StoreValue) => boolean;
declare const _deleteModifier: unique symbol;
export interface DeleteModifier {
    [_deleteModifier]: true;
}
declare const _invalidateModifier: unique symbol;
export interface InvalidateModifier {
    [_invalidateModifier]: true;
}
declare const _ignoreModifier: unique symbol;
export interface IgnoreModifier {
    [_ignoreModifier]: true;
}
export type ModifierDetails = {
    DELETE: DeleteModifier;
    INVALIDATE: InvalidateModifier;
    fieldName: string;
    storeFieldName: string;
    readField: ReadFieldFunction;
    canRead: CanReadFunction;
    isReference: typeof isReference;
    toReference: ToReferenceFunction;
    storage: StorageType;
};
export type Modifier<T> = (value: T, details: ModifierDetails) => DeepPartial<T> | DeleteModifier | InvalidateModifier | undefined;
type StoreObjectValueMaybeReference<StoreVal> = StoreVal extends Array<Record<string, any>> ? StoreVal extends Array<infer Item> ? [
    Item
] extends [Record<string, any>] ? ReadonlyArray<AsStoreObject<Item> | Reference> : never : never : StoreVal extends Record<string, any> ? AsStoreObject<StoreVal> | Reference : StoreVal;
export type AllFieldsModifier<Entity extends Record<string, any>> = Modifier<Entity[keyof Entity] extends infer Value ? StoreObjectValueMaybeReference<Exclude<Value, undefined>> : never>;
export type Modifiers<T extends Record<string, any> = Record<string, unknown>> = Partial<{
    [FieldName in keyof T]: Modifier<StoreObjectValueMaybeReference<Exclude<T[FieldName], undefined>>>;
}>;
export {};
//# sourceMappingURL=common.d.ts.map
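The Modifier and ModifierDetails types above are the contract for cache.modify: each field function receives the current store value plus details such as DELETE, INVALIDATE, and readField. A sketch of a modify call (the Author:1 id and the name/books fields are hypothetical):

import { InMemoryCache, Reference } from "@apollo/client";

const cache = new InMemoryCache();
cache.modify({
  id: "Author:1",
  fields: {
    // Return a new value to overwrite the field...
    name(existing: string) {
      return existing.trim();
    },
    // ...or return details.DELETE to remove the field entirely.
    books(existing: readonly Reference[], { DELETE }) {
      return existing.length > 0 ? existing : DELETE;
    },
  },
});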
29
node_modules/@apollo/client/cache/core/types/common.js
generated
vendored
Normal file
@@ -0,0 +1,29 @@
import { __extends } from "tslib";
var MissingFieldError = /** @class */ (function (_super) {
    __extends(MissingFieldError, _super);
    function MissingFieldError(message, path, query, variables) {
        var _a;
        // 'Error' breaks prototype chain here
        var _this = _super.call(this, message) || this;
        _this.message = message;
        _this.path = path;
        _this.query = query;
        _this.variables = variables;
        if (Array.isArray(_this.path)) {
            _this.missing = _this.message;
            for (var i = _this.path.length - 1; i >= 0; --i) {
                _this.missing = (_a = {}, _a[_this.path[i]] = _this.missing, _a);
            }
        }
        else {
            _this.missing = _this.path;
        }
        // We're not using `Object.setPrototypeOf` here as it isn't fully supported
        // on Android (see issue #3236).
        _this.__proto__ = MissingFieldError.prototype;
        return _this;
    }
    return MissingFieldError;
}(Error));
export { MissingFieldError };
//# sourceMappingURL=common.js.map
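The constructor above folds an array-valued path into a nested MissingTree, working from the innermost segment outward. A standalone sketch of just that folding step (buildMissingTree is a hypothetical helper, not part of the vendored file):

function buildMissingTree(
  message: string,
  path: Array<string | number>
): string | Record<string, unknown> {
  // Start from the message and wrap it in one object per path segment,
  // right to left, exactly as the loop in MissingFieldError does.
  let missing: string | Record<string, unknown> = message;
  for (let i = path.length - 1; i >= 0; --i) {
    missing = { [path[i]]: missing };
  }
  return missing;
}

buildMissingTree("field missing", ["user", 0, "name"]);
// => { user: { "0": { name: "field missing" } } }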
1
node_modules/@apollo/client/cache/core/types/common.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"common.js","sourceRoot":"","sources":["../../../../src/cache/core/types/common.ts"],"names":[],"mappings":";AA4BA;IAAuC,qCAAK;IAC1C,2BACkB,OAAe,EACf,IAA0C,EAC1C,KAAmB,EACnB,SAA+B;;QAE/C,sCAAsC;QACtC,YAAA,MAAK,YAAC,OAAO,CAAC,SAAC;QANC,aAAO,GAAP,OAAO,CAAQ;QACf,UAAI,GAAJ,IAAI,CAAsC;QAC1C,WAAK,GAAL,KAAK,CAAc;QACnB,eAAS,GAAT,SAAS,CAAsB;QAK/C,IAAI,KAAK,CAAC,OAAO,CAAC,KAAI,CAAC,IAAI,CAAC,EAAE,CAAC;YAC7B,KAAI,CAAC,OAAO,GAAG,KAAI,CAAC,OAAO,CAAC;YAC5B,KAAK,IAAI,CAAC,GAAG,KAAI,CAAC,IAAI,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC;gBAC/C,KAAI,CAAC,OAAO,aAAK,GAAC,KAAI,CAAC,IAAI,CAAC,CAAC,CAAC,IAAG,KAAI,CAAC,OAAO,KAAE,CAAC;YAClD,CAAC;QACH,CAAC;aAAM,CAAC;YACN,KAAI,CAAC,OAAO,GAAG,KAAI,CAAC,IAAI,CAAC;QAC3B,CAAC;QAED,2EAA2E;QAC3E,gCAAgC;QAC/B,KAAY,CAAC,SAAS,GAAG,iBAAiB,CAAC,SAAS,CAAC;;IACxD,CAAC;IAGH,wBAAC;AAAD,CAAC,AAzBD,CAAuC,KAAK,GAyB3C","sourcesContent":["import type { DocumentNode, FieldNode } from \"graphql\";\n\nimport type {\n Reference,\n StoreObject,\n StoreValue,\n isReference,\n AsStoreObject,\n DeepPartial,\n} from \"../../../utilities/index.js\";\n\nimport type { StorageType } from \"../../inmemory/policies.js\";\n\n// The Readonly<T> type only really works for object types, since it marks\n// all of the object's properties as readonly, but there are many cases when\n// a generic type parameter like TExisting might be a string or some other\n// primitive type, in which case we need to avoid wrapping it with Readonly.\n// SafeReadonly<string> collapses to just string, which makes string\n// assignable to SafeReadonly<any>, whereas string is not assignable to\n// Readonly<any>, somewhat surprisingly.\nexport type SafeReadonly<T> = T extends object ? Readonly<T> : T;\n\nexport type MissingTree =\n | string\n | {\n readonly [key: string]: MissingTree;\n };\n\nexport class MissingFieldError extends Error {\n constructor(\n public readonly message: string,\n public readonly path: MissingTree | Array<string | number>,\n public readonly query: DocumentNode,\n public readonly variables?: Record<string, any>\n ) {\n // 'Error' breaks prototype chain here\n super(message);\n\n if (Array.isArray(this.path)) {\n this.missing = this.message;\n for (let i = this.path.length - 1; i >= 0; --i) {\n this.missing = { [this.path[i]]: this.missing };\n }\n } else {\n this.missing = this.path;\n }\n\n // We're not using `Object.setPrototypeOf` here as it isn't fully supported\n // on Android (see issue #3236).\n (this as any).__proto__ = MissingFieldError.prototype;\n }\n\n public readonly missing: MissingTree;\n}\n\nexport interface FieldSpecifier {\n typename?: string;\n fieldName: string;\n field?: FieldNode;\n args?: Record<string, any>;\n variables?: Record<string, any>;\n}\n\nexport interface ReadFieldOptions extends FieldSpecifier {\n from?: StoreObject | Reference;\n}\n\nexport interface ReadFieldFunction {\n <V = StoreValue>(options: ReadFieldOptions): SafeReadonly<V> | undefined;\n <V = StoreValue>(\n fieldName: string,\n from?: StoreObject | Reference\n ): SafeReadonly<V> | undefined;\n}\n\nexport type ToReferenceFunction = (\n objOrIdOrRef: StoreObject | string | Reference,\n mergeIntoStore?: boolean\n) => Reference | undefined;\n\nexport type CanReadFunction = (value: StoreValue) => boolean;\n\ndeclare const _deleteModifier: unique symbol;\nexport interface DeleteModifier {\n [_deleteModifier]: true;\n}\ndeclare const _invalidateModifier: unique symbol;\nexport interface InvalidateModifier {\n [_invalidateModifier]: true;\n}\ndeclare const _ignoreModifier: 
unique symbol;\nexport interface IgnoreModifier {\n [_ignoreModifier]: true;\n}\n\nexport type ModifierDetails = {\n DELETE: DeleteModifier;\n INVALIDATE: InvalidateModifier;\n fieldName: string;\n storeFieldName: string;\n readField: ReadFieldFunction;\n canRead: CanReadFunction;\n isReference: typeof isReference;\n toReference: ToReferenceFunction;\n storage: StorageType;\n};\n\nexport type Modifier<T> = (\n value: T,\n details: ModifierDetails\n) => DeepPartial<T> | DeleteModifier | InvalidateModifier | undefined;\n\ntype StoreObjectValueMaybeReference<StoreVal> =\n StoreVal extends Array<Record<string, any>> ?\n StoreVal extends Array<infer Item> ?\n [Item] extends [Record<string, any>] ?\n ReadonlyArray<AsStoreObject<Item> | Reference>\n : never\n : never\n : StoreVal extends Record<string, any> ? AsStoreObject<StoreVal> | Reference\n : StoreVal;\n\nexport type AllFieldsModifier<Entity extends Record<string, any>> = Modifier<\n Entity[keyof Entity] extends infer Value ?\n StoreObjectValueMaybeReference<Exclude<Value, undefined>>\n : never\n>;\n\nexport type Modifiers<T extends Record<string, any> = Record<string, unknown>> =\n Partial<{\n [FieldName in keyof T]: Modifier<\n StoreObjectValueMaybeReference<Exclude<T[FieldName], undefined>>\n >;\n }>;\n"]}
|
||||
20
node_modules/@apollo/client/cache/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,20 @@
import "../utilities/globals/index.js";
|
||||
export type { Transaction, WatchFragmentOptions, WatchFragmentResult, } from "./core/cache.js";
|
||||
export { ApolloCache } from "./core/cache.js";
|
||||
export { Cache } from "./core/types/Cache.js";
|
||||
export type { DataProxy } from "./core/types/DataProxy.js";
|
||||
export type { MissingTree, Modifier, Modifiers, ModifierDetails, ReadFieldOptions, } from "./core/types/common.js";
|
||||
export { MissingFieldError } from "./core/types/common.js";
|
||||
export type { Reference } from "../utilities/index.js";
|
||||
export { isReference, makeReference, canonicalStringify, } from "../utilities/index.js";
|
||||
export { EntityStore } from "./inmemory/entityStore.js";
|
||||
export { fieldNameFromStoreName, defaultDataIdFromObject, } from "./inmemory/helpers.js";
|
||||
export { InMemoryCache } from "./inmemory/inMemoryCache.js";
|
||||
export type { ReactiveVar } from "./inmemory/reactiveVars.js";
|
||||
export { makeVar, cacheSlot } from "./inmemory/reactiveVars.js";
|
||||
export type { TypePolicies, TypePolicy, FieldPolicy, FieldReadFunction, FieldMergeFunction, FieldFunctionOptions, PossibleTypesMap, } from "./inmemory/policies.js";
|
||||
export { Policies } from "./inmemory/policies.js";
|
||||
export type { FragmentRegistryAPI } from "./inmemory/fragmentRegistry.js";
|
||||
export { createFragmentRegistry } from "./inmemory/fragmentRegistry.js";
|
||||
export type * from "./inmemory/types.js";
|
||||
//# sourceMappingURL=index.d.ts.map
|
||||
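Among the exports above is makeVar, which creates a ReactiveVar: local state that lives outside the normalized store but can still trigger cache re-reads. A sketch of its calling convention (darkModeVar is a hypothetical variable name):

import { makeVar } from "@apollo/client/cache";

const darkModeVar = makeVar<boolean>(false); // initial value

darkModeVar(true);          // calling with an argument updates the value
console.log(darkModeVar()); // calling with no arguments reads it: true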
12
node_modules/@apollo/client/cache/index.js
generated
vendored
Normal file
@@ -0,0 +1,12 @@
import "../utilities/globals/index.js";
|
||||
export { ApolloCache } from "./core/cache.js";
|
||||
export { Cache } from "./core/types/Cache.js";
|
||||
export { MissingFieldError } from "./core/types/common.js";
|
||||
export { isReference, makeReference, canonicalStringify, } from "../utilities/index.js";
|
||||
export { EntityStore } from "./inmemory/entityStore.js";
|
||||
export { fieldNameFromStoreName, defaultDataIdFromObject, } from "./inmemory/helpers.js";
|
||||
export { InMemoryCache } from "./inmemory/inMemoryCache.js";
|
||||
export { makeVar, cacheSlot } from "./inmemory/reactiveVars.js";
|
||||
export { Policies } from "./inmemory/policies.js";
|
||||
export { createFragmentRegistry } from "./inmemory/fragmentRegistry.js";
|
||||
//# sourceMappingURL=index.js.map
|
||||
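defaultDataIdFromObject, re-exported here, computes the default normalization key for an entity from its __typename and id (or _id) fields. A quick sketch of that behavior (the Book objects are hypothetical):

import { defaultDataIdFromObject } from "@apollo/client/cache";

defaultDataIdFromObject({ __typename: "Book", id: 5 }); // "Book:5"
defaultDataIdFromObject({ __typename: "Book" });        // undefined (no key fields)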
1
node_modules/@apollo/client/cache/index.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/cache/index.ts"],"names":[],"mappings":"AAAA,OAAO,+BAA+B,CAAC;AAOvC,OAAO,EAAE,WAAW,EAAE,MAAM,iBAAiB,CAAC;AAC9C,OAAO,EAAE,KAAK,EAAE,MAAM,uBAAuB,CAAC;AAS9C,OAAO,EAAE,iBAAiB,EAAE,MAAM,wBAAwB,CAAC;AAG3D,OAAO,EACL,WAAW,EACX,aAAa,EACb,kBAAkB,GACnB,MAAM,uBAAuB,CAAC;AAE/B,OAAO,EAAE,WAAW,EAAE,MAAM,2BAA2B,CAAC;AACxD,OAAO,EACL,sBAAsB,EACtB,uBAAuB,GACxB,MAAM,uBAAuB,CAAC;AAE/B,OAAO,EAAE,aAAa,EAAE,MAAM,6BAA6B,CAAC;AAG5D,OAAO,EAAE,OAAO,EAAE,SAAS,EAAE,MAAM,4BAA4B,CAAC;AAWhE,OAAO,EAAE,QAAQ,EAAE,MAAM,wBAAwB,CAAC;AAGlD,OAAO,EAAE,sBAAsB,EAAE,MAAM,gCAAgC,CAAC","sourcesContent":["import \"../utilities/globals/index.js\";\n\nexport type {\n Transaction,\n WatchFragmentOptions,\n WatchFragmentResult,\n} from \"./core/cache.js\";\nexport { ApolloCache } from \"./core/cache.js\";\nexport { Cache } from \"./core/types/Cache.js\";\nexport type { DataProxy } from \"./core/types/DataProxy.js\";\nexport type {\n MissingTree,\n Modifier,\n Modifiers,\n ModifierDetails,\n ReadFieldOptions,\n} from \"./core/types/common.js\";\nexport { MissingFieldError } from \"./core/types/common.js\";\n\nexport type { Reference } from \"../utilities/index.js\";\nexport {\n isReference,\n makeReference,\n canonicalStringify,\n} from \"../utilities/index.js\";\n\nexport { EntityStore } from \"./inmemory/entityStore.js\";\nexport {\n fieldNameFromStoreName,\n defaultDataIdFromObject,\n} from \"./inmemory/helpers.js\";\n\nexport { InMemoryCache } from \"./inmemory/inMemoryCache.js\";\n\nexport type { ReactiveVar } from \"./inmemory/reactiveVars.js\";\nexport { makeVar, cacheSlot } from \"./inmemory/reactiveVars.js\";\n\nexport type {\n TypePolicies,\n TypePolicy,\n FieldPolicy,\n FieldReadFunction,\n FieldMergeFunction,\n FieldFunctionOptions,\n PossibleTypesMap,\n} from \"./inmemory/policies.js\";\nexport { Policies } from \"./inmemory/policies.js\";\n\nexport type { FragmentRegistryAPI } from \"./inmemory/fragmentRegistry.js\";\nexport { createFragmentRegistry } from \"./inmemory/fragmentRegistry.js\";\n\nexport type * from \"./inmemory/types.js\";\n"]}
92
node_modules/@apollo/client/cache/inmemory/entityStore.d.ts
generated
vendored
Normal file
@@ -0,0 +1,92 @@
import { Trie } from "@wry/trie";
import type { StoreValue, StoreObject, Reference } from "../../utilities/index.js";
import type { NormalizedCache, NormalizedCacheObject } from "./types.js";
import type { Policies, StorageType } from "./policies.js";
import type { Cache } from "../core/types/Cache.js";
import type { SafeReadonly, Modifier, Modifiers, ToReferenceFunction, CanReadFunction } from "../core/types/common.js";
import type { DocumentNode, FieldNode, SelectionSetNode } from "graphql";
export declare abstract class EntityStore implements NormalizedCache {
    readonly policies: Policies;
    readonly group: CacheGroup;
    protected data: NormalizedCacheObject;
    constructor(policies: Policies, group: CacheGroup);
    abstract addLayer(layerId: string, replay: (layer: EntityStore) => any): Layer;
    abstract removeLayer(layerId: string): EntityStore;
    toObject(): NormalizedCacheObject;
    has(dataId: string): boolean;
    get(dataId: string, fieldName: string): StoreValue;
    protected lookup(dataId: string, dependOnExistence?: boolean): StoreObject | undefined;
    merge(older: string | StoreObject, newer: StoreObject | string): void;
    modify(dataId: string, fields: Modifier<any> | Modifiers<Record<string, any>>): boolean;
    delete(dataId: string, fieldName?: string, args?: Record<string, any>): boolean;
    evict(options: Cache.EvictOptions, limit: EntityStore): boolean;
    clear(): void;
    extract(): NormalizedCacheObject;
    replace(newData: NormalizedCacheObject | null): void;
    abstract getStorage(idOrObj: string | StoreObject, ...storeFieldNames: (string | number)[]): StorageType;
    private rootIds;
    retain(rootId: string): number;
    release(rootId: string): number;
    getRootIdSet(ids?: Set<string>): Set<string>;
    gc(): string[];
    private refs;
    findChildRefIds(dataId: string): Record<string, true>;
    /** overload for `InMemoryCache.maybeBroadcastWatch` */
    makeCacheKey(document: DocumentNode, callback: Cache.WatchCallback<any>, details: string): object;
    /** overload for `StoreReader.executeSelectionSet` */
    makeCacheKey(selectionSet: SelectionSetNode, parent: string | StoreObject, varString: string | undefined, canonizeResults: boolean): object;
    /** overload for `StoreReader.executeSubSelectedArray` */
    makeCacheKey(field: FieldNode, array: readonly any[], varString: string | undefined): object;
    /** @deprecated This is only meant for internal usage,
     * in your own code please use a `Trie` instance instead. */
    makeCacheKey(...args: any[]): object;
    getFieldValue: <T = StoreValue>(objectOrReference: StoreObject | Reference | undefined, storeFieldName: string) => SafeReadonly<T>;
    canRead: CanReadFunction;
    toReference: ToReferenceFunction;
}
export type FieldValueGetter = EntityStore["getFieldValue"];
declare class CacheGroup {
    readonly caching: boolean;
    private parent;
    private d;
    keyMaker: Trie<object>;
    constructor(caching: boolean, parent?: CacheGroup | null);
    resetCaching(): void;
    depend(dataId: string, storeFieldName: string): void;
    dirty(dataId: string, storeFieldName: string): void;
}
export declare function maybeDependOnExistenceOfEntity(store: NormalizedCache, entityId: string): void;
export declare namespace EntityStore {
    class Root extends EntityStore {
        constructor({ policies, resultCaching, seed, }: {
            policies: Policies;
            resultCaching?: boolean;
            seed?: NormalizedCacheObject;
        });
        readonly stump: Stump;
        addLayer(layerId: string, replay: (layer: EntityStore) => any): Layer;
        removeLayer(): Root;
        readonly storageTrie: Trie<StorageType>;
        getStorage(): StorageType;
    }
}
declare class Layer extends EntityStore {
    readonly id: string;
    readonly parent: EntityStore;
    readonly replay: (layer: EntityStore) => any;
    readonly group: CacheGroup;
    constructor(id: string, parent: EntityStore, replay: (layer: EntityStore) => any, group: CacheGroup);
    addLayer(layerId: string, replay: (layer: EntityStore) => any): Layer;
    removeLayer(layerId: string): EntityStore;
    toObject(): NormalizedCacheObject;
    findChildRefIds(dataId: string): Record<string, true>;
    getStorage(): StorageType;
}
declare class Stump extends Layer {
    constructor(root: EntityStore.Root);
    removeLayer(): this;
    merge(older: string | StoreObject, newer: string | StoreObject): void;
}
export declare function supportsResultCaching(store: any): store is EntityStore;
export {};
//# sourceMappingURL=entityStore.d.ts.map
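The retain/release/gc trio declared above drives garbage collection: gc() removes every entity that is not reachable from a root id or an explicitly retained id. A sketch using the public InMemoryCache wrappers, assuming nothing else in the cache references Book:1 (the fragment and data are hypothetical):

import { InMemoryCache, gql } from "@apollo/client";

const cache = new InMemoryCache();
cache.writeFragment({
  id: "Book:1",
  fragment: gql`fragment BookTitle on Book { title }`,
  data: { __typename: "Book", title: "1984" },
});

// Book:1 is not referenced from ROOT_QUERY, so only retaining it keeps it alive.
cache.retain("Book:1");
console.log(cache.gc()); // [] — retained ids survive collection
cache.release("Book:1");
console.log(cache.gc()); // ["Book:1"] — now unreachable, so it is removed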
671
node_modules/@apollo/client/cache/inmemory/entityStore.js
generated
vendored
Normal file
@@ -0,0 +1,671 @@
import { __assign, __extends, __rest } from "tslib";
import { invariant } from "../../utilities/globals/index.js";
import { dep } from "optimism";
import { equal } from "@wry/equality";
import { Trie } from "@wry/trie";
import { isReference, makeReference, DeepMerger, maybeDeepFreeze, canUseWeakMap, isNonNullObject, } from "../../utilities/index.js";
import { hasOwn, fieldNameFromStoreName } from "./helpers.js";
var DELETE = Object.create(null);
var delModifier = function () { return DELETE; };
var INVALIDATE = Object.create(null);
var EntityStore = /** @class */ (function () {
    function EntityStore(policies, group) {
        var _this = this;
        this.policies = policies;
        this.group = group;
        this.data = Object.create(null);
        // Maps root entity IDs to the number of times they have been retained, minus
        // the number of times they have been released. Retained entities keep other
        // entities they reference (even indirectly) from being garbage collected.
        this.rootIds = Object.create(null);
        // Lazily tracks { __ref: <dataId> } strings contained by this.data[dataId].
        this.refs = Object.create(null);
        // Bound function that can be passed around to provide easy access to fields
        // of Reference objects as well as ordinary objects.
        this.getFieldValue = function (objectOrReference, storeFieldName) {
            return maybeDeepFreeze(isReference(objectOrReference) ?
                _this.get(objectOrReference.__ref, storeFieldName)
                : objectOrReference && objectOrReference[storeFieldName]);
        };
        // Returns true for non-normalized StoreObjects and non-dangling
        // References, indicating that readField(name, objOrRef) has a chance of
        // working. Useful for filtering out dangling references from lists.
        this.canRead = function (objOrRef) {
            return isReference(objOrRef) ?
                _this.has(objOrRef.__ref)
                : typeof objOrRef === "object";
        };
        // Bound function that converts an id or an object with a __typename and
        // primary key fields to a Reference object. If called with a Reference object,
        // that same Reference object is returned. Pass true for mergeIntoStore to persist
        // an object into the store.
        this.toReference = function (objOrIdOrRef, mergeIntoStore) {
            if (typeof objOrIdOrRef === "string") {
                return makeReference(objOrIdOrRef);
            }
            if (isReference(objOrIdOrRef)) {
                return objOrIdOrRef;
            }
            var id = _this.policies.identify(objOrIdOrRef)[0];
            if (id) {
                var ref = makeReference(id);
                if (mergeIntoStore) {
                    _this.merge(id, objOrIdOrRef);
                }
                return ref;
            }
        };
    }
    // Although the EntityStore class is abstract, it contains concrete
    // implementations of the various NormalizedCache interface methods that
    // are inherited by the Root and Layer subclasses.
    EntityStore.prototype.toObject = function () {
        return __assign({}, this.data);
    };
    EntityStore.prototype.has = function (dataId) {
        return this.lookup(dataId, true) !== void 0;
    };
    EntityStore.prototype.get = function (dataId, fieldName) {
        this.group.depend(dataId, fieldName);
        if (hasOwn.call(this.data, dataId)) {
            var storeObject = this.data[dataId];
            if (storeObject && hasOwn.call(storeObject, fieldName)) {
                return storeObject[fieldName];
            }
        }
        if (fieldName === "__typename" &&
            hasOwn.call(this.policies.rootTypenamesById, dataId)) {
            return this.policies.rootTypenamesById[dataId];
        }
        if (this instanceof Layer) {
            return this.parent.get(dataId, fieldName);
        }
    };
    EntityStore.prototype.lookup = function (dataId, dependOnExistence) {
        // The has method (above) calls lookup with dependOnExistence = true, so
        // that it can later be invalidated when we add or remove a StoreObject for
        // this dataId. Any consumer who cares about the contents of the StoreObject
        // should not rely on this dependency, since the contents could change
        // without the object being added or removed.
        if (dependOnExistence)
            this.group.depend(dataId, "__exists");
        if (hasOwn.call(this.data, dataId)) {
            return this.data[dataId];
        }
        if (this instanceof Layer) {
            return this.parent.lookup(dataId, dependOnExistence);
        }
        if (this.policies.rootTypenamesById[dataId]) {
            return Object.create(null);
        }
    };
    EntityStore.prototype.merge = function (older, newer) {
        var _this = this;
        var dataId;
        // Convert unexpected references to ID strings.
        if (isReference(older))
            older = older.__ref;
        if (isReference(newer))
            newer = newer.__ref;
        var existing = typeof older === "string" ? this.lookup((dataId = older)) : older;
        var incoming = typeof newer === "string" ? this.lookup((dataId = newer)) : newer;
        // If newer was a string ID, but that ID was not defined in this store,
        // then there are no fields to be merged, so we're done.
        if (!incoming)
            return;
        invariant(typeof dataId === "string", 2);
        var merged = new DeepMerger(storeObjectReconciler).merge(existing, incoming);
        // Even if merged === existing, existing may have come from a lower
        // layer, so we always need to set this.data[dataId] on this level.
        this.data[dataId] = merged;
        if (merged !== existing) {
            delete this.refs[dataId];
            if (this.group.caching) {
                var fieldsToDirty_1 = Object.create(null);
                // If we added a new StoreObject where there was previously none, dirty
                // anything that depended on the existence of this dataId, such as the
                // EntityStore#has method.
                if (!existing)
                    fieldsToDirty_1.__exists = 1;
                // Now invalidate dependents who called getFieldValue for any fields
                // that are changing as a result of this merge.
                Object.keys(incoming).forEach(function (storeFieldName) {
                    if (!existing ||
                        existing[storeFieldName] !== merged[storeFieldName]) {
                        // Always dirty the full storeFieldName, which may include
                        // serialized arguments following the fieldName prefix.
                        fieldsToDirty_1[storeFieldName] = 1;
                        // Also dirty fieldNameFromStoreName(storeFieldName) if it's
                        // different from storeFieldName and this field does not have
                        // keyArgs configured, because that means the cache can't make
                        // any assumptions about how field values with the same field
                        // name but different arguments might be interrelated, so it
                        // must err on the side of invalidating all field values that
                        // share the same short fieldName, regardless of arguments.
                        var fieldName = fieldNameFromStoreName(storeFieldName);
                        if (fieldName !== storeFieldName &&
                            !_this.policies.hasKeyArgs(merged.__typename, fieldName)) {
                            fieldsToDirty_1[fieldName] = 1;
                        }
                        // If merged[storeFieldName] has become undefined, and this is the
                        // Root layer, actually delete the property from the merged object,
                        // which is guaranteed to have been created fresh in this method.
                        if (merged[storeFieldName] === void 0 && !(_this instanceof Layer)) {
                            delete merged[storeFieldName];
                        }
                    }
                });
                if (fieldsToDirty_1.__typename &&
                    !(existing && existing.__typename) &&
                    // Since we return default root __typename strings
                    // automatically from store.get, we don't need to dirty the
                    // ROOT_QUERY.__typename field if merged.__typename is equal
                    // to the default string (usually "Query").
                    this.policies.rootTypenamesById[dataId] === merged.__typename) {
                    delete fieldsToDirty_1.__typename;
                }
                Object.keys(fieldsToDirty_1).forEach(function (fieldName) {
                    return _this.group.dirty(dataId, fieldName);
                });
            }
        }
    };
    EntityStore.prototype.modify = function (dataId, fields) {
        var _this = this;
        var storeObject = this.lookup(dataId);
        if (storeObject) {
            var changedFields_1 = Object.create(null);
            var needToMerge_1 = false;
            var allDeleted_1 = true;
            var sharedDetails_1 = {
                DELETE: DELETE,
                INVALIDATE: INVALIDATE,
                isReference: isReference,
                toReference: this.toReference,
                canRead: this.canRead,
                readField: function (fieldNameOrOptions, from) {
                    return _this.policies.readField(typeof fieldNameOrOptions === "string" ?
                        {
                            fieldName: fieldNameOrOptions,
                            from: from || makeReference(dataId),
                        }
                        : fieldNameOrOptions, { store: _this });
                },
            };
            Object.keys(storeObject).forEach(function (storeFieldName) {
                var fieldName = fieldNameFromStoreName(storeFieldName);
                var fieldValue = storeObject[storeFieldName];
                if (fieldValue === void 0)
                    return;
                var modify = typeof fields === "function" ? fields : (fields[storeFieldName] || fields[fieldName]);
                if (modify) {
                    var newValue = modify === delModifier ? DELETE : (modify(maybeDeepFreeze(fieldValue), __assign(__assign({}, sharedDetails_1), { fieldName: fieldName, storeFieldName: storeFieldName, storage: _this.getStorage(dataId, storeFieldName) })));
                    if (newValue === INVALIDATE) {
                        _this.group.dirty(dataId, storeFieldName);
                    }
                    else {
                        if (newValue === DELETE)
                            newValue = void 0;
                        if (newValue !== fieldValue) {
                            changedFields_1[storeFieldName] = newValue;
                            needToMerge_1 = true;
                            fieldValue = newValue;
                            if (globalThis.__DEV__ !== false) {
                                var checkReference = function (ref) {
                                    if (_this.lookup(ref.__ref) === undefined) {
                                        globalThis.__DEV__ !== false && invariant.warn(3, ref);
                                        return true;
                                    }
                                };
                                if (isReference(newValue)) {
                                    checkReference(newValue);
                                }
                                else if (Array.isArray(newValue)) {
                                    // Warn about writing "mixed" arrays of Reference and non-Reference objects
                                    var seenReference = false;
                                    var someNonReference = void 0;
                                    for (var _i = 0, newValue_1 = newValue; _i < newValue_1.length; _i++) {
                                        var value = newValue_1[_i];
                                        if (isReference(value)) {
                                            seenReference = true;
                                            if (checkReference(value))
                                                break;
                                        }
                                        else {
                                            // Do not warn on primitive values, since those could never be represented
                                            // by a reference. This is a valid (albeit uncommon) use case.
                                            if (typeof value === "object" && !!value) {
                                                var id = _this.policies.identify(value)[0];
                                                // check if object could even be referenced, otherwise we are not interested in it for this warning
                                                if (id) {
                                                    someNonReference = value;
                                                }
                                            }
                                        }
                                        if (seenReference && someNonReference !== undefined) {
                                            globalThis.__DEV__ !== false && invariant.warn(4, someNonReference);
                                            break;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
                if (fieldValue !== void 0) {
                    allDeleted_1 = false;
                }
            });
            if (needToMerge_1) {
                this.merge(dataId, changedFields_1);
                if (allDeleted_1) {
                    if (this instanceof Layer) {
                        this.data[dataId] = void 0;
                    }
                    else {
                        delete this.data[dataId];
                    }
                    this.group.dirty(dataId, "__exists");
                }
                return true;
            }
        }
        return false;
    };
    // If called with only one argument, removes the entire entity
    // identified by dataId. If called with a fieldName as well, removes all
    // fields of that entity whose names match fieldName according to the
    // fieldNameFromStoreName helper function. If called with a fieldName
    // and variables, removes all fields of that entity whose names match fieldName
    // and whose arguments when cached exactly match the variables passed.
    EntityStore.prototype.delete = function (dataId, fieldName, args) {
        var _a;
        var storeObject = this.lookup(dataId);
        if (storeObject) {
            var typename = this.getFieldValue(storeObject, "__typename");
            var storeFieldName = fieldName && args ?
                this.policies.getStoreFieldName({ typename: typename, fieldName: fieldName, args: args })
                : fieldName;
            return this.modify(dataId, storeFieldName ? (_a = {},
                _a[storeFieldName] = delModifier,
                _a) : delModifier);
        }
        return false;
    };
    EntityStore.prototype.evict = function (options, limit) {
        var evicted = false;
        if (options.id) {
            if (hasOwn.call(this.data, options.id)) {
                evicted = this.delete(options.id, options.fieldName, options.args);
            }
            if (this instanceof Layer && this !== limit) {
                evicted = this.parent.evict(options, limit) || evicted;
            }
            // Always invalidate the field to trigger rereading of watched
            // queries, even if no cache data was modified by the eviction,
            // because queries may depend on computed fields with custom read
            // functions, whose values are not stored in the EntityStore.
            if (options.fieldName || evicted) {
                this.group.dirty(options.id, options.fieldName || "__exists");
            }
        }
        return evicted;
    };
    EntityStore.prototype.clear = function () {
        this.replace(null);
    };
    EntityStore.prototype.extract = function () {
        var _this = this;
        var obj = this.toObject();
        var extraRootIds = [];
        this.getRootIdSet().forEach(function (id) {
            if (!hasOwn.call(_this.policies.rootTypenamesById, id)) {
                extraRootIds.push(id);
            }
        });
        if (extraRootIds.length) {
            obj.__META = { extraRootIds: extraRootIds.sort() };
        }
        return obj;
    };
    EntityStore.prototype.replace = function (newData) {
        var _this = this;
        Object.keys(this.data).forEach(function (dataId) {
            if (!(newData && hasOwn.call(newData, dataId))) {
                _this.delete(dataId);
            }
        });
        if (newData) {
            var __META = newData.__META, rest_1 = __rest(newData, ["__META"]);
            Object.keys(rest_1).forEach(function (dataId) {
                _this.merge(dataId, rest_1[dataId]);
            });
            if (__META) {
                __META.extraRootIds.forEach(this.retain, this);
            }
        }
    };
    EntityStore.prototype.retain = function (rootId) {
        return (this.rootIds[rootId] = (this.rootIds[rootId] || 0) + 1);
    };
    EntityStore.prototype.release = function (rootId) {
        if (this.rootIds[rootId] > 0) {
            var count = --this.rootIds[rootId];
            if (!count)
                delete this.rootIds[rootId];
            return count;
        }
        return 0;
    };
    // Return a Set<string> of all the ID strings that have been retained by
    // this layer/root *and* any layers/roots beneath it.
    EntityStore.prototype.getRootIdSet = function (ids) {
        if (ids === void 0) { ids = new Set(); }
        Object.keys(this.rootIds).forEach(ids.add, ids);
        if (this instanceof Layer) {
            this.parent.getRootIdSet(ids);
        }
        else {
            // Official singleton IDs like ROOT_QUERY and ROOT_MUTATION are
            // always considered roots for garbage collection, regardless of
            // their retainment counts in this.rootIds.
            Object.keys(this.policies.rootTypenamesById).forEach(ids.add, ids);
        }
        return ids;
    };
    // The goal of garbage collection is to remove IDs from the Root layer of the
    // store that are no longer reachable starting from any IDs that have been
    // explicitly retained (see retain and release, above). Returns an array of
    // dataId strings that were removed from the store.
    EntityStore.prototype.gc = function () {
        var _this = this;
        var ids = this.getRootIdSet();
        var snapshot = this.toObject();
        ids.forEach(function (id) {
            if (hasOwn.call(snapshot, id)) {
                // Because we are iterating over an ECMAScript Set, the IDs we add here
                // will be visited in later iterations of the forEach loop only if they
                // were not previously contained by the Set.
                Object.keys(_this.findChildRefIds(id)).forEach(ids.add, ids);
                // By removing IDs from the snapshot object here, we protect them from
                // getting removed from the root store layer below.
                delete snapshot[id];
            }
        });
        var idsToRemove = Object.keys(snapshot);
        if (idsToRemove.length) {
            var root_1 = this;
            while (root_1 instanceof Layer)
                root_1 = root_1.parent;
            idsToRemove.forEach(function (id) { return root_1.delete(id); });
        }
        return idsToRemove;
    };
    EntityStore.prototype.findChildRefIds = function (dataId) {
        if (!hasOwn.call(this.refs, dataId)) {
            var found_1 = (this.refs[dataId] = Object.create(null));
            var root = this.data[dataId];
            if (!root)
                return found_1;
            var workSet_1 = new Set([root]);
            // Within the store, only arrays and objects can contain child entity
            // references, so we can prune the traversal using this predicate:
            workSet_1.forEach(function (obj) {
                if (isReference(obj)) {
                    found_1[obj.__ref] = true;
                    // In rare cases, a { __ref } Reference object may have other fields.
                    // This often indicates a mismerging of References with StoreObjects,
                    // but garbage collection should not be fooled by a stray __ref
                    // property in a StoreObject (ignoring all the other fields just
                    // because the StoreObject looks like a Reference). To avoid this
                    // premature termination of findChildRefIds recursion, we fall through
                    // to the code below, which will handle any other properties of obj.
                }
                if (isNonNullObject(obj)) {
                    Object.keys(obj).forEach(function (key) {
                        var child = obj[key];
                        // No need to add primitive values to the workSet, since they cannot
                        // contain reference objects.
                        if (isNonNullObject(child)) {
                            workSet_1.add(child);
                        }
                    });
                }
            });
        }
        return this.refs[dataId];
    };
    EntityStore.prototype.makeCacheKey = function () {
        return this.group.keyMaker.lookupArray(arguments);
    };
    return EntityStore;
}());
export { EntityStore };
// A single CacheGroup represents a set of one or more EntityStore objects,
// typically the Root store in a CacheGroup by itself, and all active Layer
// stores in a group together. A single EntityStore object belongs to only
// one CacheGroup, store.group. The CacheGroup is responsible for tracking
// dependencies, so store.group is helpful for generating unique keys for
// cached results that need to be invalidated when/if those dependencies
// change. If we used the EntityStore objects themselves as cache keys (that
// is, store rather than store.group), the cache would become unnecessarily
// fragmented by all the different Layer objects. Instead, the CacheGroup
// approach allows all optimistic Layer objects in the same linked list to
// belong to one CacheGroup, with the non-optimistic Root object belonging
// to another CacheGroup, allowing resultCaching dependencies to be tracked
// separately for optimistic and non-optimistic entity data.
var CacheGroup = /** @class */ (function () {
    function CacheGroup(caching, parent) {
        if (parent === void 0) { parent = null; }
        this.caching = caching;
        this.parent = parent;
        this.d = null;
        this.resetCaching();
    }
    CacheGroup.prototype.resetCaching = function () {
        this.d = this.caching ? dep() : null;
        this.keyMaker = new Trie(canUseWeakMap);
    };
    CacheGroup.prototype.depend = function (dataId, storeFieldName) {
        if (this.d) {
            this.d(makeDepKey(dataId, storeFieldName));
            var fieldName = fieldNameFromStoreName(storeFieldName);
            if (fieldName !== storeFieldName) {
                // Fields with arguments that contribute extra identifying
                // information to the fieldName (thus forming the storeFieldName)
                // depend not only on the full storeFieldName but also on the
                // short fieldName, so the field can be invalidated using either
                // level of specificity.
                this.d(makeDepKey(dataId, fieldName));
            }
            if (this.parent) {
                this.parent.depend(dataId, storeFieldName);
            }
        }
    };
    CacheGroup.prototype.dirty = function (dataId, storeFieldName) {
        if (this.d) {
            this.d.dirty(makeDepKey(dataId, storeFieldName),
            // When storeFieldName === "__exists", that means the entity identified
            // by dataId has either disappeared from the cache or was newly added,
            // so the result caching system would do well to "forget everything it
            // knows" about that object. To achieve that kind of invalidation, we
            // not only dirty the associated result cache entry, but also remove it
            // completely from the dependency graph. For the optimism implementation
            // details, see https://github.com/benjamn/optimism/pull/195.
            storeFieldName === "__exists" ? "forget" : "setDirty");
        }
    };
    return CacheGroup;
}());
function makeDepKey(dataId, storeFieldName) {
    // Since field names cannot have '#' characters in them, this method
    // of joining the field name and the ID should be unambiguous, and much
    // cheaper than JSON.stringify([dataId, fieldName]).
    return storeFieldName + "#" + dataId;
}
export function maybeDependOnExistenceOfEntity(store, entityId) {
    if (supportsResultCaching(store)) {
        // We use this pseudo-field __exists elsewhere in the EntityStore code to
        // represent changes in the existence of the entity object identified by
        // entityId. This dependency gets reliably dirtied whenever an object with
        // this ID is deleted (or newly created) within this group, so any result
        // cache entries (for example, StoreReader#executeSelectionSet results) that
        // depend on __exists for this entityId will get dirtied as well, leading to
        // the eventual recomputation (instead of reuse) of those result objects the
        // next time someone reads them from the cache.
        store.group.depend(entityId, "__exists");
    }
}
(function (EntityStore) {
    // Refer to this class as EntityStore.Root outside this namespace.
    var Root = /** @class */ (function (_super) {
        __extends(Root, _super);
        function Root(_a) {
            var policies = _a.policies, _b = _a.resultCaching, resultCaching = _b === void 0 ? true : _b, seed = _a.seed;
            var _this = _super.call(this, policies, new CacheGroup(resultCaching)) || this;
            _this.stump = new Stump(_this);
            _this.storageTrie = new Trie(canUseWeakMap);
            if (seed)
                _this.replace(seed);
            return _this;
        }
        Root.prototype.addLayer = function (layerId, replay) {
            // Adding an optimistic Layer on top of the Root actually adds the Layer
            // on top of the Stump, so the Stump always comes between the Root and
            // any Layer objects that we've added.
            return this.stump.addLayer(layerId, replay);
        };
        Root.prototype.removeLayer = function () {
            // Never remove the root layer.
            return this;
        };
        Root.prototype.getStorage = function () {
            return this.storageTrie.lookupArray(arguments);
        };
        return Root;
    }(EntityStore));
    EntityStore.Root = Root;
})(EntityStore || (EntityStore = {}));
// Not exported, since all Layer instances are created by the addLayer method
// of the EntityStore.Root class.
var Layer = /** @class */ (function (_super) {
    __extends(Layer, _super);
    function Layer(id, parent, replay, group) {
        var _this = _super.call(this, parent.policies, group) || this;
        _this.id = id;
        _this.parent = parent;
        _this.replay = replay;
        _this.group = group;
        replay(_this);
        return _this;
    }
    Layer.prototype.addLayer = function (layerId, replay) {
        return new Layer(layerId, this, replay, this.group);
    };
    Layer.prototype.removeLayer = function (layerId) {
        var _this = this;
        // Remove all instances of the given id, not just the first one.
        var parent = this.parent.removeLayer(layerId);
        if (layerId === this.id) {
            if (this.group.caching) {
                // Dirty every ID we're removing. Technically we might be able to avoid
                // dirtying fields that have values in higher layers, but we don't have
                // easy access to higher layers here, and we're about to recreate those
                // layers anyway (see parent.addLayer below).
                Object.keys(this.data).forEach(function (dataId) {
                    var ownStoreObject = _this.data[dataId];
                    var parentStoreObject = parent["lookup"](dataId);
                    if (!parentStoreObject) {
                        // The StoreObject identified by dataId was defined in this layer
                        // but will be undefined in the parent layer, so we can delete the
                        // whole entity using this.delete(dataId). Since we're about to
                        // throw this layer away, the only goal of this deletion is to dirty
                        // the removed fields.
                        _this.delete(dataId);
                    }
                    else if (!ownStoreObject) {
                        // This layer had an entry for dataId but it was undefined, which
                        // means the entity was deleted in this layer, and it's about to
                        // become undeleted when we remove this layer, so we need to dirty
                        // all fields that are about to be reexposed.
                        _this.group.dirty(dataId, "__exists");
                        Object.keys(parentStoreObject).forEach(function (storeFieldName) {
                            _this.group.dirty(dataId, storeFieldName);
                        });
                    }
                    else if (ownStoreObject !== parentStoreObject) {
                        // If ownStoreObject is not exactly the same as parentStoreObject,
                        // dirty any fields whose values will change as a result of this
                        // removal.
                        Object.keys(ownStoreObject).forEach(function (storeFieldName) {
                            if (!equal(ownStoreObject[storeFieldName], parentStoreObject[storeFieldName])) {
                                _this.group.dirty(dataId, storeFieldName);
                            }
                        });
                    }
                });
            }
            return parent;
        }
        // No changes are necessary if the parent chain remains identical.
        if (parent === this.parent)
            return this;
        // Recreate this layer on top of the new parent.
        return parent.addLayer(this.id, this.replay);
    };
    Layer.prototype.toObject = function () {
        return __assign(__assign({}, this.parent.toObject()), this.data);
    };
    Layer.prototype.findChildRefIds = function (dataId) {
        var fromParent = this.parent.findChildRefIds(dataId);
        return hasOwn.call(this.data, dataId) ? __assign(__assign({}, fromParent), _super.prototype.findChildRefIds.call(this, dataId)) : fromParent;
    };
    Layer.prototype.getStorage = function () {
        var p = this.parent;
        while (p.parent)
            p = p.parent;
        return p.getStorage.apply(p,
        // @ts-expect-error
        arguments);
    };
    return Layer;
}(EntityStore));
// Represents a Layer permanently installed just above the Root, which allows
// reading optimistically (and registering optimistic dependencies) even when
// no optimistic layers are currently active. The stump.group CacheGroup object
// is shared by any/all Layer objects added on top of the Stump.
var Stump = /** @class */ (function (_super) {
    __extends(Stump, _super);
    function Stump(root) {
        return _super.call(this, "EntityStore.Stump", root, function () { }, new CacheGroup(root.group.caching, root.group)) || this;
    }
    Stump.prototype.removeLayer = function () {
        // Never remove the Stump layer.
        return this;
    };
    Stump.prototype.merge = function (older, newer) {
        // We never want to write any data into the Stump, so we forward any merge
        // calls to the Root instead. Another option here would be to throw an
        // exception, but the toReference(object, true) function can sometimes
        // trigger Stump writes (which used to be Root writes, before the Stump
        // concept was introduced).
        return this.parent.merge(older, newer);
    };
    return Stump;
}(Layer));
function storeObjectReconciler(existingObject, incomingObject, property) {
    var existingValue = existingObject[property];
    var incomingValue = incomingObject[property];
    // Wherever there is a key collision, prefer the incoming value, unless
    // it is deeply equal to the existing value. It's worth checking deep
    // equality here (even though blindly returning incoming would be
    // logically correct) because preserving the referential identity of
    // existing data can prevent needless rereading and rerendering.
    return equal(existingValue, incomingValue) ? existingValue : incomingValue;
}
export function supportsResultCaching(store) {
    // When result caching is disabled, store.depend will be null.
    return !!(store instanceof EntityStore && store.group.caching);
}
//# sourceMappingURL=entityStore.js.map
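The Layer machinery above is what backs optimistic updates: each optimistic id becomes a Layer stacked on the Stump, reads with optimistic: true see through the whole stack, and removing a layer dirties the affected fields and replays whatever remains. A sketch through the public API (the counter field is hypothetical):

import { InMemoryCache, gql } from "@apollo/client";

const query = gql`query { counter }`; // hypothetical field
const cache = new InMemoryCache();
cache.writeQuery({ query, data: { counter: 0 } });

// Adds a Layer on top of the Stump/Root chain.
cache.recordOptimisticTransaction((proxy) => {
  proxy.writeQuery({ query, data: { counter: 1 } });
}, "optimistic-1");

cache.readQuery({ query, optimistic: true });  // { counter: 1 }
cache.readQuery({ query, optimistic: false }); // { counter: 0 }

// Removing the layer replays any surviving layers on top of the Root.
cache.removeOptimistic("optimistic-1");
cache.readQuery({ query, optimistic: true });  // { counter: 0 }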
1
node_modules/@apollo/client/cache/inmemory/entityStore.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
1
node_modules/@apollo/client/cache/inmemory/fixPolyfills.d.ts
generated
vendored
Normal file
@@ -0,0 +1 @@
//# sourceMappingURL=fixPolyfills.d.ts.map
10
node_modules/@apollo/client/cache/inmemory/fixPolyfills.js
generated
vendored
Normal file
@@ -0,0 +1,10 @@
"use strict";
|
||||
// Most JavaScript environments do not need the workarounds implemented in
|
||||
// fixPolyfills.native.ts, so importing fixPolyfills.ts merely imports
|
||||
// this empty module, adding nothing to bundle sizes or execution times.
|
||||
// When bundling for React Native, we substitute fixPolyfills.native.js
|
||||
// for fixPolyfills.js (see the "react-native" section of package.json),
|
||||
// to work around problems with Map and Set polyfills in older versions of
|
||||
// React Native (which should have been fixed in react-native@0.59.0):
|
||||
// https://github.com/apollographql/apollo-client/pull/5962
|
||||
//# sourceMappingURL=fixPolyfills.js.map
|
||||
1
node_modules/@apollo/client/cache/inmemory/fixPolyfills.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"fixPolyfills.js","sourceRoot":"","sources":["../../../src/cache/inmemory/fixPolyfills.ts"],"names":[],"mappings":";AAAA,0EAA0E;AAC1E,sEAAsE;AACtE,wEAAwE;AACxE,uEAAuE;AACvE,wEAAwE;AACxE,0EAA0E;AAC1E,sEAAsE;AACtE,2DAA2D","sourcesContent":["// Most JavaScript environments do not need the workarounds implemented in\n// fixPolyfills.native.ts, so importing fixPolyfills.ts merely imports\n// this empty module, adding nothing to bundle sizes or execution times.\n// When bundling for React Native, we substitute fixPolyfills.native.js\n// for fixPolyfills.js (see the \"react-native\" section of package.json),\n// to work around problems with Map and Set polyfills in older versions of\n// React Native (which should have been fixed in react-native@0.59.0):\n// https://github.com/apollographql/apollo-client/pull/5962\n"]}
2
node_modules/@apollo/client/cache/inmemory/fixPolyfills.native.d.ts
generated
vendored
Normal file
@@ -0,0 +1,2 @@
export {};
//# sourceMappingURL=fixPolyfills.native.d.ts.map
61
node_modules/@apollo/client/cache/inmemory/fixPolyfills.native.js
generated
vendored
Normal file
@@ -0,0 +1,61 @@
// Make sure Map.prototype.set returns the Map instance, per spec.
// https://github.com/apollographql/apollo-client/issues/4024
var testMap = new Map();
if (testMap.set(1, 2) !== testMap) {
    var set_1 = testMap.set;
    Map.prototype.set = function () {
        var args = [];
        for (var _i = 0; _i < arguments.length; _i++) {
            args[_i] = arguments[_i];
        }
        set_1.apply(this, args);
        return this;
    };
}
// Make sure Set.prototype.add returns the Set instance, per spec.
var testSet = new Set();
if (testSet.add(3) !== testSet) {
    var add_1 = testSet.add;
    Set.prototype.add = function () {
        var args = [];
        for (var _i = 0; _i < arguments.length; _i++) {
            args[_i] = arguments[_i];
        }
        add_1.apply(this, args);
        return this;
    };
}
var frozen = {};
if (typeof Object.freeze === "function") {
    Object.freeze(frozen);
}
try {
    // If non-extensible objects can't be stored as keys in a Map, make sure we
    // do not freeze/seal/etc. an object without first attempting to put it in a
    // Map. For example, this gives the React Native Map polyfill a chance to tag
    // objects before they become non-extensible:
    // https://github.com/facebook/react-native/blob/98a6f19d7c/Libraries/vendor/core/Map.js#L44-L50
    // https://github.com/apollographql/react-apollo/issues/2442#issuecomment-426489517
    testMap.set(frozen, frozen).delete(frozen);
}
catch (_a) {
    var wrap = function (method) {
        return (method &&
            (function (obj) {
                try {
                    // If .set succeeds, also call .delete to avoid leaking memory.
                    testMap.set(obj, obj).delete(obj);
                }
                finally {
                    // If .set or .delete fails, the exception will be silently swallowed
                    // by this return-from-finally statement:
                    return method.call(Object, obj);
                }
            }));
    };
    Object.freeze = wrap(Object.freeze);
    Object.seal = wrap(Object.seal);
    Object.preventExtensions = wrap(Object.preventExtensions);
}
export {};
//# sourceMappingURL=fixPolyfills.native.js.map
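Both patches above restore a behavior the ECMAScript spec guarantees: Map.prototype.set and Set.prototype.add return their receiver, so writes can be chained. A quick sketch of the invariant being repaired:

// After the patch, chained writes behave as the spec requires.
const m = new Map<string, number>();
const chained = m.set("a", 1).set("b", 2); // set must return the Map itself
console.assert(chained === m && m.get("b") === 2);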
1
node_modules/@apollo/client/cache/inmemory/fixPolyfills.native.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"fixPolyfills.native.js","sourceRoot":"","sources":["../../../src/cache/inmemory/fixPolyfills.native.ts"],"names":[],"mappings":"AAAA,kEAAkE;AAClE,6DAA6D;AAC7D,IAAM,OAAO,GAAG,IAAI,GAAG,EAAE,CAAC;AAC1B,IAAI,OAAO,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,OAAO,EAAE,CAAC;IAC1B,IAAA,KAAG,GAAK,OAAO,IAAZ,CAAa;IACxB,GAAG,CAAC,SAAS,CAAC,GAAG,GAAG;QAAU,cAAO;aAAP,UAAO,EAAP,qBAAO,EAAP,IAAO;YAAP,yBAAO;;QACnC,KAAG,CAAC,KAAK,CAAC,IAAI,EAAE,IAAI,CAAC,CAAC;QACtB,OAAO,IAAI,CAAC;IACd,CAAC,CAAC;AACJ,CAAC;AAED,kEAAkE;AAClE,IAAM,OAAO,GAAG,IAAI,GAAG,EAAE,CAAC;AAC1B,IAAI,OAAO,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,OAAO,EAAE,CAAC;IACvB,IAAA,KAAG,GAAK,OAAO,IAAZ,CAAa;IACxB,GAAG,CAAC,SAAS,CAAC,GAAG,GAAG;QAAU,cAAO;aAAP,UAAO,EAAP,qBAAO,EAAP,IAAO;YAAP,yBAAO;;QACnC,KAAG,CAAC,KAAK,CAAC,IAAI,EAAE,IAAI,CAAC,CAAC;QACtB,OAAO,IAAI,CAAC;IACd,CAAC,CAAC;AACJ,CAAC;AAED,IAAM,MAAM,GAAG,EAAE,CAAC;AAClB,IAAI,OAAO,MAAM,CAAC,MAAM,KAAK,UAAU,EAAE,CAAC;IACxC,MAAM,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;AACxB,CAAC;AAED,IAAI,CAAC;IACH,2EAA2E;IAC3E,4EAA4E;IAC5E,6EAA6E;IAC7E,6CAA6C;IAC7C,gGAAgG;IAChG,mFAAmF;IACnF,OAAO,CAAC,GAAG,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;AAC7C,CAAC;AAAC,WAAM,CAAC;IACP,IAAM,IAAI,GAAG,UAA6B,MAAS;QACjD,OAAO,CACL,MAAM;YACL,CAAC,UAAC,GAAG;gBACJ,IAAI,CAAC;oBACH,+DAA+D;oBAC/D,OAAO,CAAC,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC;gBACpC,CAAC;wBAAS,CAAC;oBACT,qEAAqE;oBACrE,yCAAyC;oBACzC,OAAO,MAAM,CAAC,IAAI,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;gBAClC,CAAC;YACH,CAAC,CAAO,CACT,CAAC;IACJ,CAAC,CAAC;IACF,MAAM,CAAC,MAAM,GAAG,IAAI,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;IACpC,MAAM,CAAC,IAAI,GAAG,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC;IAChC,MAAM,CAAC,iBAAiB,GAAG,IAAI,CAAC,MAAM,CAAC,iBAAiB,CAAC,CAAC;AAC5D,CAAC","sourcesContent":["// Make sure Map.prototype.set returns the Map instance, per spec.\n// https://github.com/apollographql/apollo-client/issues/4024\nconst testMap = new Map();\nif (testMap.set(1, 2) !== testMap) {\n const { set } = testMap;\n Map.prototype.set = function (...args) {\n set.apply(this, args);\n return this;\n };\n}\n\n// Make sure Set.prototype.add returns the Set instance, per spec.\nconst testSet = new Set();\nif (testSet.add(3) !== testSet) {\n const { add } = testSet;\n Set.prototype.add = function (...args) {\n add.apply(this, args);\n return this;\n };\n}\n\nconst frozen = {};\nif (typeof Object.freeze === \"function\") {\n Object.freeze(frozen);\n}\n\ntry {\n // If non-extensible objects can't be stored as keys in a Map, make sure we\n // do not freeze/seal/etc. an object without first attempting to put it in a\n // Map. For example, this gives the React Native Map polyfill a chance to tag\n // objects before they become non-extensible:\n // https://github.com/facebook/react-native/blob/98a6f19d7c/Libraries/vendor/core/Map.js#L44-L50\n // https://github.com/apollographql/react-apollo/issues/2442#issuecomment-426489517\n testMap.set(frozen, frozen).delete(frozen);\n} catch {\n const wrap = <M extends <T>(obj: T) => T>(method: M): M => {\n return (\n method &&\n (((obj) => {\n try {\n // If .set succeeds, also call .delete to avoid leaking memory.\n testMap.set(obj, obj).delete(obj);\n } finally {\n // If .set or .delete fails, the exception will be silently swallowed\n // by this return-from-finally statement:\n return method.call(Object, obj);\n }\n }) as M)\n );\n };\n Object.freeze = wrap(Object.freeze);\n Object.seal = wrap(Object.seal);\n Object.preventExtensions = wrap(Object.preventExtensions);\n}\n\nexport {};\n"]}
9
node_modules/@apollo/client/cache/inmemory/fragmentRegistry.d.ts
generated
vendored
Normal file
@@ -0,0 +1,9 @@
import type { DocumentNode, FragmentDefinitionNode } from "graphql";
export interface FragmentRegistryAPI {
    register(...fragments: DocumentNode[]): this;
    lookup(fragmentName: string): FragmentDefinitionNode | null;
    transform<D extends DocumentNode>(document: D): D;
    resetCaches(): void;
}
export declare function createFragmentRegistry(...fragments: DocumentNode[]): FragmentRegistryAPI;
//# sourceMappingURL=fragmentRegistry.d.ts.map
140
node_modules/@apollo/client/cache/inmemory/fragmentRegistry.js
generated
vendored
Normal file
@@ -0,0 +1,140 @@
import { __assign, __spreadArray } from "tslib";
import { visit } from "graphql";
import { wrap } from "optimism";
import { cacheSizes, getFragmentDefinitions, } from "../../utilities/index.js";
import { WeakCache } from "@wry/caches";
// As long as createFragmentRegistry is not imported or used, the
// FragmentRegistry example implementation provided below should not be bundled
// (by tree-shaking bundlers like Rollup), because the implementation of
// InMemoryCache refers only to the TypeScript interface FragmentRegistryAPI,
// never the concrete implementation FragmentRegistry (which is deliberately not
// exported from this module).
export function createFragmentRegistry() {
    var fragments = [];
    for (var _i = 0; _i < arguments.length; _i++) {
        fragments[_i] = arguments[_i];
    }
    return new (FragmentRegistry.bind.apply(FragmentRegistry, __spreadArray([void 0], fragments, false)))();
}
var FragmentRegistry = /** @class */ (function () {
    // Call `createFragmentRegistry` instead of invoking the
    // FragmentRegistry constructor directly. This reserves the constructor for
    // future configuration of the FragmentRegistry.
    function FragmentRegistry() {
        var fragments = [];
        for (var _i = 0; _i < arguments.length; _i++) {
            fragments[_i] = arguments[_i];
        }
        this.registry = Object.create(null);
        this.resetCaches();
        if (fragments.length) {
            this.register.apply(this, fragments);
        }
    }
    FragmentRegistry.prototype.register = function () {
        var _this = this;
        var fragments = [];
        for (var _i = 0; _i < arguments.length; _i++) {
            fragments[_i] = arguments[_i];
        }
        var definitions = new Map();
        fragments.forEach(function (doc) {
            getFragmentDefinitions(doc).forEach(function (node) {
                definitions.set(node.name.value, node);
            });
        });
        definitions.forEach(function (node, name) {
            if (node !== _this.registry[name]) {
                _this.registry[name] = node;
                _this.invalidate(name);
            }
        });
        return this;
    };
    // Overridden in the resetCaches method below.
    FragmentRegistry.prototype.invalidate = function (name) { };
    FragmentRegistry.prototype.resetCaches = function () {
        var proto = FragmentRegistry.prototype;
        this.invalidate = (this.lookup = wrap(proto.lookup.bind(this), {
            makeCacheKey: function (arg) { return arg; },
            max: cacheSizes["fragmentRegistry.lookup"] ||
                1000 /* defaultCacheSizes["fragmentRegistry.lookup"] */,
        })).dirty; // This dirty function is bound to the wrapped lookup method.
        this.transform = wrap(proto.transform.bind(this), {
            cache: WeakCache,
            max: cacheSizes["fragmentRegistry.transform"] ||
                2000 /* defaultCacheSizes["fragmentRegistry.transform"] */,
        });
        this.findFragmentSpreads = wrap(proto.findFragmentSpreads.bind(this), {
            cache: WeakCache,
            max: cacheSizes["fragmentRegistry.findFragmentSpreads"] ||
                4000 /* defaultCacheSizes["fragmentRegistry.findFragmentSpreads"] */,
        });
    };
    /*
     * Note:
     * This method is only memoized so it can serve as a dependency to `transform`,
     * so calling `invalidate` will invalidate cache entries for `transform`.
     */
    FragmentRegistry.prototype.lookup = function (fragmentName) {
        return this.registry[fragmentName] || null;
    };
    FragmentRegistry.prototype.transform = function (document) {
        var _this = this;
        var defined = new Map();
        getFragmentDefinitions(document).forEach(function (def) {
            defined.set(def.name.value, def);
        });
        var unbound = new Set();
        var enqueue = function (spreadName) {
            if (!defined.has(spreadName)) {
                unbound.add(spreadName);
            }
        };
        var enqueueChildSpreads = function (node) {
            return Object.keys(_this.findFragmentSpreads(node)).forEach(enqueue);
        };
        enqueueChildSpreads(document);
        var missing = [];
        var map = Object.create(null);
        // This Set forEach loop can be extended during iteration by adding
        // additional strings to the unbound set.
        unbound.forEach(function (fragmentName) {
            var knownFragmentDef = defined.get(fragmentName);
            if (knownFragmentDef) {
                enqueueChildSpreads((map[fragmentName] = knownFragmentDef));
            }
            else {
                missing.push(fragmentName);
                var def = _this.lookup(fragmentName);
                if (def) {
                    enqueueChildSpreads((map[fragmentName] = def));
                }
            }
        });
        if (missing.length) {
            var defsToAppend_1 = [];
            missing.forEach(function (name) {
                var def = map[name];
                if (def) {
                    defsToAppend_1.push(def);
                }
            });
            if (defsToAppend_1.length) {
                document = __assign(__assign({}, document), { definitions: document.definitions.concat(defsToAppend_1) });
            }
        }
        return document;
    };
    FragmentRegistry.prototype.findFragmentSpreads = function (root) {
        var spreads = Object.create(null);
        visit(root, {
            FragmentSpread: function (node) {
                spreads[node.name.value] = node;
            },
        });
        return spreads;
    };
    return FragmentRegistry;
}());
//# sourceMappingURL=fragmentRegistry.js.map
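createFragmentRegistry is the public entry point for this module, typically passed as the fragments option of InMemoryCache so queries can spread fragments without redeclaring them in every document. A brief usage sketch (the PersonFields fragment is hypothetical):

import { gql, InMemoryCache } from "@apollo/client";
import { createFragmentRegistry } from "@apollo/client/cache";

// Hypothetical fragment registered once; any query passed through the cache's
// document transform can then spread ...PersonFields without defining it.
const cache = new InMemoryCache({
    fragments: createFragmentRegistry(gql`
        fragment PersonFields on Person {
            id
            name
        }
    `),
});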
1
node_modules/@apollo/client/cache/inmemory/fragmentRegistry.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
28
node_modules/@apollo/client/cache/inmemory/helpers.d.ts
generated
vendored
Normal file
@@ -0,0 +1,28 @@
import type { DocumentNode, SelectionSetNode } from "graphql";
import type { NormalizedCache, InMemoryCacheConfig } from "./types.js";
import type { KeyFieldsContext } from "./policies.js";
import type { FragmentRegistryAPI } from "./fragmentRegistry.js";
import type { Reference, StoreValue, StoreObject, FragmentMap, FragmentMapFunction } from "../../utilities/index.js";
import { DeepMerger, isArray } from "../../utilities/index.js";
export declare const hasOwn: (v: PropertyKey) => boolean;
export declare function isNullish(value: any): value is null | undefined;
export { isArray };
export declare function defaultDataIdFromObject({ __typename, id, _id }: Readonly<StoreObject>, context?: KeyFieldsContext): string | undefined;
export declare function normalizeConfig(config: InMemoryCacheConfig): {
    dataIdFromObject: typeof defaultDataIdFromObject;
    addTypename: boolean;
    resultCaching: boolean;
    canonizeResults: boolean;
} & InMemoryCacheConfig;
export declare function shouldCanonizeResults(config: Pick<InMemoryCacheConfig, "canonizeResults">): boolean;
export declare function getTypenameFromStoreObject(store: NormalizedCache, objectOrReference: StoreObject | Reference): string | undefined;
export declare const TypeOrFieldNameRegExp: RegExp;
export declare function fieldNameFromStoreName(storeFieldName: string): string;
export declare function selectionSetMatchesResult(selectionSet: SelectionSetNode, result: Record<string, any>, variables?: Record<string, any>): boolean;
export declare function storeValueIsStoreObject(value: StoreValue): value is StoreObject;
export declare function makeProcessedFieldsMerger(): DeepMerger<any[]>;
export declare function extractFragmentContext(document: DocumentNode, fragments?: FragmentRegistryAPI): {
    fragmentMap: FragmentMap;
    lookupFragment: FragmentMapFunction;
};
//# sourceMappingURL=helpers.d.ts.map
96
node_modules/@apollo/client/cache/inmemory/helpers.js
generated
vendored
Normal file
@@ -0,0 +1,96 @@
import { isReference, isField, DeepMerger, resultKeyNameFromField, shouldInclude, isNonNullObject, compact, createFragmentMap, getFragmentDefinitions, isArray, } from "../../utilities/index.js";
export var hasOwn = Object.prototype.hasOwnProperty;
export function isNullish(value) {
    return value === null || value === void 0;
}
export { isArray };
export function defaultDataIdFromObject(_a, context) {
    var __typename = _a.__typename, id = _a.id, _id = _a._id;
    if (typeof __typename === "string") {
        if (context) {
            context.keyObject =
                !isNullish(id) ? { id: id }
                    : !isNullish(_id) ? { _id: _id }
                        : void 0;
        }
        // If there is no object.id, fall back to object._id.
        if (isNullish(id) && !isNullish(_id)) {
            id = _id;
        }
        if (!isNullish(id)) {
            return "".concat(__typename, ":").concat(typeof id === "number" || typeof id === "string" ?
                id
                : JSON.stringify(id));
        }
    }
}
var defaultConfig = {
    dataIdFromObject: defaultDataIdFromObject,
    addTypename: true,
    resultCaching: true,
    // Thanks to the shouldCanonizeResults helper, this should be the only line
    // you have to change to reenable canonization by default in the future.
    canonizeResults: false,
};
export function normalizeConfig(config) {
    return compact(defaultConfig, config);
}
export function shouldCanonizeResults(config) {
    var value = config.canonizeResults;
    return value === void 0 ? defaultConfig.canonizeResults : value;
}
export function getTypenameFromStoreObject(store, objectOrReference) {
    return isReference(objectOrReference) ?
        store.get(objectOrReference.__ref, "__typename")
        : objectOrReference && objectOrReference.__typename;
}
export var TypeOrFieldNameRegExp = /^[_a-z][_0-9a-z]*/i;
export function fieldNameFromStoreName(storeFieldName) {
    var match = storeFieldName.match(TypeOrFieldNameRegExp);
    return match ? match[0] : storeFieldName;
}
export function selectionSetMatchesResult(selectionSet, result, variables) {
    if (isNonNullObject(result)) {
        return isArray(result) ?
            result.every(function (item) {
                return selectionSetMatchesResult(selectionSet, item, variables);
            })
            : selectionSet.selections.every(function (field) {
                if (isField(field) && shouldInclude(field, variables)) {
                    var key = resultKeyNameFromField(field);
                    return (hasOwn.call(result, key) &&
                        (!field.selectionSet ||
                            selectionSetMatchesResult(field.selectionSet, result[key], variables)));
                }
                // If the selection has been skipped with @skip(true) or
                // @include(false), it should not count against the matching. If
                // the selection is not a field, it must be a fragment (inline or
                // named). We will determine if selectionSetMatchesResult for that
                // fragment when we get to it, so for now we return true.
                return true;
            });
    }
    return false;
}
export function storeValueIsStoreObject(value) {
    return isNonNullObject(value) && !isReference(value) && !isArray(value);
}
export function makeProcessedFieldsMerger() {
    return new DeepMerger();
}
export function extractFragmentContext(document, fragments) {
    // FragmentMap consisting only of fragments defined directly in document, not
    // including other fragments registered in the FragmentRegistry.
    var fragmentMap = createFragmentMap(getFragmentDefinitions(document));
    return {
        fragmentMap: fragmentMap,
        lookupFragment: function (name) {
            var def = fragmentMap[name];
            if (!def && fragments) {
                def = fragments.lookup(name);
            }
            return def || null;
        },
    };
}
//# sourceMappingURL=helpers.js.map
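defaultDataIdFromObject is what gives normalized entities their familiar Typename:id store keys, falling back to _id and JSON-stringifying non-scalar ids. A small illustration of the resulting keys (the object shapes are hypothetical; the import path matches Apollo's documented export):

import { defaultDataIdFromObject } from "@apollo/client";

// Typical scalar id produces "Person:42".
defaultDataIdFromObject({ __typename: "Person", id: 42 });
// Mongo-style fallback produces "Person:abc123".
defaultDataIdFromObject({ __typename: "Person", _id: "abc123" });
// Non-scalar ids are JSON-stringified: 'Order:{"region":"eu","seq":7}'.
defaultDataIdFromObject({ __typename: "Order", id: { region: "eu", seq: 7 } });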
1
node_modules/@apollo/client/cache/inmemory/helpers.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
65
node_modules/@apollo/client/cache/inmemory/inMemoryCache.d.ts
generated
vendored
Normal file
@@ -0,0 +1,65 @@
import "./fixPolyfills.js";
import type { DocumentNode, FragmentDefinitionNode, InlineFragmentNode } from "graphql";
import { ApolloCache } from "../core/cache.js";
import type { Cache } from "../core/types/Cache.js";
import type { StoreObject, Reference } from "../../utilities/index.js";
import type { InMemoryCacheConfig, NormalizedCacheObject } from "./types.js";
import { makeVar } from "./reactiveVars.js";
import { Policies } from "./policies.js";
import type { OperationVariables } from "../../core/index.js";
import { getInMemoryCacheMemoryInternals } from "../../utilities/caching/getMemoryInternals.js";
type BroadcastOptions = Pick<Cache.BatchOptions<InMemoryCache>, "optimistic" | "onWatchUpdated">;
export declare class InMemoryCache extends ApolloCache<NormalizedCacheObject> {
    private data;
    private optimisticData;
    protected config: InMemoryCacheConfig;
    private watches;
    private addTypename;
    private storeReader;
    private storeWriter;
    private addTypenameTransform;
    private maybeBroadcastWatch;
    readonly assumeImmutableResults = true;
    readonly policies: Policies;
    readonly makeVar: typeof makeVar;
    constructor(config?: InMemoryCacheConfig);
    private init;
    private resetResultCache;
    restore(data: NormalizedCacheObject): this;
    extract(optimistic?: boolean): NormalizedCacheObject;
    read<T>(options: Cache.ReadOptions): T | null;
    write(options: Cache.WriteOptions): Reference | undefined;
    modify<Entity extends Record<string, any> = Record<string, any>>(options: Cache.ModifyOptions<Entity>): boolean;
    diff<TData, TVariables extends OperationVariables = any>(options: Cache.DiffOptions<TData, TVariables>): Cache.DiffResult<TData>;
    watch<TData = any, TVariables = any>(watch: Cache.WatchOptions<TData, TVariables>): () => void;
    gc(options?: {
        resetResultCache?: boolean;
        resetResultIdentities?: boolean;
    }): string[];
    retain(rootId: string, optimistic?: boolean): number;
    release(rootId: string, optimistic?: boolean): number;
    identify(object: StoreObject | Reference): string | undefined;
    evict(options: Cache.EvictOptions): boolean;
    reset(options?: Cache.ResetOptions): Promise<void>;
    removeOptimistic(idToRemove: string): void;
    private txCount;
    batch<TUpdateResult>(options: Cache.BatchOptions<InMemoryCache, TUpdateResult>): TUpdateResult;
    performTransaction(update: (cache: InMemoryCache) => any, optimisticId?: string | null): any;
    transformDocument(document: DocumentNode): DocumentNode;
    fragmentMatches(fragment: InlineFragmentNode, typename: string): boolean;
    lookupFragment(fragmentName: string): FragmentDefinitionNode | null;
    protected broadcastWatches(options?: BroadcastOptions): void;
    private addFragmentsToDocument;
    private addTypenameToDocument;
    private broadcastWatch;
    /**
     * @experimental
     * @internal
     * This is not a stable API - it is used in development builds to expose
     * information to the DevTools.
     * Use at your own risk!
     */
    getMemoryInternals?: typeof getInMemoryCacheMemoryInternals;
}
export {};
//# sourceMappingURL=inMemoryCache.d.ts.map
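The declaration file above is the public surface most applications touch. A short sketch exercising a few of these methods together (the Person entity and its field values are hypothetical):

import { gql, InMemoryCache } from "@apollo/client";

const cache = new InMemoryCache();
cache.writeQuery({
    query: gql`query { viewer { __typename id name } }`,
    data: { viewer: { __typename: "Person", id: 1, name: "Ada" } },
});
const id = cache.identify({ __typename: "Person", id: 1 }); // "Person:1"
cache.evict({ id }); // drop the entity...
cache.gc();          // ...then collect anything now unreachable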
479
node_modules/@apollo/client/cache/inmemory/inMemoryCache.js
generated
vendored
Normal file
@@ -0,0 +1,479 @@
import { __assign, __extends } from "tslib";
import { invariant } from "../../utilities/globals/index.js";
// Make builtins like Map and Set safe to use with non-extensible objects.
import "./fixPolyfills.js";
import { wrap } from "optimism";
import { equal } from "@wry/equality";
import { ApolloCache } from "../core/cache.js";
import { MissingFieldError } from "../core/types/common.js";
import { addTypenameToDocument, isReference, DocumentTransform, canonicalStringify, print, cacheSizes, } from "../../utilities/index.js";
import { StoreReader } from "./readFromStore.js";
import { StoreWriter } from "./writeToStore.js";
import { EntityStore, supportsResultCaching } from "./entityStore.js";
import { makeVar, forgetCache, recallCache } from "./reactiveVars.js";
import { Policies } from "./policies.js";
import { hasOwn, normalizeConfig, shouldCanonizeResults } from "./helpers.js";
import { getInMemoryCacheMemoryInternals } from "../../utilities/caching/getMemoryInternals.js";
var InMemoryCache = /** @class */ (function (_super) {
    __extends(InMemoryCache, _super);
    function InMemoryCache(config) {
        if (config === void 0) { config = {}; }
        var _this = _super.call(this) || this;
        _this.watches = new Set();
        _this.addTypenameTransform = new DocumentTransform(addTypenameToDocument);
        // Override the default value, since InMemoryCache result objects are frozen
        // in development and expected to remain logically immutable in production.
        _this.assumeImmutableResults = true;
        _this.makeVar = makeVar;
        _this.txCount = 0;
        _this.config = normalizeConfig(config);
        _this.addTypename = !!_this.config.addTypename;
        _this.policies = new Policies({
            cache: _this,
            dataIdFromObject: _this.config.dataIdFromObject,
            possibleTypes: _this.config.possibleTypes,
            typePolicies: _this.config.typePolicies,
        });
        _this.init();
        return _this;
    }
    InMemoryCache.prototype.init = function () {
        // Passing { resultCaching: false } in the InMemoryCache constructor options
        // will completely disable dependency tracking, which will improve memory
        // usage but worsen the performance of repeated reads.
        var rootStore = (this.data = new EntityStore.Root({
            policies: this.policies,
            resultCaching: this.config.resultCaching,
        }));
        // When no optimistic writes are currently active, cache.optimisticData ===
        // cache.data, so there are no additional layers on top of the actual data.
        // When an optimistic update happens, this.optimisticData will become a
        // linked list of EntityStore Layer objects that terminates with the
        // original this.data cache object.
        this.optimisticData = rootStore.stump;
        this.resetResultCache();
    };
    InMemoryCache.prototype.resetResultCache = function (resetResultIdentities) {
        var _this = this;
        var previousReader = this.storeReader;
        var fragments = this.config.fragments;
        // The StoreWriter is mostly stateless and so doesn't really need to be
        // reset, but it does need to have its writer.storeReader reference updated,
        // so it's simpler to update this.storeWriter as well.
        this.storeWriter = new StoreWriter(this, (this.storeReader = new StoreReader({
            cache: this,
            addTypename: this.addTypename,
            resultCacheMaxSize: this.config.resultCacheMaxSize,
            canonizeResults: shouldCanonizeResults(this.config),
            canon: resetResultIdentities ? void 0 : (previousReader && previousReader.canon),
            fragments: fragments,
        })), fragments);
        this.maybeBroadcastWatch = wrap(function (c, options) {
            return _this.broadcastWatch(c, options);
        }, {
            max: this.config.resultCacheMaxSize ||
                cacheSizes["inMemoryCache.maybeBroadcastWatch"] ||
                5000 /* defaultCacheSizes["inMemoryCache.maybeBroadcastWatch"] */,
            makeCacheKey: function (c) {
                // Return a cache key (thus enabling result caching) only if we're
                // currently using a data store that can track cache dependencies.
                var store = c.optimistic ? _this.optimisticData : _this.data;
                if (supportsResultCaching(store)) {
                    var optimistic = c.optimistic, id = c.id, variables = c.variables;
                    return store.makeCacheKey(c.query,
                    // Different watches can have the same query, optimistic
                    // status, rootId, and variables, but if their callbacks are
                    // different, the (identical) result needs to be delivered to
                    // each distinct callback. The easiest way to achieve that
                    // separation is to include c.callback in the cache key for
                    // maybeBroadcastWatch calls. See issue #5733.
                    c.callback, canonicalStringify({ optimistic: optimistic, id: id, variables: variables }));
                }
            },
        });
        // Since we have thrown away all the cached functions that depend on the
        // CacheGroup dependencies maintained by EntityStore, we should also reset
        // all CacheGroup dependency information.
        new Set([this.data.group, this.optimisticData.group]).forEach(function (group) {
            return group.resetCaching();
        });
    };
    InMemoryCache.prototype.restore = function (data) {
        this.init();
        // Since calling this.init() discards/replaces the entire StoreReader, along
        // with the result caches it maintains, this.data.replace(data) won't have
        // to bother deleting the old data.
        if (data)
            this.data.replace(data);
        return this;
    };
    InMemoryCache.prototype.extract = function (optimistic) {
        if (optimistic === void 0) { optimistic = false; }
        return (optimistic ? this.optimisticData : this.data).extract();
    };
    InMemoryCache.prototype.read = function (options) {
        // Since read returns data or null, without any additional metadata
        // about whether/where there might have been missing fields, the
        // default behavior cannot be returnPartialData = true (like it is
        // for the diff method), since defaulting to true would violate the
        // integrity of the T in the return type. However, partial data may
        // be useful in some cases, so returnPartialData:true may be
        // specified explicitly.
        var _a = options.returnPartialData, returnPartialData = _a === void 0 ? false : _a;
        try {
            return (this.storeReader.diffQueryAgainstStore(__assign(__assign({}, options), { store: options.optimistic ? this.optimisticData : this.data, config: this.config, returnPartialData: returnPartialData })).result || null);
        }
        catch (e) {
            if (e instanceof MissingFieldError) {
                // Swallow MissingFieldError and return null, so callers do not need to
                // worry about catching "normal" exceptions resulting from incomplete
                // cache data. Unexpected errors will be re-thrown. If you need more
                // information about which fields were missing, use cache.diff instead,
                // and examine diffResult.missing.
                return null;
            }
            throw e;
        }
    };
    InMemoryCache.prototype.write = function (options) {
        try {
            ++this.txCount;
            return this.storeWriter.writeToStore(this.data, options);
        }
        finally {
            if (!--this.txCount && options.broadcast !== false) {
                this.broadcastWatches();
            }
        }
    };
    InMemoryCache.prototype.modify = function (options) {
        if (hasOwn.call(options, "id") && !options.id) {
            // To my knowledge, TypeScript does not currently provide a way to
            // enforce that an optional property?:type must *not* be undefined
            // when present. That ability would be useful here, because we want
            // options.id to default to ROOT_QUERY only when no options.id was
            // provided. If the caller attempts to pass options.id with a
            // falsy/undefined value (perhaps because cache.identify failed), we
            // should not assume the goal was to modify the ROOT_QUERY object.
            // We could throw, but it seems natural to return false to indicate
            // that nothing was modified.
            return false;
        }
        var store = ((options.optimistic) // Defaults to false.
        ) ?
            this.optimisticData
            : this.data;
        try {
            ++this.txCount;
            return store.modify(options.id || "ROOT_QUERY", options.fields);
        }
        finally {
            if (!--this.txCount && options.broadcast !== false) {
                this.broadcastWatches();
            }
        }
    };
    InMemoryCache.prototype.diff = function (options) {
        return this.storeReader.diffQueryAgainstStore(__assign(__assign({}, options), { store: options.optimistic ? this.optimisticData : this.data, rootId: options.id || "ROOT_QUERY", config: this.config }));
    };
    InMemoryCache.prototype.watch = function (watch) {
        var _this = this;
        if (!this.watches.size) {
            // In case we previously called forgetCache(this) because
            // this.watches became empty (see below), reattach this cache to any
            // reactive variables on which it previously depended. It might seem
            // paradoxical that we're able to recall something we supposedly
            // forgot, but the point of calling forgetCache(this) is to silence
            // useless broadcasts while this.watches is empty, and to allow the
            // cache to be garbage collected. If, however, we manage to call
            // recallCache(this) here, this cache object must not have been
            // garbage collected yet, and should resume receiving updates from
            // reactive variables, now that it has a watcher to notify.
            recallCache(this);
        }
        this.watches.add(watch);
        if (watch.immediate) {
            this.maybeBroadcastWatch(watch);
        }
        return function () {
            // Once we remove the last watch from this.watches, cache.broadcastWatches
            // no longer does anything, so we preemptively tell the reactive variable
            // system to exclude this cache from future broadcasts.
            if (_this.watches.delete(watch) && !_this.watches.size) {
                forgetCache(_this);
            }
            // Remove this watch from the LRU cache managed by the
            // maybeBroadcastWatch OptimisticWrapperFunction, to prevent memory
            // leaks involving the closure of watch.callback.
            _this.maybeBroadcastWatch.forget(watch);
        };
    };
    InMemoryCache.prototype.gc = function (options) {
        var _a;
        canonicalStringify.reset();
        print.reset();
        this.addTypenameTransform.resetCache();
        (_a = this.config.fragments) === null || _a === void 0 ? void 0 : _a.resetCaches();
        var ids = this.optimisticData.gc();
        if (options && !this.txCount) {
            if (options.resetResultCache) {
                this.resetResultCache(options.resetResultIdentities);
            }
            else if (options.resetResultIdentities) {
                this.storeReader.resetCanon();
            }
        }
        return ids;
    };
    // Call this method to ensure the given root ID remains in the cache after
    // garbage collection, along with its transitive child entities. Note that
    // the cache automatically retains all directly written entities. By default,
    // the retainment persists after optimistic updates are removed. Pass true
    // for the optimistic argument if you would prefer for the retainment to be
    // discarded when the top-most optimistic layer is removed. Returns the
    // resulting (non-negative) retainment count.
    InMemoryCache.prototype.retain = function (rootId, optimistic) {
        return (optimistic ? this.optimisticData : this.data).retain(rootId);
    };
    // Call this method to undo the effect of the retain method, above. Once the
    // retainment count falls to zero, the given ID will no longer be preserved
    // during garbage collection, though it may still be preserved by other safe
    // entities that refer to it. Returns the resulting (non-negative) retainment
    // count, in case that's useful.
    InMemoryCache.prototype.release = function (rootId, optimistic) {
        return (optimistic ? this.optimisticData : this.data).release(rootId);
    };
    // Returns the canonical ID for a given StoreObject, obeying typePolicies
    // and keyFields (and dataIdFromObject, if you still use that). At minimum,
    // the object must contain a __typename and any primary key fields required
    // to identify entities of that type. If you pass a query result object, be
    // sure that none of the primary key fields have been renamed by aliasing.
    // If you pass a Reference object, its __ref ID string will be returned.
    InMemoryCache.prototype.identify = function (object) {
        if (isReference(object))
            return object.__ref;
        try {
            return this.policies.identify(object)[0];
        }
        catch (e) {
            globalThis.__DEV__ !== false && invariant.warn(e);
        }
    };
    InMemoryCache.prototype.evict = function (options) {
        if (!options.id) {
            if (hasOwn.call(options, "id")) {
                // See comment in modify method about why we return false when
                // options.id exists but is falsy/undefined.
                return false;
            }
            options = __assign(__assign({}, options), { id: "ROOT_QUERY" });
        }
        try {
            // It's unlikely that the eviction will end up invoking any other
            // cache update operations while it's running, but {in,de}crementing
            // this.txCount still seems like a good idea, for uniformity with
            // the other update methods.
            ++this.txCount;
            // Pass this.data as a limit on the depth of the eviction, so evictions
            // during optimistic updates (when this.data is temporarily set equal to
            // this.optimisticData) do not escape their optimistic Layer.
            return this.optimisticData.evict(options, this.data);
        }
        finally {
            if (!--this.txCount && options.broadcast !== false) {
                this.broadcastWatches();
            }
        }
    };
    InMemoryCache.prototype.reset = function (options) {
        var _this = this;
        this.init();
        canonicalStringify.reset();
        if (options && options.discardWatches) {
            // Similar to what happens in the unsubscribe function returned by
            // cache.watch, applied to all current watches.
            this.watches.forEach(function (watch) { return _this.maybeBroadcastWatch.forget(watch); });
            this.watches.clear();
            forgetCache(this);
        }
        else {
            // Calling this.init() above unblocks all maybeBroadcastWatch caching, so
            // this.broadcastWatches() triggers a broadcast to every current watcher
            // (letting them know their data is now missing). This default behavior is
            // convenient because it means the watches do not have to be manually
            // reestablished after resetting the cache. To prevent this broadcast and
            // cancel all watches, pass true for options.discardWatches.
            this.broadcastWatches();
        }
        return Promise.resolve();
    };
    InMemoryCache.prototype.removeOptimistic = function (idToRemove) {
        var newOptimisticData = this.optimisticData.removeLayer(idToRemove);
        if (newOptimisticData !== this.optimisticData) {
            this.optimisticData = newOptimisticData;
            this.broadcastWatches();
        }
    };
    InMemoryCache.prototype.batch = function (options) {
        var _this = this;
        var update = options.update, _a = options.optimistic, optimistic = _a === void 0 ? true : _a, removeOptimistic = options.removeOptimistic, onWatchUpdated = options.onWatchUpdated;
        var updateResult;
        var perform = function (layer) {
            var _a = _this, data = _a.data, optimisticData = _a.optimisticData;
            ++_this.txCount;
            if (layer) {
                _this.data = _this.optimisticData = layer;
            }
            try {
                return (updateResult = update(_this));
            }
            finally {
                --_this.txCount;
                _this.data = data;
                _this.optimisticData = optimisticData;
            }
        };
        var alreadyDirty = new Set();
        if (onWatchUpdated && !this.txCount) {
            // If an options.onWatchUpdated callback is provided, we want to call it
            // with only the Cache.WatchOptions objects affected by options.update,
            // but there might be dirty watchers already waiting to be broadcast that
            // have nothing to do with the update. To prevent including those watchers
            // in the post-update broadcast, we perform this initial broadcast to
            // collect the dirty watchers, so we can re-dirty them later, after the
            // post-update broadcast, allowing them to receive their pending
            // broadcasts the next time broadcastWatches is called, just as they would
            // if we never called cache.batch.
            this.broadcastWatches(__assign(__assign({}, options), { onWatchUpdated: function (watch) {
                    alreadyDirty.add(watch);
                    return false;
                } }));
        }
        if (typeof optimistic === "string") {
            // Note that there can be multiple layers with the same optimistic ID.
            // When removeOptimistic(id) is called for that id, all matching layers
            // will be removed, and the remaining layers will be reapplied.
            this.optimisticData = this.optimisticData.addLayer(optimistic, perform);
        }
        else if (optimistic === false) {
            // Ensure both this.data and this.optimisticData refer to the root
            // (non-optimistic) layer of the cache during the update. Note that
            // this.data could be a Layer if we are currently executing an optimistic
            // update function, but otherwise will always be an EntityStore.Root
            // instance.
            perform(this.data);
        }
        else {
            // Otherwise, leave this.data and this.optimisticData unchanged and run
            // the update with broadcast batching.
            perform();
        }
        if (typeof removeOptimistic === "string") {
            this.optimisticData = this.optimisticData.removeLayer(removeOptimistic);
        }
        // Note: if this.txCount > 0, then alreadyDirty.size === 0, so this code
        // takes the else branch and calls this.broadcastWatches(options), which
        // does nothing when this.txCount > 0.
        if (onWatchUpdated && alreadyDirty.size) {
            this.broadcastWatches(__assign(__assign({}, options), { onWatchUpdated: function (watch, diff) {
                    var result = onWatchUpdated.call(this, watch, diff);
                    if (result !== false) {
                        // Since onWatchUpdated did not return false, this diff is
                        // about to be broadcast to watch.callback, so we don't need
                        // to re-dirty it with the other alreadyDirty watches below.
                        alreadyDirty.delete(watch);
                    }
                    return result;
                } }));
            // Silently re-dirty any watches that were already dirty before the update
            // was performed, and were not broadcast just now.
            if (alreadyDirty.size) {
                alreadyDirty.forEach(function (watch) { return _this.maybeBroadcastWatch.dirty(watch); });
            }
        }
        else {
            // If alreadyDirty is empty or we don't have an onWatchUpdated
            // function, we don't need to go to the trouble of wrapping
            // options.onWatchUpdated.
            this.broadcastWatches(options);
        }
        return updateResult;
    };
    InMemoryCache.prototype.performTransaction = function (update, optimisticId) {
        return this.batch({
            update: update,
            optimistic: optimisticId || optimisticId !== null,
        });
    };
    InMemoryCache.prototype.transformDocument = function (document) {
        return this.addTypenameToDocument(this.addFragmentsToDocument(document));
    };
    InMemoryCache.prototype.fragmentMatches = function (fragment, typename) {
        return this.policies.fragmentMatches(fragment, typename);
    };
    InMemoryCache.prototype.lookupFragment = function (fragmentName) {
        var _a;
        return ((_a = this.config.fragments) === null || _a === void 0 ? void 0 : _a.lookup(fragmentName)) || null;
    };
    InMemoryCache.prototype.broadcastWatches = function (options) {
        var _this = this;
        if (!this.txCount) {
            this.watches.forEach(function (c) { return _this.maybeBroadcastWatch(c, options); });
        }
    };
    InMemoryCache.prototype.addFragmentsToDocument = function (document) {
        var fragments = this.config.fragments;
        return fragments ? fragments.transform(document) : document;
    };
    InMemoryCache.prototype.addTypenameToDocument = function (document) {
        if (this.addTypename) {
            return this.addTypenameTransform.transformDocument(document);
        }
        return document;
    };
    // This method is wrapped by maybeBroadcastWatch, which is called by
    // broadcastWatches, so that we compute and broadcast results only when
    // the data that would be broadcast might have changed. It would be
    // simpler to check for changes after recomputing a result but before
    // broadcasting it, but this wrapping approach allows us to skip both
    // the recomputation and the broadcast, in most cases.
    InMemoryCache.prototype.broadcastWatch = function (c, options) {
        var lastDiff = c.lastDiff;
        // Both WatchOptions and DiffOptions extend ReadOptions, and DiffOptions
        // currently requires no additional properties, so we can use c (a
        // WatchOptions object) as DiffOptions, without having to allocate a new
        // object, and without having to enumerate the relevant properties (query,
        // variables, etc.) explicitly. There will be some additional properties
        // (lastDiff, callback, etc.), but cache.diff ignores them.
        var diff = this.diff(c);
        if (options) {
            if (c.optimistic && typeof options.optimistic === "string") {
                diff.fromOptimisticTransaction = true;
            }
            if (options.onWatchUpdated &&
                options.onWatchUpdated.call(this, c, diff, lastDiff) === false) {
                // Returning false from the onWatchUpdated callback will prevent
                // calling c.callback(diff) for this watcher.
                return;
            }
        }
        if (!lastDiff || !equal(lastDiff.result, diff.result)) {
            c.callback((c.lastDiff = diff), lastDiff);
        }
    };
    return InMemoryCache;
}(ApolloCache));
export { InMemoryCache };
if (globalThis.__DEV__ !== false) {
    InMemoryCache.prototype.getMemoryInternals = getInMemoryCacheMemoryInternals;
}
//# sourceMappingURL=inMemoryCache.js.map
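The batch method above is what performTransaction and optimistic mutation updates build on: passing a string for optimistic creates a named Layer, and removeOptimistic peels every layer with that id back off. A brief usage sketch (the Person entity, layer id, and field values are hypothetical):

import { gql, InMemoryCache } from "@apollo/client";

const cache = new InMemoryCache();
const query = gql`query { viewer { __typename id name } }`;

// Write into a named optimistic layer on top of the root store...
cache.batch({
    optimistic: "rename-viewer", // creates a Layer with this id
    update(c) {
        c.writeQuery({
            query,
            data: { viewer: { __typename: "Person", id: 1, name: "Draft name" } },
        });
    },
});
// ...and discard the layer later, e.g. once the real mutation result lands.
cache.removeOptimistic("rename-viewer");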
1
node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
9
node_modules/@apollo/client/cache/inmemory/key-extractor.d.ts
generated
vendored
Normal file
@@ -0,0 +1,9 @@
import type { KeySpecifier, KeyFieldsFunction, KeyArgsFunction } from "./policies.js";
export declare function keyFieldsFnFromSpecifier(specifier: KeySpecifier): KeyFieldsFunction;
export declare function keyArgsFnFromSpecifier(specifier: KeySpecifier): KeyArgsFunction;
export declare function collectSpecifierPaths(specifier: KeySpecifier, extractor: (path: string[]) => any): Record<string, any>;
export declare function getSpecifierPaths(spec: KeySpecifier): string[][];
declare function extractKey<TObj extends Record<string, any>, TKey extends string>(object: TObj, key: TKey): TObj[TKey] | undefined;
export declare function extractKeyPath(object: Record<string, any>, path: string[], extract?: typeof extractKey): any;
export {};
//# sourceMappingURL=key-extractor.d.ts.map
192
node_modules/@apollo/client/cache/inmemory/key-extractor.js
generated
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
import { invariant } from "../../utilities/globals/index.js";
|
||||
import { argumentsObjectFromField, DeepMerger, isNonEmptyArray, isNonNullObject, } from "../../utilities/index.js";
|
||||
import { hasOwn, isArray } from "./helpers.js";
|
||||
// Mapping from JSON-encoded KeySpecifier strings to associated information.
|
||||
var specifierInfoCache = Object.create(null);
|
||||
function lookupSpecifierInfo(spec) {
|
||||
// It's safe to encode KeySpecifier arrays with JSON.stringify, since they're
|
||||
// just arrays of strings or nested KeySpecifier arrays, and the order of the
|
||||
// array elements is important (and suitably preserved by JSON.stringify).
|
||||
var cacheKey = JSON.stringify(spec);
|
||||
return (specifierInfoCache[cacheKey] ||
|
||||
(specifierInfoCache[cacheKey] = Object.create(null)));
|
||||
}
|
||||
export function keyFieldsFnFromSpecifier(specifier) {
|
||||
var info = lookupSpecifierInfo(specifier);
|
||||
return (info.keyFieldsFn || (info.keyFieldsFn = function (object, context) {
|
||||
var extract = function (from, key) {
|
||||
return context.readField(key, from);
|
||||
};
|
||||
var keyObject = (context.keyObject = collectSpecifierPaths(specifier, function (schemaKeyPath) {
|
||||
var extracted = extractKeyPath(context.storeObject, schemaKeyPath,
|
||||
// Using context.readField to extract paths from context.storeObject
|
||||
// allows the extraction to see through Reference objects and respect
|
||||
// custom read functions.
|
||||
extract);
|
||||
if (extracted === void 0 &&
|
||||
object !== context.storeObject &&
|
||||
hasOwn.call(object, schemaKeyPath[0])) {
|
||||
// If context.storeObject fails to provide a value for the requested
|
||||
// path, fall back to the raw result object, if it has a top-level key
|
||||
// matching the first key in the path (schemaKeyPath[0]). This allows
|
||||
// key fields included in the written data to be saved in the cache
|
||||
// even if they are not selected explicitly in context.selectionSet.
|
||||
// Not being mentioned by context.selectionSet is convenient here,
|
||||
// since it means these extra fields cannot be affected by field
|
||||
// aliasing, which is why we can use extractKey instead of
|
||||
// context.readField for this extraction.
|
||||
extracted = extractKeyPath(object, schemaKeyPath, extractKey);
|
||||
}
|
||||
invariant(extracted !== void 0, 5, schemaKeyPath.join("."), object);
|
||||
return extracted;
|
||||
}));
|
||||
return "".concat(context.typename, ":").concat(JSON.stringify(keyObject));
|
||||
}));
|
||||
}
|
||||
// The keyArgs extraction process is roughly analogous to keyFields extraction,
|
||||
// but there are no aliases involved, missing fields are tolerated (by merely
|
||||
// omitting them from the key), and drawing from field.directives or variables
|
||||
// is allowed (in addition to drawing from the field's arguments object).
|
||||
// Concretely, these differences mean passing a different key path extractor
|
||||
// function to collectSpecifierPaths, reusing the shared extractKeyPath helper
|
||||
// wherever possible.
|
||||
export function keyArgsFnFromSpecifier(specifier) {
|
||||
var info = lookupSpecifierInfo(specifier);
|
||||
return (info.keyArgsFn ||
|
||||
(info.keyArgsFn = function (args, _a) {
|
    var field = _a.field, variables = _a.variables, fieldName = _a.fieldName;
    var collected = collectSpecifierPaths(specifier, function (keyPath) {
        var firstKey = keyPath[0];
        var firstChar = firstKey.charAt(0);
        if (firstChar === "@") {
            if (field && isNonEmptyArray(field.directives)) {
                var directiveName_1 = firstKey.slice(1);
                // If the directive appears multiple times, only the first
                // occurrence's arguments will be used. TODO Allow repetition?
                // TODO Cache this work somehow, a la aliasMap?
                var d = field.directives.find(function (d) { return d.name.value === directiveName_1; });
                // Fortunately argumentsObjectFromField works for DirectiveNode!
                var directiveArgs = d && argumentsObjectFromField(d, variables);
                // For directives without arguments (d defined, but directiveArgs ===
                // null), the presence or absence of the directive still counts as
                // part of the field key, so we return null in those cases. If no
                // directive with this name was found for this field (d undefined and
                // thus directiveArgs undefined), we return undefined, which causes
                // this value to be omitted from the key object returned by
                // collectSpecifierPaths.
                return (directiveArgs &&
                    extractKeyPath(directiveArgs,
                    // If keyPath.length === 1, this code calls extractKeyPath with an
                    // empty path, which works because it uses directiveArgs as the
                    // extracted value.
                    keyPath.slice(1)));
            }
            // If the key started with @ but there was no corresponding directive,
            // we want to omit this value from the key object, not fall through to
            // treating @whatever as a normal argument name.
            return;
        }
        if (firstChar === "$") {
            var variableName = firstKey.slice(1);
            if (variables && hasOwn.call(variables, variableName)) {
                var varKeyPath = keyPath.slice(0);
                varKeyPath[0] = variableName;
                return extractKeyPath(variables, varKeyPath);
            }
            // If the key started with $ but there was no corresponding variable, we
            // want to omit this value from the key object, not fall through to
            // treating $whatever as a normal argument name.
            return;
        }
        if (args) {
            return extractKeyPath(args, keyPath);
        }
    });
    var suffix = JSON.stringify(collected);
    // If no arguments were passed to this field, and it didn't have any other
    // field key contributions from directives or variables, hide the empty
    // :{} suffix from the field key. However, a field passed no arguments can
    // still end up with a non-empty :{...} suffix if its key configuration
    // refers to directives or variables.
    if (args || suffix !== "{}") {
        fieldName += ":" + suffix;
    }
    return fieldName;
}));
}
export function collectSpecifierPaths(specifier, extractor) {
    // For each path specified by specifier, invoke the extractor, and repeatedly
    // merge the results together, with appropriate ancestor context.
    var merger = new DeepMerger();
    return getSpecifierPaths(specifier).reduce(function (collected, path) {
        var _a;
        var toMerge = extractor(path);
        if (toMerge !== void 0) {
            // This path is not expected to contain array indexes, so the toMerge
            // reconstruction will not contain arrays. TODO Fix this?
            for (var i = path.length - 1; i >= 0; --i) {
                toMerge = (_a = {}, _a[path[i]] = toMerge, _a);
            }
            collected = merger.merge(collected, toMerge);
        }
        return collected;
    }, Object.create(null));
}
export function getSpecifierPaths(spec) {
    var info = lookupSpecifierInfo(spec);
    if (!info.paths) {
        var paths_1 = (info.paths = []);
        var currentPath_1 = [];
        spec.forEach(function (s, i) {
            if (isArray(s)) {
                getSpecifierPaths(s).forEach(function (p) { return paths_1.push(currentPath_1.concat(p)); });
                currentPath_1.length = 0;
            }
            else {
                currentPath_1.push(s);
                if (!isArray(spec[i + 1])) {
                    paths_1.push(currentPath_1.slice(0));
                    currentPath_1.length = 0;
                }
            }
        });
    }
    return info.paths;
}
function extractKey(object, key) {
    return object[key];
}
export function extractKeyPath(object, path, extract) {
    // For each key in path, extract the corresponding child property from obj,
    // flattening arrays if encountered (uncommon for keyFields and keyArgs, but
    // possible). The final result of path.reduce is normalized so unexpected leaf
    // objects have their keys safely sorted. That final result is difficult to
    // type as anything other than any. You're welcome to try to improve the
    // return type, but keep in mind extractKeyPath is not a public function
    // (exported only for testing), so the effort may not be worthwhile unless the
    // limited set of actual callers (see above) pass arguments that TypeScript
    // can statically type. If we know only that path is some array of strings
    // (and not, say, a specific tuple of statically known strings), any (or
    // possibly unknown) is the honest answer.
    extract = extract || extractKey;
    return normalize(path.reduce(function reducer(obj, key) {
        return isArray(obj) ?
            obj.map(function (child) { return reducer(child, key); })
            : obj && extract(obj, key);
    }, object));
}
function normalize(value) {
    // Usually the extracted value will be a scalar value, since most primary
    // key fields are scalar, but just in case we get an object or an array, we
    // need to do some normalization of the order of (nested) keys.
    if (isNonNullObject(value)) {
        if (isArray(value)) {
            return value.map(normalize);
        }
        return collectSpecifierPaths(Object.keys(value).sort(), function (path) {
            return extractKeyPath(value, path);
        });
    }
    return value;
}
//# sourceMappingURL=key-extractor.js.map
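Note (editorial, illustrative only): the "@"- and "$"-prefixed branches above are exercised by keyArgs specifiers that name directive and variable contributions. The sketch below shows a cache configuration using such a specifier; the field name `feed` and its arguments are hypothetical, but the keyArgs shape follows the documented @apollo/client API, and the resulting store field key gets a :{...} suffix like feed:{"type":"PUBLIC","@connection":{"key":"feed"}}.

import { InMemoryCache } from "@apollo/client";

// keyArgs: ["type", "@connection", ["key"]] includes the "type" argument and
// the "key" argument of the @connection directive in the field key, so
//   feed(type: "PUBLIC") @connection(key: "feed")
// is stored under feed:{"type":"PUBLIC","@connection":{"key":"feed"}}.
const cache = new InMemoryCache({
  typePolicies: {
    Query: {
      fields: {
        feed: {
          keyArgs: ["type", "@connection", ["key"]],
        },
      },
    },
  },
});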
1
node_modules/@apollo/client/cache/inmemory/key-extractor.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
12
node_modules/@apollo/client/cache/inmemory/object-canon.d.ts
generated
vendored
Normal file
@@ -0,0 +1,12 @@
export declare class ObjectCanon {
    private known;
    private pool;
    isKnown(value: any): boolean;
    private passes;
    pass<T>(value: T): T;
    admit<T>(value: T): T;
    private sortedKeys;
    private keysByJSON;
    readonly empty: {};
}
//# sourceMappingURL=object-canon.d.ts.map
181
node_modules/@apollo/client/cache/inmemory/object-canon.js
generated
vendored
Normal file
@@ -0,0 +1,181 @@
import { __assign } from "tslib";
import { Trie } from "@wry/trie";
import { canUseWeakMap, canUseWeakSet, isNonNullObject as isObjectOrArray, } from "../../utilities/index.js";
import { isArray } from "./helpers.js";
function shallowCopy(value) {
    if (isObjectOrArray(value)) {
        return isArray(value) ?
            value.slice(0)
            : __assign({ __proto__: Object.getPrototypeOf(value) }, value);
    }
    return value;
}
// When programmers talk about the "canonical form" of an object, they
// usually have the following meaning in mind, which I've copied from
// https://en.wiktionary.org/wiki/canonical_form:
//
// 1. A standard or normal presentation of a mathematical entity [or
//    object]. A canonical form is an element of a set of representatives
//    of equivalence classes of forms such that there is a function or
//    procedure which projects every element of each equivalence class
//    onto that one element, the canonical form of that equivalence
//    class. The canonical form is expected to be simpler than the rest of
//    the forms in some way.
//
// That's a long-winded way of saying any two objects that have the same
// canonical form may be considered equivalent, even if they are !==,
// which usually means the objects are structurally equivalent (deeply
// equal), but don't necessarily use the same memory.
//
// Like a literary or musical canon, this ObjectCanon class represents a
// collection of unique canonical items (JavaScript objects), with the
// important property that canon.admit(a) === canon.admit(b) if a and b
// are deeply equal to each other. In terms of the definition above, the
// canon.admit method is the "function or procedure which projects every"
// object "onto that one element, the canonical form."
//
// In the worst case, the canonicalization process may involve looking at
// every property in the provided object tree, so it takes the same order
// of time as deep equality checking. Fortunately, already-canonicalized
// objects are returned immediately from canon.admit, so the presence of
// canonical subtrees tends to speed up canonicalization.
//
// Since consumers of canonical objects can check for deep equality in
// constant time, canonicalizing cache results can massively improve the
// performance of application code that skips re-rendering unchanged
// results, such as "pure" UI components in a framework like React.
//
// Of course, since canonical objects may be shared widely between
// unrelated consumers, it's important to think of them as immutable, even
// though they are not actually frozen with Object.freeze in production,
// due to the extra performance overhead that comes with frozen objects.
//
// Custom scalar objects whose internal class name is neither Array nor
// Object can be included safely in the admitted tree, but they will not
// be replaced with a canonical version (to put it another way, they are
// assumed to be canonical already).
//
// If we ignore custom objects, no detection of cycles or repeated object
// references is currently required by the StoreReader class, since
// GraphQL result objects are JSON-serializable trees (and thus contain
// neither cycles nor repeated subtrees), so we can avoid the complexity
// of keeping track of objects we've already seen during the recursion of
// the admit method.
//
// In the future, we may consider adding additional cases to the switch
// statement to handle other common object types, such as "[object Date]"
// objects, as needed.
var ObjectCanon = /** @class */ (function () {
    function ObjectCanon() {
        // Set of all canonical objects this ObjectCanon has admitted, allowing
        // canon.admit to return previously-canonicalized objects immediately.
        this.known = new (canUseWeakSet ? WeakSet : Set)();
        // Efficient storage/lookup structure for canonical objects.
        this.pool = new Trie(canUseWeakMap);
        // Make the ObjectCanon assume this value has already been
        // canonicalized.
        this.passes = new WeakMap();
        // Arrays that contain the same elements in a different order can share
        // the same SortedKeysInfo object, to save memory.
        this.keysByJSON = new Map();
        // This has to come last because it depends on keysByJSON.
        this.empty = this.admit({});
    }
    ObjectCanon.prototype.isKnown = function (value) {
        return isObjectOrArray(value) && this.known.has(value);
    };
    ObjectCanon.prototype.pass = function (value) {
        if (isObjectOrArray(value)) {
            var copy = shallowCopy(value);
            this.passes.set(copy, value);
            return copy;
        }
        return value;
    };
    ObjectCanon.prototype.admit = function (value) {
        var _this = this;
        if (isObjectOrArray(value)) {
            var original = this.passes.get(value);
            if (original)
                return original;
            var proto = Object.getPrototypeOf(value);
            switch (proto) {
                case Array.prototype: {
                    if (this.known.has(value))
                        return value;
                    var array = value.map(this.admit, this);
                    // Arrays are looked up in the Trie using their recursively
                    // canonicalized elements, and the known version of the array is
                    // preserved as node.array.
                    var node = this.pool.lookupArray(array);
                    if (!node.array) {
                        this.known.add((node.array = array));
                        // Since canonical arrays may be shared widely between
                        // unrelated consumers, it's important to regard them as
                        // immutable, even if they are not frozen in production.
                        if (globalThis.__DEV__ !== false) {
                            Object.freeze(array);
                        }
                    }
                    return node.array;
                }
                case null:
                case Object.prototype: {
                    if (this.known.has(value))
                        return value;
                    var proto_1 = Object.getPrototypeOf(value);
                    var array_1 = [proto_1];
                    var keys = this.sortedKeys(value);
                    array_1.push(keys.json);
                    var firstValueIndex_1 = array_1.length;
                    keys.sorted.forEach(function (key) {
                        array_1.push(_this.admit(value[key]));
                    });
                    // Objects are looked up in the Trie by their prototype (which
                    // is *not* recursively canonicalized), followed by a JSON
                    // representation of their (sorted) keys, followed by the
                    // sequence of recursively canonicalized values corresponding to
                    // those keys. To keep the final results unambiguous with other
                    // sequences (such as arrays that just happen to contain [proto,
                    // keys.json, value1, value2, ...]), the known version of the
                    // object is stored as node.object.
                    var node = this.pool.lookupArray(array_1);
                    if (!node.object) {
                        var obj_1 = (node.object = Object.create(proto_1));
                        this.known.add(obj_1);
                        keys.sorted.forEach(function (key, i) {
                            obj_1[key] = array_1[firstValueIndex_1 + i];
                        });
                        // Since canonical objects may be shared widely between
                        // unrelated consumers, it's important to regard them as
                        // immutable, even if they are not frozen in production.
                        if (globalThis.__DEV__ !== false) {
                            Object.freeze(obj_1);
                        }
                    }
                    return node.object;
                }
            }
        }
        return value;
    };
    // It's worthwhile to cache the sorting of arrays of strings, since the
    // same initial unsorted arrays tend to be encountered many times.
    // Fortunately, we can reuse the Trie machinery to look up the sorted
    // arrays in linear time (which is faster than sorting large arrays).
    ObjectCanon.prototype.sortedKeys = function (obj) {
        var keys = Object.keys(obj);
        var node = this.pool.lookupArray(keys);
        if (!node.keys) {
            keys.sort();
            var json = JSON.stringify(keys);
            if (!(node.keys = this.keysByJSON.get(json))) {
                this.keysByJSON.set(json, (node.keys = { sorted: keys, json: json }));
            }
        }
        return node.keys;
    };
    return ObjectCanon;
}());
export { ObjectCanon };
//# sourceMappingURL=object-canon.js.map
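Note (editorial, illustrative only): a minimal sketch of the guarantee described in the long comment block above: admitting two deeply-equal objects yields the same (===) canonical object. ObjectCanon is an internal module, so the deep import path below is an assumption for demonstration, not a supported public entry point.

// Illustrative only: ObjectCanon is internal; this import path is an
// assumption for demonstration purposes.
import { ObjectCanon } from "@apollo/client/cache/inmemory/object-canon";

const canon = new ObjectCanon();
const a = canon.admit({ x: 1, y: [2, 3] });
const b = canon.admit({ y: [2, 3], x: 1 }); // deeply equal, different key order
console.log(a === b);          // true: both inputs project onto one canonical object
console.log(canon.isKnown(a)); // true: a is already canonical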
1
node_modules/@apollo/client/cache/inmemory/object-canon.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
97
node_modules/@apollo/client/cache/inmemory/policies.d.ts
generated
vendored
Normal file
@@ -0,0 +1,97 @@
import type { InlineFragmentNode, FragmentDefinitionNode, SelectionSetNode, FieldNode } from "graphql";
import type { FragmentMap, StoreValue, StoreObject, Reference } from "../../utilities/index.js";
import { isReference } from "../../utilities/index.js";
import type { IdGetter, MergeInfo, ReadMergeModifyContext } from "./types.js";
import type { InMemoryCache } from "./inMemoryCache.js";
import type { SafeReadonly, FieldSpecifier, ToReferenceFunction, ReadFieldFunction, ReadFieldOptions, CanReadFunction } from "../core/types/common.js";
import type { WriteContext } from "./writeToStore.js";
export type TypePolicies = {
    [__typename: string]: TypePolicy;
};
export type KeySpecifier = ReadonlyArray<string | KeySpecifier>;
export type KeyFieldsContext = {
    typename: string | undefined;
    storeObject: StoreObject;
    readField: ReadFieldFunction;
    selectionSet?: SelectionSetNode;
    fragmentMap?: FragmentMap;
    keyObject?: Record<string, any>;
};
export type KeyFieldsFunction = (object: Readonly<StoreObject>, context: KeyFieldsContext) => KeySpecifier | false | ReturnType<IdGetter>;
export type TypePolicy = {
    keyFields?: KeySpecifier | KeyFieldsFunction | false;
    merge?: FieldMergeFunction | boolean;
    queryType?: true;
    mutationType?: true;
    subscriptionType?: true;
    fields?: {
        [fieldName: string]: FieldPolicy<any> | FieldReadFunction<any>;
    };
};
export type KeyArgsFunction = (args: Record<string, any> | null, context: {
    typename: string;
    fieldName: string;
    field: FieldNode | null;
    variables?: Record<string, any>;
}) => KeySpecifier | false | ReturnType<IdGetter>;
export type FieldPolicy<TExisting = any, TIncoming = TExisting, TReadResult = TIncoming, TOptions extends FieldFunctionOptions = FieldFunctionOptions> = {
    keyArgs?: KeySpecifier | KeyArgsFunction | false;
    read?: FieldReadFunction<TExisting, TReadResult, TOptions>;
    merge?: FieldMergeFunction<TExisting, TIncoming, TOptions> | boolean;
};
export type StorageType = Record<string, any>;
export interface FieldFunctionOptions<TArgs = Record<string, any>, TVars = Record<string, any>> {
    args: TArgs | null;
    fieldName: string;
    storeFieldName: string;
    field: FieldNode | null;
    variables?: TVars;
    isReference: typeof isReference;
    toReference: ToReferenceFunction;
    storage: StorageType;
    cache: InMemoryCache;
    readField: ReadFieldFunction;
    canRead: CanReadFunction;
    mergeObjects: MergeObjectsFunction;
}
type MergeObjectsFunction = <T extends StoreObject | Reference>(existing: T, incoming: T) => T;
export type FieldReadFunction<TExisting = any, TReadResult = TExisting, TOptions extends FieldFunctionOptions = FieldFunctionOptions> = (existing: SafeReadonly<TExisting> | undefined, options: TOptions) => TReadResult | undefined;
export type FieldMergeFunction<TExisting = any, TIncoming = TExisting, TOptions extends FieldFunctionOptions = FieldFunctionOptions> = (existing: SafeReadonly<TExisting> | undefined, incoming: SafeReadonly<TIncoming>, options: TOptions) => SafeReadonly<TExisting>;
export type PossibleTypesMap = {
    [supertype: string]: string[];
};
export declare class Policies {
    private config;
    private typePolicies;
    private toBeAdded;
    private supertypeMap;
    private fuzzySubtypes;
    readonly cache: InMemoryCache;
    readonly rootIdsByTypename: Record<string, string>;
    readonly rootTypenamesById: Record<string, string>;
    readonly usingPossibleTypes = false;
    constructor(config: {
        cache: InMemoryCache;
        dataIdFromObject?: KeyFieldsFunction;
        possibleTypes?: PossibleTypesMap;
        typePolicies?: TypePolicies;
    });
    identify(object: StoreObject, partialContext?: Partial<KeyFieldsContext>): [string?, StoreObject?];
    addTypePolicies(typePolicies: TypePolicies): void;
    private updateTypePolicy;
    private setRootTypename;
    addPossibleTypes(possibleTypes: PossibleTypesMap): void;
    private getTypePolicy;
    private getFieldPolicy;
    private getSupertypeSet;
    fragmentMatches(fragment: InlineFragmentNode | FragmentDefinitionNode, typename: string | undefined, result?: Record<string, any>, variables?: Record<string, any>): boolean;
    hasKeyArgs(typename: string | undefined, fieldName: string): boolean;
    getStoreFieldName(fieldSpec: FieldSpecifier): string;
    readField<V = StoreValue>(options: ReadFieldOptions, context: ReadMergeModifyContext): SafeReadonly<V> | undefined;
    getReadFunction(typename: string | undefined, fieldName: string): FieldReadFunction | undefined;
    getMergeFunction(parentTypename: string | undefined, fieldName: string, childTypename: string | undefined): FieldMergeFunction | undefined;
    runMergeFunction(existing: StoreValue, incoming: StoreValue, { field, typename, merge }: MergeInfo, context: WriteContext, storage?: StorageType): any;
}
export declare function normalizeReadFieldOptions(readFieldArgs: IArguments, objectOrReference: StoreObject | Reference | undefined, variables?: ReadMergeModifyContext["variables"]): ReadFieldOptions;
export {};
//# sourceMappingURL=policies.d.ts.map
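Note (editorial, illustrative only): the FieldPolicy type above is what application code supplies through InMemoryCache's typePolicies option. A concrete use of keyArgs, read, and merge together is the documented offset-based pagination pattern sketched below; the field name `comments` and its offset/limit arguments are assumptions for the example.

import { InMemoryCache, Reference } from "@apollo/client";

const cache = new InMemoryCache({
  typePolicies: {
    Query: {
      fields: {
        comments: {
          // read + merge together take responsibility for interpreting
          // arguments, so all pages share one list under one field key.
          keyArgs: false,
          merge(existing: Reference[] = [], incoming: Reference[], { args }) {
            const merged = existing.slice(0);
            const offset = (args && args.offset) || 0;
            incoming.forEach((item, i) => {
              merged[offset + i] = item; // place each page at its offset
            });
            return merged;
          },
          read(existing: Reference[] | undefined, { args }) {
            if (!existing) return undefined;
            const offset = (args && args.offset) || 0;
            const limit = (args && args.limit) ?? existing.length;
            return existing.slice(offset, offset + limit);
          },
        },
      },
    },
  },
});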
604
node_modules/@apollo/client/cache/inmemory/policies.js
generated
vendored
Normal file
@@ -0,0 +1,604 @@
import { __assign, __rest } from "tslib";
import { invariant, newInvariantError } from "../../utilities/globals/index.js";
import { storeKeyNameFromField, argumentsObjectFromField, isReference, getStoreKeyName, isNonNullObject, stringifyForDisplay, } from "../../utilities/index.js";
import { hasOwn, fieldNameFromStoreName, storeValueIsStoreObject, selectionSetMatchesResult, TypeOrFieldNameRegExp, defaultDataIdFromObject, isArray, } from "./helpers.js";
import { cacheSlot } from "./reactiveVars.js";
import { keyArgsFnFromSpecifier, keyFieldsFnFromSpecifier, } from "./key-extractor.js";
import { disableWarningsSlot } from "../../masking/index.js";
function argsFromFieldSpecifier(spec) {
    return (spec.args !== void 0 ? spec.args
        : spec.field ? argumentsObjectFromField(spec.field, spec.variables)
        : null);
}
var nullKeyFieldsFn = function () { return void 0; };
var simpleKeyArgsFn = function (_args, context) { return context.fieldName; };
// These merge functions can be selected by specifying merge:true or
// merge:false in a field policy.
var mergeTrueFn = function (existing, incoming, _a) {
    var mergeObjects = _a.mergeObjects;
    return mergeObjects(existing, incoming);
};
var mergeFalseFn = function (_, incoming) { return incoming; };
var Policies = /** @class */ (function () {
    function Policies(config) {
        this.config = config;
        this.typePolicies = Object.create(null);
        this.toBeAdded = Object.create(null);
        // Map from subtype names to sets of supertype names. Note that this
        // representation inverts the structure of possibleTypes (whose keys are
        // supertypes and whose values are arrays of subtypes) because it tends
        // to be much more efficient to search upwards than downwards.
        this.supertypeMap = new Map();
        // Any fuzzy subtypes specified by possibleTypes will be converted to
        // RegExp objects and recorded here. Every key of this map can also be
        // found in supertypeMap. In many cases this Map will be empty, which
        // means no fuzzy subtype checking will happen in fragmentMatches.
        this.fuzzySubtypes = new Map();
        this.rootIdsByTypename = Object.create(null);
        this.rootTypenamesById = Object.create(null);
        this.usingPossibleTypes = false;
        this.config = __assign({ dataIdFromObject: defaultDataIdFromObject }, config);
        this.cache = this.config.cache;
        this.setRootTypename("Query");
        this.setRootTypename("Mutation");
        this.setRootTypename("Subscription");
        if (config.possibleTypes) {
            this.addPossibleTypes(config.possibleTypes);
        }
        if (config.typePolicies) {
            this.addTypePolicies(config.typePolicies);
        }
    }
    Policies.prototype.identify = function (object, partialContext) {
        var _a;
        var policies = this;
        var typename = (partialContext &&
            (partialContext.typename || ((_a = partialContext.storeObject) === null || _a === void 0 ? void 0 : _a.__typename))) ||
            object.__typename;
        // It should be possible to write root Query fields with writeFragment,
        // using { __typename: "Query", ... } as the data, but it does not make
        // sense to allow the same identification behavior for the Mutation and
        // Subscription types, since application code should never be writing
        // directly to (or reading directly from) those root objects.
        if (typename === this.rootTypenamesById.ROOT_QUERY) {
            return ["ROOT_QUERY"];
        }
        // Default context.storeObject to object if not otherwise provided.
        var storeObject = (partialContext && partialContext.storeObject) || object;
        var context = __assign(__assign({}, partialContext), { typename: typename, storeObject: storeObject, readField: (partialContext && partialContext.readField) ||
                function () {
                    var options = normalizeReadFieldOptions(arguments, storeObject);
                    return policies.readField(options, {
                        store: policies.cache["data"],
                        variables: options.variables,
                    });
                } });
        var id;
        var policy = typename && this.getTypePolicy(typename);
        var keyFn = (policy && policy.keyFn) || this.config.dataIdFromObject;
        disableWarningsSlot.withValue(true, function () {
            while (keyFn) {
                var specifierOrId = keyFn(__assign(__assign({}, object), storeObject), context);
                if (isArray(specifierOrId)) {
                    keyFn = keyFieldsFnFromSpecifier(specifierOrId);
                }
                else {
                    id = specifierOrId;
                    break;
                }
            }
        });
        id = id ? String(id) : void 0;
        return context.keyObject ? [id, context.keyObject] : [id];
    };
    Policies.prototype.addTypePolicies = function (typePolicies) {
        var _this = this;
        Object.keys(typePolicies).forEach(function (typename) {
            var _a = typePolicies[typename], queryType = _a.queryType, mutationType = _a.mutationType, subscriptionType = _a.subscriptionType, incoming = __rest(_a, ["queryType", "mutationType", "subscriptionType"]);
            // Though {query,mutation,subscription}Type configurations are rare,
            // it's important to call setRootTypename as early as possible,
            // since these configurations should apply consistently for the
            // entire lifetime of the cache. Also, since only one __typename can
            // qualify as one of these root types, these three properties cannot
            // be inherited, unlike the rest of the incoming properties. That
            // restriction is convenient, because the purpose of this.toBeAdded
            // is to delay the processing of type/field policies until the first
            // time they're used, allowing policies to be added in any order as
            // long as all relevant policies (including policies for supertypes)
            // have been added by the time a given policy is used for the first
            // time. In other words, since inheritance doesn't matter for these
            // properties, there's also no need to delay their processing using
            // the this.toBeAdded queue.
            if (queryType)
                _this.setRootTypename("Query", typename);
            if (mutationType)
                _this.setRootTypename("Mutation", typename);
            if (subscriptionType)
                _this.setRootTypename("Subscription", typename);
            if (hasOwn.call(_this.toBeAdded, typename)) {
                _this.toBeAdded[typename].push(incoming);
            }
            else {
                _this.toBeAdded[typename] = [incoming];
            }
        });
    };
    Policies.prototype.updateTypePolicy = function (typename, incoming) {
        var _this = this;
        var existing = this.getTypePolicy(typename);
        var keyFields = incoming.keyFields, fields = incoming.fields;
        function setMerge(existing, merge) {
            existing.merge =
                typeof merge === "function" ? merge
                    // Pass merge:true as a shorthand for a merge implementation
                    // that returns options.mergeObjects(existing, incoming).
                    : merge === true ? mergeTrueFn
                    // Pass merge:false to make incoming always replace existing
                    // without any warnings about data clobbering.
                    : merge === false ? mergeFalseFn
                    : existing.merge;
        }
        // Type policies can define merge functions, as an alternative to
        // using field policies to merge child objects.
        setMerge(existing, incoming.merge);
        existing.keyFn =
            // Pass false to disable normalization for this typename.
            keyFields === false ? nullKeyFieldsFn
                // Pass an array of strings to use those fields to compute a
                // composite ID for objects of this typename.
                : isArray(keyFields) ? keyFieldsFnFromSpecifier(keyFields)
                // Pass a function to take full control over identification.
                : typeof keyFields === "function" ? keyFields
                // Leave existing.keyFn unchanged if above cases fail.
                : existing.keyFn;
        if (fields) {
            Object.keys(fields).forEach(function (fieldName) {
                var existing = _this.getFieldPolicy(typename, fieldName, true);
                var incoming = fields[fieldName];
                if (typeof incoming === "function") {
                    existing.read = incoming;
                }
                else {
                    var keyArgs = incoming.keyArgs, read = incoming.read, merge = incoming.merge;
                    existing.keyFn =
                        // Pass false to disable argument-based differentiation of
                        // field identities.
                        keyArgs === false ? simpleKeyArgsFn
                            // Pass an array of strings to use named arguments to
                            // compute a composite identity for the field.
                            : isArray(keyArgs) ? keyArgsFnFromSpecifier(keyArgs)
                            // Pass a function to take full control over field identity.
                            : typeof keyArgs === "function" ? keyArgs
                            // Leave existing.keyFn unchanged if above cases fail.
                            : existing.keyFn;
                    if (typeof read === "function") {
                        existing.read = read;
                    }
                    setMerge(existing, merge);
                }
                if (existing.read && existing.merge) {
                    // If we have both a read and a merge function, assume
                    // keyArgs:false, because read and merge together can take
                    // responsibility for interpreting arguments in and out. This
                    // default assumption can always be overridden by specifying
                    // keyArgs explicitly in the FieldPolicy.
                    existing.keyFn = existing.keyFn || simpleKeyArgsFn;
                }
            });
        }
    };
    Policies.prototype.setRootTypename = function (which, typename) {
        if (typename === void 0) { typename = which; }
        var rootId = "ROOT_" + which.toUpperCase();
        var old = this.rootTypenamesById[rootId];
        if (typename !== old) {
            invariant(!old || old === which, 6, which);
            // First, delete any old __typename associated with this rootId from
            // rootIdsByTypename.
            if (old)
                delete this.rootIdsByTypename[old];
            // Now make this the only __typename that maps to this rootId.
            this.rootIdsByTypename[typename] = rootId;
            // Finally, update the __typename associated with this rootId.
            this.rootTypenamesById[rootId] = typename;
        }
    };
    Policies.prototype.addPossibleTypes = function (possibleTypes) {
        var _this = this;
        this.usingPossibleTypes = true;
        Object.keys(possibleTypes).forEach(function (supertype) {
            // Make sure all types have an entry in this.supertypeMap, even if
            // their supertype set is empty, so we can return false immediately
            // from policies.fragmentMatches for unknown supertypes.
            _this.getSupertypeSet(supertype, true);
            possibleTypes[supertype].forEach(function (subtype) {
                _this.getSupertypeSet(subtype, true).add(supertype);
                var match = subtype.match(TypeOrFieldNameRegExp);
                if (!match || match[0] !== subtype) {
                    // TODO Don't interpret just any invalid typename as a RegExp.
                    _this.fuzzySubtypes.set(subtype, new RegExp(subtype));
                }
            });
        });
    };
    Policies.prototype.getTypePolicy = function (typename) {
        var _this = this;
        if (!hasOwn.call(this.typePolicies, typename)) {
            var policy_1 = (this.typePolicies[typename] = Object.create(null));
            policy_1.fields = Object.create(null);
            // When the TypePolicy for typename is first accessed, instead of
            // starting with an empty policy object, inherit any properties or
            // fields from the type policies of the supertypes of typename.
            //
            // Any properties or fields defined explicitly within the TypePolicy
            // for typename will take precedence, and if there are multiple
            // supertypes, the properties of policies whose types were added
            // later via addPossibleTypes will take precedence over those of
            // earlier supertypes. TODO Perhaps we should warn about these
            // conflicts in development, and recommend defining the property
            // explicitly in the subtype policy?
            //
            // Field policy inheritance is atomic/shallow: you can't inherit a
            // field policy and then override just its read function, since read
            // and merge functions often need to cooperate, so changing only one
            // of them would be a recipe for inconsistency.
            //
            // Once the TypePolicy for typename has been accessed, its properties can
            // still be updated directly using addTypePolicies, but future changes to
            // inherited supertype policies will not be reflected in this subtype
            // policy, because this code runs at most once per typename.
            var supertypes_1 = this.supertypeMap.get(typename);
            if (!supertypes_1 && this.fuzzySubtypes.size) {
                // To make the inheritance logic work for unknown typename strings that
                // may have fuzzy supertypes, we give this typename an empty supertype
                // set and then populate it with any fuzzy supertypes that match.
                supertypes_1 = this.getSupertypeSet(typename, true);
                // This only works for typenames that are directly matched by a fuzzy
                // supertype. What if there is an intermediate chain of supertypes?
                // While possible, that situation can only be solved effectively by
                // specifying the intermediate relationships via possibleTypes, manually
                // and in a non-fuzzy way.
                this.fuzzySubtypes.forEach(function (regExp, fuzzy) {
                    if (regExp.test(typename)) {
                        // The fuzzy parameter is just the original string version of regExp
                        // (not a valid __typename string), but we can look up the
                        // associated supertype(s) in this.supertypeMap.
                        var fuzzySupertypes = _this.supertypeMap.get(fuzzy);
                        if (fuzzySupertypes) {
                            fuzzySupertypes.forEach(function (supertype) {
                                return supertypes_1.add(supertype);
                            });
                        }
                    }
                });
            }
            if (supertypes_1 && supertypes_1.size) {
                supertypes_1.forEach(function (supertype) {
                    var _a = _this.getTypePolicy(supertype), fields = _a.fields, rest = __rest(_a, ["fields"]);
                    Object.assign(policy_1, rest);
                    Object.assign(policy_1.fields, fields);
                });
            }
        }
        var inbox = this.toBeAdded[typename];
        if (inbox && inbox.length) {
            // Merge the pending policies into this.typePolicies, in the order they
            // were originally passed to addTypePolicy.
            inbox.splice(0).forEach(function (policy) {
                _this.updateTypePolicy(typename, policy);
            });
        }
        return this.typePolicies[typename];
    };
    Policies.prototype.getFieldPolicy = function (typename, fieldName, createIfMissing) {
        if (typename) {
            var fieldPolicies = this.getTypePolicy(typename).fields;
            return (fieldPolicies[fieldName] ||
                (createIfMissing && (fieldPolicies[fieldName] = Object.create(null))));
        }
    };
    Policies.prototype.getSupertypeSet = function (subtype, createIfMissing) {
        var supertypeSet = this.supertypeMap.get(subtype);
        if (!supertypeSet && createIfMissing) {
            this.supertypeMap.set(subtype, (supertypeSet = new Set()));
        }
        return supertypeSet;
    };
    Policies.prototype.fragmentMatches = function (fragment, typename, result, variables) {
        var _this = this;
        if (!fragment.typeCondition)
            return true;
        // If the fragment has a type condition but the object we're matching
        // against does not have a __typename, the fragment cannot match.
        if (!typename)
            return false;
        var supertype = fragment.typeCondition.name.value;
        // Common case: fragment type condition and __typename are the same.
        if (typename === supertype)
            return true;
        if (this.usingPossibleTypes && this.supertypeMap.has(supertype)) {
            var typenameSupertypeSet = this.getSupertypeSet(typename, true);
            var workQueue_1 = [typenameSupertypeSet];
            var maybeEnqueue_1 = function (subtype) {
                var supertypeSet = _this.getSupertypeSet(subtype, false);
                if (supertypeSet &&
                    supertypeSet.size &&
                    workQueue_1.indexOf(supertypeSet) < 0) {
                    workQueue_1.push(supertypeSet);
                }
            };
            // We need to check fuzzy subtypes only if we encountered fuzzy
            // subtype strings in addPossibleTypes, and only while writing to
            // the cache, since that's when selectionSetMatchesResult gives a
            // strong signal of fragment matching. The StoreReader class calls
            // policies.fragmentMatches without passing a result object, so
            // needToCheckFuzzySubtypes is always false while reading.
            var needToCheckFuzzySubtypes = !!(result && this.fuzzySubtypes.size);
            var checkingFuzzySubtypes = false;
            // It's important to keep evaluating workQueue.length each time through
            // the loop, because the queue can grow while we're iterating over it.
            for (var i = 0; i < workQueue_1.length; ++i) {
                var supertypeSet = workQueue_1[i];
                if (supertypeSet.has(supertype)) {
                    if (!typenameSupertypeSet.has(supertype)) {
                        if (checkingFuzzySubtypes) {
                            globalThis.__DEV__ !== false && invariant.warn(7, typename, supertype);
                        }
                        // Record positive results for faster future lookup.
                        // Unfortunately, we cannot safely cache negative results,
                        // because new possibleTypes data could always be added to the
                        // Policies class.
                        typenameSupertypeSet.add(supertype);
                    }
                    return true;
                }
                supertypeSet.forEach(maybeEnqueue_1);
                if (needToCheckFuzzySubtypes &&
                    // Start checking fuzzy subtypes only after exhausting all
                    // non-fuzzy subtypes (after the final iteration of the loop).
                    i === workQueue_1.length - 1 &&
                    // We could wait to compare fragment.selectionSet to result
                    // after we verify the supertype, but this check is often less
                    // expensive than that search, and we will have to do the
                    // comparison anyway whenever we find a potential match.
                    selectionSetMatchesResult(fragment.selectionSet, result, variables)) {
                    // We don't always need to check fuzzy subtypes (if no result
                    // was provided, or !this.fuzzySubtypes.size), but, when we do,
                    // we only want to check them once.
                    needToCheckFuzzySubtypes = false;
                    checkingFuzzySubtypes = true;
                    // If we find any fuzzy subtypes that match typename, extend the
                    // workQueue to search through the supertypes of those fuzzy
                    // subtypes. Otherwise the for-loop will terminate and we'll
                    // return false below.
                    this.fuzzySubtypes.forEach(function (regExp, fuzzyString) {
                        var match = typename.match(regExp);
                        if (match && match[0] === typename) {
                            maybeEnqueue_1(fuzzyString);
                        }
                    });
                }
            }
        }
        return false;
    };
    Policies.prototype.hasKeyArgs = function (typename, fieldName) {
        var policy = this.getFieldPolicy(typename, fieldName, false);
        return !!(policy && policy.keyFn);
    };
    Policies.prototype.getStoreFieldName = function (fieldSpec) {
        var typename = fieldSpec.typename, fieldName = fieldSpec.fieldName;
        var policy = this.getFieldPolicy(typename, fieldName, false);
        var storeFieldName;
        var keyFn = policy && policy.keyFn;
        if (keyFn && typename) {
            var context = {
                typename: typename,
                fieldName: fieldName,
                field: fieldSpec.field || null,
                variables: fieldSpec.variables,
            };
            var args = argsFromFieldSpecifier(fieldSpec);
            while (keyFn) {
                var specifierOrString = keyFn(args, context);
                if (isArray(specifierOrString)) {
                    keyFn = keyArgsFnFromSpecifier(specifierOrString);
                }
                else {
                    // If the custom keyFn returns a falsy value, fall back to
                    // fieldName instead.
                    storeFieldName = specifierOrString || fieldName;
                    break;
                }
            }
        }
        if (storeFieldName === void 0) {
            storeFieldName =
                fieldSpec.field ?
                    storeKeyNameFromField(fieldSpec.field, fieldSpec.variables)
                    : getStoreKeyName(fieldName, argsFromFieldSpecifier(fieldSpec));
        }
        // Returning false from a keyArgs function is like configuring
        // keyArgs: false, but more dynamic.
        if (storeFieldName === false) {
            return fieldName;
        }
        // Make sure custom field names start with the actual field.name.value
        // of the field, so we can always figure out which properties of a
        // StoreObject correspond to which original field names.
        return fieldName === fieldNameFromStoreName(storeFieldName) ? storeFieldName
            : fieldName + ":" + storeFieldName;
    };
    Policies.prototype.readField = function (options, context) {
        var objectOrReference = options.from;
        if (!objectOrReference)
            return;
        var nameOrField = options.field || options.fieldName;
        if (!nameOrField)
            return;
        if (options.typename === void 0) {
            var typename = context.store.getFieldValue(objectOrReference, "__typename");
            if (typename)
                options.typename = typename;
        }
        var storeFieldName = this.getStoreFieldName(options);
        var fieldName = fieldNameFromStoreName(storeFieldName);
        var existing = context.store.getFieldValue(objectOrReference, storeFieldName);
        var policy = this.getFieldPolicy(options.typename, fieldName, false);
        var read = policy && policy.read;
        if (read) {
            var readOptions = makeFieldFunctionOptions(this, objectOrReference, options, context, context.store.getStorage(isReference(objectOrReference) ?
                objectOrReference.__ref
                : objectOrReference, storeFieldName));
            // Call read(existing, readOptions) with cacheSlot holding this.cache.
            return cacheSlot.withValue(this.cache, read, [
                existing,
                readOptions,
            ]);
        }
        return existing;
    };
    Policies.prototype.getReadFunction = function (typename, fieldName) {
        var policy = this.getFieldPolicy(typename, fieldName, false);
        return policy && policy.read;
    };
    Policies.prototype.getMergeFunction = function (parentTypename, fieldName, childTypename) {
        var policy = this.getFieldPolicy(parentTypename, fieldName, false);
        var merge = policy && policy.merge;
        if (!merge && childTypename) {
            policy = this.getTypePolicy(childTypename);
            merge = policy && policy.merge;
        }
        return merge;
    };
    Policies.prototype.runMergeFunction = function (existing, incoming, _a, context, storage) {
        var field = _a.field, typename = _a.typename, merge = _a.merge;
        if (merge === mergeTrueFn) {
            // Instead of going to the trouble of creating a full
            // FieldFunctionOptions object and calling mergeTrueFn, we can
            // simply call mergeObjects, as mergeTrueFn would.
            return makeMergeObjectsFunction(context.store)(existing, incoming);
        }
        if (merge === mergeFalseFn) {
            // Likewise for mergeFalseFn, whose implementation is even simpler.
            return incoming;
        }
        // If cache.writeQuery or cache.writeFragment was called with
        // options.overwrite set to true, we still call merge functions, but
        // the existing data is always undefined, so the merge function will
        // not attempt to combine the incoming data with the existing data.
        if (context.overwrite) {
            existing = void 0;
        }
        return merge(existing, incoming, makeFieldFunctionOptions(this,
        // Unlike options.readField for read functions, we do not fall
        // back to the current object if no foreignObjOrRef is provided,
        // because it's not clear what the current object should be for
        // merge functions: the (possibly undefined) existing object, or
        // the incoming object? If you think your merge function needs
        // to read sibling fields in order to produce a new value for
        // the current field, you might want to rethink your strategy,
        // because that's a recipe for making merge behavior sensitive
        // to the order in which fields are written into the cache.
        // However, readField(name, ref) is useful for merge functions
        // that need to deduplicate child objects and references.
        void 0, {
            typename: typename,
            fieldName: field.name.value,
            field: field,
            variables: context.variables,
        }, context, storage || Object.create(null)));
    };
    return Policies;
}());
export { Policies };
function makeFieldFunctionOptions(policies, objectOrReference, fieldSpec, context, storage) {
    var storeFieldName = policies.getStoreFieldName(fieldSpec);
    var fieldName = fieldNameFromStoreName(storeFieldName);
    var variables = fieldSpec.variables || context.variables;
    var _a = context.store, toReference = _a.toReference, canRead = _a.canRead;
    return {
        args: argsFromFieldSpecifier(fieldSpec),
        field: fieldSpec.field || null,
        fieldName: fieldName,
        storeFieldName: storeFieldName,
        variables: variables,
        isReference: isReference,
        toReference: toReference,
        storage: storage,
        cache: policies.cache,
        canRead: canRead,
        readField: function () {
            return policies.readField(normalizeReadFieldOptions(arguments, objectOrReference, variables), context);
        },
        mergeObjects: makeMergeObjectsFunction(context.store),
    };
}
export function normalizeReadFieldOptions(readFieldArgs, objectOrReference, variables) {
    var fieldNameOrOptions = readFieldArgs[0], from = readFieldArgs[1], argc = readFieldArgs.length;
    var options;
    if (typeof fieldNameOrOptions === "string") {
        options = {
            fieldName: fieldNameOrOptions,
            // Default to objectOrReference only when no second argument was
            // passed for the from parameter, not when undefined is explicitly
            // passed as the second argument.
            from: argc > 1 ? from : objectOrReference,
        };
    }
    else {
        options = __assign({}, fieldNameOrOptions);
        // Default to objectOrReference only when fieldNameOrOptions.from is
        // actually omitted, rather than just undefined.
        if (!hasOwn.call(options, "from")) {
            options.from = objectOrReference;
        }
    }
    if (globalThis.__DEV__ !== false && options.from === void 0) {
        globalThis.__DEV__ !== false && invariant.warn(8, stringifyForDisplay(Array.from(readFieldArgs)));
    }
    if (void 0 === options.variables) {
        options.variables = variables;
    }
    return options;
}
function makeMergeObjectsFunction(store) {
    return function mergeObjects(existing, incoming) {
        if (isArray(existing) || isArray(incoming)) {
            throw newInvariantError(9);
        }
        // These dynamic checks are necessary because the parameters of a
        // custom merge function can easily have the any type, so the type
        // system cannot always enforce the StoreObject | Reference parameter
        // types of options.mergeObjects.
        if (isNonNullObject(existing) && isNonNullObject(incoming)) {
            var eType = store.getFieldValue(existing, "__typename");
            var iType = store.getFieldValue(incoming, "__typename");
            var typesDiffer = eType && iType && eType !== iType;
            if (typesDiffer) {
                return incoming;
            }
            if (isReference(existing) && storeValueIsStoreObject(incoming)) {
                // Update the normalized EntityStore for the entity identified by
                // existing.__ref, preferring/overwriting any fields contributed by the
                // newer incoming StoreObject.
                store.merge(existing.__ref, incoming);
                return existing;
            }
            if (storeValueIsStoreObject(existing) && isReference(incoming)) {
                // Update the normalized EntityStore for the entity identified by
                // incoming.__ref, taking fields from the older existing object only if
                // those fields are not already present in the newer StoreObject
                // identified by incoming.__ref.
                store.merge(existing, incoming.__ref);
                return incoming;
            }
            if (storeValueIsStoreObject(existing) &&
                storeValueIsStoreObject(incoming)) {
                return __assign(__assign({}, existing), incoming);
            }
        }
        return incoming;
    };
}
//# sourceMappingURL=policies.js.map
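Note (editorial, illustrative only): a brief example of the possibleTypes input consumed by fragmentMatches and the fuzzy-subtype machinery above. The type names are hypothetical; the ".*Test" entry is a deliberately non-identifier string that addPossibleTypes converts into a fuzzy RegExp subtype.

import { InMemoryCache } from "@apollo/client";

const cache = new InMemoryCache({
  possibleTypes: {
    // Exact subtypes: a fragment on Character matches Jedi or Droid objects.
    Character: ["Jedi", "Droid"],
    // Fuzzy subtype: any __typename fully matching /.*Test/ is treated as a
    // subtype of Snapshot (handy for mocked types in tests).
    Snapshot: [".*Test"],
  },
});

// identify() goes through the same Policies machinery to compute cache IDs:
const id = cache.identify({ __typename: "Jedi", id: 5 }); // "Jedi:5"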
1
node_modules/@apollo/client/cache/inmemory/policies.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
18
node_modules/@apollo/client/cache/inmemory/reactiveVars.d.ts
generated
vendored
Normal file
@@ -0,0 +1,18 @@
import type { ApolloCache } from "../../core/index.js";
export interface ReactiveVar<T> {
    (newValue?: T): T;
    onNextChange(listener: ReactiveListener<T>): () => void;
    attachCache(cache: ApolloCache<any>): this;
    forgetCache(cache: ApolloCache<any>): boolean;
}
export type ReactiveListener<T> = (value: T) => any;
export declare const cacheSlot: {
    readonly id: string;
    hasValue(): boolean;
    getValue(): ApolloCache<any> | undefined;
    withValue<TResult, TArgs extends any[], TThis = any>(value: ApolloCache<any>, callback: (this: TThis, ...args: TArgs) => TResult, args?: TArgs | undefined, thisArg?: TThis | undefined): TResult;
};
export declare function forgetCache(cache: ApolloCache<any>): void;
export declare function recallCache(cache: ApolloCache<any>): void;
export declare function makeVar<T>(value: T): ReactiveVar<T>;
//# sourceMappingURL=reactiveVars.d.ts.map
83
node_modules/@apollo/client/cache/inmemory/reactiveVars.js
generated
vendored
Normal file
@@ -0,0 +1,83 @@
import { dep, Slot } from "optimism";
// Contextual Slot that acquires its value when custom read functions are
// called in Policies#readField.
export var cacheSlot = new Slot();
var cacheInfoMap = new WeakMap();
function getCacheInfo(cache) {
    var info = cacheInfoMap.get(cache);
    if (!info) {
        cacheInfoMap.set(cache, (info = {
            vars: new Set(),
            dep: dep(),
        }));
    }
    return info;
}
export function forgetCache(cache) {
    getCacheInfo(cache).vars.forEach(function (rv) { return rv.forgetCache(cache); });
}
// Calling forgetCache(cache) serves to silence broadcasts and allows the
// cache to be garbage collected. However, the varsByCache WeakMap
// preserves the set of reactive variables that were previously associated
// with this cache, which makes it possible to "recall" the cache at a
// later time, by reattaching it to those variables. If the cache has been
// garbage collected in the meantime, because it is no longer reachable,
// you won't be able to call recallCache(cache), and the cache will
// automatically disappear from the varsByCache WeakMap.
export function recallCache(cache) {
    getCacheInfo(cache).vars.forEach(function (rv) { return rv.attachCache(cache); });
}
export function makeVar(value) {
    var caches = new Set();
    var listeners = new Set();
    var rv = function (newValue) {
        if (arguments.length > 0) {
            if (value !== newValue) {
                value = newValue;
                caches.forEach(function (cache) {
                    // Invalidate any fields with custom read functions that
                    // consumed this variable, so query results involving those
                    // fields will be recomputed the next time we read them.
                    getCacheInfo(cache).dep.dirty(rv);
                    // Broadcast changes to any caches that have previously read
                    // from this variable.
                    broadcast(cache);
                });
                // Finally, notify any listeners added via rv.onNextChange.
                var oldListeners = Array.from(listeners);
                listeners.clear();
                oldListeners.forEach(function (listener) { return listener(value); });
            }
        }
        else {
            // When reading from the variable, obtain the current cache from
            // context via cacheSlot. This isn't entirely foolproof, but it's
            // the same system that powers varDep.
            var cache = cacheSlot.getValue();
            if (cache) {
                attach(cache);
                getCacheInfo(cache).dep(rv);
            }
        }
        return value;
    };
    rv.onNextChange = function (listener) {
        listeners.add(listener);
        return function () {
            listeners.delete(listener);
        };
    };
    var attach = (rv.attachCache = function (cache) {
        caches.add(cache);
        getCacheInfo(cache).vars.add(rv);
        return rv;
    });
    rv.forgetCache = function (cache) { return caches.delete(cache); };
    return rv;
}
function broadcast(cache) {
    if (cache.broadcastWatches) {
        cache.broadcastWatches();
    }
}
//# sourceMappingURL=reactiveVars.js.map
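Note (editorial, illustrative only): makeVar is part of the public @apollo/client API, so the sketch below exercises exactly the behavior implemented above: calling the variable with no arguments reads, calling with a value writes and broadcasts, and onNextChange listeners are one-shot.

import { makeVar } from "@apollo/client";

const cartItemsVar = makeVar<string[]>([]);

console.log(cartItemsVar()); // [] -- calling with no arguments reads
const stop = cartItemsVar.onNextChange((next) => {
  console.log("cart changed:", next);
});
cartItemsVar(["apollo-mug"]); // write: dirties dependent fields, fires listener once
// Listeners are one-shot (the set is cleared on each change); the returned
// function removes a listener that has not fired yet.
stop();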
1
node_modules/@apollo/client/cache/inmemory/reactiveVars.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
40
node_modules/@apollo/client/cache/inmemory/readFromStore.d.ts
generated
vendored
Normal file
@@ -0,0 +1,40 @@
import type { SelectionSetNode } from "graphql";
import type { Reference, StoreObject } from "../../utilities/index.js";
import type { Cache } from "../core/types/Cache.js";
import type { DiffQueryAgainstStoreOptions, InMemoryCacheConfig, ReadMergeModifyContext } from "./types.js";
import type { InMemoryCache } from "./inMemoryCache.js";
import type { MissingTree } from "../core/types/common.js";
import { ObjectCanon } from "./object-canon.js";
export type VariableMap = {
    [name: string]: any;
};
export type ExecResult<R = any> = {
    result: R;
    missing?: MissingTree;
};
export interface StoreReaderConfig {
    cache: InMemoryCache;
    addTypename?: boolean;
    resultCacheMaxSize?: number;
    canonizeResults?: boolean;
    canon?: ObjectCanon;
    fragments?: InMemoryCacheConfig["fragments"];
}
export declare class StoreReader {
    private executeSelectionSet;
    private executeSubSelectedArray;
    private config;
    private knownResults;
    canon: ObjectCanon;
    resetCanon(): void;
    constructor(config: StoreReaderConfig);
    /**
     * Given a store and a query, return as much of the result as possible and
     * identify if any data was missing from the store.
     */
    diffQueryAgainstStore<T>({ store, query, rootId, variables, returnPartialData, canonizeResults, }: DiffQueryAgainstStoreOptions): Cache.DiffResult<T>;
    isFresh(result: Record<string, any>, parent: StoreObject | Reference, selectionSet: SelectionSetNode, context: ReadMergeModifyContext): boolean;
    private execSelectionSetImpl;
    private execSubSelectedArrayImpl;
}
//# sourceMappingURL=readFromStore.d.ts.map
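Note (editorial, illustrative only): diffQueryAgainstStore is normally reached through the cache's diff method rather than called directly; a minimal sketch under that assumption, where the query and its shape are invented for illustration:

import { InMemoryCache, gql } from "@apollo/client";

const cache = new InMemoryCache();
const QUERY = gql`query { greeting }`;

cache.writeQuery({ query: QUERY, data: { greeting: "hello" } });

// diff() returns as much of the result as possible, plus completeness info.
const diff = cache.diff<{ greeting: string }>({
  query: QUERY,
  returnPartialData: true,
  optimistic: false,
});
console.log(diff.result, diff.complete); // { greeting: "hello" } true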
331
node_modules/@apollo/client/cache/inmemory/readFromStore.js
generated
vendored
Normal file
@@ -0,0 +1,331 @@
import { __assign } from "tslib";
import { invariant, newInvariantError } from "../../utilities/globals/index.js";
import { Kind } from "graphql";
import { wrap } from "optimism";
import { isField, resultKeyNameFromField, isReference, makeReference, shouldInclude, addTypenameToDocument, getDefaultValues, getMainDefinition, getQueryDefinition, getFragmentFromSelection, maybeDeepFreeze, mergeDeepArray, DeepMerger, isNonNullObject, canUseWeakMap, compact, canonicalStringify, cacheSizes, } from "../../utilities/index.js";
import { maybeDependOnExistenceOfEntity, supportsResultCaching, } from "./entityStore.js";
import { isArray, extractFragmentContext, getTypenameFromStoreObject, shouldCanonizeResults, } from "./helpers.js";
import { MissingFieldError } from "../core/types/common.js";
import { ObjectCanon } from "./object-canon.js";
function execSelectionSetKeyArgs(options) {
    return [
        options.selectionSet,
        options.objectOrReference,
        options.context,
        // We split out this property so we can pass different values
        // independently without modifying options.context itself.
        options.context.canonizeResults,
    ];
}
var StoreReader = /** @class */ (function () {
    function StoreReader(config) {
        var _this = this;
        this.knownResults = new (canUseWeakMap ? WeakMap : Map)();
        this.config = compact(config, {
            addTypename: config.addTypename !== false,
            canonizeResults: shouldCanonizeResults(config),
        });
        this.canon = config.canon || new ObjectCanon();
        // memoized functions in this class will be "garbage-collected"
        // by recreating the whole `StoreReader` in
        // `InMemoryCache.resetResultsCache`
        // (triggered from `InMemoryCache.gc` with `resetResultCache: true`)
        this.executeSelectionSet = wrap(function (options) {
            var _a;
            var canonizeResults = options.context.canonizeResults;
            var peekArgs = execSelectionSetKeyArgs(options);
            // Negate this boolean option so we can find out if we've already read
            // this result using the other boolean value.
            peekArgs[3] = !canonizeResults;
            var other = (_a = _this.executeSelectionSet).peek.apply(_a, peekArgs);
            if (other) {
                if (canonizeResults) {
                    return __assign(__assign({}, other), {
                        // If we previously read this result without canonizing it, we can
                        // reuse that result simply by canonizing it now.
                        result: _this.canon.admit(other.result) });
                }
                // If we previously read this result with canonization enabled, we can
                // return that canonized result as-is.
                return other;
            }
            maybeDependOnExistenceOfEntity(options.context.store, options.enclosingRef.__ref);
            // Finally, if we didn't find any useful previous results, run the real
            // execSelectionSetImpl method with the given options.
            return _this.execSelectionSetImpl(options);
        }, {
            max: this.config.resultCacheMaxSize ||
                cacheSizes["inMemoryCache.executeSelectionSet"] ||
                50000 /* defaultCacheSizes["inMemoryCache.executeSelectionSet"] */,
            keyArgs: execSelectionSetKeyArgs,
            // Note that the parameters of makeCacheKey are determined by the
            // array returned by keyArgs.
            makeCacheKey: function (selectionSet, parent, context, canonizeResults) {
                if (supportsResultCaching(context.store)) {
                    return context.store.makeCacheKey(selectionSet, isReference(parent) ? parent.__ref : parent, context.varString, canonizeResults);
                }
            },
        });
        this.executeSubSelectedArray = wrap(function (options) {
            maybeDependOnExistenceOfEntity(options.context.store, options.enclosingRef.__ref);
            return _this.execSubSelectedArrayImpl(options);
        }, {
            max: this.config.resultCacheMaxSize ||
                cacheSizes["inMemoryCache.executeSubSelectedArray"] ||
                10000 /* defaultCacheSizes["inMemoryCache.executeSubSelectedArray"] */,
            makeCacheKey: function (_a) {
                var field = _a.field, array = _a.array, context = _a.context;
                if (supportsResultCaching(context.store)) {
                    return context.store.makeCacheKey(field, array, context.varString);
                }
            },
        });
    }
    StoreReader.prototype.resetCanon = function () {
        this.canon = new ObjectCanon();
    };
    /**
     * Given a store and a query, return as much of the result as possible and
     * identify if any data was missing from the store.
     */
    StoreReader.prototype.diffQueryAgainstStore = function (_a) {
        var store = _a.store, query = _a.query, _b = _a.rootId, rootId = _b === void 0 ? "ROOT_QUERY" : _b, variables = _a.variables, _c = _a.returnPartialData, returnPartialData = _c === void 0 ? true : _c, _d = _a.canonizeResults, canonizeResults = _d === void 0 ? this.config.canonizeResults : _d;
        var policies = this.config.cache.policies;
        variables = __assign(__assign({}, getDefaultValues(getQueryDefinition(query))), variables);
        var rootRef = makeReference(rootId);
        var execResult = this.executeSelectionSet({
            selectionSet: getMainDefinition(query).selectionSet,
            objectOrReference: rootRef,
            enclosingRef: rootRef,
            context: __assign({ store: store, query: query, policies: policies, variables: variables, varString: canonicalStringify(variables), canonizeResults: canonizeResults }, extractFragmentContext(query, this.config.fragments)),
        });
        var missing;
        if (execResult.missing) {
            // For backwards compatibility we still report an array of
            // MissingFieldError objects, even though there will only ever be at most
            // one of them, now that all missing field error messages are grouped
            // together in the execResult.missing tree.
            missing = [
                new MissingFieldError(firstMissing(execResult.missing), execResult.missing, query, variables),
            ];
            if (!returnPartialData) {
                throw missing[0];
            }
        }
        return {
            result: execResult.result,
            complete: !missing,
            missing: missing,
        };
    };
    StoreReader.prototype.isFresh = function (result, parent, selectionSet, context) {
        if (supportsResultCaching(context.store) &&
            this.knownResults.get(result) === selectionSet) {
            var latest = this.executeSelectionSet.peek(selectionSet, parent, context,
            // If result is canonical, then it could only have been previously
            // cached by the canonizing version of executeSelectionSet, so we can
            // avoid checking both possibilities here.
            this.canon.isKnown(result));
            if (latest && result === latest.result) {
                return true;
            }
        }
        return false;
    };
    // Uncached version of executeSelectionSet.
    StoreReader.prototype.execSelectionSetImpl = function (_a) {
        var _this = this;
        var selectionSet = _a.selectionSet, objectOrReference = _a.objectOrReference, enclosingRef = _a.enclosingRef, context = _a.context;
        if (isReference(objectOrReference) &&
            !context.policies.rootTypenamesById[objectOrReference.__ref] &&
            !context.store.has(objectOrReference.__ref)) {
            return {
                result: this.canon.empty,
                missing: "Dangling reference to missing ".concat(objectOrReference.__ref, " object"),
            };
        }
        var variables = context.variables, policies = context.policies, store = context.store;
        var typename = store.getFieldValue(objectOrReference, "__typename");
        var objectsToMerge = [];
        var missing;
        var missingMerger = new DeepMerger();
        if (this.config.addTypename &&
            typeof typename === "string" &&
            !policies.rootIdsByTypename[typename]) {
            // Ensure we always include a default value for the __typename
            // field, if we have one, and this.config.addTypename is true. Note
            // that this field can be overridden by other merged objects.
            objectsToMerge.push({ __typename: typename });
        }
        function handleMissing(result, resultName) {
            var _a;
            if (result.missing) {
                missing = missingMerger.merge(missing, (_a = {},
                    _a[resultName] = result.missing,
                    _a));
            }
            return result.result;
        }
        var workSet = new Set(selectionSet.selections);
        workSet.forEach(function (selection) {
            var _a, _b;
            // Omit fields with directives @skip(if: <truthy value>) or
            // @include(if: <falsy value>).
            if (!shouldInclude(selection, variables))
                return;
            if (isField(selection)) {
                var fieldValue = policies.readField({
                    fieldName: selection.name.value,
                    field: selection,
                    variables: context.variables,
                    from: objectOrReference,
                }, context);
                var resultName = resultKeyNameFromField(selection);
                if (fieldValue === void 0) {
                    if (!addTypenameToDocument.added(selection)) {
                        missing = missingMerger.merge(missing, (_a = {},
                            _a[resultName] = "Can't find field '".concat(selection.name.value, "' on ").concat(isReference(objectOrReference) ?
                                objectOrReference.__ref + " object"
                                : "object " + JSON.stringify(objectOrReference, null, 2)),
                            _a));
                    }
                }
                else if (isArray(fieldValue)) {
                    if (fieldValue.length > 0) {
                        fieldValue = handleMissing(_this.executeSubSelectedArray({
                            field: selection,
                            array: fieldValue,
                            enclosingRef: enclosingRef,
                            context: context,
                        }), resultName);
                    }
                }
                else if (!selection.selectionSet) {
                    // If the field does not have a selection set, then we handle it
                    // as a scalar value. To keep this.canon from canonicalizing
                    // this value, we use this.canon.pass to wrap fieldValue in a
                    // Pass object that this.canon.admit will later unwrap as-is.
                    if (context.canonizeResults) {
                        fieldValue = _this.canon.pass(fieldValue);
                    }
                }
                else if (fieldValue != null) {
                    // In this case, because we know the field has a selection set,
                    // it must be trying to query a GraphQLObjectType, which is why
                    // fieldValue must be != null.
                    fieldValue = handleMissing(_this.executeSelectionSet({
                        selectionSet: selection.selectionSet,
                        objectOrReference: fieldValue,
                        enclosingRef: isReference(fieldValue) ? fieldValue : enclosingRef,
                        context: context,
                    }), resultName);
                }
                if (fieldValue !== void 0) {
                    objectsToMerge.push((_b = {}, _b[resultName] = fieldValue, _b));
                }
            }
            else {
                var fragment = getFragmentFromSelection(selection, context.lookupFragment);
                if (!fragment && selection.kind === Kind.FRAGMENT_SPREAD) {
                    throw newInvariantError(10, selection.name.value);
                }
                if (fragment && policies.fragmentMatches(fragment, typename)) {
                    fragment.selectionSet.selections.forEach(workSet.add, workSet);
                }
            }
        });
        var result = mergeDeepArray(objectsToMerge);
        var finalResult = { result: result, missing: missing };
        var frozen = context.canonizeResults ?
            this.canon.admit(finalResult)
            // Since this.canon is normally responsible for freezing results (only in
            // development), freeze them manually if canonization is disabled.
            : maybeDeepFreeze(finalResult);
        // Store this result with its selection set so that we can quickly
        // recognize it again in the StoreReader#isFresh method.
        if (frozen.result) {
            this.knownResults.set(frozen.result, selectionSet);
        }
        return frozen;
    };
    // Uncached version of executeSubSelectedArray.
    StoreReader.prototype.execSubSelectedArrayImpl = function (_a) {
        var _this = this;
        var field = _a.field, array = _a.array, enclosingRef = _a.enclosingRef, context = _a.context;
        var missing;
        var missingMerger = new DeepMerger();
        function handleMissing(childResult, i) {
            var _a;
            if (childResult.missing) {
                missing = missingMerger.merge(missing, (_a = {}, _a[i] = childResult.missing, _a));
            }
            return childResult.result;
        }
        if (field.selectionSet) {
            array = array.filter(context.store.canRead);
        }
        array = array.map(function (item, i) {
            // null value in array
            if (item === null) {
                return null;
            }
            // This is a nested array, recurse
            if (isArray(item)) {
                return handleMissing(_this.executeSubSelectedArray({
                    field: field,
                    array: item,
                    enclosingRef: enclosingRef,
                    context: context,
                }), i);
            }
            // This is an object, run the selection set on it
            if (field.selectionSet) {
                return handleMissing(_this.executeSelectionSet({
                    selectionSet: field.selectionSet,
                    objectOrReference: item,
                    enclosingRef: isReference(item) ? item : enclosingRef,
                    context: context,
                }), i);
            }
            if (globalThis.__DEV__ !== false) {
                assertSelectionSetForIdValue(context.store, field, item);
            }
            return item;
        });
        return {
            result: context.canonizeResults ? this.canon.admit(array) : array,
            missing: missing,
        };
    };
    return StoreReader;
}());
export { StoreReader };
function firstMissing(tree) {
    try {
        JSON.stringify(tree, function (_, value) {
            if (typeof value === "string")
                throw value;
            return value;
        });
    }
    catch (result) {
        return result;
    }
}
function assertSelectionSetForIdValue(store, field, fieldValue) {
    if (!field.selectionSet) {
        var workSet_1 = new Set([fieldValue]);
        workSet_1.forEach(function (value) {
            if (isNonNullObject(value)) {
                invariant(
                    !isReference(value),
                    11,
                    getTypenameFromStoreObject(store, value),
                    field.name.value
                );
                Object.values(value).forEach(workSet_1.add, workSet_1);
            }
        });
    }
}
//# sourceMappingURL=readFromStore.js.map
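Continuing that sketch against the code above: when a requested field was never written, execSelectionSetImpl records it in the missing tree, diffQueryAgainstStore wraps the first message (via firstMissing) in a single MissingFieldError, and returnPartialData decides whether that error is thrown or merely reported (the email field is hypothetical):

// Asks for a field (email) that the earlier write never stored.
const PARTIAL_QUERY = gql`query GetUserWithEmail { user { id name email } }`;

const partial = cache.diff<any>({
  query: PARTIAL_QUERY,
  optimistic: false,
  returnPartialData: true, // matches diffQueryAgainstStore's default
});
// partial.complete === false, and partial.missing is the one-element
// MissingFieldError array described in the backwards-compatibility comment.

// With returnPartialData: false, the same read throws missing[0] instead.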
1
node_modules/@apollo/client/cache/inmemory/readFromStore.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
126
node_modules/@apollo/client/cache/inmemory/types.d.ts
generated
vendored
Normal file
@@ -0,0 +1,126 @@
import type { DocumentNode, FieldNode } from "graphql";
import type { Transaction } from "../core/cache.js";
import type { StoreObject, StoreValue, Reference } from "../../utilities/index.js";
import type { FieldValueGetter } from "./entityStore.js";
import type { TypePolicies, PossibleTypesMap, KeyFieldsFunction, StorageType, FieldMergeFunction } from "./policies.js";
import type { Modifiers, ToReferenceFunction, CanReadFunction, AllFieldsModifier } from "../core/types/common.js";
import type { FragmentRegistryAPI } from "./fragmentRegistry.js";
export type { StoreObject, StoreValue, Reference };
export interface IdGetterObj extends Object {
    __typename?: string;
    id?: string;
    _id?: string;
}
export declare type IdGetter = (value: IdGetterObj) => string | undefined;
/**
 * This is an interface used to access, set and remove
 * StoreObjects from the cache
 */
export interface NormalizedCache {
    has(dataId: string): boolean;
    get(dataId: string, fieldName: string): StoreValue;
    merge(olderId: string, newerObject: StoreObject): void;
    merge(olderObject: StoreObject, newerId: string): void;
    modify<Entity extends Record<string, any>>(dataId: string, fields: Modifiers<Entity> | AllFieldsModifier<Entity>): boolean;
    delete(dataId: string, fieldName?: string): boolean;
    clear(): void;
    /**
     * returns an Object with key-value pairs matching the contents of the store
     */
    toObject(): NormalizedCacheObject;
    /**
     * replace the state of the store
     */
    replace(newData: NormalizedCacheObject): void;
    /**
     * Retain (or release) a given root ID to protect (or expose) it and its
     * transitive child entities from (or to) garbage collection. The current
     * retainment count is returned by both methods. Note that releasing a root
     * ID does not cause that entity to be garbage collected, but merely removes
     * it from the set of root IDs that will be considered during the next
     * mark-and-sweep collection.
     */
    retain(rootId: string): number;
    release(rootId: string): number;
    getFieldValue: FieldValueGetter;
    toReference: ToReferenceFunction;
    canRead: CanReadFunction;
    getStorage(idOrObj: string | StoreObject, ...storeFieldNames: (string | number)[]): StorageType;
}
/**
 * This is a normalized representation of the Apollo query result cache. It consists of
 * a flattened representation of query result trees.
 */
export interface NormalizedCacheObject {
    __META?: {
        extraRootIds: string[];
    };
    [dataId: string]: StoreObject | undefined;
}
export type OptimisticStoreItem = {
    id: string;
    data: NormalizedCacheObject;
    transaction: Transaction<NormalizedCacheObject>;
};
export type ReadQueryOptions = {
    /**
     * The Apollo Client store object.
     */
    store: NormalizedCache;
    /**
     * A parsed GraphQL query document.
     */
    query: DocumentNode;
    variables?: Object;
    previousResult?: any;
    /**
     * @deprecated
     * Using `canonizeResults` can result in memory leaks so we generally do not
     * recommend using this option anymore.
     * A future version of Apollo Client will contain a similar feature without
     * the risk of memory leaks.
     */
    canonizeResults?: boolean;
    rootId?: string;
    config?: ApolloReducerConfig;
};
export type DiffQueryAgainstStoreOptions = ReadQueryOptions & {
    returnPartialData?: boolean;
};
export type ApolloReducerConfig = {
    dataIdFromObject?: KeyFieldsFunction;
    addTypename?: boolean;
};
export interface InMemoryCacheConfig extends ApolloReducerConfig {
    resultCaching?: boolean;
    possibleTypes?: PossibleTypesMap;
    typePolicies?: TypePolicies;
    /**
     * @deprecated
     * Please use `cacheSizes` instead.
     */
    resultCacheMaxSize?: number;
    /**
     * @deprecated
     * Using `canonizeResults` can result in memory leaks so we generally do not
     * recommend using this option anymore.
     * A future version of Apollo Client will contain a similar feature.
     */
    canonizeResults?: boolean;
    fragments?: FragmentRegistryAPI;
}
export interface MergeInfo {
    field: FieldNode;
    typename: string | undefined;
    merge: FieldMergeFunction;
}
export interface MergeTree {
    info?: MergeInfo;
    map: Map<string | number, MergeTree>;
}
export interface ReadMergeModifyContext {
    store: NormalizedCache;
    variables?: Record<string, any>;
    varString?: string;
}
//# sourceMappingURL=types.d.ts.map
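For a concrete picture of NormalizedCacheObject: cache.extract() returns exactly this shape. Continuing the earlier hypothetical User sketch, and assuming the default `${__typename}:${id}` key policy, the flattened store looks roughly like this:

import type { NormalizedCacheObject } from "@apollo/client";

const snapshot: NormalizedCacheObject = cache.extract();
// {
//   "User:1": { __typename: "User", id: "1", name: "Ada" },
//   "ROOT_QUERY": { __typename: "Query", user: { __ref: "User:1" } },
// }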
2
node_modules/@apollo/client/cache/inmemory/types.js
generated
vendored
Normal file
@@ -0,0 +1,2 @@
export {};
//# sourceMappingURL=types.js.map
1
node_modules/@apollo/client/cache/inmemory/types.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
37
node_modules/@apollo/client/cache/inmemory/writeToStore.d.ts
generated
vendored
Normal file
@@ -0,0 +1,37 @@
import type { SelectionSetNode, FieldNode } from "graphql";
import type { FragmentMap, FragmentMapFunction, StoreObject, Reference } from "../../utilities/index.js";
import type { NormalizedCache, ReadMergeModifyContext, MergeTree, InMemoryCacheConfig } from "./types.js";
import type { StoreReader } from "./readFromStore.js";
import type { InMemoryCache } from "./inMemoryCache.js";
import type { Cache } from "../../core/index.js";
export interface WriteContext extends ReadMergeModifyContext {
    readonly written: {
        [dataId: string]: SelectionSetNode[];
    };
    readonly fragmentMap: FragmentMap;
    lookupFragment: FragmentMapFunction;
    merge<T>(existing: T, incoming: T): T;
    overwrite: boolean;
    incomingById: Map<string, {
        storeObject: StoreObject;
        mergeTree?: MergeTree;
        fieldNodeSet: Set<FieldNode>;
    }>;
    clientOnly: boolean;
    deferred: boolean;
    flavors: Map<string, FlavorableWriteContext>;
}
type FlavorableWriteContext = Pick<WriteContext, "clientOnly" | "deferred" | "flavors">;
export declare class StoreWriter {
    readonly cache: InMemoryCache;
    private reader?;
    private fragments?;
    constructor(cache: InMemoryCache, reader?: StoreReader | undefined, fragments?: InMemoryCacheConfig["fragments"]);
    writeToStore(store: NormalizedCache, { query, result, dataId, variables, overwrite }: Cache.WriteOptions): Reference | undefined;
    private processSelectionSet;
    private processFieldValue;
    private flattenFields;
    private applyMerges;
}
export {};
//# sourceMappingURL=writeToStore.d.ts.map
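StoreWriter.writeToStore is reached through the public write APIs; note its declared return type of Reference | undefined. A short sketch continuing the same hypothetical data:

// cache.writeQuery() funnels into StoreWriter.writeToStore and returns the
// Reference produced for the written root (ROOT_QUERY here), or undefined.
const ref = cache.writeQuery({
  query: USER_QUERY,
  data: { user: { __typename: "User", id: "1", name: "Grace" } },
});
console.log(ref && ref.__ref); // "ROOT_QUERY"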
529
node_modules/@apollo/client/cache/inmemory/writeToStore.js
generated
vendored
Normal file
@@ -0,0 +1,529 @@
import { __assign } from "tslib";
import { invariant, newInvariantError } from "../../utilities/globals/index.js";
import { equal } from "@wry/equality";
import { Trie } from "@wry/trie";
import { Kind } from "graphql";
import { getFragmentFromSelection, getDefaultValues, getOperationDefinition, getTypenameFromResult, makeReference, isField, resultKeyNameFromField, isReference, shouldInclude, cloneDeep, addTypenameToDocument, isNonEmptyArray, argumentsObjectFromField, canonicalStringify, } from "../../utilities/index.js";
import { isArray, makeProcessedFieldsMerger, fieldNameFromStoreName, storeValueIsStoreObject, extractFragmentContext, } from "./helpers.js";
import { normalizeReadFieldOptions } from "./policies.js";
// Since there are only four possible combinations of context.clientOnly and
// context.deferred values, we should need at most four "flavors" of any given
// WriteContext. To avoid creating multiple copies of the same context, we cache
// the contexts in the context.flavors Map (shared by all flavors) according to
// their clientOnly and deferred values (always in that order).
function getContextFlavor(context, clientOnly, deferred) {
    var key = "".concat(clientOnly).concat(deferred);
    var flavored = context.flavors.get(key);
    if (!flavored) {
        context.flavors.set(key, (flavored =
            context.clientOnly === clientOnly && context.deferred === deferred ?
                context
                : __assign(__assign({}, context), { clientOnly: clientOnly, deferred: deferred })));
    }
    return flavored;
}
var StoreWriter = /** @class */ (function () {
    function StoreWriter(cache, reader, fragments) {
        this.cache = cache;
        this.reader = reader;
        this.fragments = fragments;
    }
    StoreWriter.prototype.writeToStore = function (store, _a) {
        var _this = this;
        var query = _a.query, result = _a.result, dataId = _a.dataId, variables = _a.variables, overwrite = _a.overwrite;
        var operationDefinition = getOperationDefinition(query);
        var merger = makeProcessedFieldsMerger();
        variables = __assign(__assign({}, getDefaultValues(operationDefinition)), variables);
        var context = __assign(__assign({ store: store, written: Object.create(null), merge: function (existing, incoming) {
                return merger.merge(existing, incoming);
            }, variables: variables, varString: canonicalStringify(variables) }, extractFragmentContext(query, this.fragments)), { overwrite: !!overwrite, incomingById: new Map(), clientOnly: false, deferred: false, flavors: new Map() });
        var ref = this.processSelectionSet({
            result: result || Object.create(null),
            dataId: dataId,
            selectionSet: operationDefinition.selectionSet,
            mergeTree: { map: new Map() },
            context: context,
        });
        if (!isReference(ref)) {
            throw newInvariantError(12, result);
        }
        // So far, the store has not been modified, so now it's time to process
        // context.incomingById and merge those incoming fields into context.store.
        context.incomingById.forEach(function (_a, dataId) {
            var storeObject = _a.storeObject, mergeTree = _a.mergeTree, fieldNodeSet = _a.fieldNodeSet;
            var entityRef = makeReference(dataId);
            if (mergeTree && mergeTree.map.size) {
                var applied = _this.applyMerges(mergeTree, entityRef, storeObject, context);
                if (isReference(applied)) {
                    // Assume References returned by applyMerges have already been merged
                    // into the store. See makeMergeObjectsFunction in policies.ts for an
                    // example of how this can happen.
                    return;
                }
                // Otherwise, applyMerges returned a StoreObject, whose fields we should
                // merge into the store (see store.merge statement below).
                storeObject = applied;
            }
            if (globalThis.__DEV__ !== false && !context.overwrite) {
                var fieldsWithSelectionSets_1 = Object.create(null);
                fieldNodeSet.forEach(function (field) {
                    if (field.selectionSet) {
                        fieldsWithSelectionSets_1[field.name.value] = true;
                    }
                });
                var hasSelectionSet_1 = function (storeFieldName) {
                    return fieldsWithSelectionSets_1[fieldNameFromStoreName(storeFieldName)] ===
                        true;
                };
                var hasMergeFunction_1 = function (storeFieldName) {
                    var childTree = mergeTree && mergeTree.map.get(storeFieldName);
                    return Boolean(childTree && childTree.info && childTree.info.merge);
                };
                Object.keys(storeObject).forEach(function (storeFieldName) {
                    // If a merge function was defined for this field, trust that it
                    // did the right thing about (not) clobbering data. If the field
                    // has no selection set, it's a scalar field, so it doesn't need
                    // a merge function (even if it's an object, like JSON data).
                    if (hasSelectionSet_1(storeFieldName) &&
                        !hasMergeFunction_1(storeFieldName)) {
                        warnAboutDataLoss(entityRef, storeObject, storeFieldName, context.store);
                    }
                });
            }
            store.merge(dataId, storeObject);
        });
        // Any IDs written explicitly to the cache will be retained as
        // reachable root IDs for garbage collection purposes. Although this
        // logic includes root IDs like ROOT_QUERY and ROOT_MUTATION, their
        // retainment counts are effectively ignored because cache.gc() always
        // includes them in its root ID set.
        store.retain(ref.__ref);
        return ref;
    };
    StoreWriter.prototype.processSelectionSet = function (_a) {
        var _this = this;
        var dataId = _a.dataId, result = _a.result, selectionSet = _a.selectionSet, context = _a.context,
        // This object allows processSelectionSet to report useful information
        // to its callers without explicitly returning that information.
        mergeTree = _a.mergeTree;
        var policies = this.cache.policies;
        // This variable will be repeatedly updated using context.merge to
        // accumulate all fields that need to be written into the store.
        var incoming = Object.create(null);
        // If typename was not passed in, infer it. Note that typename is
        // always passed in for tricky-to-infer cases such as "Query" for
        // ROOT_QUERY.
        var typename = (dataId && policies.rootTypenamesById[dataId]) ||
            getTypenameFromResult(result, selectionSet, context.fragmentMap) ||
            (dataId && context.store.get(dataId, "__typename"));
        if ("string" === typeof typename) {
            incoming.__typename = typename;
        }
        // This readField function will be passed as context.readField in the
        // KeyFieldsContext object created within policies.identify (called below).
        // In addition to reading from the existing context.store (thanks to the
        // policies.readField(options, context) line at the very bottom), this
        // version of readField can read from Reference objects that are currently
        // pending in context.incomingById, which is important whenever keyFields
        // need to be extracted from a child object that processSelectionSet has
        // turned into a Reference.
        var readField = function () {
            var options = normalizeReadFieldOptions(arguments, incoming, context.variables);
            if (isReference(options.from)) {
                var info = context.incomingById.get(options.from.__ref);
                if (info) {
                    var result_1 = policies.readField(__assign(__assign({}, options), { from: info.storeObject }), context);
                    if (result_1 !== void 0) {
                        return result_1;
                    }
                }
            }
            return policies.readField(options, context);
        };
        var fieldNodeSet = new Set();
        this.flattenFields(selectionSet, result,
        // This WriteContext will be the default context value for fields returned
        // by the flattenFields method, but some fields may be assigned a modified
        // context, depending on the presence of @client and other directives.
        context, typename).forEach(function (context, field) {
            var _a;
            var resultFieldKey = resultKeyNameFromField(field);
            var value = result[resultFieldKey];
            fieldNodeSet.add(field);
            if (value !== void 0) {
                var storeFieldName = policies.getStoreFieldName({
                    typename: typename,
                    fieldName: field.name.value,
                    field: field,
                    variables: context.variables,
                });
                var childTree = getChildMergeTree(mergeTree, storeFieldName);
                var incomingValue = _this.processFieldValue(value, field,
                // Reset context.clientOnly and context.deferred to their default
                // values before processing nested selection sets.
                field.selectionSet ?
                    getContextFlavor(context, false, false)
                    : context, childTree);
                // To determine if this field holds a child object with a merge function
                // defined in its type policy (see PR #7070), we need to figure out the
                // child object's __typename.
                var childTypename = void 0;
                // The field's value can be an object that has a __typename only if the
                // field has a selection set. Otherwise incomingValue is scalar.
                if (field.selectionSet &&
                    (isReference(incomingValue) || storeValueIsStoreObject(incomingValue))) {
                    childTypename = readField("__typename", incomingValue);
                }
                var merge = policies.getMergeFunction(typename, field.name.value, childTypename);
                if (merge) {
                    childTree.info = {
                        // TODO Check compatibility against any existing childTree.field?
                        field: field,
                        typename: typename,
                        merge: merge,
                    };
                }
                else {
                    maybeRecycleChildMergeTree(mergeTree, storeFieldName);
                }
                incoming = context.merge(incoming, (_a = {},
                    _a[storeFieldName] = incomingValue,
                    _a));
            }
            else if (globalThis.__DEV__ !== false &&
                !context.clientOnly &&
                !context.deferred &&
                !addTypenameToDocument.added(field) &&
                // If the field has a read function, it may be a synthetic field or
                // provide a default value, so its absence from the written data should
                // not be cause for alarm.
                !policies.getReadFunction(typename, field.name.value)) {
                globalThis.__DEV__ !== false && invariant.error(13, resultKeyNameFromField(field), result);
            }
        });
        // Identify the result object, even if dataId was already provided,
        // since we always need keyObject below.
        try {
            var _b = policies.identify(result, {
                typename: typename,
                selectionSet: selectionSet,
                fragmentMap: context.fragmentMap,
                storeObject: incoming,
                readField: readField,
            }), id = _b[0], keyObject = _b[1];
            // If dataId was not provided, fall back to the id just generated by
            // policies.identify.
            dataId = dataId || id;
            // Write any key fields that were used during identification, even if
            // they were not mentioned in the original query.
            if (keyObject) {
                // TODO Reverse the order of the arguments?
                incoming = context.merge(incoming, keyObject);
            }
        }
        catch (e) {
            // If dataId was provided, tolerate failure of policies.identify.
            if (!dataId)
                throw e;
        }
        if ("string" === typeof dataId) {
            var dataRef = makeReference(dataId);
            // Avoid processing the same entity object using the same selection
            // set more than once. We use an array instead of a Set since most
            // entity IDs will be written using only one selection set, so the
            // size of this array is likely to be very small, meaning indexOf is
            // likely to be faster than Set.prototype.has.
            var sets = context.written[dataId] || (context.written[dataId] = []);
            if (sets.indexOf(selectionSet) >= 0)
                return dataRef;
            sets.push(selectionSet);
            // If we're about to write a result object into the store, but we
            // happen to know that the exact same (===) result object would be
            // returned if we were to reread the result with the same inputs,
            // then we can skip the rest of the processSelectionSet work for
            // this object, and immediately return a Reference to it.
            if (this.reader &&
                this.reader.isFresh(result, dataRef, selectionSet, context)) {
                return dataRef;
            }
            var previous_1 = context.incomingById.get(dataId);
            if (previous_1) {
                previous_1.storeObject = context.merge(previous_1.storeObject, incoming);
                previous_1.mergeTree = mergeMergeTrees(previous_1.mergeTree, mergeTree);
                fieldNodeSet.forEach(function (field) { return previous_1.fieldNodeSet.add(field); });
            }
            else {
                context.incomingById.set(dataId, {
                    storeObject: incoming,
                    // Save a reference to mergeTree only if it is not empty, because
                    // empty MergeTrees may be recycled by maybeRecycleChildMergeTree and
                    // reused for entirely different parts of the result tree.
                    mergeTree: mergeTreeIsEmpty(mergeTree) ? void 0 : mergeTree,
                    fieldNodeSet: fieldNodeSet,
                });
            }
            return dataRef;
        }
        return incoming;
    };
    StoreWriter.prototype.processFieldValue = function (value, field, context, mergeTree) {
        var _this = this;
        if (!field.selectionSet || value === null) {
            // In development, we need to clone scalar values so that they can be
            // safely frozen with maybeDeepFreeze in readFromStore.ts. In production,
            // it's cheaper to store the scalar values directly in the cache.
            return globalThis.__DEV__ !== false ? cloneDeep(value) : value;
        }
        if (isArray(value)) {
            return value.map(function (item, i) {
                var value = _this.processFieldValue(item, field, context, getChildMergeTree(mergeTree, i));
                maybeRecycleChildMergeTree(mergeTree, i);
                return value;
            });
        }
        return this.processSelectionSet({
            result: value,
            selectionSet: field.selectionSet,
            context: context,
            mergeTree: mergeTree,
        });
    };
    // Implements https://spec.graphql.org/draft/#sec-Field-Collection, but with
    // some additions for tracking @client and @defer directives.
    StoreWriter.prototype.flattenFields = function (selectionSet, result, context, typename) {
        if (typename === void 0) { typename = getTypenameFromResult(result, selectionSet, context.fragmentMap); }
        var fieldMap = new Map();
        var policies = this.cache.policies;
        var limitingTrie = new Trie(false); // No need for WeakMap, since limitingTrie does not escape.
        (function flatten(selectionSet, inheritedContext) {
            var visitedNode = limitingTrie.lookup(selectionSet,
            // Because we take inheritedClientOnly and inheritedDeferred into
            // consideration here (in addition to selectionSet), it's possible for
            // the same selection set to be flattened more than once, if it appears
            // in the query with different @client and/or @directive configurations.
            inheritedContext.clientOnly, inheritedContext.deferred);
            if (visitedNode.visited)
                return;
            visitedNode.visited = true;
            selectionSet.selections.forEach(function (selection) {
                if (!shouldInclude(selection, context.variables))
                    return;
                var clientOnly = inheritedContext.clientOnly, deferred = inheritedContext.deferred;
                if (
                // Since the presence of @client or @defer on this field can only
                // cause clientOnly or deferred to become true, we can skip the
                // forEach loop if both clientOnly and deferred are already true.
                !(clientOnly && deferred) &&
                    isNonEmptyArray(selection.directives)) {
                    selection.directives.forEach(function (dir) {
                        var name = dir.name.value;
                        if (name === "client")
                            clientOnly = true;
                        if (name === "defer") {
                            var args = argumentsObjectFromField(dir, context.variables);
                            // The @defer directive takes an optional args.if boolean
                            // argument, similar to @include(if: boolean). Note that
                            // @defer(if: false) does not make context.deferred false, but
                            // instead behaves as if there was no @defer directive.
                            if (!args || args.if !== false) {
                                deferred = true;
                            }
                            // TODO In the future, we may want to record args.label using
                            // context.deferred, if a label is specified.
                        }
                    });
                }
                if (isField(selection)) {
                    var existing = fieldMap.get(selection);
                    if (existing) {
                        // If this field has been visited along another recursive path
                        // before, the final context should have clientOnly or deferred set
                        // to true only if *all* paths have the directive (hence the &&).
                        clientOnly = clientOnly && existing.clientOnly;
                        deferred = deferred && existing.deferred;
                    }
                    fieldMap.set(selection, getContextFlavor(context, clientOnly, deferred));
                }
                else {
                    var fragment = getFragmentFromSelection(selection, context.lookupFragment);
                    if (!fragment && selection.kind === Kind.FRAGMENT_SPREAD) {
                        throw newInvariantError(14, selection.name.value);
                    }
                    if (fragment &&
                        policies.fragmentMatches(fragment, typename, result, context.variables)) {
                        flatten(fragment.selectionSet, getContextFlavor(context, clientOnly, deferred));
                    }
                }
            });
        })(selectionSet, context);
        return fieldMap;
    };
    StoreWriter.prototype.applyMerges = function (mergeTree, existing, incoming, context, getStorageArgs) {
        var _a;
        var _this = this;
        if (mergeTree.map.size && !isReference(incoming)) {
            var e_1 =
            // Items in the same position in different arrays are not
            // necessarily related to each other, so when incoming is an array
            // we process its elements as if there was no existing data.
            (!isArray(incoming) &&
                // Likewise, existing must be either a Reference or a StoreObject
                // in order for its fields to be safe to merge with the fields of
                // the incoming object.
                (isReference(existing) || storeValueIsStoreObject(existing))) ?
                existing
                : void 0;
            // This narrowing is implied by mergeTree.map.size > 0 and
            // !isReference(incoming), though TypeScript understandably cannot
            // hope to infer this type.
            var i_1 = incoming;
            // The options.storage objects provided to read and merge functions
            // are derived from the identity of the parent object plus a
            // sequence of storeFieldName strings/numbers identifying the nested
            // field name path of each field value to be merged.
            if (e_1 && !getStorageArgs) {
                getStorageArgs = [isReference(e_1) ? e_1.__ref : e_1];
            }
            // It's possible that applying merge functions to this subtree will
            // not change the incoming data, so this variable tracks the fields
            // that did change, so we can create a new incoming object when (and
            // only when) at least one incoming field has changed. We use a Map
            // to preserve the type of numeric keys.
            var changedFields_1;
            var getValue_1 = function (from, name) {
                return (isArray(from) ?
                    typeof name === "number" ?
                        from[name]
                        : void 0
                    : context.store.getFieldValue(from, String(name)));
            };
            mergeTree.map.forEach(function (childTree, storeFieldName) {
                var eVal = getValue_1(e_1, storeFieldName);
                var iVal = getValue_1(i_1, storeFieldName);
                // If we have no incoming data, leave any existing data untouched.
                if (void 0 === iVal)
                    return;
                if (getStorageArgs) {
                    getStorageArgs.push(storeFieldName);
                }
                var aVal = _this.applyMerges(childTree, eVal, iVal, context, getStorageArgs);
                if (aVal !== iVal) {
                    changedFields_1 = changedFields_1 || new Map();
                    changedFields_1.set(storeFieldName, aVal);
                }
                if (getStorageArgs) {
                    invariant(getStorageArgs.pop() === storeFieldName);
                }
            });
            if (changedFields_1) {
                // Shallow clone i so we can add changed fields to it.
                incoming = (isArray(i_1) ? i_1.slice(0) : __assign({}, i_1));
                changedFields_1.forEach(function (value, name) {
                    incoming[name] = value;
                });
            }
        }
        if (mergeTree.info) {
            return this.cache.policies.runMergeFunction(existing, incoming, mergeTree.info, context, getStorageArgs && (_a = context.store).getStorage.apply(_a, getStorageArgs));
        }
        return incoming;
    };
    return StoreWriter;
}());
export { StoreWriter };
var emptyMergeTreePool = [];
function getChildMergeTree(_a, name) {
    var map = _a.map;
    if (!map.has(name)) {
        map.set(name, emptyMergeTreePool.pop() || { map: new Map() });
    }
    return map.get(name);
}
function mergeMergeTrees(left, right) {
    if (left === right || !right || mergeTreeIsEmpty(right))
        return left;
    if (!left || mergeTreeIsEmpty(left))
        return right;
    var info = left.info && right.info ? __assign(__assign({}, left.info), right.info) : left.info || right.info;
    var needToMergeMaps = left.map.size && right.map.size;
    var map = needToMergeMaps ? new Map()
        : left.map.size ? left.map
            : right.map;
    var merged = { info: info, map: map };
    if (needToMergeMaps) {
        var remainingRightKeys_1 = new Set(right.map.keys());
        left.map.forEach(function (leftTree, key) {
            merged.map.set(key, mergeMergeTrees(leftTree, right.map.get(key)));
            remainingRightKeys_1.delete(key);
        });
        remainingRightKeys_1.forEach(function (key) {
            merged.map.set(key, mergeMergeTrees(right.map.get(key), left.map.get(key)));
        });
    }
    return merged;
}
function mergeTreeIsEmpty(tree) {
    return !tree || !(tree.info || tree.map.size);
}
function maybeRecycleChildMergeTree(_a, name) {
    var map = _a.map;
    var childTree = map.get(name);
    if (childTree && mergeTreeIsEmpty(childTree)) {
        emptyMergeTreePool.push(childTree);
        map.delete(name);
    }
}
var warnings = new Set();
// Note that this function is unused in production, and thus should be
// pruned by any well-configured minifier.
function warnAboutDataLoss(existingRef, incomingObj, storeFieldName, store) {
    var getChild = function (objOrRef) {
        var child = store.getFieldValue(objOrRef, storeFieldName);
        return typeof child === "object" && child;
    };
    var existing = getChild(existingRef);
    if (!existing)
        return;
    var incoming = getChild(incomingObj);
    if (!incoming)
        return;
    // It's always safe to replace a reference, since it refers to data
    // safely stored elsewhere.
    if (isReference(existing))
        return;
    // If the values are structurally equivalent, we do not need to worry
    // about incoming replacing existing.
    if (equal(existing, incoming))
        return;
    // If we're replacing every key of the existing object, then the
    // existing data would be overwritten even if the objects were
    // normalized, so warning would not be helpful here.
    if (Object.keys(existing).every(function (key) { return store.getFieldValue(incoming, key) !== void 0; })) {
        return;
    }
    var parentType = store.getFieldValue(existingRef, "__typename") ||
        store.getFieldValue(incomingObj, "__typename");
    var fieldName = fieldNameFromStoreName(storeFieldName);
    var typeDotName = "".concat(parentType, ".").concat(fieldName);
    // Avoid warning more than once for the same type and field name.
    if (warnings.has(typeDotName))
        return;
    warnings.add(typeDotName);
    var childTypenames = [];
    // Arrays do not have __typename fields, and always need a custom merge
    // function, even if their elements are normalized entities.
    if (!isArray(existing) && !isArray(incoming)) {
        [existing, incoming].forEach(function (child) {
            var typename = store.getFieldValue(child, "__typename");
            if (typeof typename === "string" && !childTypenames.includes(typename)) {
                childTypenames.push(typename);
            }
        });
    }
    globalThis.__DEV__ !== false && invariant.warn(15, fieldName, parentType, childTypenames.length ?
        "either ensure all objects of type " +
            childTypenames.join(" and ") +
            " have an ID or a custom merge function, or "
        : "", typeDotName, __assign({}, existing), __assign({}, incoming));
}
//# sourceMappingURL=writeToStore.js.map
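The warnAboutDataLoss path at the end of this file fires when a child object that has a selection set but no normalizable identity is overwritten without a field merge function (getMergeFunction finds nothing). A hedged sketch of the usual remedy, a typePolicies merge function; the User.profile field is hypothetical:

const cacheWithMerge = new InMemoryCache({
  typePolicies: {
    User: {
      fields: {
        profile: {
          // With this merge defined, getMergeFunction() finds it, applyMerges
          // runs it via runMergeFunction, and warnAboutDataLoss stays silent.
          merge(existing, incoming, { mergeObjects }) {
            return mergeObjects(existing, incoming);
          },
        },
      },
    },
  },
});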
1
node_modules/@apollo/client/cache/inmemory/writeToStore.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
8
node_modules/@apollo/client/cache/package.json
generated
vendored
Normal file
@@ -0,0 +1,8 @@
{
  "name": "@apollo/client/cache",
  "type": "module",
  "main": "cache.cjs",
  "module": "index.js",
  "types": "index.d.ts",
  "sideEffects": false
}
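This nested package.json gives the cache directory its own entry point, so the modules above are importable directly (equivalent to importing from the package root):

import { InMemoryCache } from "@apollo/client/cache";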
3
node_modules/@apollo/client/config/jest/areApolloErrorsEqual.d.ts
generated
vendored
Normal file
@@ -0,0 +1,3 @@
import type { Tester } from "@jest/expect-utils";
export declare const areApolloErrorsEqual: Tester;
//# sourceMappingURL=areApolloErrorsEqual.d.ts.map
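Tester is Jest's custom-equality-tester type, so a declaration like this is presumably wired up with expect.addEqualityTesters (available since Jest 29.4) in Apollo's own test setup; a sketch, with the import path assumed rather than taken from this diff:

import { expect } from "@jest/globals";
// Hypothetical path; this file ships for @apollo/client's internal Jest config.
import { areApolloErrorsEqual } from "@apollo/client/config/jest/areApolloErrorsEqual";

expect.addEqualityTesters([areApolloErrorsEqual]);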
Some files were not shown because too many files have changed in this diff