mobileapplicationPassvault/node_modules/@firebase/firestore/dist/index.node.mjs

32030 lines
1.2 MiB
JavaScript
Raw Permalink Normal View History

2024-04-12 05:23:32 +00:00
import { _getProvider, getApp, _removeServiceInstance, _registerComponent, registerVersion, SDK_VERSION as SDK_VERSION$1 } from '@firebase/app';
import { Component } from '@firebase/component';
import { Logger, LogLevel } from '@firebase/logger';
import { inspect, TextEncoder, TextDecoder } from 'util';
import { FirebaseError, createMockUserToken, getModularInstance, deepEqual, getDefaultEmulatorHostnameAndPort, getUA, isIndexedDBAvailable, isSafari } from '@firebase/util';
import { randomBytes as randomBytes$1 } from 'crypto';
import { Integer, Md5 } from '@firebase/webchannel-wrapper';
import * as grpc from '@grpc/grpc-js';
import * as protoLoader from '@grpc/proto-loader';
// npm package name and version of this bundled @firebase/firestore build
// (registered with the component framework elsewhere in the file).
const name = "@firebase/firestore";
const version$1 = "4.4.2";
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Lightweight wrapper around a nullable auth UID. Exists mainly to make
 * call sites more readable.
 */
class User {
    constructor(uid) {
        this.uid = uid;
    }
    /** True when this user carries a non-null UID. */
    isAuthenticated() {
        return this.uid != null;
    }
    /**
     * Returns a key representing this user, suitable for inclusion in a
     * dictionary.
     */
    toKey() {
        return this.isAuthenticated() ? 'uid:' + this.uid : 'anonymous-user';
    }
    /** Two users are equal when their UIDs match exactly. */
    isEqual(otherUser) {
        return otherUser.uid === this.uid;
    }
}
/** A user with a null UID. */
User.UNAUTHENTICATED = new User(null);
// TODO(mikelehen): Look into getting a proper uid-equivalent for
// non-FirebaseAuth providers.
User.GOOGLE_CREDENTIALS = new User('google-credentials-uid');
User.FIRST_PARTY = new User('first-party-uid');
User.MOCK_USER = new User('mock-user');
const version = "10.8.0";
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Current SDK version reported in log/error prefixes. Defaults to the bundled
// `version` constant; overridable via setSDKVersion().
let SDK_VERSION = version;
/** Replaces the SDK version string used in log and internal error messages. */
function setSDKVersion(version) {
SDK_VERSION = version;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Formats an object as a JSON-like string, suitable for logging. */
function formatJSON(value) {
    // util.inspect() results in much more readable output than
    // JSON.stringify(); cap recursion at a generous depth.
    const inspectOptions = { depth: 100 };
    return inspect(value, inspectOptions);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Shared Logger instance used for all Firestore log output.
const logClient = new Logger('@firebase/firestore');
// Helper methods are needed because variables can't be exported as read/write
/** Returns the current verbosity of the shared Firestore logger. */
function getLogLevel() {
return logClient.logLevel;
}
/**
 * Sets the verbosity of Cloud Firestore logs (debug, error, or silent).
 *
 * @param logLevel - The verbosity you set for activity and error logging. Can
 * be any of the following values:
 *
 * <ul>
 * <li>`debug` for the most verbose logging level, primarily for
 * debugging.</li>
 * <li>`error` to log errors only.</li>
 * <li>`silent` to turn off logging.</li>
 * </ul>
 */
function setLogLevel(logLevel) {
logClient.setLogLevel(logLevel);
}
/** Logs `msg` (plus stringified extra args) at DEBUG verbosity. */
function logDebug(msg, ...obj) {
    if (logClient.logLevel > LogLevel.DEBUG) {
        return;
    }
    logClient.debug(`Firestore (${SDK_VERSION}): ${msg}`, ...obj.map(argToString));
}
/** Logs `msg` (plus stringified extra args) at ERROR verbosity. */
function logError(msg, ...obj) {
    if (logClient.logLevel > LogLevel.ERROR) {
        return;
    }
    logClient.error(`Firestore (${SDK_VERSION}): ${msg}`, ...obj.map(argToString));
}
/**
 * Logs `msg` (plus stringified extra args) at WARN verbosity.
 * @internal
 */
function logWarn(msg, ...obj) {
    if (logClient.logLevel > LogLevel.WARN) {
        return;
    }
    logClient.warn(`Firestore (${SDK_VERSION}): ${msg}`, ...obj.map(argToString));
}
/**
 * Converts an additional log parameter to a string representation.
 * Strings pass through untouched; anything else is pretty-printed, falling
 * back to the raw object if formatting throws.
 */
function argToString(obj) {
    if (typeof obj === 'string') {
        return obj;
    }
    try {
        return formatJSON(obj);
    }
    catch (e) {
        // Converting to JSON failed; just log the object directly.
        return obj;
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Unconditionally fails, throwing an Error with the given message.
 * Messages are stripped in production builds.
 *
 * Returns `never` and can be used in expressions:
 * @example
 * let futureVar = fail('not implemented yet');
 */
function fail(failure = 'Unexpected state') {
// Log the failure in addition to throwing an exception, just in case the
// exception is swallowed.
const message = `FIRESTORE (${SDK_VERSION}) INTERNAL ASSERTION FAILED: ` + failure;
logError(message);
// NOTE: We don't use FirestoreError here because these are internal failures
// that cannot be handled by the user. (Also it would create a circular
// dependency between the error and assert modules which doesn't work.)
throw new Error(message);
}
/**
 * Fails if the given assertion condition is false, throwing an Error with the
 * given message if it did.
 *
 * Fix: the original body ignored `message` and always threw the generic
 * "Unexpected state" failure. Forwarding it preserves the caller's
 * diagnostic text while staying backward compatible: callers only catch
 * `Error`, and when the message is omitted (`undefined`), `fail()`'s
 * default parameter still supplies the original text.
 *
 * @param assertion - Condition expected to be truthy.
 * @param message - Optional description included in the thrown error.
 */
function hardAssert(assertion, message) {
    if (!assertion) {
        fail(message);
    }
}
/**
 * Fails if the given assertion condition is false, throwing an Error with the
 * given message if it did.
 *
 * The code of callsites invoking this function are stripped out in production
 * builds. Any side-effects of code within the debugAssert() invocation will not
 * happen in this case.
 *
 * Fix: forward `message` to fail() instead of dropping it, so surviving
 * call sites report the caller's diagnostic text. An omitted message
 * still falls back to fail()'s default, matching prior behavior.
 *
 * @internal
 */
function debugAssert(assertion, message) {
    if (!assertion) {
        fail(message);
    }
}
/**
 * Casts `obj` to `T`. In non-production builds, verifies that `obj` is an
 * instance of `T` before casting.
 *
 * NOTE(review): in this production bundle the instanceof verification has
 * been compiled out — this is an unchecked pass-through, and `constructor`
 * is intentionally unused.
 */
function debugCast(obj,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
constructor) {
return obj;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// String error codes for Firestore operations, mirroring the canonical gRPC
// status codes (see the reference linked below). Used as FirestoreError.code.
const Code = {
// Causes are copied from:
// https://github.com/grpc/grpc/blob/bceec94ea4fc5f0085d81235d8e1c06798dc341a/include/grpc%2B%2B/impl/codegen/status_code_enum.h
/** Not an error; returned on success. */
OK: 'ok',
/** The operation was cancelled (typically by the caller). */
CANCELLED: 'cancelled',
/** Unknown error or an error from a different error domain. */
UNKNOWN: 'unknown',
/**
 * Client specified an invalid argument. Note that this differs from
 * FAILED_PRECONDITION. INVALID_ARGUMENT indicates arguments that are
 * problematic regardless of the state of the system (e.g., a malformed file
 * name).
 */
INVALID_ARGUMENT: 'invalid-argument',
/**
 * Deadline expired before operation could complete. For operations that
 * change the state of the system, this error may be returned even if the
 * operation has completed successfully. For example, a successful response
 * from a server could have been delayed long enough for the deadline to
 * expire.
 */
DEADLINE_EXCEEDED: 'deadline-exceeded',
/** Some requested entity (e.g., file or directory) was not found. */
NOT_FOUND: 'not-found',
/**
 * Some entity that we attempted to create (e.g., file or directory) already
 * exists.
 */
ALREADY_EXISTS: 'already-exists',
/**
 * The caller does not have permission to execute the specified operation.
 * PERMISSION_DENIED must not be used for rejections caused by exhausting
 * some resource (use RESOURCE_EXHAUSTED instead for those errors).
 * PERMISSION_DENIED must not be used if the caller can not be identified
 * (use UNAUTHENTICATED instead for those errors).
 */
PERMISSION_DENIED: 'permission-denied',
/**
 * The request does not have valid authentication credentials for the
 * operation.
 */
UNAUTHENTICATED: 'unauthenticated',
/**
 * Some resource has been exhausted, perhaps a per-user quota, or perhaps the
 * entire file system is out of space.
 */
RESOURCE_EXHAUSTED: 'resource-exhausted',
/**
 * Operation was rejected because the system is not in a state required for
 * the operation's execution. For example, directory to be deleted may be
 * non-empty, an rmdir operation is applied to a non-directory, etc.
 *
 * A litmus test that may help a service implementor in deciding
 * between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE:
 * (a) Use UNAVAILABLE if the client can retry just the failing call.
 * (b) Use ABORTED if the client should retry at a higher-level
 * (e.g., restarting a read-modify-write sequence).
 * (c) Use FAILED_PRECONDITION if the client should not retry until
 * the system state has been explicitly fixed. E.g., if an "rmdir"
 * fails because the directory is non-empty, FAILED_PRECONDITION
 * should be returned since the client should not retry unless
 * they have first fixed up the directory by deleting files from it.
 * (d) Use FAILED_PRECONDITION if the client performs conditional
 * REST Get/Update/Delete on a resource and the resource on the
 * server does not match the condition. E.g., conflicting
 * read-modify-write on the same resource.
 */
FAILED_PRECONDITION: 'failed-precondition',
/**
 * The operation was aborted, typically due to a concurrency issue like
 * sequencer check failures, transaction aborts, etc.
 *
 * See litmus test above for deciding between FAILED_PRECONDITION, ABORTED,
 * and UNAVAILABLE.
 */
ABORTED: 'aborted',
/**
 * Operation was attempted past the valid range. E.g., seeking or reading
 * past end of file.
 *
 * Unlike INVALID_ARGUMENT, this error indicates a problem that may be fixed
 * if the system state changes. For example, a 32-bit file system will
 * generate INVALID_ARGUMENT if asked to read at an offset that is not in the
 * range [0,2^32-1], but it will generate OUT_OF_RANGE if asked to read from
 * an offset past the current file size.
 *
 * There is a fair bit of overlap between FAILED_PRECONDITION and
 * OUT_OF_RANGE. We recommend using OUT_OF_RANGE (the more specific error)
 * when it applies so that callers who are iterating through a space can
 * easily look for an OUT_OF_RANGE error to detect when they are done.
 */
OUT_OF_RANGE: 'out-of-range',
/** Operation is not implemented or not supported/enabled in this service. */
UNIMPLEMENTED: 'unimplemented',
/**
 * Internal errors. Means some invariants expected by underlying System has
 * been broken. If you see one of these errors, Something is very broken.
 */
INTERNAL: 'internal',
/**
 * The service is currently unavailable. This is a most likely a transient
 * condition and may be corrected by retrying with a backoff.
 *
 * See litmus test above for deciding between FAILED_PRECONDITION, ABORTED,
 * and UNAVAILABLE.
 */
UNAVAILABLE: 'unavailable',
/** Unrecoverable data loss or corruption. */
DATA_LOSS: 'data-loss'
};
/** An error returned by a Firestore operation. */
class FirestoreError extends FirebaseError {
/** @hideconstructor */
constructor(
/**
 * The backend error code associated with this error (one of the `Code`
 * string constants above).
 */
code,
/**
 * A custom error description.
 */
message) {
super(code, message);
this.code = code;
this.message = message;
// HACK: We write a toString property directly because Error is not a real
// class and so inheritance does not work correctly. We could alternatively
// do the same "back-door inheritance" trick that FirebaseError does.
this.toString = () => `${this.name}: [code=${this.code}]: ${this.message}`;
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * A Promise whose `resolve`/`reject` functions are exposed as instance
 * properties, so the promise can be settled from outside its executor.
 */
class Deferred {
    constructor() {
        let capturedResolve;
        let capturedReject;
        this.promise = new Promise((res, rej) => {
            // The executor runs synchronously, so both captures are assigned
            // before the constructor returns.
            capturedResolve = res;
            capturedReject = rej;
        });
        this.resolve = capturedResolve;
        this.reject = capturedReject;
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * An auth token carrying a bearer credential, attached to requests via the
 * Authorization header.
 */
class OAuthToken {
    constructor(value, user) {
        this.user = user;
        this.type = 'OAuth';
        const headers = new Map();
        headers.set('Authorization', `Bearer ${value}`);
        this.headers = headers;
    }
}
/**
* A CredentialsProvider that always yields an empty token.
* @internal
*/
class EmptyAuthCredentialsProvider {
getToken() {
return Promise.resolve(null);
}
invalidateToken() { }
start(asyncQueue, changeListener) {
// Fire with initial user.
asyncQueue.enqueueRetryable(() => changeListener(User.UNAUTHENTICATED));
}
shutdown() { }
}
/**
 * A CredentialsProvider that always returns a constant token. Used for
 * emulator token mocking.
 */
class EmulatorAuthCredentialsProvider {
    constructor(token) {
        this.token = token;
        /**
         * Stores the listener registered with setChangeListener().
         * Not strictly necessary (the UID never changes), but kept so tests
         * can verify the listener contract is honored.
         */
        this.changeListener = null;
    }
    /** Resolves with the constant token supplied at construction. */
    getToken() {
        return Promise.resolve(this.token);
    }
    invalidateToken() { }
    start(asyncQueue, changeListener) {
        this.changeListener = changeListener;
        // Fire with the initial user.
        asyncQueue.enqueueRetryable(() => changeListener(this.token.user));
    }
    shutdown() {
        this.changeListener = null;
    }
}
/**
 * CredentialsProvider backed by a Firebase Auth instance that is resolved
 * lazily from the component framework (Auth may initialize after Firestore).
 */
class FirebaseAuthCredentialsProvider {
constructor(authProvider) {
this.authProvider = authProvider;
/** Tracks the current User. */
this.currentUser = User.UNAUTHENTICATED;
/**
 * Counter used to detect if the token changed while a getToken request was
 * outstanding.
 */
this.tokenCounter = 0;
this.forceRefresh = false;
this.auth = null;
}
/**
 * Begins observing Auth token changes; every change is delivered to
 * `changeListener` via the asyncQueue, deduplicated by tokenCounter.
 */
start(asyncQueue, changeListener) {
let lastTokenId = this.tokenCounter;
// A change listener that prevents double-firing for the same token change.
const guardedChangeListener = user => {
if (this.tokenCounter !== lastTokenId) {
lastTokenId = this.tokenCounter;
return changeListener(user);
}
else {
return Promise.resolve();
}
};
// A promise that can be waited on to block on the next token change.
// This promise is re-created after each change.
let nextToken = new Deferred();
this.tokenListener = () => {
this.tokenCounter++;
this.currentUser = this.getUser();
nextToken.resolve();
nextToken = new Deferred();
asyncQueue.enqueueRetryable(() => guardedChangeListener(this.currentUser));
};
// Enqueues a task that waits for the next token event, then notifies the
// guarded listener with whatever user is current at that point.
const awaitNextToken = () => {
const currentTokenAttempt = nextToken;
asyncQueue.enqueueRetryable(async () => {
await currentTokenAttempt.promise;
await guardedChangeListener(this.currentUser);
});
};
const registerAuth = (auth) => {
logDebug('FirebaseAuthCredentialsProvider', 'Auth detected');
this.auth = auth;
this.auth.addAuthTokenListener(this.tokenListener);
awaitNextToken();
};
this.authProvider.onInit(auth => registerAuth(auth));
// Our users can initialize Auth right after Firestore, so we give it
// a chance to register itself with the component framework before we
// determine whether to start up in unauthenticated mode.
setTimeout(() => {
if (!this.auth) {
const auth = this.authProvider.getImmediate({ optional: true });
if (auth) {
registerAuth(auth);
}
else {
// If auth is still not available, proceed with `null` user
logDebug('FirebaseAuthCredentialsProvider', 'Auth not yet detected');
nextToken.resolve();
nextToken = new Deferred();
}
}
}, 0);
awaitNextToken();
}
/**
 * Resolves with an OAuthToken for the current user, `null` when Auth is
 * absent or yields no token data. Restarts itself if the token changes
 * while the request is in flight.
 */
getToken() {
// Take note of the current value of the tokenCounter so that this method
// can fail (with an ABORTED error) if there is a token change while the
// request is outstanding.
const initialTokenCounter = this.tokenCounter;
const forceRefresh = this.forceRefresh;
this.forceRefresh = false;
if (!this.auth) {
return Promise.resolve(null);
}
return this.auth.getToken(forceRefresh).then(tokenData => {
// Cancel the request since the token changed while the request was
// outstanding so the response is potentially for a previous user (which
// user, we can't be sure).
if (this.tokenCounter !== initialTokenCounter) {
logDebug('FirebaseAuthCredentialsProvider', 'getToken aborted due to token change.');
return this.getToken();
}
else {
if (tokenData) {
hardAssert(typeof tokenData.accessToken === 'string');
return new OAuthToken(tokenData.accessToken, this.currentUser);
}
else {
return null;
}
}
});
}
/** Forces a token refresh on the next getToken() call. */
invalidateToken() {
this.forceRefresh = true;
}
/** Detaches the token listener from Auth, if one was registered. */
shutdown() {
if (this.auth) {
this.auth.removeAuthTokenListener(this.tokenListener);
}
}
// Auth.getUid() can return null even with a user logged in. It is because
// getUid() is synchronous, but the auth code populating Uid is asynchronous.
// This method should only be called in the AuthTokenListener callback
// to guarantee to get the actual user.
getUser() {
const currentUid = this.auth && this.auth.getUid();
hardAssert(currentUid === null || typeof currentUid === 'string');
return new User(currentUid);
}
}
/*
 * FirstPartyToken provides a fresh token each time its value
 * is requested, because if the token is too old, requests will be rejected.
 * Technically this may no longer be necessary since the SDK should gracefully
 * recover from unauthenticated errors (see b/33147818 for context), but it's
 * safer to keep the implementation as-is.
 */
class FirstPartyToken {
    constructor(sessionIndex, iamToken, authTokenFactory) {
        this.sessionIndex = sessionIndex;
        this.iamToken = iamToken;
        this.authTokenFactory = authTokenFactory;
        this.type = 'FirstParty';
        this.user = User.FIRST_PARTY;
        this._headers = new Map();
    }
    /**
     * Gets an authorization token via the provided factory function, or
     * returns null when no factory was supplied.
     */
    getAuthToken() {
        return this.authTokenFactory ? this.authTokenFactory() : null;
    }
    /** Recomputes and returns the request headers for this token. */
    get headers() {
        this._headers.set('X-Goog-AuthUser', this.sessionIndex);
        const authHeaderTokenValue = this.getAuthToken();
        if (authHeaderTokenValue) {
            this._headers.set('Authorization', authHeaderTokenValue);
        }
        if (this.iamToken) {
            this._headers.set('X-Goog-Iam-Authorization-Token', this.iamToken);
        }
        return this._headers;
    }
}
/*
 * Provides user credentials required for the Firestore JavaScript SDK
 * to authenticate the user, using technique that is only available
 * to applications hosted by Google.
 */
class FirstPartyAuthCredentialsProvider {
constructor(sessionIndex, iamToken, authTokenFactory) {
this.sessionIndex = sessionIndex;
this.iamToken = iamToken;
this.authTokenFactory = authTokenFactory;
}
// Mints a fresh FirstPartyToken on every call (see the note on
// FirstPartyToken: stale tokens get rejected by the backend).
getToken() {
return Promise.resolve(new FirstPartyToken(this.sessionIndex, this.iamToken, this.authTokenFactory));
}
start(asyncQueue, changeListener) {
// Fire with initial uid.
asyncQueue.enqueueRetryable(() => changeListener(User.FIRST_PARTY));
}
/** No listeners or resources to release. */
shutdown() { }
/** Tokens are re-minted on every request; nothing to invalidate. */
invalidateToken() { }
}
/**
 * An App Check token. When the value is non-empty it is attached to requests
 * as the `x-firebase-appcheck` header; an empty value sends no header.
 */
class AppCheckToken {
    constructor(value) {
        this.value = value;
        this.type = 'AppCheck';
        this.headers = new Map();
        if (value?.length) {
            this.headers.set('x-firebase-appcheck', this.value);
        }
    }
}
/**
 * Token provider backed by an AppCheck component that is resolved lazily
 * from the component framework (AppCheck may initialize after Firestore).
 */
class FirebaseAppCheckTokenProvider {
constructor(appCheckProvider) {
this.appCheckProvider = appCheckProvider;
this.forceRefresh = false;
this.appCheck = null;
this.latestAppCheckToken = null;
}
/** Subscribes to AppCheck token changes, delivered via the asyncQueue. */
start(asyncQueue, changeListener) {
const onTokenChanged = tokenResult => {
if (tokenResult.error != null) {
logDebug('FirebaseAppCheckTokenProvider', `Error getting App Check token; using placeholder token instead. Error: ${tokenResult.error.message}`);
}
// Only notify the listener when the token actually changed.
const tokenUpdated = tokenResult.token !== this.latestAppCheckToken;
this.latestAppCheckToken = tokenResult.token;
logDebug('FirebaseAppCheckTokenProvider', `Received ${tokenUpdated ? 'new' : 'existing'} token.`);
return tokenUpdated
? changeListener(tokenResult.token)
: Promise.resolve();
};
this.tokenListener = (tokenResult) => {
asyncQueue.enqueueRetryable(() => onTokenChanged(tokenResult));
};
const registerAppCheck = (appCheck) => {
logDebug('FirebaseAppCheckTokenProvider', 'AppCheck detected');
this.appCheck = appCheck;
this.appCheck.addTokenListener(this.tokenListener);
};
this.appCheckProvider.onInit(appCheck => registerAppCheck(appCheck));
// Our users can initialize AppCheck after Firestore, so we give it
// a chance to register itself with the component framework.
setTimeout(() => {
if (!this.appCheck) {
const appCheck = this.appCheckProvider.getImmediate({ optional: true });
if (appCheck) {
registerAppCheck(appCheck);
}
else {
// If AppCheck is still not available, proceed without it.
logDebug('FirebaseAppCheckTokenProvider', 'AppCheck not yet detected');
}
}
}, 0);
}
/**
 * Resolves with the current AppCheckToken, or `null` when the AppCheck
 * component is absent or returned no result.
 */
getToken() {
const forceRefresh = this.forceRefresh;
this.forceRefresh = false;
if (!this.appCheck) {
return Promise.resolve(null);
}
return this.appCheck.getToken(forceRefresh).then(tokenResult => {
if (tokenResult) {
hardAssert(typeof tokenResult.token === 'string');
this.latestAppCheckToken = tokenResult.token;
return new AppCheckToken(tokenResult.token);
}
else {
return null;
}
});
}
/** Forces a token refresh on the next getToken() call. */
invalidateToken() {
this.forceRefresh = true;
}
/** Detaches the token listener from AppCheck, if one was registered. */
shutdown() {
if (this.appCheck) {
this.appCheck.removeTokenListener(this.tokenListener);
}
}
}
/**
 * An AppCheck token provider that always yields an empty token.
 * @internal
 */
class EmptyAppCheckTokenProvider {
// Resolves with AppCheckToken('') — an empty value, so AppCheckToken's
// constructor attaches no x-firebase-appcheck header.
getToken() {
return Promise.resolve(new AppCheckToken(''));
}
invalidateToken() { }
start(asyncQueue, changeListener) { }
shutdown() { }
}
/**
 * Builds a CredentialsProvider depending on the type of
 * the credentials passed in.
 */
function makeAuthCredentialsProvider(credentials) {
    if (!credentials) {
        return new EmptyAuthCredentialsProvider();
    }
    const credentialType = credentials['type'];
    if (credentialType === 'firstParty') {
        return new FirstPartyAuthCredentialsProvider(credentials['sessionIndex'] || '0', credentials['iamToken'] || null, credentials['authTokenFactory'] || null);
    }
    if (credentialType === 'provider') {
        return credentials['client'];
    }
    throw new FirestoreError(Code.INVALID_ARGUMENT, 'makeAuthCredentialsProvider failed due to invalid credential type');
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Generates `nBytes` of random bytes.
 *
 * If `nBytes < 0`, an error will be thrown (by Node's crypto.randomBytes).
 */
function randomBytes(nBytes) {
// Thin wrapper over crypto.randomBytes (imported as randomBytes$1).
return randomBytes$1(nBytes);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * A utility class for generating unique alphanumeric IDs of a specified length.
 *
 * @internal
 * Exported internally for testing purposes.
 */
class AutoId {
    static newId() {
        // Alphanumeric characters
        const chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
        // The largest byte value that is a multiple of `chars.length`; bytes
        // at or above it are rejected so the modulo mapping stays uniform.
        const maxMultiple = Math.floor(256 / chars.length) * chars.length;
        const targetLength = 20;
        let autoId = '';
        while (autoId.length < targetLength) {
            for (const byte of randomBytes(40)) {
                // Only accept values in [0, maxMultiple) — these map evenly
                // onto indices of `chars` via a modulo operation.
                if (byte < maxMultiple && autoId.length < targetLength) {
                    autoId += chars.charAt(byte % chars.length);
                }
            }
        }
        return autoId;
    }
}
/** Three-way comparison for primitives ordered by `<` / `>`: -1, 1, or 0. */
function primitiveComparator(left, right) {
    if (left < right) {
        return -1;
    }
    return left > right ? 1 : 0;
}
/** Helper to compare arrays element-wise using the supplied comparator. */
function arrayEquals(left, right, comparator) {
    return (left.length === right.length &&
        left.every((value, index) => comparator(value, right[index])));
}
/**
 * Returns the immediate lexicographically-following string. This is useful to
 * construct an inclusive range for indexeddb iterators.
 */
function immediateSuccessor(s) {
    // Appending a NUL byte yields the smallest string strictly greater than s.
    return `${s}\0`;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The earliest date supported by Firestore timestamps (0001-01-01T00:00:00Z).
// (The upper bound, 9999-12-31T23:59:59Z, is enforced separately in the
// Timestamp constructor below.)
const MIN_SECONDS = -62135596800;
// Number of nanoseconds in a millisecond.
const MS_TO_NANOS = 1e6;
/**
* A `Timestamp` represents a point in time independent of any time zone or
* calendar, represented as seconds and fractions of seconds at nanosecond
* resolution in UTC Epoch time.
*
* It is encoded using the Proleptic Gregorian Calendar which extends the
* Gregorian calendar backwards to year one. It is encoded assuming all minutes
* are 60 seconds long, i.e. leap seconds are "smeared" so that no leap second
* table is needed for interpretation. Range is from 0001-01-01T00:00:00Z to
* 9999-12-31T23:59:59.999999999Z.
*
* For examples and further specifications, refer to the
* {@link https://github.com/google/protobuf/blob/master/src/google/protobuf/timestamp.proto | Timestamp definition}.
*/
class Timestamp {
/**
* Creates a new timestamp.
*
* @param seconds - The number of seconds of UTC time since Unix epoch
* 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
* 9999-12-31T23:59:59Z inclusive.
* @param nanoseconds - The non-negative fractions of a second at nanosecond
* resolution. Negative second values with fractions must still have
* non-negative nanoseconds values that count forward in time. Must be
* from 0 to 999,999,999 inclusive.
*/
constructor(
/**
* The number of seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z.
*/
seconds,
/**
* The non-negative fractions of a second at nanosecond resolution.
*/
nanoseconds) {
this.seconds = seconds;
this.nanoseconds = nanoseconds;
if (nanoseconds < 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Timestamp nanoseconds out of range: ' + nanoseconds);
}
if (nanoseconds >= 1e9) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Timestamp nanoseconds out of range: ' + nanoseconds);
}
if (seconds < MIN_SECONDS) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Timestamp seconds out of range: ' + seconds);
}
// This will break in the year 10,000.
if (seconds >= 253402300800) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Timestamp seconds out of range: ' + seconds);
}
}
/**
* Creates a new timestamp with the current date, with millisecond precision.
*
* @returns a new timestamp representing the current date.
*/
static now() {
return Timestamp.fromMillis(Date.now());
}
/**
* Creates a new timestamp from the given date.
*
* @param date - The date to initialize the `Timestamp` from.
* @returns A new `Timestamp` representing the same point in time as the given
* date.
*/
static fromDate(date) {
return Timestamp.fromMillis(date.getTime());
}
/**
* Creates a new timestamp from the given number of milliseconds.
*
* @param milliseconds - Number of milliseconds since Unix epoch
* 1970-01-01T00:00:00Z.
* @returns A new `Timestamp` representing the same point in time as the given
* number of milliseconds.
*/
static fromMillis(milliseconds) {
const seconds = Math.floor(milliseconds / 1000);
const nanos = Math.floor((milliseconds - seconds * 1000) * MS_TO_NANOS);
return new Timestamp(seconds, nanos);
}
/**
* Converts a `Timestamp` to a JavaScript `Date` object. This conversion
* causes a loss of precision since `Date` objects only support millisecond
* precision.
*
* @returns JavaScript `Date` object representing the same point in time as
* this `Timestamp`, with millisecond precision.
*/
toDate() {
return new Date(this.toMillis());
}
/**
* Converts a `Timestamp` to a numeric timestamp (in milliseconds since
* epoch). This operation causes a loss of precision.
*
* @returns The point in time corresponding to this timestamp, represented as
* the number of milliseconds since Unix epoch 1970-01-01T00:00:00Z.
*/
toMillis() {
return this.seconds * 1000 + this.nanoseconds / MS_TO_NANOS;
}
_compareTo(other) {
if (this.seconds === other.seconds) {
return primitiveComparator(this.nanoseconds, other.nanoseconds);
}
return primitiveComparator(this.seconds, other.seconds);
}
/**
* Returns true if this `Timestamp` is equal to the provided one.
*
* @param other - The `Timestamp` to compare against.
* @returns true if this `Timestamp` is equal to the provided one.
*/
isEqual(other) {
return (other.seconds === this.seconds && other.nanoseconds === this.nanoseconds);
}
/** Returns a textual representation of this `Timestamp`. */
toString() {
return ('Timestamp(seconds=' +
this.seconds +
', nanoseconds=' +
this.nanoseconds +
')');
}
/** Returns a JSON-serializable representation of this `Timestamp`. */
toJSON() {
return { seconds: this.seconds, nanoseconds: this.nanoseconds };
}
/**
* Converts this object to a primitive string, which allows `Timestamp` objects
* to be compared using the `>`, `<=`, `>=` and `>` operators.
*/
valueOf() {
// This method returns a string of the form <seconds>.<nanoseconds> where
// <seconds> is translated to have a non-negative value and both <seconds>
// and <nanoseconds> are left-padded with zeroes to be a consistent length.
// Strings with this format then have a lexiographical ordering that matches
// the expected ordering. The <seconds> translation is done to avoid having
// a leading negative sign (i.e. a leading '-' character) in its string
// representation, which would affect its lexiographical ordering.
const adjustedSeconds = this.seconds - MIN_SECONDS;
// Note: Up to 12 decimal digits are required to represent all valid
// 'seconds' values.
const formattedSeconds = String(adjustedSeconds).padStart(12, '0');
const formattedNanoseconds = String(this.nanoseconds).padStart(9, '0');
return formattedSeconds + '.' + formattedNanoseconds;
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * A version of a document in Firestore. This corresponds to the version
 * timestamp, such as update_time or read_time.
 */
class SnapshotVersion {
    constructor(timestamp) {
        this.timestamp = timestamp;
    }
    /** Wraps the given timestamp as a `SnapshotVersion`. */
    static fromTimestamp(value) {
        return new SnapshotVersion(value);
    }
    /** The smallest representable version (epoch, zero nanoseconds). */
    static min() {
        return new SnapshotVersion(new Timestamp(0, 0));
    }
    /** The largest representable version (last valid second, max nanoseconds). */
    static max() {
        return new SnapshotVersion(new Timestamp(253402300799, 1e9 - 1));
    }
    compareTo(other) {
        return this.timestamp._compareTo(other.timestamp);
    }
    isEqual(other) {
        return this.timestamp.isEqual(other.timestamp);
    }
    /** Returns a number representation of the version for use in spec tests. */
    toMicroseconds() {
        const { seconds, nanoseconds } = this.timestamp;
        return seconds * 1e6 + nanoseconds / 1000;
    }
    toString() {
        return `SnapshotVersion(${this.timestamp.toString()})`;
    }
    toTimestamp() {
        return this.timestamp;
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const DOCUMENT_KEY_NAME = '__name__';
/**
 * Path represents an ordered sequence of string segments. Instances share the
 * backing segment array and view it through an offset/length window.
 */
class BasePath {
    constructor(segments, offset, length) {
        if (offset === undefined) {
            offset = 0;
        }
        else if (offset > segments.length) {
            fail();
        }
        if (length === undefined) {
            length = segments.length - offset;
        }
        else if (length > segments.length - offset) {
            fail();
        }
        this.segments = segments;
        this.offset = offset;
        this.len = length;
    }
    get length() {
        return this.len;
    }
    isEqual(other) {
        return BasePath.comparator(this, other) === 0;
    }
    /** Returns a new path with `nameOrPath` (segment or path) appended. */
    child(nameOrPath) {
        const combined = this.segments.slice(this.offset, this.limit());
        if (nameOrPath instanceof BasePath) {
            nameOrPath.forEach(segment => combined.push(segment));
        }
        else {
            combined.push(nameOrPath);
        }
        return this.construct(combined);
    }
    /** The index of one past the last segment of the path. */
    limit() {
        return this.offset + this.length;
    }
    popFirst(size) {
        size = size === undefined ? 1 : size;
        return this.construct(this.segments, this.offset + size, this.length - size);
    }
    popLast() {
        return this.construct(this.segments, this.offset, this.length - 1);
    }
    firstSegment() {
        return this.segments[this.offset];
    }
    lastSegment() {
        return this.get(this.length - 1);
    }
    get(index) {
        return this.segments[this.offset + index];
    }
    isEmpty() {
        return this.length === 0;
    }
    /** True if every segment of this path matches the start of `other`. */
    isPrefixOf(other) {
        if (other.length < this.length) {
            return false;
        }
        for (let idx = 0; idx < this.length; idx++) {
            if (this.get(idx) !== other.get(idx)) {
                return false;
            }
        }
        return true;
    }
    /** True if `potentialChild` is exactly one segment longer and shares this prefix. */
    isImmediateParentOf(potentialChild) {
        if (this.length + 1 !== potentialChild.length) {
            return false;
        }
        for (let idx = 0; idx < this.length; idx++) {
            if (this.get(idx) !== potentialChild.get(idx)) {
                return false;
            }
        }
        return true;
    }
    forEach(fn) {
        const end = this.limit();
        for (let cursor = this.offset; cursor < end; cursor++) {
            fn(this.segments[cursor]);
        }
    }
    toArray() {
        return this.segments.slice(this.offset, this.limit());
    }
    /** Lexicographic segment-by-segment ordering; shorter paths sort first. */
    static comparator(p1, p2) {
        const shared = Math.min(p1.length, p2.length);
        for (let idx = 0; idx < shared; idx++) {
            const lhs = p1.get(idx);
            const rhs = p2.get(idx);
            if (lhs < rhs) {
                return -1;
            }
            if (lhs > rhs) {
                return 1;
            }
        }
        if (p1.length !== p2.length) {
            return p1.length < p2.length ? -1 : 1;
        }
        return 0;
    }
}
/**
 * A slash-separated path for navigating resources (documents and collections)
 * within Firestore.
 *
 * @internal
 */
class ResourcePath extends BasePath {
    construct(segments, offset, length) {
        return new ResourcePath(segments, offset, length);
    }
    canonicalString() {
        // NOTE: The client is ignorant of any path segments containing escape
        // sequences (e.g. __id123__) and just passes them through raw (they
        // exist for legacy reasons and should not be used frequently).
        return this.toArray().join('/');
    }
    toString() {
        return this.canonicalString();
    }
    /**
     * Returns a string representation of this path
     * where each path segment has been encoded with
     * `encodeURIComponent`.
     */
    toUriEncodedString() {
        return this.toArray()
            .map(encodeURIComponent)
            .join('/');
    }
    /**
     * Creates a resource path from the given slash-delimited string. If multiple
     * arguments are provided, all components are combined. Leading and trailing
     * slashes from all components are ignored.
     */
    static fromString(...pathComponents) {
        // NOTE: The client is ignorant of any path segments containing escape
        // sequences (e.g. __id123__) and just passes them through raw (they
        // exist for legacy reasons and should not be used frequently).
        const segments = [];
        for (const path of pathComponents) {
            if (path.indexOf('//') >= 0) {
                throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid segment (${path}). Paths must not contain // in them.`);
            }
            // Strip leading and trailing slashes by dropping empty segments.
            for (const segment of path.split('/')) {
                if (segment.length > 0) {
                    segments.push(segment);
                }
            }
        }
        return new ResourcePath(segments);
    }
    static emptyPath() {
        return new ResourcePath([]);
    }
}
const identifierRegExp = /^[_a-zA-Z][_a-zA-Z0-9]*$/;
/**
 * A dot-separated path for navigating sub-objects within a document.
 * @internal
 */
class FieldPath$1 extends BasePath {
    construct(segments, offset, length) {
        return new FieldPath$1(segments, offset, length);
    }
    /**
     * Returns true if the string could be used as a segment in a field path
     * without escaping.
     */
    static isValidIdentifier(segment) {
        return identifierRegExp.test(segment);
    }
    canonicalString() {
        return this.toArray()
            .map(str => {
            // Escape backslashes and backticks, then wrap non-identifier
            // segments in backticks.
            const escaped = str.replace(/\\/g, '\\\\').replace(/`/g, '\\`');
            return FieldPath$1.isValidIdentifier(escaped)
                ? escaped
                : '`' + escaped + '`';
        })
            .join('.');
    }
    toString() {
        return this.canonicalString();
    }
    /**
     * Returns true if this field references the key of a document.
     */
    isKeyField() {
        return this.length === 1 && this.get(0) === DOCUMENT_KEY_NAME;
    }
    /**
     * The field designating the key of a document.
     */
    static keyField() {
        return new FieldPath$1([DOCUMENT_KEY_NAME]);
    }
    /**
     * Parses a field string from the given server-formatted string.
     *
     * - Splitting the empty string is not allowed (for now at least).
     * - Empty segments within the string (e.g. if there are two consecutive
     *   separators) are not allowed.
     *
     * TODO(b/37244157): we should make this more strict. Right now, it allows
     * non-identifier path components, even if they aren't escaped.
     */
    static fromServerFormat(path) {
        const segments = [];
        let segment = '';
        let cursor = 0;
        // Pushes the accumulated segment, rejecting empty segments (which
        // arise from leading/trailing/doubled dots).
        const flushSegment = () => {
            if (segment.length === 0) {
                throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid field path (${path}). Paths must not be empty, begin ` +
                    `with '.', end with '.', or contain '..'`);
            }
            segments.push(segment);
            segment = '';
        };
        let insideBackticks = false;
        while (cursor < path.length) {
            const c = path[cursor];
            if (c === '\\') {
                // Only \\, \. and \` are legal escape sequences.
                if (cursor + 1 === path.length) {
                    throw new FirestoreError(Code.INVALID_ARGUMENT, 'Path has trailing escape character: ' + path);
                }
                const escaped = path[cursor + 1];
                if (escaped !== '\\' && escaped !== '.' && escaped !== '`') {
                    throw new FirestoreError(Code.INVALID_ARGUMENT, 'Path has invalid escape sequence: ' + path);
                }
                segment += escaped;
                cursor += 2;
            }
            else if (c === '`') {
                // Backticks toggle verbatim mode; dots inside are literal.
                insideBackticks = !insideBackticks;
                cursor++;
            }
            else if (c === '.' && !insideBackticks) {
                flushSegment();
                cursor++;
            }
            else {
                segment += c;
                cursor++;
            }
        }
        flushSegment();
        if (insideBackticks) {
            throw new FirestoreError(Code.INVALID_ARGUMENT, 'Unterminated ` in path: ' + path);
        }
        return new FieldPath$1(segments);
    }
    static emptyPath() {
        return new FieldPath$1([]);
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * A key identifying a single document, backed by its resource path.
 * @internal
 */
class DocumentKey {
    constructor(path) {
        this.path = path;
    }
    /** Builds a key from a slash-delimited resource path string. */
    static fromPath(path) {
        return new DocumentKey(ResourcePath.fromString(path));
    }
    /**
     * Builds a key from a fully qualified resource name, dropping the first
     * five segments (the `projects/<p>/databases/<d>/documents` prefix).
     */
    static fromName(name) {
        return new DocumentKey(ResourcePath.fromString(name).popFirst(5));
    }
    static empty() {
        return new DocumentKey(ResourcePath.emptyPath());
    }
    get collectionGroup() {
        return this.path.popLast().lastSegment();
    }
    /** Returns true if the document is in the specified collectionId. */
    hasCollectionId(collectionId) {
        if (this.path.length < 2) {
            return false;
        }
        return this.path.get(this.path.length - 2) === collectionId;
    }
    /** Returns the collection group (i.e. the name of the parent collection) for this key. */
    getCollectionGroup() {
        return this.path.get(this.path.length - 2);
    }
    /** Returns the fully qualified path to the parent collection. */
    getCollectionPath() {
        return this.path.popLast();
    }
    isEqual(other) {
        if (other === null) {
            return false;
        }
        return ResourcePath.comparator(this.path, other.path) === 0;
    }
    toString() {
        return this.path.toString();
    }
    static comparator(k1, k2) {
        return ResourcePath.comparator(k1.path, k2.path);
    }
    /** Document paths have an even number of segments (collection/doc pairs). */
    static isDocumentKey(path) {
        return path.length % 2 === 0;
    }
    /**
     * Creates and returns a new document key with the given segments.
     *
     * @param segments - The segments of the path to the document
     * @returns A new instance of DocumentKey
     */
    static fromSegments(segments) {
        return new DocumentKey(new ResourcePath(segments.slice()));
    }
}
/**
* @license
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * The initial mutation batch id for each index. Gets updated during index
 * backfill.
 */
const INITIAL_LARGEST_BATCH_ID = -1;
/**
 * The initial sequence number for each index. Gets updated during index
 * backfill.
 */
const INITIAL_SEQUENCE_NUMBER = 0;
/**
 * An index definition for field indexes in Firestore.
 *
 * Every index is associated with a collection. The definition contains a list
 * of fields and their index kind (which can be `ASCENDING`, `DESCENDING` or
 * `CONTAINS` for ArrayContains/ArrayContainsAny queries).
 *
 * Unlike the backend, the SDK does not differentiate between collection or
 * collection group-scoped indices. Every index can be used for both single
 * collection and collection group queries.
 */
class FieldIndex {
    /**
     * @param indexId - The index ID. Returns -1 if the index ID is not
     * available (e.g. the index has not yet been persisted).
     * @param collectionGroup - The collection ID this index applies to.
     * @param fields - The field segments for this index.
     * @param indexState - Shows how up-to-date the index is for the current
     * user.
     */
    constructor(indexId, collectionGroup, fields, indexState) {
        this.indexId = indexId;
        this.collectionGroup = collectionGroup;
        this.fields = fields;
        this.indexState = indexState;
    }
}
/** An ID for an index that has not yet been added to persistence. */
FieldIndex.UNKNOWN_ID = -1;
/** Returns the ArrayContains/ArrayContainsAny segment for this index. */
function fieldIndexGetArraySegment(fieldIndex) {
    // At most one CONTAINS segment exists per index, so the first match wins.
    for (const segment of fieldIndex.fields) {
        if (segment.kind === 2 /* IndexKind.CONTAINS */) {
            return segment;
        }
    }
    return undefined;
}
/** Returns all directional (ascending/descending) segments for this index. */
function fieldIndexGetDirectionalSegments(fieldIndex) {
    const directional = [];
    for (const segment of fieldIndex.fields) {
        if (segment.kind !== 2 /* IndexKind.CONTAINS */) {
            directional.push(segment);
        }
    }
    return directional;
}
/**
 * Returns the order of the document key component for the given index.
 *
 * PORTING NOTE: This is only used in the Web IndexedDb implementation.
 */
function fieldIndexGetKeyOrder(fieldIndex) {
    const directionalSegments = fieldIndexGetDirectionalSegments(fieldIndex);
    if (directionalSegments.length === 0) {
        // With no directional segments the key defaults to ascending order.
        return 0 /* IndexKind.ASCENDING */;
    }
    // Otherwise the key inherits the direction of the last directional segment.
    return directionalSegments[directionalSegments.length - 1].kind;
}
/**
 * Compares indexes by collection group and segments. Ignores update time and
 * index ID.
 */
function fieldIndexSemanticComparator(left, right) {
    let cmp = primitiveComparator(left.collectionGroup, right.collectionGroup);
    if (cmp !== 0) {
        return cmp;
    }
    // Compare segment-by-segment over the shared prefix; the shorter field
    // list sorts first when all shared segments are equal.
    const shared = Math.min(left.fields.length, right.fields.length);
    for (let i = 0; i < shared; ++i) {
        cmp = indexSegmentComparator(left.fields[i], right.fields[i]);
        if (cmp !== 0) {
            return cmp;
        }
    }
    return primitiveComparator(left.fields.length, right.fields.length);
}
/** Returns a debug representation of the field index */
function fieldIndexToString(fieldIndex) {
    const fieldList = fieldIndex.fields
        .map(f => `${f.fieldPath}:${f.kind}`)
        .join(',');
    return `id=${fieldIndex.indexId}|cg=${fieldIndex.collectionGroup}|f=${fieldList}`;
}
/** An index component consisting of field path and index type. */
class IndexSegment {
    /**
     * @param fieldPath - The field path of the component.
     * @param kind - The field's sorting order.
     */
    constructor(fieldPath, kind) {
        this.fieldPath = fieldPath;
        this.kind = kind;
    }
}
/** Orders segments by field path first, then by index kind. */
function indexSegmentComparator(left, right) {
    const pathCmp = FieldPath$1.comparator(left.fieldPath, right.fieldPath);
    return pathCmp !== 0 ? pathCmp : primitiveComparator(left.kind, right.kind);
}
/**
 * Stores the "high water mark" that indicates how updated the Index is for the
 * current user.
 */
class IndexState {
    /**
     * @param sequenceNumber - Indicates when the index was last updated
     * (relative to other indexes).
     * @param offset - The latest indexed read time, document and batch id.
     */
    constructor(sequenceNumber, offset) {
        this.sequenceNumber = sequenceNumber;
        this.offset = offset;
    }
    /** The state of an index that has not yet been backfilled. */
    static empty() {
        return new IndexState(INITIAL_SEQUENCE_NUMBER, IndexOffset.min());
    }
}
/**
 * Creates an offset that matches all documents with a read time higher than
 * `readTime`.
 */
function newIndexOffsetSuccessorFromReadTime(readTime, largestBatchId) {
    // We want to create an offset that matches all documents with a read time
    // greater than the provided read time. To do so, we technically need to
    // create an offset for `(readTime, MAX_DOCUMENT_KEY)`. While we could use
    // Unicode codepoints to generate MAX_DOCUMENT_KEY, it is much easier to use
    // `(readTime + 1, DocumentKey.empty())` since `> DocumentKey.empty()`
    // matches all valid document IDs.
    const { seconds, nanoseconds } = readTime.toTimestamp();
    const bumpedNanos = nanoseconds + 1;
    // Carry into the seconds component when nanoseconds overflow.
    const successor = SnapshotVersion.fromTimestamp(bumpedNanos === 1e9
        ? new Timestamp(seconds + 1, 0)
        : new Timestamp(seconds, bumpedNanos));
    return new IndexOffset(successor, DocumentKey.empty(), largestBatchId);
}
/** Creates a new offset based on the provided document. */
function newIndexOffsetFromDocument(document) {
    const { readTime, key } = document;
    return new IndexOffset(readTime, key, INITIAL_LARGEST_BATCH_ID);
}
/**
 * Stores the latest read time, document and batch ID that were processed for an
 * index.
 */
class IndexOffset {
    /**
     * @param readTime - The latest read time version that has been indexed by
     * Firestore for this field index.
     * @param documentKey - The key of the last document that was indexed for
     * this query. Use `DocumentKey.empty()` if no document has been indexed.
     * @param largestBatchId - The largest mutation batch id that's been
     * processed by Firestore.
     */
    constructor(readTime, documentKey, largestBatchId) {
        this.readTime = readTime;
        this.documentKey = documentKey;
        this.largestBatchId = largestBatchId;
    }
    /** Returns an offset that sorts before all regular offsets. */
    static min() {
        return new IndexOffset(SnapshotVersion.min(), DocumentKey.empty(), INITIAL_LARGEST_BATCH_ID);
    }
    /** Returns an offset that sorts after all regular offsets. */
    static max() {
        return new IndexOffset(SnapshotVersion.max(), DocumentKey.empty(), INITIAL_LARGEST_BATCH_ID);
    }
}
/** Orders offsets by read time, then document key, then batch id. */
function indexOffsetComparator(left, right) {
    const timeCmp = left.readTime.compareTo(right.readTime);
    if (timeCmp !== 0) {
        return timeCmp;
    }
    const keyCmp = DocumentKey.comparator(left.documentKey, right.documentKey);
    if (keyCmp !== 0) {
        return keyCmp;
    }
    return primitiveComparator(left.largestBatchId, right.largestBatchId);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const PRIMARY_LEASE_LOST_ERROR_MSG = 'The current tab is not in the required state to perform this operation. ' +
    'It might be necessary to refresh the browser tab.';
/**
 * A base class representing a persistence transaction, encapsulating both the
 * transaction's sequence numbers as well as a list of onCommitted listeners.
 *
 * When you call Persistence.runTransaction(), it will create a transaction and
 * pass it to your callback. You then pass it to any method that operates
 * on persistence.
 */
class PersistenceTransaction {
    constructor() {
        // Callbacks to invoke once the transaction has been committed.
        this.onCommittedListeners = [];
    }
    /** Registers a callback to run when the transaction commits. */
    addOnCommittedListener(listener) {
        this.onCommittedListeners.push(listener);
    }
    /** Invokes every registered onCommitted listener, in registration order. */
    raiseOnCommittedEvent() {
        this.onCommittedListeners.forEach(listener => listener());
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Verifies the error thrown by a LocalStore operation. If a LocalStore
 * operation fails because the primary lease has been taken by another client,
 * we ignore the error (the persistence layer will immediately call
 * `applyPrimaryLease` to propagate the primary state change). All other errors
 * are re-thrown.
 *
 * @param err - An error returned by a LocalStore operation.
 * @returns A Promise that resolves after we recovered, or the original error.
 */
async function ignoreIfPrimaryLeaseLoss(err) {
    const isLeaseLoss = err.code === Code.FAILED_PRECONDITION &&
        err.message === PRIMARY_LEASE_LOST_ERROR_MSG;
    if (!isLeaseLoss) {
        throw err;
    }
    logDebug('LocalStore', 'Unexpectedly lost primary lease');
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * PersistencePromise is essentially a re-implementation of Promise except
 * it has a .next() method instead of .then() and .next() and .catch() callbacks
 * are executed synchronously when a PersistencePromise resolves rather than
 * asynchronously (Promise implementations use setImmediate() or similar).
 *
 * This is necessary to interoperate with IndexedDB which will automatically
 * commit transactions if control is returned to the event loop without
 * synchronously initiating another operation on the transaction.
 *
 * NOTE: .then() and .catch() only allow a single consumer, unlike normal
 * Promises.
 */
class PersistencePromise {
    constructor(callback) {
        // NOTE: next/catchCallback will always point to our own wrapper functions,
        // not the user's raw next() or catch() callbacks.
        this.nextCallback = null;
        this.catchCallback = null;
        // When the operation resolves, we'll set result or error and mark isDone.
        this.result = undefined;
        this.error = undefined;
        this.isDone = false;
        // Set to true when .then() or .catch() are called and prevents additional
        // chaining.
        this.callbackAttached = false;
        const onResolve = value => {
            this.isDone = true;
            this.result = value;
            if (this.nextCallback) {
                // value should be defined unless T is Void, but we can't express
                // that in the type system.
                this.nextCallback(value);
            }
        };
        const onReject = error => {
            this.isDone = true;
            this.error = error;
            if (this.catchCallback) {
                this.catchCallback(error);
            }
        };
        callback(onResolve, onReject);
    }
    catch(fn) {
        return this.next(undefined, fn);
    }
    next(nextFn, catchFn) {
        if (this.callbackAttached) {
            fail();
        }
        this.callbackAttached = true;
        if (!this.isDone) {
            // Still pending: install wrappers that forward the eventual outcome
            // into the chained promise.
            return new PersistencePromise((resolve, reject) => {
                this.nextCallback = (value) => {
                    this.wrapSuccess(nextFn, value).next(resolve, reject);
                };
                this.catchCallback = (error) => {
                    this.wrapFailure(catchFn, error).next(resolve, reject);
                };
            });
        }
        // Already settled: invoke the appropriate callback synchronously.
        return this.error
            ? this.wrapFailure(catchFn, this.error)
            : this.wrapSuccess(nextFn, this.result);
    }
    toPromise() {
        return new Promise((resolve, reject) => this.next(resolve, reject));
    }
    // Runs a user callback, normalizing its return value (or a thrown error)
    // into a PersistencePromise.
    wrapUserFunction(fn) {
        try {
            const outcome = fn();
            return outcome instanceof PersistencePromise
                ? outcome
                : PersistencePromise.resolve(outcome);
        }
        catch (e) {
            return PersistencePromise.reject(e);
        }
    }
    wrapSuccess(nextFn, value) {
        if (!nextFn) {
            // If there's no nextFn, then R must be the same as T
            return PersistencePromise.resolve(value);
        }
        return this.wrapUserFunction(() => nextFn(value));
    }
    wrapFailure(catchFn, error) {
        if (!catchFn) {
            return PersistencePromise.reject(error);
        }
        return this.wrapUserFunction(() => catchFn(error));
    }
    static resolve(result) {
        return new PersistencePromise((resolve, _reject) => resolve(result));
    }
    static reject(error) {
        return new PersistencePromise((_resolve, reject) => reject(error));
    }
    static waitFor(
    // Accept all Promise types in waitFor().
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    all) {
        return new PersistencePromise((resolve, reject) => {
            let total = 0;
            let settled = 0;
            let registrationDone = false;
            all.forEach(element => {
                ++total;
                element.next(() => {
                    ++settled;
                    // Elements that settle during registration are counted but
                    // must not resolve prematurely; the final check below
                    // handles the case where everything settled synchronously.
                    if (registrationDone && settled === total) {
                        resolve();
                    }
                }, err => reject(err));
            });
            registrationDone = true;
            if (settled === total) {
                resolve();
            }
        });
    }
    /**
     * Given an array of predicate functions that asynchronously evaluate to a
     * boolean, implements a short-circuiting `or` between the results. Predicates
     * will be evaluated until one of them returns `true`, then stop. The final
     * result will be whether any of them returned `true`.
     */
    static or(predicates) {
        let chain = PersistencePromise.resolve(false);
        for (const predicate of predicates) {
            chain = chain.next(isTrue => isTrue
                ? PersistencePromise.resolve(isTrue)
                : predicate());
        }
        return chain;
    }
    static forEach(collection, f) {
        const promises = [];
        collection.forEach((r, s) => {
            promises.push(f.call(this, r, s));
        });
        return this.waitFor(promises);
    }
    /**
     * Concurrently map all array elements through asynchronous function.
     */
    static mapArray(array, f) {
        return new PersistencePromise((resolve, reject) => {
            // NOTE(review): an empty input array leaves this promise forever
            // pending -- presumably callers never pass []; confirm upstream.
            const total = array.length;
            const results = new Array(total);
            let completed = 0;
            for (let i = 0; i < total; i++) {
                const index = i;
                f(array[index]).next(result => {
                    results[index] = result;
                    ++completed;
                    if (completed === total) {
                        resolve(results);
                    }
                }, err => reject(err));
            }
        });
    }
    /**
     * An alternative to recursive PersistencePromise calls, that avoids
     * potential memory problems from unbounded chains of promises.
     *
     * The `action` will be called repeatedly while `condition` is true.
     */
    static doWhile(condition, action) {
        return new PersistencePromise((resolve, reject) => {
            const step = () => {
                if (condition() !== true) {
                    resolve();
                    return;
                }
                action().next(step, reject);
            };
            step();
        });
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// References to `window` are guarded by SimpleDb.isAvailable()
/* eslint-disable no-restricted-globals */
const LOG_TAG$i = 'SimpleDb';
/**
 * The maximum number of retry attempts for an IndexedDb transaction that fails
 * with a DOMException.
 */
const TRANSACTION_RETRY_COUNT = 3;
/**
 * Wraps an IDBTransaction and exposes a store() method to get a handle to a
 * specific object store.
 */
class SimpleDbTransaction {
    constructor(action, transaction) {
        this.action = action;
        this.transaction = transaction;
        this.aborted = false;
        /**
         * A `Promise` that resolves with the result of the IndexedDb transaction.
         */
        this.completionDeferred = new Deferred();
        this.transaction.oncomplete = () => {
            this.completionDeferred.resolve();
        };
        this.transaction.onabort = () => {
            // An abort with an underlying error is surfaced as a transaction
            // failure; a client-initiated abort resolves normally.
            transaction.error
                ? this.completionDeferred.reject(new IndexedDbTransactionError(action, transaction.error))
                : this.completionDeferred.resolve();
        };
        this.transaction.onerror = (event) => {
            const error = checkForAndReportiOSError(event.target.error);
            this.completionDeferred.reject(new IndexedDbTransactionError(action, error));
        };
    }
    /** Opens a transaction on `db`, wrapping any synchronous failure. */
    static open(db, action, mode, objectStoreNames) {
        try {
            return new SimpleDbTransaction(action, db.transaction(objectStoreNames, mode));
        }
        catch (e) {
            throw new IndexedDbTransactionError(action, e);
        }
    }
    get completionPromise() {
        return this.completionDeferred.promise;
    }
    abort(error) {
        if (error) {
            this.completionDeferred.reject(error);
        }
        if (this.aborted) {
            return;
        }
        logDebug(LOG_TAG$i, 'Aborting transaction:', error ? error.message : 'Client-initiated abort');
        this.aborted = true;
        this.transaction.abort();
    }
    maybeCommit() {
        // If the browser supports V3 IndexedDB, we invoke commit() explicitly to
        // speed up IndexedDB processing if the event loop remains blocked.
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        const maybeV3IndexedDb = this.transaction;
        if (!this.aborted && typeof maybeV3IndexedDb.commit === 'function') {
            maybeV3IndexedDb.commit();
        }
    }
    /**
     * Returns a SimpleDbStore<KeyType, ValueType> for the specified store. All
     * operations performed on the SimpleDbStore happen within the context of this
     * transaction and it cannot be used anymore once the transaction is
     * completed.
     *
     * Note that we can't actually enforce that the KeyType and ValueType are
     * correct, but they allow type safety through the rest of the consuming code.
     */
    store(storeName) {
        const objectStore = this.transaction.objectStore(storeName);
        return new SimpleDbStore(objectStore);
    }
}
/**
* Provides a wrapper around IndexedDb with a simplified interface that uses
* Promise-like return values to chain operations. Real promises cannot be used
* since .then() continuations are executed asynchronously (e.g. via
* .setImmediate), which would cause IndexedDB to end the transaction.
* See PersistencePromise for more details.
*/
class SimpleDb {
    /*
     * Creates a new SimpleDb wrapper for IndexedDb database `name`.
     *
     * Note that `version` must not be a downgrade. IndexedDB does not support
     * downgrading the schema version. We currently do not support any way to do
     * versioning outside of IndexedDB's versioning mechanism, as only
     * version-upgrade transactions are allowed to do things like create
     * objectstores.
     */
    constructor(name, version, schemaConverter) {
        this.name = name;
        this.version = version;
        this.schemaConverter = schemaConverter;
        const iOSVersion = SimpleDb.getIOSVersion(getUA());
        // NOTE: According to https://bugs.webkit.org/show_bug.cgi?id=197050, the
        // bug we're checking for should exist in iOS >= 12.2 and < 13, but for
        // whatever reason it's much harder to hit after 12.2 so we only proactively
        // log on 12.2.
        if (iOSVersion === 12.2) {
            logError('Firestore persistence suffers from a bug in iOS 12.2 ' +
                'Safari that may cause your app to stop working. See ' +
                'https://stackoverflow.com/q/56496296/110915 for details ' +
                'and a potential workaround.');
        }
    }
    /** Deletes the specified database. */
    static delete(name) {
        logDebug(LOG_TAG$i, 'Removing database:', name);
        // Use the bare `indexedDB` global (as `ensureDb()` already does) rather
        // than `window.indexedDB`: `window` is not defined in Node or worker
        // environments, while a global `indexedDB` binding is provided both by
        // browsers and by IndexedDB shims.
        return wrapRequest(indexedDB.deleteDatabase(name)).toPromise();
    }
    /** Returns true if IndexedDB is available in the current environment. */
    static isAvailable() {
        if (!isIndexedDBAvailable()) {
            return false;
        }
        if (SimpleDb.isMockPersistence()) {
            return true;
        }
        // We extensively use indexed array values and compound keys,
        // which IE and Edge do not support. However, they still have indexedDB
        // defined on the window, so we need to check for them here and make sure
        // to return that persistence is not enabled for those browsers.
        // For tracking support of this feature, see here:
        // https://developer.microsoft.com/en-us/microsoft-edge/platform/status/indexeddbarraysandmultientrysupport/
        // Check the UA string to find out the browser.
        const ua = getUA();
        // IE 10
        // ua = 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)';
        // IE 11
        // ua = 'Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko';
        // Edge
        // ua = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML,
        // like Gecko) Chrome/39.0.2171.71 Safari/537.36 Edge/12.0';
        // iOS Safari: Disable for users running iOS version < 10.
        const iOSVersion = SimpleDb.getIOSVersion(ua);
        const isUnsupportedIOS = 0 < iOSVersion && iOSVersion < 10;
        // Android browser: Disable for users running version < 4.5.
        const androidVersion = SimpleDb.getAndroidVersion(ua);
        const isUnsupportedAndroid = 0 < androidVersion && androidVersion < 4.5;
        if (ua.indexOf('MSIE ') > 0 ||
            ua.indexOf('Trident/') > 0 ||
            ua.indexOf('Edge/') > 0 ||
            isUnsupportedIOS ||
            isUnsupportedAndroid) {
            return false;
        }
        else {
            return true;
        }
    }
    /**
     * Returns true if the backing IndexedDB store is the Node IndexedDBShim
     * (see https://github.com/axemclion/IndexedDBShim).
     */
    static isMockPersistence() {
        var _a;
        return (typeof process !== 'undefined' &&
            ((_a = process.env) === null || _a === void 0 ? void 0 : _a.USE_MOCK_PERSISTENCE) === 'YES');
    }
    /** Helper to get a typed SimpleDbStore from a transaction. */
    static getStore(txn, store) {
        return txn.store(store);
    }
    // visible for testing
    /** Parse User Agent to determine iOS version. Returns -1 if not found. */
    static getIOSVersion(ua) {
        const iOSVersionRegex = ua.match(/i(?:phone|pad|pod) os ([\d_]+)/i);
        const version = iOSVersionRegex
            ? iOSVersionRegex[1].split('_').slice(0, 2).join('.')
            : '-1';
        return Number(version);
    }
    // visible for testing
    /** Parse User Agent to determine Android version. Returns -1 if not found. */
    static getAndroidVersion(ua) {
        const androidVersionRegex = ua.match(/Android ([\d.]+)/i);
        const version = androidVersionRegex
            ? androidVersionRegex[1].split('.').slice(0, 2).join('.')
            : '-1';
        return Number(version);
    }
    /**
     * Opens the specified database, creating or upgrading it if necessary.
     * Caches the open connection on `this.db` so subsequent calls are cheap.
     *
     * @param action - Human-readable name of the operation that triggered the
     * open; used for error reporting.
     */
    async ensureDb(action) {
        if (!this.db) {
            logDebug(LOG_TAG$i, 'Opening database:', this.name);
            this.db = await new Promise((resolve, reject) => {
                // TODO(mikelehen): Investigate browser compatibility.
                // https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API/Using_IndexedDB
                // suggests IE9 and older WebKit browsers handle upgrade
                // differently. They expect setVersion, as described here:
                // https://developer.mozilla.org/en-US/docs/Web/API/IDBVersionChangeRequest/setVersion
                const request = indexedDB.open(this.name, this.version);
                request.onsuccess = (event) => {
                    const db = event.target.result;
                    resolve(db);
                };
                request.onblocked = () => {
                    reject(new IndexedDbTransactionError(action, 'Cannot upgrade IndexedDB schema while another tab is open. ' +
                        'Close all tabs that access Firestore and reload this page to proceed.'));
                };
                request.onerror = (event) => {
                    const error = event.target.error;
                    if (error.name === 'VersionError') {
                        reject(new FirestoreError(Code.FAILED_PRECONDITION, 'A newer version of the Firestore SDK was previously used and so the persisted ' +
                            'data is not compatible with the version of the SDK you are now using. The SDK ' +
                            'will operate with persistence disabled. If you need persistence, please ' +
                            're-upgrade to a newer version of the SDK or else clear the persisted IndexedDB ' +
                            'data for your app to start fresh.'));
                    }
                    else if (error.name === 'InvalidStateError') {
                        reject(new FirestoreError(Code.FAILED_PRECONDITION, 'Unable to open an IndexedDB connection. This could be due to running in a ' +
                            'private browsing session on a browser whose private browsing sessions do not ' +
                            'support IndexedDB: ' +
                            error));
                    }
                    else {
                        reject(new IndexedDbTransactionError(action, error));
                    }
                };
                request.onupgradeneeded = (event) => {
                    logDebug(LOG_TAG$i, 'Database "' + this.name + '" requires upgrade from version:', event.oldVersion);
                    const db = event.target.result;
                    this.schemaConverter
                        .createOrUpgrade(db, request.transaction, event.oldVersion, this.version)
                        .next(() => {
                        logDebug(LOG_TAG$i, 'Database upgrade to version ' + this.version + ' complete');
                    });
                };
            });
        }
        // (Re-)attach the version-change listener whenever a connection exists.
        if (this.versionchangelistener) {
            this.db.onversionchange = event => this.versionchangelistener(event);
        }
        return this.db;
    }
    /**
     * Registers a listener for `versionchange` events (fired when another tab
     * requests a schema upgrade) and attaches it to an already-open connection.
     */
    setVersionChangeListener(versionChangeListener) {
        this.versionchangelistener = versionChangeListener;
        if (this.db) {
            this.db.onversionchange = (event) => {
                return versionChangeListener(event);
            };
        }
    }
    /**
     * Runs `transactionFn` inside an IndexedDB transaction over `objectStores`,
     * retrying up to TRANSACTION_RETRY_COUNT attempts on errors other than
     * FirebaseError. Resolves with `transactionFn`'s result once the underlying
     * IndexedDB transaction has completed (its onsuccess event has fired).
     *
     * @param action - Name of the operation, for error reporting.
     * @param mode - 'readonly' or a read-write mode.
     * @param objectStores - The object stores the transaction may touch.
     * @param transactionFn - Callback receiving the SimpleDbTransaction.
     */
    async runTransaction(action, mode, objectStores, transactionFn) {
        const readonly = mode === 'readonly';
        let attemptNumber = 0;
        while (true) {
            ++attemptNumber;
            try {
                this.db = await this.ensureDb(action);
                const transaction = SimpleDbTransaction.open(this.db, action, readonly ? 'readonly' : 'readwrite', objectStores);
                const transactionFnResult = transactionFn(transaction)
                    .next(result => {
                    transaction.maybeCommit();
                    return result;
                })
                    .catch(error => {
                    // Abort the transaction if there was an error.
                    transaction.abort(error);
                    // We cannot actually recover, and calling `abort()` will cause the transaction's
                    // completion promise to be rejected. This in turn means that we won't use
                    // `transactionFnResult` below. We return a rejection here so that we don't add the
                    // possibility of returning `void` to the type of `transactionFnResult`.
                    return PersistencePromise.reject(error);
                })
                    .toPromise();
                // As noted above, errors are propagated by aborting the transaction. So
                // we swallow any error here to avoid the browser logging it as unhandled.
                transactionFnResult.catch(() => { });
                // Wait for the transaction to complete (i.e. IndexedDb's onsuccess event to
                // fire), but still return the original transactionFnResult back to the
                // caller.
                await transaction.completionPromise;
                return transactionFnResult;
            }
            catch (e) {
                const error = e;
                // TODO(schmidt-sebastian): We could probably be smarter about this and
                // not retry exceptions that are likely unrecoverable (such as quota
                // exceeded errors).
                // Note: We cannot use an instanceof check for FirestoreException, since the
                // exception is wrapped in a generic error by our async/await handling.
                const retryable = error.name !== 'FirebaseError' &&
                    attemptNumber < TRANSACTION_RETRY_COUNT;
                logDebug(LOG_TAG$i, 'Transaction failed with error:', error.message, 'Retrying:', retryable);
                // Close the connection before retrying so the next attempt re-opens it.
                this.close();
                if (!retryable) {
                    return Promise.reject(error);
                }
            }
        }
    }
    /** Closes the underlying IndexedDB connection, if any, and clears it. */
    close() {
        if (this.db) {
            this.db.close();
        }
        this.db = undefined;
    }
}
/**
 * A controller for iterating over a key range or index. It allows an iterate
 * callback to delete the currently-referenced object, or jump to a new key
 * within the key range or index.
 */
class IterationController {
    constructor(dbCursor) {
        this.dbCursor = dbCursor;
        this.nextKey = null;
        this.shouldStop = false;
    }
    /** True once `done()` has been called. */
    get isDone() {
        return this.shouldStop;
    }
    /** The key requested via `skip()`, or null to simply advance. */
    get skipToKey() {
        return this.nextKey;
    }
    set cursor(value) {
        this.dbCursor = value;
    }
    /**
     * This function can be called to skip to that next key, which could be
     * an index or a primary key.
     */
    skip(key) {
        this.nextKey = key;
    }
    /**
     * This function can be called to stop iteration at any point.
     */
    done() {
        this.shouldStop = true;
    }
    /**
     * Delete the current cursor value from the object store.
     *
     * NOTE: You CANNOT do this with a keysOnly query.
     */
    delete() {
        return wrapRequest(this.dbCursor.delete());
    }
}
/** An error that wraps exceptions that thrown during IndexedDB execution. */
class IndexedDbTransactionError extends FirestoreError {
    constructor(actionName, cause) {
        // Build the message first so the super() call stays readable. The
        // `name` tag is what isIndexedDbTransactionError() checks, since
        // instanceof is unreliable for wrapped errors.
        const message = `IndexedDB transaction '${actionName}' failed: ${cause}`;
        super(Code.UNAVAILABLE, message);
        this.name = 'IndexedDbTransactionError';
    }
}
/** Verifies whether `e` is an IndexedDbTransactionError. */
function isIndexedDbTransactionError(e) {
    // Use name equality rather than instanceof: instanceof checks on errors
    // don't work with errors that wrap other errors.
    const expectedName = 'IndexedDbTransactionError';
    return e.name === expectedName;
}
/**
 * A wrapper around an IDBObjectStore providing an API that:
 *
 * 1) Has generic KeyType / ValueType parameters to provide strongly-typed
 * methods for acting against the object store.
 * 2) Deals with IndexedDB's onsuccess / onerror event callbacks, making every
 * method return a PersistencePromise instead.
 * 3) Provides a higher-level API to avoid needing to do excessive wrapping of
 * intermediate IndexedDB types (IDBCursorWithValue, etc.)
 */
class SimpleDbStore {
    constructor(store) {
        this.store = store;
    }
    /**
     * Writes a value into the store. With two arguments, `keyOrValue` is the
     * explicit primary key for `value`; with one argument the store's key path
     * or key generator supplies the key.
     */
    put(keyOrValue, value) {
        let request;
        if (value !== undefined) {
            logDebug(LOG_TAG$i, 'PUT', this.store.name, keyOrValue, value);
            request = this.store.put(value, keyOrValue);
        }
        else {
            logDebug(LOG_TAG$i, 'PUT', this.store.name, '<auto-key>', keyOrValue);
            request = this.store.put(keyOrValue);
        }
        return wrapRequest(request);
    }
    /**
     * Adds a new value into an Object Store and returns the new key. Similar to
     * IndexedDb's `add()`, this method will fail on primary key collisions.
     *
     * @param value - The object to write.
     * @returns The key of the value to add.
     */
    add(value) {
        logDebug(LOG_TAG$i, 'ADD', this.store.name, value, value);
        const request = this.store.add(value);
        return wrapRequest(request);
    }
    /**
     * Gets the object with the specified key from the specified store, or null
     * if no object exists with the specified key.
     *
     * @key The key of the object to get.
     * @returns The object with the specified key or null if no object exists.
     */
    get(key) {
        const request = this.store.get(key);
        // We're doing an unsafe cast to ValueType.
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        return wrapRequest(request).next(result => {
            // Normalize nonexistence to null.
            if (result === undefined) {
                result = null;
            }
            logDebug(LOG_TAG$i, 'GET', this.store.name, key, result);
            return result;
        });
    }
    /** Deletes the row with the given primary key, if any. */
    delete(key) {
        logDebug(LOG_TAG$i, 'DELETE', this.store.name, key);
        const request = this.store.delete(key);
        return wrapRequest(request);
    }
    /**
     * If we ever need more of the count variants, we can add overloads. For now,
     * all we need is to count everything in a store.
     *
     * Returns the number of rows in the store.
     */
    count() {
        logDebug(LOG_TAG$i, 'COUNT', this.store.name);
        const request = this.store.count();
        return wrapRequest(request);
    }
    /**
     * Loads all rows matching the (optional) index name and/or key range.
     * See options() for how the two optional arguments are disambiguated.
     */
    loadAll(indexOrRange, range) {
        const iterateOptions = this.options(indexOrRange, range);
        // Use `getAll()` if the browser supports IndexedDB v3, as it is roughly
        // 20% faster.
        const store = iterateOptions.index
            ? this.store.index(iterateOptions.index)
            : this.store;
        if (typeof store.getAll === 'function') {
            const request = store.getAll(iterateOptions.range);
            return new PersistencePromise((resolve, reject) => {
                request.onerror = (event) => {
                    reject(event.target.error);
                };
                request.onsuccess = (event) => {
                    resolve(event.target.result);
                };
            });
        }
        else {
            // Fallback for environments without getAll(): collect values by
            // walking a cursor.
            const cursor = this.cursor(iterateOptions);
            const results = [];
            return this.iterateCursor(cursor, (key, value) => {
                results.push(value);
            }).next(() => {
                return results;
            });
        }
    }
    /**
     * Loads the first `count` elements from the provided index range. Loads all
     * elements if no limit is provided.
     */
    loadFirst(range, count) {
        // NOTE(review): unlike loadAll(), this assumes getAll() exists (IndexedDB
        // v3) — there is no cursor fallback here.
        const request = this.store.getAll(range, count === null ? undefined : count);
        return new PersistencePromise((resolve, reject) => {
            request.onerror = (event) => {
                reject(event.target.error);
            };
            request.onsuccess = (event) => {
                resolve(event.target.result);
            };
        });
    }
    /** Deletes every row matched by the (optional) index name and/or range. */
    deleteAll(indexOrRange, range) {
        logDebug(LOG_TAG$i, 'DELETE ALL', this.store.name);
        const options = this.options(indexOrRange, range);
        options.keysOnly = false;
        const cursor = this.cursor(options);
        return this.iterateCursor(cursor, (key, value, control) => {
            // NOTE: Calling delete() on a cursor is documented as more efficient than
            // calling delete() on an object store with a single key
            // (https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/delete),
            // however, this requires us *not* to use a keysOnly cursor
            // (https://developer.mozilla.org/en-US/docs/Web/API/IDBCursor/delete). We
            // may want to compare the performance of each method.
            return control.delete();
        });
    }
    /**
     * Iterates over the store, invoking `callback` per row. The first argument
     * may be either an options object or the callback itself (options omitted).
     */
    iterate(optionsOrCallback, callback) {
        let options;
        if (!callback) {
            options = {};
            callback = optionsOrCallback;
        }
        else {
            options = optionsOrCallback;
        }
        const cursor = this.cursor(options);
        return this.iterateCursor(cursor, callback);
    }
    /**
     * Iterates over a store, but waits for the given callback to complete for
     * each entry before iterating the next entry. This allows the callback to do
     * asynchronous work to determine if this iteration should continue.
     *
     * The provided callback should return `true` to continue iteration, and
     * `false` otherwise.
     */
    iterateSerial(callback) {
        const cursorRequest = this.cursor({});
        return new PersistencePromise((resolve, reject) => {
            cursorRequest.onerror = (event) => {
                const error = checkForAndReportiOSError(event.target.error);
                reject(error);
            };
            cursorRequest.onsuccess = (event) => {
                const cursor = event.target.result;
                if (!cursor) {
                    // A null cursor signals the end of iteration.
                    resolve();
                    return;
                }
                // Only advance the cursor after the callback's async work resolves.
                callback(cursor.primaryKey, cursor.value).next(shouldContinue => {
                    if (shouldContinue) {
                        cursor.continue();
                    }
                    else {
                        resolve();
                    }
                });
            };
        });
    }
    /**
     * Drives a cursor request, invoking `fn(primaryKey, value, controller)` per
     * row. The controller lets the callback stop iteration, skip to a key, or
     * delete the current row. Any PersistencePromises returned by `fn` are
     * collected and awaited after iteration completes.
     */
    iterateCursor(cursorRequest, fn) {
        const results = [];
        return new PersistencePromise((resolve, reject) => {
            cursorRequest.onerror = (event) => {
                reject(event.target.error);
            };
            cursorRequest.onsuccess = (event) => {
                const cursor = event.target.result;
                if (!cursor) {
                    resolve();
                    return;
                }
                const controller = new IterationController(cursor);
                const userResult = fn(cursor.primaryKey, cursor.value, controller);
                if (userResult instanceof PersistencePromise) {
                    // Stop iterating if the user promise rejects, and propagate the
                    // rejection via the collected results.
                    const userPromise = userResult.catch(err => {
                        controller.done();
                        return PersistencePromise.reject(err);
                    });
                    results.push(userPromise);
                }
                if (controller.isDone) {
                    resolve();
                }
                else if (controller.skipToKey === null) {
                    cursor.continue();
                }
                else {
                    cursor.continue(controller.skipToKey);
                }
            };
        }).next(() => PersistencePromise.waitFor(results));
    }
    /**
     * Normalizes the optional (indexOrRange, range) argument pair: a string
     * first argument is an index name; otherwise it is treated as the range.
     */
    options(indexOrRange, range) {
        let indexName = undefined;
        if (indexOrRange !== undefined) {
            if (typeof indexOrRange === 'string') {
                indexName = indexOrRange;
            }
            else {
                range = indexOrRange;
            }
        }
        return { index: indexName, range };
    }
    /**
     * Opens an IDBCursor request per `options` (index vs. store, key-only vs.
     * full values, forward vs. reverse).
     */
    cursor(options) {
        let direction = 'next';
        if (options.reverse) {
            direction = 'prev';
        }
        if (options.index) {
            const index = this.store.index(options.index);
            if (options.keysOnly) {
                return index.openKeyCursor(options.range, direction);
            }
            else {
                return index.openCursor(options.range, direction);
            }
        }
        else {
            return this.store.openCursor(options.range, direction);
        }
    }
}
/**
 * Wraps an IDBRequest in a PersistencePromise, using the onsuccess / onerror
 * handlers to resolve / reject the PersistencePromise as appropriate.
 */
function wrapRequest(request) {
    return new PersistencePromise((resolve, reject) => {
        // Failures are first run through the iOS bug detector, which may wrap
        // the raw error in a more descriptive one.
        request.onerror = (event) => {
            reject(checkForAndReportiOSError(event.target.error));
        };
        request.onsuccess = (event) => {
            resolve(event.target.result);
        };
    });
}
// Guard so we only report the error once.
let reportedIOSError = false;
/**
 * Detects the iOS 12.2–12.x IndexedDB server bug and, when it occurs, wraps
 * the raw error in a more descriptive FirestoreError. The wrapped error is
 * additionally rethrown once, asynchronously, so users can observe it
 * globally. Returns the (possibly wrapped) error.
 */
function checkForAndReportiOSError(error) {
    const iOSVersion = SimpleDb.getIOSVersion(getUA());
    // The bug only affects iOS >= 12.2 and < 13; anything else passes through.
    if (!(iOSVersion >= 12.2 && iOSVersion < 13)) {
        return error;
    }
    const IOS_ERROR = 'An internal error was encountered in the Indexed Database server';
    if (error.message.indexOf(IOS_ERROR) < 0) {
        return error;
    }
    // Wrap error in a more descriptive one.
    const newError = new FirestoreError('internal', `IOS_INDEXEDDB_BUG1: IndexedDb has thrown '${IOS_ERROR}'. This is likely ` +
        `due to an unavoidable bug in iOS. See https://stackoverflow.com/q/56496296/110915 ` +
        `for details and a potential workaround.`);
    if (!reportedIOSError) {
        reportedIOSError = true;
        // Throw a global exception outside of this promise chain, for the user to
        // potentially catch.
        setTimeout(() => {
            throw newError;
        }, 0);
    }
    return newError;
}
const LOG_TAG$h = 'IndexBackfiller';
/** How long we wait to try running index backfill after SDK initialization (15 seconds). */
const INITIAL_BACKFILL_DELAY_MS = 15 * 1000;
/** Minimum amount of time between backfill checks, after the first one (60 seconds). */
const REGULAR_BACKFILL_DELAY_MS = 60 * 1000;
/** The maximum number of documents to process each time backfill() is called. */
const MAX_DOCUMENTS_TO_PROCESS = 50;
/** This class is responsible for the scheduling of Index Backfiller. */
class IndexBackfillerScheduler {
    constructor(asyncQueue, backfiller) {
        this.asyncQueue = asyncQueue;
        this.backfiller = backfiller;
        this.task = null;
    }
    /** Whether a backfill task is currently scheduled. */
    get started() {
        return this.task !== null;
    }
    /** Schedules the first backfill run after the initial delay. */
    start() {
        this.schedule(INITIAL_BACKFILL_DELAY_MS);
    }
    /** Cancels any pending backfill task. */
    stop() {
        if (this.task === null) {
            return;
        }
        this.task.cancel();
        this.task = null;
    }
    /**
     * Enqueues a backfill run after `delayMs`. When it fires, the run
     * backfills up to MAX_DOCUMENTS_TO_PROCESS documents and then reschedules
     * itself at the regular cadence.
     */
    schedule(delayMs) {
        logDebug(LOG_TAG$h, `Scheduled in ${delayMs}ms`);
        this.task = this.asyncQueue.enqueueAfterDelay("index_backfill" /* TimerId.IndexBackfill */, delayMs, async () => {
            this.task = null;
            try {
                const documentsProcessed = await this.backfiller.backfill();
                logDebug(LOG_TAG$h, `Documents written: ${documentsProcessed}`);
            }
            catch (e) {
                // IndexedDB failures here are transient; anything else is only
                // tolerated if it is a primary-lease loss.
                if (isIndexedDbTransactionError(e)) {
                    logDebug(LOG_TAG$h, 'Ignoring IndexedDB error during index backfill: ', e);
                }
                else {
                    await ignoreIfPrimaryLeaseLoss(e);
                }
            }
            await this.schedule(REGULAR_BACKFILL_DELAY_MS);
        });
    }
}
/** Implements the steps for backfilling indexes. */
class IndexBackfiller {
    constructor(
    /**
     * LocalStore provides access to IndexManager and LocalDocumentView.
     * These properties will update when the user changes. Consequently,
     * making a local copy of IndexManager and LocalDocumentView will require
     * updates over time. The simpler solution is to rely on LocalStore to have
     * an up-to-date references to IndexManager and LocalDocumentStore.
     */
    localStore, persistence) {
        this.localStore = localStore;
        this.persistence = persistence;
    }
    /**
     * Runs a single backfill pass inside a primary-holder write transaction.
     * Returns the number of documents processed.
     */
    async backfill(maxDocumentsToProcess = MAX_DOCUMENTS_TO_PROCESS) {
        return this.persistence.runTransaction('Backfill Indexes', 'readwrite-primary', txn => this.writeIndexEntries(txn, maxDocumentsToProcess));
    }
    /** Writes index entries until the cap is reached. Returns the number of documents processed. */
    writeIndexEntries(transation, maxDocumentsToProcess) {
        // Tracks groups handled this pass so we stop once every group has been
        // visited (getNextCollectionGroupToUpdate would otherwise cycle).
        const processedCollectionGroups = new Set();
        let documentsRemaining = maxDocumentsToProcess;
        let continueLoop = true;
        return PersistencePromise.doWhile(() => continueLoop === true && documentsRemaining > 0, () => {
            return this.localStore.indexManager
                .getNextCollectionGroupToUpdate(transation)
                .next((collectionGroup) => {
                if (collectionGroup === null ||
                    processedCollectionGroups.has(collectionGroup)) {
                    continueLoop = false;
                }
                else {
                    logDebug(LOG_TAG$h, `Processing collection: ${collectionGroup}`);
                    return this.writeEntriesForCollectionGroup(transation, collectionGroup, documentsRemaining).next(documentsProcessed => {
                        documentsRemaining -= documentsProcessed;
                        processedCollectionGroups.add(collectionGroup);
                    });
                }
            });
        }).next(() => maxDocumentsToProcess - documentsRemaining);
    }
    /**
     * Writes entries for the provided collection group. Returns the number of documents processed.
     */
    writeEntriesForCollectionGroup(transaction, collectionGroup, documentsRemainingUnderCap) {
        // Use the earliest offset of all field indexes to query the local cache.
        return this.localStore.indexManager
            .getMinOffsetFromCollectionGroup(transaction, collectionGroup)
            .next(existingOffset => this.localStore.localDocuments
            .getNextDocuments(transaction, collectionGroup, existingOffset, documentsRemainingUnderCap)
            .next(nextBatch => {
            const docs = nextBatch.changes;
            return this.localStore.indexManager
                .updateIndexEntries(transaction, docs)
                .next(() => this.getNewOffset(existingOffset, nextBatch))
                .next(newOffset => {
                logDebug(LOG_TAG$h, `Updating offset: ${newOffset}`);
                return this.localStore.indexManager.updateCollectionGroup(transaction, collectionGroup, newOffset);
            })
                .next(() => docs.size);
        }));
    }
    /** Returns the next offset based on the provided documents. */
    getNewOffset(existingOffset, lookupResult) {
        // Advance to the largest per-document offset seen in this batch, and
        // never let the batch id move backwards relative to the existing offset.
        let maxOffset = existingOffset;
        lookupResult.changes.forEach((key, document) => {
            const newOffset = newIndexOffsetFromDocument(document);
            if (indexOffsetComparator(newOffset, maxOffset) > 0) {
                maxOffset = newOffset;
            }
        });
        return new IndexOffset(maxOffset.readTime, maxOffset.documentKey, Math.max(lookupResult.batchId, existingOffset.largestBatchId));
    }
}
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * `ListenSequence` is a monotonic sequence. It is initialized with a minimum value to
 * exceed. All subsequent calls to next will return increasing values. If provided with a
 * `SequenceNumberSyncer`, it will additionally bump its next value when told of a new value, as
 * well as write out sequence numbers that it produces via `next()`.
 */
class ListenSequence {
    constructor(previousValue, sequenceNumberSyncer) {
        this.previousValue = previousValue;
        if (sequenceNumberSyncer) {
            // Observe externally produced sequence numbers so we never hand out
            // a value at or below one already seen elsewhere.
            sequenceNumberSyncer.sequenceNumberHandler = sequenceNumber => this.setPreviousValue(sequenceNumber);
            // Publish every value we generate back to the syncer.
            this.writeNewSequenceNumber = sequenceNumber => sequenceNumberSyncer.writeSequenceNumber(sequenceNumber);
        }
    }
    /** Bumps the floor to `externalPreviousValue` if it is larger; returns the floor. */
    setPreviousValue(externalPreviousValue) {
        this.previousValue = Math.max(externalPreviousValue, this.previousValue);
        return this.previousValue;
    }
    /** Produces the next (strictly increasing) sequence number. */
    next() {
        this.previousValue += 1;
        const generated = this.previousValue;
        if (this.writeNewSequenceNumber) {
            this.writeNewSequenceNumber(generated);
        }
        return generated;
    }
}
ListenSequence.INVALID = -1;
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Escape character that introduces every encoded control sequence.
const escapeChar = '\u0001';
// NOTE: the encoded separator deliberately reuses the escape character's code
// point, so a doubled \u0001 marks a segment boundary.
const encodedSeparatorChar = '\u0001';
// Escape-sequence payload standing in for a literal NUL character.
const encodedNul = '\u0010';
// Escape-sequence payload standing in for a literal escape character.
const encodedEscape = '\u0011';
/**
 * Encodes a resource path into a IndexedDb-compatible string form.
 */
function encodeResourcePath(path) {
    let encoded = '';
    for (let i = 0; i < path.length; i++) {
        // Join segments with the encoded separator; the first segment has no
        // preceding separator.
        if (encoded.length > 0) {
            encoded = encodeSeparator(encoded);
        }
        encoded = encodeSegment(path.get(i), encoded);
    }
    // Every encoded path (including the empty one) ends with a separator.
    return encodeSeparator(encoded);
}
/** Encodes a single segment of a resource path into the given result */
function encodeSegment(segment, resultBuf) {
let result = resultBuf;
const length = segment.length;
for (let i = 0; i < length; i++) {
const c = segment.charAt(i);
switch (c) {
case '\0':
result += escapeChar + encodedNul;
break;
case escapeChar:
result += escapeChar + encodedEscape;
break;
default:
result += c;
}
}
return result;
}
/** Encodes a path separator into the given result */
function encodeSeparator(result) {
    // A separator is the escape character followed by the encoded separator.
    const separator = escapeChar + encodedSeparatorChar;
    return result + separator;
}
/**
 * Decodes the given IndexedDb-compatible string form of a resource path into
 * a ResourcePath instance. Note that this method is not suitable for use with
 * decoding resource names from the server; those are One Platform format
 * strings.
 */
function decodeResourcePath(path) {
    // Even the empty path must encode as a path of at least length 2. A path
    // with exactly 2 must be the empty path.
    const length = path.length;
    hardAssert(length >= 2);
    if (length === 2) {
        hardAssert(path.charAt(0) === escapeChar && path.charAt(1) === encodedSeparatorChar);
        return ResourcePath.emptyPath();
    }
    // Escape characters cannot exist past the second-to-last position in the
    // source value.
    const lastReasonableEscapeIndex = length - 2;
    const segments = [];
    // Accumulates segment text across escape sequences; empty while scanning a
    // segment that contains no escapes.
    let segmentBuilder = '';
    for (let start = 0; start < length;) {
        // The last two characters of a valid encoded path must be a separator, so
        // there must be an end to this segment.
        const end = path.indexOf(escapeChar, start);
        if (end < 0 || end > lastReasonableEscapeIndex) {
            fail();
        }
        // The character after the escape determines the sequence's meaning.
        const next = path.charAt(end + 1);
        switch (next) {
            case encodedSeparatorChar:
                // End of segment: flush whatever was accumulated.
                const currentPiece = path.substring(start, end);
                let segment;
                if (segmentBuilder.length === 0) {
                    // Avoid copying for the common case of a segment that excludes \0
                    // and \001
                    segment = currentPiece;
                }
                else {
                    segmentBuilder += currentPiece;
                    segment = segmentBuilder;
                    segmentBuilder = '';
                }
                segments.push(segment);
                break;
            case encodedNul:
                // Escaped NUL: restore the literal \0.
                segmentBuilder += path.substring(start, end);
                segmentBuilder += '\0';
                break;
            case encodedEscape:
                // The escape character can be used in the output to encode itself.
                segmentBuilder += path.substring(start, end + 1);
                break;
            default:
                fail();
        }
        // Resume scanning after the two-character escape sequence.
        start = end + 2;
    }
    return new ResourcePath(segments);
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// NOTE(review): presumably the legacy (pre-v14) remote-document store name;
// 'remoteDocumentsV14' is declared separately as DbRemoteDocumentStore.
const DbRemoteDocumentStore$1 = 'remoteDocuments';
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Name of the IndexedDb object store.
 *
 * Note that the name 'owner' is chosen to ensure backwards compatibility with
 * older clients that only supported single locked access to the persistence
 * layer.
 */
const DbPrimaryClientStore = 'owner';
/**
 * The key string used for the single object that exists in the
 * DbPrimaryClient store.
 */
const DbPrimaryClientKey = 'owner';
/** Name of the IndexedDb object store. */
const DbMutationQueueStore = 'mutationQueues';
/** Keys are automatically assigned via the userId property. */
const DbMutationQueueKeyPath = 'userId';
/** Name of the IndexedDb object store. */
const DbMutationBatchStore = 'mutations';
/** Keys are automatically assigned via the userId, batchId properties. */
const DbMutationBatchKeyPath = 'batchId';
/** The index name for lookup of mutations by user. */
const DbMutationBatchUserMutationsIndex = 'userMutationsIndex';
/** The user mutations index is keyed by [userId, batchId] pairs. */
const DbMutationBatchUserMutationsKeyPath = ['userId', 'batchId'];
/**
 * Creates a [userId] key for use in the DbDocumentMutations index to iterate
 * over all of a user's document mutations.
 */
function newDbDocumentMutationPrefixForUser(userId) {
    const prefix = [userId];
    return prefix;
}
/**
 * Creates a [userId, encodedPath] key for use in the DbDocumentMutations
 * index to iterate over all at document mutations for a given path or lower.
 */
function newDbDocumentMutationPrefixForPath(userId, path) {
    const encodedPath = encodeResourcePath(path);
    return [userId, encodedPath];
}
/**
 * Creates a full index key of [userId, encodedPath, batchId] for inserting
 * and deleting into the DbDocumentMutations index.
 */
function newDbDocumentMutationKey(userId, path, batchId) {
    const encodedPath = encodeResourcePath(path);
    return [userId, encodedPath, batchId];
}
/**
 * Because we store all the useful information for this store in the key,
 * there is no useful information to store as the value. The raw (unencoded)
 * path cannot be stored because IndexedDb doesn't store prototype
 * information.
 */
const DbDocumentMutationPlaceholder = {};
/** Name of the IndexedDb object store for per-document mutation membership. */
const DbDocumentMutationStore = 'documentMutations';
/** Name of the IndexedDb object store holding cached remote documents. */
const DbRemoteDocumentStore = 'remoteDocumentsV14';
/**
 * The primary key of the remote documents store, which allows for efficient
 * access by collection path and read time.
 */
const DbRemoteDocumentKeyPath = [
    'prefixPath',
    'collectionGroup',
    'readTime',
    'documentId'
];
/** An index that provides access to documents by key. */
const DbRemoteDocumentDocumentKeyIndex = 'documentKeyIndex';
const DbRemoteDocumentDocumentKeyIndexPath = [
    'prefixPath',
    'collectionGroup',
    'documentId'
];
/**
 * An index that provides access to documents by collection group and read
 * time.
 *
 * This index is used by the index backfiller.
 */
const DbRemoteDocumentCollectionGroupIndex = 'collectionGroupIndex';
const DbRemoteDocumentCollectionGroupIndexPath = [
    'collectionGroup',
    'readTime',
    'prefixPath',
    'documentId'
];
/** Store and key for the singleton row of global remote-document metadata. */
const DbRemoteDocumentGlobalStore = 'remoteDocumentGlobal';
const DbRemoteDocumentGlobalKey = 'remoteDocumentGlobalKey';
/** Name of the IndexedDb object store for query targets. */
const DbTargetStore = 'targets';
/** Keys are automatically assigned via the targetId property. */
const DbTargetKeyPath = 'targetId';
/** The name of the queryTargets index. */
const DbTargetQueryTargetsIndexName = 'queryTargetsIndex';
/**
 * The index of all canonicalIds to the targets that they match. This is not
 * a unique mapping because canonicalId does not promise a unique name for all
 * possible queries, so we append the targetId to make the mapping unique.
 */
const DbTargetQueryTargetsKeyPath = ['canonicalId', 'targetId'];
/** Name of the IndexedDb object store. */
const DbTargetDocumentStore = 'targetDocuments';
/** Keys are automatically assigned via the targetId, path properties. */
const DbTargetDocumentKeyPath = ['targetId', 'path'];
/** The index name for the reverse index. */
const DbTargetDocumentDocumentTargetsIndex = 'documentTargetsIndex';
/** We also need to create the reverse index for these properties. */
const DbTargetDocumentDocumentTargetsKeyPath = ['path', 'targetId'];
/**
 * The key string used for the single object that exists in the
 * DbTargetGlobal store.
 */
const DbTargetGlobalKey = 'targetGlobalKey';
const DbTargetGlobalStore = 'targetGlobal';
/** Name of the IndexedDb object store. */
const DbCollectionParentStore = 'collectionParents';
/** Keys are automatically assigned via the collectionId, parent properties. */
const DbCollectionParentKeyPath = ['collectionId', 'parent'];
/** Name of the IndexedDb object store. */
const DbClientMetadataStore = 'clientMetadata';
/** Keys are automatically assigned via the clientId properties. */
const DbClientMetadataKeyPath = 'clientId';
/** Name of the IndexedDb object store. */
const DbBundleStore = 'bundles';
/** Keys are assigned via the bundleId property. */
const DbBundleKeyPath = 'bundleId';
/** Name of the IndexedDb object store. */
const DbNamedQueryStore = 'namedQueries';
/** Keys are assigned via the name property. */
const DbNamedQueryKeyPath = 'name';
/** Name of the IndexedDb object store. */
const DbIndexConfigurationStore = 'indexConfiguration';
/** Keys are assigned via the indexId property. */
const DbIndexConfigurationKeyPath = 'indexId';
/**
 * An index that provides access to the index configurations by collection
 * group.
 *
 * PORTING NOTE: iOS and Android maintain this index in-memory, but this is
 * not possible here as the Web client supports concurrent access to
 * persistence via multi-tab.
 */
const DbIndexConfigurationCollectionGroupIndex = 'collectionGroupIndex';
const DbIndexConfigurationCollectionGroupIndexPath = 'collectionGroup';
/** Name of the IndexedDb object store. */
const DbIndexStateStore = 'indexState';
/** Compound key: index id plus the (possibly anonymous) user id. */
const DbIndexStateKeyPath = ['indexId', 'uid'];
/**
 * An index that provides access to documents in a collection sorted by last
 * update time. Used by the backfiller.
 *
 * PORTING NOTE: iOS and Android maintain this index in-memory, but this is
 * not possible here as the Web client supports concurrent access to
 * persistence via multi-tab.
 */
const DbIndexStateSequenceNumberIndex = 'sequenceNumberIndex';
const DbIndexStateSequenceNumberIndexPath = ['uid', 'sequenceNumber'];
/** Name of the IndexedDb object store. */
const DbIndexEntryStore = 'indexEntries';
/** Primary key of the index-entries store. */
const DbIndexEntryKeyPath = [
    'indexId',
    'uid',
    'arrayValue',
    'directionalValue',
    'orderedDocumentKey',
    'documentKey'
];
const DbIndexEntryDocumentKeyIndex = 'documentKeyIndex';
const DbIndexEntryDocumentKeyIndexPath = [
    'indexId',
    'uid',
    'orderedDocumentKey'
];
/** Name of the IndexedDb object store. */
const DbDocumentOverlayStore = 'documentOverlays';
/** Compound key: user, collection path and document id. */
const DbDocumentOverlayKeyPath = [
    'userId',
    'collectionPath',
    'documentId'
];
const DbDocumentOverlayCollectionPathOverlayIndex = 'collectionPathOverlayIndex';
const DbDocumentOverlayCollectionPathOverlayIndexPath = [
'userId',
'collectionPath',
'largestBatchId'
];
const DbDocumentOverlayCollectionGroupOverlayIndex = 'collectionGroupOverlayIndex';
const DbDocumentOverlayCollectionGroupOverlayIndexPath = [
'userId',
'collectionGroup',
'largestBatchId'
];
// Visible for testing
// Object stores present in the original (v1) IndexedDb schema.
const V1_STORES = [
    DbMutationQueueStore,
    DbMutationBatchStore,
    DbDocumentMutationStore,
    DbRemoteDocumentStore$1,
    DbTargetStore,
    DbPrimaryClientStore,
    DbTargetGlobalStore,
    DbTargetDocumentStore
];
// Visible for testing
// v2 and v3 changed data formats only; no stores were added or removed.
const V3_STORES = V1_STORES;
// Note: DbRemoteDocumentChanges is no longer used and dropped with v9.
// Each subsequent list extends the previous schema version's stores.
const V4_STORES = [...V3_STORES, DbClientMetadataStore];
const V6_STORES = [...V4_STORES, DbRemoteDocumentGlobalStore];
const V8_STORES = [...V6_STORES, DbCollectionParentStore];
const V11_STORES = [...V8_STORES, DbBundleStore, DbNamedQueryStore];
const V12_STORES = [...V11_STORES, DbDocumentOverlayStore];
// v13 replaced the legacy remote document store (DbRemoteDocumentStore$1)
// with the new DbRemoteDocumentStore, so the list is spelled out in full.
const V13_STORES = [
    DbMutationQueueStore,
    DbMutationBatchStore,
    DbDocumentMutationStore,
    DbRemoteDocumentStore,
    DbTargetStore,
    DbPrimaryClientStore,
    DbTargetGlobalStore,
    DbTargetDocumentStore,
    DbClientMetadataStore,
    DbRemoteDocumentGlobalStore,
    DbCollectionParentStore,
    DbBundleStore,
    DbNamedQueryStore,
    DbDocumentOverlayStore
];
// v14 changed data formats only; no stores were added or removed.
const V14_STORES = V13_STORES;
// v15 added the client-side indexing stores.
const V15_STORES = [
    ...V14_STORES,
    DbIndexConfigurationStore,
    DbIndexStateStore,
    DbIndexEntryStore
];
/**
 * Returns the list of object stores that make up the provided schema
 * version. Only versions 11 through 15 are supported; anything else is a
 * hard programming error.
 */
function getObjectStores(schemaVersion) {
    switch (schemaVersion) {
        case 15:
            return V15_STORES;
        case 14:
            return V14_STORES;
        case 13:
            return V13_STORES;
        case 12:
            return V12_STORES;
        case 11:
            return V11_STORES;
        default:
            fail();
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class IndexedDbTransaction extends PersistenceTransaction {
    /**
     * @param simpleDbTransaction - The wrapped SimpleDb transaction used to
     *     access object stores (see getStore below).
     * @param currentSequenceNumber - The sequence number associated with this
     *     transaction; exposed to callers via the public field.
     */
    constructor(simpleDbTransaction, currentSequenceNumber) {
        super();
        this.simpleDbTransaction = simpleDbTransaction;
        this.currentSequenceNumber = currentSequenceNumber;
    }
}
/**
 * Returns the object store named `store` from the given persistence
 * transaction. `txn` is expected to be an IndexedDbTransaction (checked via
 * debugCast); its underlying SimpleDb transaction is used for the lookup.
 */
function getStore(txn, store) {
    const indexedDbTransaction = debugCast(txn);
    return SimpleDb.getStore(indexedDbTransaction.simpleDbTransaction, store);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Returns the number of own enumerable (string-keyed) properties of `obj`.
 *
 * Uses `Object.keys` instead of a manual for-in/hasOwnProperty counting
 * loop; the semantics are identical (own enumerable string keys only).
 */
function objectSize(obj) {
    return Object.keys(obj).length;
}
/** Invokes `fn(key, value)` for each own enumerable property of `obj`. */
function forEach(obj, fn) {
    for (const key of Object.keys(obj)) {
        fn(key, obj[key]);
    }
}
/**
 * Maps each own enumerable property of `obj` through `fn(value, key, obj)`
 * and returns the results as an array.
 */
function mapToArray(obj, fn) {
    return Object.keys(obj).map(key => fn(obj[key], key, obj));
}
/** Returns true if `obj` has no own enumerable properties. */
function isEmpty(obj) {
    return Object.keys(obj).length === 0;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * An immutable map whose entries are ordered by the supplied comparator,
 * backed by a left-leaning red-black tree. All "mutating" operations return
 * a new map and leave the receiver untouched.
 */
class SortedMap {
    constructor(comparator, root) {
        this.comparator = comparator;
        this.root = root ? root : LLRBNode.EMPTY;
    }
    /** Returns a copy of this map with `key` set to `value`. */
    insert(key, value) {
        const newRoot = this.root
            .insert(key, value, this.comparator)
            .copy(null, null, LLRBNode.BLACK, null, null);
        return new SortedMap(this.comparator, newRoot);
    }
    /** Returns a copy of this map with `key` removed. */
    remove(key) {
        const newRoot = this.root
            .remove(key, this.comparator)
            .copy(null, null, LLRBNode.BLACK, null, null);
        return new SortedMap(this.comparator, newRoot);
    }
    /** Returns the value stored under `key`, or null when absent. */
    get(key) {
        let current = this.root;
        while (!current.isEmpty()) {
            const order = this.comparator(key, current.key);
            if (order === 0) {
                return current.value;
            }
            if (order < 0) {
                current = current.left;
            }
            else if (order > 0) {
                current = current.right;
            }
        }
        return null;
    }
    /** Returns the zero-based rank of `key`, or -1 when it is not present. */
    indexOf(key) {
        // Nodes known to precede the subtree we are currently descending.
        let precedingCount = 0;
        let current = this.root;
        while (!current.isEmpty()) {
            const order = this.comparator(key, current.key);
            if (order === 0) {
                return precedingCount + current.left.size;
            }
            if (order < 0) {
                current = current.left;
            }
            else {
                // Skip the left subtree and the current node itself.
                precedingCount += current.left.size + 1;
                current = current.right;
            }
        }
        // Key not found.
        return -1;
    }
    isEmpty() {
        return this.root.isEmpty();
    }
    /** Total number of entries in the map. */
    get size() {
        return this.root.size;
    }
    /** Smallest key, or null when the map is empty. */
    minKey() {
        return this.root.minKey();
    }
    /** Largest key, or null when the map is empty. */
    maxKey() {
        return this.root.maxKey();
    }
    /**
     * Visits entries in ascending key order. Traversal stops as soon as
     * `action` returns a truthy value, which is then returned; otherwise the
     * last falsey value from `action` is returned.
     */
    inorderTraversal(action) {
        return this.root.inorderTraversal(action);
    }
    /** Invokes `fn(key, value)` for every entry in ascending key order. */
    forEach(fn) {
        this.inorderTraversal((key, value) => {
            fn(key, value);
            return false;
        });
    }
    toString() {
        const parts = [];
        this.inorderTraversal((key, value) => {
            parts.push(`${key}:${value}`);
            return false;
        });
        return `{${parts.join(', ')}}`;
    }
    /**
     * Visits entries in descending key order. Traversal stops as soon as
     * `action` returns a truthy value, which is then returned; otherwise the
     * last falsey value from `action` is returned.
     */
    reverseTraversal(action) {
        return this.root.reverseTraversal(action);
    }
    /** Returns an ascending iterator over all entries. */
    getIterator() {
        return new SortedMapIterator(this.root, null, this.comparator, false);
    }
    /** Returns an ascending iterator starting at `key`. */
    getIteratorFrom(key) {
        return new SortedMapIterator(this.root, key, this.comparator, false);
    }
    /** Returns a descending iterator over all entries. */
    getReverseIterator() {
        return new SortedMapIterator(this.root, null, this.comparator, true);
    }
    /** Returns a descending iterator starting at `key`. */
    getReverseIteratorFrom(key) {
        return new SortedMapIterator(this.root, key, this.comparator, true);
    }
} // end SortedMap
// An iterator over an LLRBNode.
class SortedMapIterator {
    /**
     * @param node - Root of the tree to iterate over.
     * @param startKey - Optional key at which iteration begins; entries that
     *     come before it (in iteration order) are skipped.
     * @param comparator - Key comparator for the tree.
     * @param isReverse - When true, iterates in descending key order.
     */
    constructor(node, startKey, comparator, isReverse) {
        this.isReverse = isReverse;
        // Stack of nodes whose entry (and remaining subtree) is still pending;
        // the next entry to emit is always on top.
        this.nodeStack = [];
        let cmp = 1;
        while (!node.isEmpty()) {
            cmp = startKey ? comparator(node.key, startKey) : 1;
            // flip the comparison if we're going in reverse
            if (startKey && isReverse) {
                cmp *= -1;
            }
            if (cmp < 0) {
                // This node is less than our start key. ignore it
                if (this.isReverse) {
                    node = node.left;
                }
                else {
                    node = node.right;
                }
            }
            else if (cmp === 0) {
                // This node is exactly equal to our start key. Push it on the stack,
                // but stop iterating;
                this.nodeStack.push(node);
                break;
            }
            else {
                // This node is greater than our start key, add it to the stack and move
                // to the next one
                this.nodeStack.push(node);
                if (this.isReverse) {
                    node = node.right;
                }
                else {
                    node = node.left;
                }
            }
        }
    }
    /**
     * Pops and returns the next entry, then pushes the not-yet-visited part
     * of its subtree onto the stack. Assumes hasNext() is true.
     */
    getNext() {
        let node = this.nodeStack.pop();
        const result = { key: node.key, value: node.value };
        if (this.isReverse) {
            node = node.left;
            while (!node.isEmpty()) {
                this.nodeStack.push(node);
                node = node.right;
            }
        }
        else {
            node = node.right;
            while (!node.isEmpty()) {
                this.nodeStack.push(node);
                node = node.left;
            }
        }
        return result;
    }
    hasNext() {
        return this.nodeStack.length > 0;
    }
    /** Returns the next entry without advancing, or null when exhausted. */
    peek() {
        if (this.nodeStack.length === 0) {
            return null;
        }
        const node = this.nodeStack[this.nodeStack.length - 1];
        return { key: node.key, value: node.value };
    }
} // end SortedMapIterator
// Represents a node in a Left-leaning Red-Black tree.
class LLRBNode {
    /**
     * @param key - The node's key.
     * @param value - The node's value.
     * @param color - LLRBNode.RED or LLRBNode.BLACK; defaults to RED since
     *     newly inserted nodes are always red.
     * @param left - Left subtree (defaults to the shared empty node).
     * @param right - Right subtree (defaults to the shared empty node).
     */
    constructor(key, value, color, left, right) {
        this.key = key;
        this.value = value;
        this.color = color != null ? color : LLRBNode.RED;
        this.left = left != null ? left : LLRBNode.EMPTY;
        this.right = right != null ? right : LLRBNode.EMPTY;
        // Cached subtree size; stays valid because nodes are immutable.
        this.size = this.left.size + 1 + this.right.size;
    }
    // Returns a copy of the current node, optionally replacing pieces of it.
    // Passing null for an argument keeps the corresponding existing part.
    copy(key, value, color, left, right) {
        return new LLRBNode(key != null ? key : this.key, value != null ? value : this.value, color != null ? color : this.color, left != null ? left : this.left, right != null ? right : this.right);
    }
    isEmpty() {
        return false;
    }
    // Traverses the tree in key order and calls the specified action function
    // for each node. If action returns true, traversal is aborted.
    // Returns the first truthy value returned by action, or the last falsey
    // value returned by action.
    inorderTraversal(action) {
        return (this.left.inorderTraversal(action) ||
            action(this.key, this.value) ||
            this.right.inorderTraversal(action));
    }
    // Traverses the tree in reverse key order and calls the specified action
    // function for each node. If action returns true, traversal is aborted.
    // Returns the first truthy value returned by action, or the last falsey
    // value returned by action.
    reverseTraversal(action) {
        return (this.right.reverseTraversal(action) ||
            action(this.key, this.value) ||
            this.left.reverseTraversal(action));
    }
    // Returns the minimum node in the tree.
    min() {
        if (this.left.isEmpty()) {
            return this;
        }
        else {
            return this.left.min();
        }
    }
    // Returns the minimum key in the tree.
    minKey() {
        return this.min().key;
    }
    // Returns the maximum key in the tree.
    maxKey() {
        if (this.right.isEmpty()) {
            return this.key;
        }
        else {
            return this.right.maxKey();
        }
    }
    // Returns new tree, with the key/value added (or the value replaced when
    // the key already exists). Rebalances on the way back up via fixUp().
    insert(key, value, comparator) {
        let n = this;
        const cmp = comparator(key, n.key);
        if (cmp < 0) {
            n = n.copy(null, null, null, n.left.insert(key, value, comparator), null);
        }
        else if (cmp === 0) {
            n = n.copy(null, value, null, null, null);
        }
        else {
            n = n.copy(null, null, null, null, n.right.insert(key, value, comparator));
        }
        return n.fixUp();
    }
    // Returns a new tree with this tree's minimum node removed.
    removeMin() {
        if (this.left.isEmpty()) {
            return LLRBNode.EMPTY;
        }
        let n = this;
        // Ensure there is a red node to descend into on the left spine.
        if (!n.left.isRed() && !n.left.left.isRed()) {
            n = n.moveRedLeft();
        }
        n = n.copy(null, null, null, n.left.removeMin(), null);
        return n.fixUp();
    }
    // Returns new tree, with the specified item removed.
    remove(key, comparator) {
        let smallest;
        let n = this;
        if (comparator(key, n.key) < 0) {
            if (!n.left.isEmpty() && !n.left.isRed() && !n.left.left.isRed()) {
                n = n.moveRedLeft();
            }
            n = n.copy(null, null, null, n.left.remove(key, comparator), null);
        }
        else {
            if (n.left.isRed()) {
                n = n.rotateRight();
            }
            if (!n.right.isEmpty() && !n.right.isRed() && !n.right.left.isRed()) {
                n = n.moveRedRight();
            }
            if (comparator(key, n.key) === 0) {
                if (n.right.isEmpty()) {
                    return LLRBNode.EMPTY;
                }
                else {
                    // Replace this node with its in-order successor, then
                    // delete that successor from the right subtree.
                    smallest = n.right.min();
                    n = n.copy(smallest.key, smallest.value, null, null, n.right.removeMin());
                }
            }
            n = n.copy(null, null, null, null, n.right.remove(key, comparator));
        }
        return n.fixUp();
    }
    // The color field doubles as the red flag (LLRBNode.RED === true).
    isRed() {
        return this.color;
    }
    // Returns new tree after performing any needed rotations.
    fixUp() {
        let n = this;
        // Right-leaning red link: rotate left.
        if (n.right.isRed() && !n.left.isRed()) {
            n = n.rotateLeft();
        }
        // Two consecutive red links on the left: rotate right.
        if (n.left.isRed() && n.left.left.isRed()) {
            n = n.rotateRight();
        }
        // Both children red: split the temporary 4-node.
        if (n.left.isRed() && n.right.isRed()) {
            n = n.colorFlip();
        }
        return n;
    }
    // Deletion helper: ensures the left subtree has a red node to descend
    // into, borrowing from the right sibling if necessary.
    moveRedLeft() {
        let n = this.colorFlip();
        if (n.right.left.isRed()) {
            n = n.copy(null, null, null, null, n.right.rotateRight());
            n = n.rotateLeft();
            n = n.colorFlip();
        }
        return n;
    }
    // Deletion helper: ensures the right subtree has a red node to descend
    // into, borrowing from the left sibling if necessary.
    moveRedRight() {
        let n = this.colorFlip();
        if (n.left.left.isRed()) {
            n = n.rotateRight();
            n = n.colorFlip();
        }
        return n;
    }
    // Rotates the right child into this node's place, preserving this node's
    // color and marking the demoted node red.
    rotateLeft() {
        const nl = this.copy(null, null, LLRBNode.RED, null, this.right.left);
        return this.right.copy(null, null, this.color, nl, null);
    }
    // Mirror image of rotateLeft().
    rotateRight() {
        const nr = this.copy(null, null, LLRBNode.RED, this.left.right, null);
        return this.left.copy(null, null, this.color, null, nr);
    }
    // Inverts the color of this node and of both children.
    colorFlip() {
        const left = this.left.copy(null, null, !this.left.color, null, null);
        const right = this.right.copy(null, null, !this.right.color, null, null);
        return this.copy(null, null, !this.color, left, right);
    }
    // For testing.
    checkMaxDepth() {
        const blackDepth = this.check();
        if (Math.pow(2.0, blackDepth) <= this.size + 1) {
            return true;
        }
        else {
            return false;
        }
    }
    // In a balanced RB tree, the black-depth (number of black nodes) from root to
    // leaves is equal on both sides. This function verifies that or asserts.
    check() {
        if (this.isRed() && this.left.isRed()) {
            throw fail();
        }
        if (this.right.isRed()) {
            throw fail();
        }
        const blackDepth = this.left.check();
        if (blackDepth !== this.right.check()) {
            throw fail();
        }
        else {
            return blackDepth + (this.isRed() ? 0 : 1);
        }
    }
} // end LLRBNode
// Empty node is shared between all LLRB trees.
// Temporarily null; replaced with the shared LLRBEmptyNode instance once
// that class is defined below.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
LLRBNode.EMPTY = null;
// Node color constants: red links glue logical 3-nodes together.
LLRBNode.RED = true;
LLRBNode.BLACK = false;
// Represents an empty node (a leaf node in the Red-Black Tree).
class LLRBEmptyNode {
    constructor() {
        // An empty subtree contains no entries.
        this.size = 0;
    }
    // --- Structural queries ---
    isEmpty() {
        return true;
    }
    // The shared leaf sentinel is always black.
    isRed() {
        return false;
    }
    minKey() {
        return null;
    }
    maxKey() {
        return null;
    }
    // --- Copy-on-write operations ---
    // The empty node is immutable; "copying" it yields itself.
    copy(key, value, color, left, right) {
        return this;
    }
    // Inserting into an empty tree creates a single (red) node.
    insert(key, value, comparator) {
        return new LLRBNode(key, value);
    }
    // Removing from an empty tree is a no-op.
    remove(key, comparator) {
        return this;
    }
    // --- Traversal: nothing to visit ---
    inorderTraversal(action) {
        return false;
    }
    reverseTraversal(action) {
        return false;
    }
    // --- Node data accessors: invalid on the empty sentinel ---
    get key() {
        throw fail();
    }
    get value() {
        throw fail();
    }
    get color() {
        throw fail();
    }
    get left() {
        throw fail();
    }
    get right() {
        throw fail();
    }
    // For testing.
    checkMaxDepth() {
        return true;
    }
    check() {
        return 0;
    }
} // end LLRBEmptyNode
// Install the shared empty-leaf sentinel now that LLRBEmptyNode is defined.
LLRBNode.EMPTY = new LLRBEmptyNode();
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * SortedSet is an immutable (copy-on-write) collection that keeps its
 * elements in the order defined by the provided comparator.
 *
 * NOTE: two elements for which the comparator returns 0 are considered
 * equal!
 */
class SortedSet {
    constructor(comparator) {
        this.comparator = comparator;
        this.data = new SortedMap(this.comparator);
    }
    /** Whether `elem` is a member of this set. */
    has(elem) {
        return this.data.get(elem) !== null;
    }
    /** Smallest element, or null when the set is empty. */
    first() {
        return this.data.minKey();
    }
    /** Largest element, or null when the set is empty. */
    last() {
        return this.data.maxKey();
    }
    get size() {
        return this.data.size;
    }
    /** Zero-based rank of `elem`, or -1 when absent. */
    indexOf(elem) {
        return this.data.indexOf(elem);
    }
    /** Iterates elements in order defined by "comparator" */
    forEach(cb) {
        this.data.inorderTraversal((elem, _) => {
            cb(elem);
            return false;
        });
    }
    /** Iterates over `elem`s such that: range[0] &lt;= elem &lt; range[1]. */
    forEachInRange(range, cb) {
        const iter = this.data.getIteratorFrom(range[0]);
        const upperBound = range[1];
        while (iter.hasNext()) {
            const entry = iter.getNext();
            if (this.comparator(entry.key, upperBound) >= 0) {
                return;
            }
            cb(entry.key);
        }
    }
    /**
     * Iterates over `elem`s such that: start &lt;= elem until false is returned.
     */
    forEachWhile(cb, start) {
        const iter = start !== undefined
            ? this.data.getIteratorFrom(start)
            : this.data.getIterator();
        while (iter.hasNext()) {
            const entry = iter.getNext();
            if (!cb(entry.key)) {
                return;
            }
        }
    }
    /** Finds the least element greater than or equal to `elem`. */
    firstAfterOrEqual(elem) {
        const iter = this.data.getIteratorFrom(elem);
        return iter.hasNext() ? iter.getNext().key : null;
    }
    getIterator() {
        return new SortedSetIterator(this.data.getIterator());
    }
    getIteratorFrom(key) {
        return new SortedSetIterator(this.data.getIteratorFrom(key));
    }
    /** Inserts or updates an element */
    add(elem) {
        return this.copy(this.data.remove(elem).insert(elem, true));
    }
    /** Deletes an element */
    delete(elem) {
        return this.has(elem) ? this.copy(this.data.remove(elem)) : this;
    }
    isEmpty() {
        return this.data.isEmpty();
    }
    /** Returns the union of this set and `other`. */
    unionWith(other) {
        // Fold the smaller set into the larger one to minimize insertions.
        let larger = this;
        let smaller = other;
        if (larger.size < smaller.size) {
            larger = other;
            smaller = this;
        }
        smaller.forEach(elem => {
            larger = larger.add(elem);
        });
        return larger;
    }
    /** Element-wise equality using the comparator's notion of equality. */
    isEqual(other) {
        if (!(other instanceof SortedSet)) {
            return false;
        }
        if (this.size !== other.size) {
            return false;
        }
        const thisIt = this.data.getIterator();
        const otherIt = other.data.getIterator();
        while (thisIt.hasNext()) {
            const lhs = thisIt.getNext().key;
            const rhs = otherIt.getNext().key;
            if (this.comparator(lhs, rhs) !== 0) {
                return false;
            }
        }
        return true;
    }
    /** Returns the elements, in comparator order, as an array. */
    toArray() {
        const elements = [];
        this.forEach(elem => {
            elements.push(elem);
        });
        return elements;
    }
    toString() {
        const elements = [];
        this.forEach(elem => elements.push(elem));
        return 'SortedSet(' + elements.toString() + ')';
    }
    /** Internal: wraps `data` in a new SortedSet that shares this comparator. */
    copy(data) {
        const wrapped = new SortedSet(this.comparator);
        wrapped.data = data;
        return wrapped;
    }
}
/** Adapts a SortedMapIterator so that it yields keys (elements) only. */
class SortedSetIterator {
    constructor(iter) {
        this.iter = iter;
    }
    /** Returns the next element in iteration order. */
    getNext() {
        const entry = this.iter.getNext();
        return entry.key;
    }
    hasNext() {
        return this.iter.hasNext();
    }
}
/**
 * Computes the difference between two sorted sets using their natural
 * ordering. Invokes `onAdd` for every element present in `after` but not in
 * `before`, and `onRemove` for every element present in `before` but not in
 * `after`.
 *
 * Runs in O(n log n), where n is the size of the two sets.
 *
 * @param before - The elements that exist in the original set.
 * @param after - The elements to diff against the original set.
 * @param comparator - The comparator for the elements in before and after.
 * @param onAdd - A function to invoke for every element that is part of
 * `after` but not `before`.
 * @param onRemove - A function to invoke for every element that is part of
 * `before` but not `after`.
 */
function diffSortedSets(before, after, comparator, onAdd, onRemove) {
    const beforeIt = before.getIterator();
    const afterIt = after.getIterator();
    let beforeValue = advanceIterator(beforeIt);
    let afterValue = advanceIterator(afterIt);
    // Merge-walk both sets simultaneously in comparator order.
    while (beforeValue || afterValue) {
        if (beforeValue && afterValue) {
            const cmp = comparator(beforeValue, afterValue);
            if (cmp < 0) {
                // Present only in `before`: removed.
                onRemove(beforeValue);
                beforeValue = advanceIterator(beforeIt);
            }
            else if (cmp > 0) {
                // Present only in `after`: added.
                onAdd(afterValue);
                afterValue = advanceIterator(afterIt);
            }
            else {
                // Present in both: advance past it on each side.
                beforeValue = advanceIterator(beforeIt);
                afterValue = advanceIterator(afterIt);
            }
        }
        else if (beforeValue != null) {
            // `after` is exhausted: everything left in `before` was removed.
            onRemove(beforeValue);
            beforeValue = advanceIterator(beforeIt);
        }
        else {
            // `before` is exhausted: everything left in `after` was added.
            onAdd(afterValue);
            afterValue = advanceIterator(afterIt);
        }
    }
}
/**
 * Returns the next element from the iterator or `undefined` if none available.
 */
function advanceIterator(it) {
    if (!it.hasNext()) {
        return undefined;
    }
    return it.getNext();
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Provides a set of fields that can be used to partially patch a document.
 * FieldMask is used in conjunction with ObjectValue.
 * Examples:
 *   foo - Overwrites foo entirely with the provided value. If foo is not
 *         present in the companion ObjectValue, the field is deleted.
 *   foo.bar - Overwrites only the field bar of the object foo.
 *             If foo is not an object, foo is replaced with an object
 *             containing foo
 */
class FieldMask {
    constructor(fields) {
        this.fields = fields;
        // TODO(dimond): validation of FieldMask
        // Keep the mask sorted so isEqual() can compare element-wise.
        fields.sort(FieldPath$1.comparator);
    }
    /** A mask that covers no fields at all. */
    static empty() {
        return new FieldMask([]);
    }
    /**
     * Returns a new FieldMask containing every path in this mask plus all of
     * the given `extraFields`, de-duplicated via a sorted set.
     */
    unionWith(extraFields) {
        let union = new SortedSet(FieldPath$1.comparator);
        for (const path of this.fields) {
            union = union.add(path);
        }
        for (const path of extraFields) {
            union = union.add(path);
        }
        return new FieldMask(union.toArray());
    }
    /**
     * Verifies that `fieldPath` is included by at least one field in this
     * field mask (i.e. some mask entry is a prefix of it).
     *
     * This is an O(n) operation, where `n` is the size of the field mask.
     */
    covers(fieldPath) {
        return this.fields.some(maskPath => maskPath.isPrefixOf(fieldPath));
    }
    isEqual(other) {
        return arrayEquals(this.fields, other.fields, (l, r) => l.isEqual(r));
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Converts a Base64 encoded string to a binary string (one character per
 * byte). 'latin1' is Node's canonical name for the 'binary' encoding.
 */
function decodeBase64(encoded) {
    // Note: We used to validate the base64 string here via a regular expression.
    // This was removed to improve the performance of indexing.
    return Buffer.from(encoded, 'base64').toString('latin1');
}
/**
 * Converts a binary string (one character per byte) to a Base64 encoded
 * string. 'latin1' is Node's canonical name for the 'binary' encoding.
 */
function encodeBase64(raw) {
    return Buffer.from(raw, 'latin1').toString('base64');
}
/** True if and only if the Base64 conversion functions are available. */
// Node always provides Buffer, so Base64 support is unconditional here.
function isBase64Available() {
    return true;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Immutable class that represents a "proto" byte string.
 *
 * On the wire a proto byte string arrives either as a Base64-encoded string
 * or as a Uint8Array. This class hides that distinction by normalizing both
 * into a single internal binary-string representation, which is converted
 * back to a string before being sent as a proto.
 * @internal
 */
class ByteString {
    constructor(binaryString) {
        this.binaryString = binaryString;
    }
    /** Builds a ByteString from a Base64-encoded string. */
    static fromBase64String(base64) {
        return new ByteString(decodeBase64(base64));
    }
    /** Builds a ByteString from raw bytes. */
    static fromUint8Array(array) {
        // TODO(indexing); Remove the copy of the byte string here as this method
        // is frequently called during indexing.
        return new ByteString(binaryStringFromUint8Array(array));
    }
    /** Iterates over the byte values (0-255) of this byte string. */
    [Symbol.iterator]() {
        let index = 0;
        return {
            next: () => {
                if (index >= this.binaryString.length) {
                    return { value: undefined, done: true };
                }
                return { value: this.binaryString.charCodeAt(index++), done: false };
            }
        };
    }
    toBase64() {
        return encodeBase64(this.binaryString);
    }
    toUint8Array() {
        return uint8ArrayFromBinaryString(this.binaryString);
    }
    /** Each character occupies two bytes in a JS (UTF-16) string. */
    approximateByteSize() {
        return this.binaryString.length * 2;
    }
    compareTo(other) {
        return primitiveComparator(this.binaryString, other.binaryString);
    }
    isEqual(other) {
        return this.binaryString === other.binaryString;
    }
}
ByteString.EMPTY_BYTE_STRING = new ByteString('');
/**
 * Helper function to convert an Uint8Array to a binary string (one character
 * per byte).
 */
function binaryStringFromUint8Array(array) {
    return Array.from(array, byte => String.fromCharCode(byte)).join('');
}
/**
 * Helper function to convert a binary string (one character per byte) to an
 * Uint8Array.
 */
function uint8ArrayFromBinaryString(binaryString) {
    const bytes = new Uint8Array(binaryString.length);
    // Per-code-unit loop (not Uint8Array.from over the string, which would
    // iterate by code points rather than UTF-16 units).
    for (let index = binaryString.length - 1; index >= 0; index--) {
        bytes[index] = binaryString.charCodeAt(index);
    }
    return bytes;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// A RegExp matching ISO 8601 UTC timestamps with optional fraction.
const ISO_TIMESTAMP_REG_EXP = new RegExp(/^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(?:\.(\d+))?Z$/);
/**
 * Converts the possible Proto values for a timestamp value into a "seconds and
 * nanos" representation.
 */
function normalizeTimestamp(date) {
    hardAssert(!!date);
    // The json interface (for the browser) will return an iso timestamp string,
    // while the proto js library (for node) will return a
    // google.protobuf.Timestamp instance.
    if (typeof date === 'string') {
        // The date string can have higher precision (nanos) than the Date class
        // (millis), so we do some custom parsing here.
        // Parse the nanos right out of the string.
        let nanos = 0;
        const fraction = ISO_TIMESTAMP_REG_EXP.exec(date);
        hardAssert(!!fraction);
        if (fraction[1]) {
            // Pad the fraction out to 9 digits (nanos). Uses padEnd/substring
            // instead of the deprecated (Annex B) String.prototype.substr.
            const nanoStr = fraction[1].padEnd(9, '0').substring(0, 9);
            nanos = Number(nanoStr);
        }
        // Parse the date to get the seconds.
        const parsedDate = new Date(date);
        const seconds = Math.floor(parsedDate.getTime() / 1000);
        return { seconds, nanos };
    }
    else {
        // TODO(b/37282237): Use strings for Proto3 timestamps
        // assert(!this.options.useProto3Json,
        //   'The timestamp instance format requires Proto JS.');
        const seconds = normalizeNumber(date.seconds);
        const nanos = normalizeNumber(date.nanos);
        return { seconds, nanos };
    }
}
/**
 * Converts the possible Proto types for numbers into a JavaScript number.
 * Returns 0 if the value is not numeric (and NaN for non-numeric strings,
 * mirroring Number()).
 */
function normalizeNumber(value) {
    // TODO(bjornick): Handle int64 greater than 53 bits.
    if (typeof value === 'number') {
        return value;
    }
    if (typeof value === 'string') {
        return Number(value);
    }
    return 0;
}
/** Converts the possible Proto types for Blobs into a ByteString. */
function normalizeByteString(blob) {
    return typeof blob === 'string'
        ? ByteString.fromBase64String(blob)
        : ByteString.fromUint8Array(blob);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Represents a locally-applied ServerTimestamp.
*
* Server Timestamps are backed by MapValues that contain an internal field
* `__type__` with a value of `server_timestamp`. The previous value and local
* write time are stored in its `__previous_value__` and `__local_write_time__`
* fields respectively.
*
* Notes:
* - ServerTimestampValue instances are created as the result of applying a
* transform. They can only exist in the local view of a document. Therefore
* they do not need to be parsed or serialized.
* - When evaluated locally (e.g. for snapshot.data()), they by default
* evaluate to `null`. This behavior can be configured by passing custom
* FieldValueOptions to value().
* - With respect to other ServerTimestampValues, they sort by their
* localWriteTime.
*/
// Sentinel string stored under TYPE_KEY that marks a map as a server timestamp.
const SERVER_TIMESTAMP_SENTINEL = 'server_timestamp';
// Internal field carrying the sentinel type tag.
const TYPE_KEY = '__type__';
// Internal field holding the value the field had before the transform was applied.
const PREVIOUS_VALUE_KEY = '__previous_value__';
// Internal field holding the local time at which the timestamp was written.
const LOCAL_WRITE_TIME_KEY = '__local_write_time__';
/** Returns true if `value` is a map encoding a locally-applied server timestamp. */
function isServerTimestamp(value) {
    const mapValue = value === null || value === undefined ? undefined : value.mapValue;
    const fields = (mapValue && mapValue.fields) || {};
    const typeField = fields[TYPE_KEY];
    return !!typeField && typeField.stringValue === SERVER_TIMESTAMP_SENTINEL;
}
/**
 * Creates a new ServerTimestamp proto value (using the internal format).
 */
function serverTimestamp$1(localWriteTime, previousValue) {
    // We should avoid storing deeply nested server timestamp map values
    // because we never use the intermediate "previous values".
    // For example:
    //   previous: 42L, add: t1, result: t1 -> 42L
    //   previous: t1, add: t2, result: t2 -> 42L (NOT t2 -> t1 -> 42L)
    //   previous: t2, add: t3, result: t3 -> 42L (NOT t3 -> t2 -> t1 -> 42L)
    // `getPreviousValue` recursively traverses server timestamps to find the
    // least recent Value.
    const basePreviousValue =
        previousValue && isServerTimestamp(previousValue)
            ? getPreviousValue(previousValue)
            : previousValue;
    const fields = {
        [TYPE_KEY]: {
            stringValue: SERVER_TIMESTAMP_SENTINEL
        },
        [LOCAL_WRITE_TIME_KEY]: {
            timestampValue: {
                seconds: localWriteTime.seconds,
                nanos: localWriteTime.nanoseconds
            }
        }
    };
    if (basePreviousValue) {
        fields[PREVIOUS_VALUE_KEY] = basePreviousValue;
    }
    return { mapValue: { fields } };
}
/**
 * Returns the value of the field before this ServerTimestamp was set.
 *
 * Preserving the previous values allows the user to display the last resolved
 * value until the backend responds with the timestamp. Nested server
 * timestamps are unwrapped iteratively down to the least recent concrete
 * value.
 */
function getPreviousValue(value) {
    let previousValue = value.mapValue.fields[PREVIOUS_VALUE_KEY];
    while (isServerTimestamp(previousValue)) {
        previousValue = previousValue.mapValue.fields[PREVIOUS_VALUE_KEY];
    }
    return previousValue;
}
/**
 * Returns the local time at which this timestamp was first set.
 */
function getLocalWriteTime(value) {
    // `__local_write_time__` is always written by serverTimestamp$1, so the
    // nested field access is safe for any valid server-timestamp map.
    const localWriteTime = normalizeTimestamp(value.mapValue.fields[LOCAL_WRITE_TIME_KEY].timestampValue);
    return new Timestamp(localWriteTime.seconds, localWriteTime.nanos);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Immutable bag of configuration describing the Firestore backend/database a
 * client talks to and how it connects.
 */
class DatabaseInfo {
    /**
     * Constructs a DatabaseInfo using the provided host, databaseId and
     * persistenceKey.
     *
     * @param databaseId - The database to use.
     * @param appId - The Firebase App Id.
     * @param persistenceKey - A unique identifier for this Firestore's local
     * storage (used in conjunction with the databaseId).
     * @param host - The Firestore backend host to connect to.
     * @param ssl - Whether to use SSL when connecting.
     * @param forceLongPolling - Whether to use the forceLongPolling option
     * when using WebChannel as the network transport.
     * @param autoDetectLongPolling - Whether to use the detectBufferingProxy
     * option when using WebChannel as the network transport.
     * @param longPollingOptions Options that configure long-polling.
     * @param useFetchStreams Whether to use the Fetch API instead of
     * XMLHTTPRequest
     */
    constructor(databaseId, appId, persistenceKey, host, ssl, forceLongPolling, autoDetectLongPolling, longPollingOptions, useFetchStreams) {
        // Every parameter is stored verbatim under its own name.
        Object.assign(this, {
            databaseId,
            appId,
            persistenceKey,
            host,
            ssl,
            forceLongPolling,
            autoDetectLongPolling,
            longPollingOptions,
            useFetchStreams
        });
    }
}
/**
 * The default database name for a project. Used by DatabaseId when no
 * explicit database name is supplied.
 */
const DEFAULT_DATABASE_NAME = '(default)';
/**
 * Represents the database ID a Firestore client is associated with.
 * @internal
 */
class DatabaseId {
    constructor(projectId, database) {
        this.projectId = projectId;
        // Fall back to the default database when no name (or '') is given.
        this.database = database || DEFAULT_DATABASE_NAME;
    }
    /** A DatabaseId with an empty project and the default database. */
    static empty() {
        return new DatabaseId('', '');
    }
    /** Whether this ID refers to the project's default database. */
    get isDefaultDatabase() {
        return this.database === DEFAULT_DATABASE_NAME;
    }
    isEqual(other) {
        if (!(other instanceof DatabaseId)) {
            return false;
        }
        return other.projectId === this.projectId && other.database === this.database;
    }
}
/**
 * Builds a DatabaseId from a FirebaseApp's options.
 *
 * @throws FirestoreError (INVALID_ARGUMENT) if the app was initialized
 * without a `projectId`.
 */
function databaseIdFromApp(app, database) {
    const hasProjectId = Object.prototype.hasOwnProperty.call(app.options, 'projectId');
    if (!hasProjectId) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, '"projectId" not provided in firebase.initializeApp.');
    }
    return new DatabaseId(app.options.projectId, database);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Sentinel value that sorts before any Mutation Batch ID.
 * Used when a batch ID has not yet been assigned or is unknown.
 */
const BATCHID_UNKNOWN = -1;
/**
 * Returns whether a variable is either undefined or null.
 */
function isNullOrUndefined(value) {
    // Loose equality against null matches exactly null and undefined.
    return value == null;
}
/** Returns whether the value represents -0. */
function isNegativeZero(value) {
    // Object.is distinguishes -0 from +0, unlike === which treats them equal.
    return Object.is(value, -0);
}
/**
 * Returns whether a value is an integer and in the safe integer range
 * @param value - The value to test for being an integer and in the safe range
 */
function isSafeInteger(value) {
    // Number.isSafeInteger covers the integer check and the
    // [MIN_SAFE_INTEGER, MAX_SAFE_INTEGER] range; -0 is excluded explicitly.
    return (typeof value === 'number' &&
        Number.isSafeInteger(value) &&
        !Object.is(value, -0));
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Internal type tag marking the canonical maximum value.
const MAX_VALUE_TYPE = '__max__';
// The canonical maximum Value; sorts after every other value (see typeOrder).
const MAX_VALUE = {
    mapValue: {
        fields: {
            '__type__': { stringValue: MAX_VALUE_TYPE }
        }
    }
};
// The canonical minimum Value (null); sorts before every other value.
const MIN_VALUE = {
    nullValue: 'NULL_VALUE'
};
/** Extracts the backend's type order for the provided value. */
function typeOrder(value) {
    if ('nullValue' in value) {
        return 0 /* TypeOrder.NullValue */;
    }
    if ('booleanValue' in value) {
        return 1 /* TypeOrder.BooleanValue */;
    }
    if ('integerValue' in value || 'doubleValue' in value) {
        // Integers and doubles share a single number type order.
        return 2 /* TypeOrder.NumberValue */;
    }
    if ('timestampValue' in value) {
        return 3 /* TypeOrder.TimestampValue */;
    }
    if ('stringValue' in value) {
        return 5 /* TypeOrder.StringValue */;
    }
    if ('bytesValue' in value) {
        return 6 /* TypeOrder.BlobValue */;
    }
    if ('referenceValue' in value) {
        return 7 /* TypeOrder.RefValue */;
    }
    if ('geoPointValue' in value) {
        return 8 /* TypeOrder.GeoPointValue */;
    }
    if ('arrayValue' in value) {
        return 9 /* TypeOrder.ArrayValue */;
    }
    if ('mapValue' in value) {
        // Maps double as encodings for server timestamps and the max sentinel.
        if (isServerTimestamp(value)) {
            return 4 /* TypeOrder.ServerTimestampValue */;
        }
        if (isMaxValue(value)) {
            return 9007199254740991 /* TypeOrder.MaxValue */;
        }
        return 10 /* TypeOrder.ObjectValue */;
    }
    return fail();
}
/** Tests `left` and `right` for equality based on the backend semantics. */
function valueEquals(left, right) {
    // Referential equality short-circuits the type dispatch entirely.
    if (left === right) {
        return true;
    }
    const leftType = typeOrder(left);
    if (leftType !== typeOrder(right)) {
        // Values of different type orders are never equal.
        return false;
    }
    switch (leftType) {
        case 0 /* TypeOrder.NullValue */:
        case 9007199254740991 /* TypeOrder.MaxValue */:
            // All nulls are equal, as are all max sentinels.
            return true;
        case 1 /* TypeOrder.BooleanValue */:
            return left.booleanValue === right.booleanValue;
        case 2 /* TypeOrder.NumberValue */:
            return numberEquals(left, right);
        case 3 /* TypeOrder.TimestampValue */:
            return timestampEquals(left, right);
        case 4 /* TypeOrder.ServerTimestampValue */:
            // Server timestamps compare by their local write time.
            return getLocalWriteTime(left).isEqual(getLocalWriteTime(right));
        case 5 /* TypeOrder.StringValue */:
            return left.stringValue === right.stringValue;
        case 6 /* TypeOrder.BlobValue */:
            return blobEquals(left, right);
        case 7 /* TypeOrder.RefValue */:
            return left.referenceValue === right.referenceValue;
        case 8 /* TypeOrder.GeoPointValue */:
            return geoPointEquals(left, right);
        case 9 /* TypeOrder.ArrayValue */:
            return arrayEquals(left.arrayValue.values || [], right.arrayValue.values || [], valueEquals);
        case 10 /* TypeOrder.ObjectValue */:
            return objectEquals(left, right);
        default:
            return fail();
    }
}
/** Equality for timestamp values, with a fast path for ISO 8601 strings. */
function timestampEquals(left, right) {
    const lts = left.timestampValue;
    const rts = right.timestampValue;
    if (typeof lts === 'string' &&
        typeof rts === 'string' &&
        lts.length === rts.length) {
        // Use string equality for ISO 8601 timestamps
        return lts === rts;
    }
    const leftTimestamp = normalizeTimestamp(lts);
    const rightTimestamp = normalizeTimestamp(rts);
    return (leftTimestamp.seconds === rightTimestamp.seconds &&
        leftTimestamp.nanos === rightTimestamp.nanos);
}
/** Equality for geo points: both latitude and longitude must match. */
function geoPointEquals(left, right) {
    const l = left.geoPointValue;
    const r = right.geoPointValue;
    return (normalizeNumber(l.latitude) === normalizeNumber(r.latitude) &&
        normalizeNumber(l.longitude) === normalizeNumber(r.longitude));
}
/** Equality for blob values, normalized to ByteString form first. */
function blobEquals(left, right) {
    const leftBytes = normalizeByteString(left.bytesValue);
    const rightBytes = normalizeByteString(right.bytesValue);
    return leftBytes.isEqual(rightBytes);
}
/**
 * Equality for number values. Integers only equal integers and doubles only
 * equal doubles; NaN equals NaN, and -0 is distinct from +0.
 */
function numberEquals(left, right) {
    if ('integerValue' in left && 'integerValue' in right) {
        return normalizeNumber(left.integerValue) === normalizeNumber(right.integerValue);
    }
    if ('doubleValue' in left && 'doubleValue' in right) {
        const n1 = normalizeNumber(left.doubleValue);
        const n2 = normalizeNumber(right.doubleValue);
        if (n1 === n2) {
            // +0 and -0 compare === but are distinct Firestore values.
            return isNegativeZero(n1) === isNegativeZero(n2);
        }
        // Unequal under ===: equal only if both sides are NaN.
        return isNaN(n1) && isNaN(n2);
    }
    // Mixed integer/double representations are never equal.
    return false;
}
/** Deep equality for map values: same key set, pairwise-equal values. */
function objectEquals(left, right) {
    const leftMap = left.mapValue.fields || {};
    const rightMap = right.mapValue.fields || {};
    if (objectSize(leftMap) !== objectSize(rightMap)) {
        return false;
    }
    // Object.keys yields own enumerable keys only, matching the original
    // for...in + hasOwnProperty iteration.
    for (const key of Object.keys(leftMap)) {
        if (rightMap[key] === undefined ||
            !valueEquals(leftMap[key], rightMap[key])) {
            return false;
        }
    }
    return true;
}
/** Returns true if the ArrayValue contains the specified element. */
function arrayValueContains(haystack, needle) {
    const values = haystack.values || [];
    return values.some(v => valueEquals(v, needle));
}
/**
 * Total ordering over proto values following backend semantics: values of
 * different types order by type order; equal-type values order by their
 * type-specific comparator.
 */
function valueCompare(left, right) {
    if (left === right) {
        return 0;
    }
    const leftType = typeOrder(left);
    const rightType = typeOrder(right);
    if (leftType !== rightType) {
        return primitiveComparator(leftType, rightType);
    }
    if (leftType === 0 /* TypeOrder.NullValue */ ||
        leftType === 9007199254740991 /* TypeOrder.MaxValue */) {
        // All nulls compare equal, as do all max sentinels.
        return 0;
    }
    if (leftType === 1 /* TypeOrder.BooleanValue */) {
        return primitiveComparator(left.booleanValue, right.booleanValue);
    }
    if (leftType === 2 /* TypeOrder.NumberValue */) {
        return compareNumbers(left, right);
    }
    if (leftType === 3 /* TypeOrder.TimestampValue */) {
        return compareTimestamps(left.timestampValue, right.timestampValue);
    }
    if (leftType === 4 /* TypeOrder.ServerTimestampValue */) {
        // Server timestamps order by their local write time.
        return compareTimestamps(getLocalWriteTime(left), getLocalWriteTime(right));
    }
    if (leftType === 5 /* TypeOrder.StringValue */) {
        return primitiveComparator(left.stringValue, right.stringValue);
    }
    if (leftType === 6 /* TypeOrder.BlobValue */) {
        return compareBlobs(left.bytesValue, right.bytesValue);
    }
    if (leftType === 7 /* TypeOrder.RefValue */) {
        return compareReferences(left.referenceValue, right.referenceValue);
    }
    if (leftType === 8 /* TypeOrder.GeoPointValue */) {
        return compareGeoPoints(left.geoPointValue, right.geoPointValue);
    }
    if (leftType === 9 /* TypeOrder.ArrayValue */) {
        return compareArrays(left.arrayValue, right.arrayValue);
    }
    if (leftType === 10 /* TypeOrder.ObjectValue */) {
        return compareMaps(left.mapValue, right.mapValue);
    }
    throw fail();
}
/**
 * Orders two number values. NaN sorts before all other numbers; two NaNs
 * compare equal.
 */
function compareNumbers(left, right) {
    const leftNumber = normalizeNumber(left.integerValue || left.doubleValue);
    const rightNumber = normalizeNumber(right.integerValue || right.doubleValue);
    if (leftNumber < rightNumber) {
        return -1;
    }
    if (leftNumber > rightNumber) {
        return 1;
    }
    if (leftNumber === rightNumber) {
        return 0;
    }
    // All three comparisons failed, so at least one side is NaN.
    if (isNaN(leftNumber)) {
        return isNaN(rightNumber) ? 0 : -1;
    }
    return 1;
}
/** Orders two timestamps, with a fast path for equal-length ISO strings. */
function compareTimestamps(left, right) {
    if (typeof left === 'string' &&
        typeof right === 'string' &&
        left.length === right.length) {
        // Equal-length ISO 8601 strings sort correctly as plain strings.
        return primitiveComparator(left, right);
    }
    const leftTimestamp = normalizeTimestamp(left);
    const rightTimestamp = normalizeTimestamp(right);
    // Seconds are the primary key; nanos break ties.
    return (primitiveComparator(leftTimestamp.seconds, rightTimestamp.seconds) ||
        primitiveComparator(leftTimestamp.nanos, rightTimestamp.nanos));
}
/** Orders two document references segment-by-segment. */
function compareReferences(leftPath, rightPath) {
    const leftSegments = leftPath.split('/');
    const rightSegments = rightPath.split('/');
    const minLength = Math.min(leftSegments.length, rightSegments.length);
    for (let i = 0; i < minLength; i++) {
        const comparison = primitiveComparator(leftSegments[i], rightSegments[i]);
        if (comparison !== 0) {
            return comparison;
        }
    }
    // All shared segments are equal: the shorter path sorts first.
    return primitiveComparator(leftSegments.length, rightSegments.length);
}
/** Orders geo points: latitude is the primary key, longitude breaks ties. */
function compareGeoPoints(left, right) {
    return (primitiveComparator(normalizeNumber(left.latitude), normalizeNumber(right.latitude)) ||
        primitiveComparator(normalizeNumber(left.longitude), normalizeNumber(right.longitude)));
}
/** Orders two blobs by their normalized byte content. */
function compareBlobs(left, right) {
    return normalizeByteString(left).compareTo(normalizeByteString(right));
}
/** Orders arrays element-wise; a shorter prefix-equal array sorts first. */
function compareArrays(left, right) {
    const leftArray = left.values || [];
    const rightArray = right.values || [];
    const minLength = Math.min(leftArray.length, rightArray.length);
    for (let i = 0; i < minLength; ++i) {
        const compare = valueCompare(leftArray[i], rightArray[i]);
        if (compare) {
            return compare;
        }
    }
    return primitiveComparator(leftArray.length, rightArray.length);
}
/**
 * Orders two map values by sorted key/value pairs. The canonical MAX_VALUE
 * map (compared by identity) sorts after every other map.
 */
function compareMaps(left, right) {
    if (left === MAX_VALUE.mapValue && right === MAX_VALUE.mapValue) {
        return 0;
    }
    if (left === MAX_VALUE.mapValue) {
        return 1;
    }
    if (right === MAX_VALUE.mapValue) {
        return -1;
    }
    const leftMap = left.fields || {};
    const rightMap = right.fields || {};
    // Even though MapValues are likely sorted correctly based on their insertion
    // order (e.g. when received from the backend), local modifications can bring
    // elements out of order. We need to re-sort the elements to ensure that
    // canonical IDs are independent of insertion order.
    const leftKeys = Object.keys(leftMap).sort();
    const rightKeys = Object.keys(rightMap).sort();
    for (let i = 0; i < leftKeys.length && i < rightKeys.length; ++i) {
        const keyCompare = primitiveComparator(leftKeys[i], rightKeys[i]);
        if (keyCompare !== 0) {
            return keyCompare;
        }
        const compare = valueCompare(leftMap[leftKeys[i]], rightMap[rightKeys[i]]);
        if (compare !== 0) {
            return compare;
        }
    }
    // Shared prefix of sorted entries is equal: fewer keys sorts first.
    return primitiveComparator(leftKeys.length, rightKeys.length);
}
/**
 * Generates the canonical ID for the provided field value (as used in Target
 * serialization).
 *
 * Canonical IDs are deterministic: identical values produce identical
 * strings regardless of map key insertion order (canonifyMap sorts keys).
 */
function canonicalId(value) {
    return canonifyValue(value);
}
/** Dispatches to the per-type canonical string encoding for a proto value. */
function canonifyValue(value) {
    if ('nullValue' in value) {
        return 'null';
    }
    if ('booleanValue' in value) {
        return String(value.booleanValue);
    }
    if ('integerValue' in value) {
        return String(value.integerValue);
    }
    if ('doubleValue' in value) {
        return String(value.doubleValue);
    }
    if ('timestampValue' in value) {
        return canonifyTimestamp(value.timestampValue);
    }
    if ('stringValue' in value) {
        return value.stringValue;
    }
    if ('bytesValue' in value) {
        return canonifyByteString(value.bytesValue);
    }
    if ('referenceValue' in value) {
        return canonifyReference(value.referenceValue);
    }
    if ('geoPointValue' in value) {
        return canonifyGeoPoint(value.geoPointValue);
    }
    if ('arrayValue' in value) {
        return canonifyArray(value.arrayValue);
    }
    if ('mapValue' in value) {
        return canonifyMap(value.mapValue);
    }
    return fail();
}
/** Canonical form of a blob value: its base64 encoding. */
function canonifyByteString(byteString) {
    return normalizeByteString(byteString).toBase64();
}
function canonifyTimestamp(timestamp) {
const normalizedTimestamp = normalizeTimestamp(timestamp);
return `time(${normalizedTimestamp.seconds},${normalizedTimestamp.nanos})`;
}
/** Canonical form of a geo point: `geo(<latitude>,<longitude>)`. */
function canonifyGeoPoint(geoPoint) {
    const { latitude, longitude } = geoPoint;
    return `geo(${latitude},${longitude})`;
}
/** Canonical form of a document reference: its document key path string. */
function canonifyReference(referenceValue) {
    return DocumentKey.fromName(referenceValue).toString();
}
/** Canonical form of a map: `{k1:v1,k2:v2}` with keys in sorted order. */
function canonifyMap(mapValue) {
    // Iteration order in JavaScript is not guaranteed. To ensure that we
    // generate matching canonical IDs for identical maps, we need to sort
    // the keys.
    const fields = mapValue.fields || {};
    const entries = Object.keys(fields)
        .sort()
        .map(key => `${key}:${canonifyValue(fields[key])}`);
    return `{${entries.join(',')}}`;
}
/** Canonical form of an array: `[v1,v2,...]`. */
function canonifyArray(arrayValue) {
    const values = arrayValue.values || [];
    return `[${values.map(v => canonifyValue(v)).join(',')}]`;
}
/**
 * Returns an approximate (and wildly inaccurate) in-memory size for the field
 * value.
 *
 * The memory size takes into account only the actual user data as it resides
 * in memory and ignores object overhead.
 */
function estimateByteSize(value) {
    switch (typeOrder(value)) {
        case 0 /* TypeOrder.NullValue */:
            return 4;
        case 1 /* TypeOrder.BooleanValue */:
            return 4;
        case 2 /* TypeOrder.NumberValue */:
            // Numbers are stored as 64-bit doubles or integers.
            return 8;
        case 3 /* TypeOrder.TimestampValue */:
            // Timestamps are made up of two distinct numbers (seconds + nanoseconds)
            return 16;
        case 4 /* TypeOrder.ServerTimestampValue */:
            // A server timestamp costs one timestamp (16 bytes) plus whatever
            // previous value it retains.
            const previousValue = getPreviousValue(value);
            return previousValue ? 16 + estimateByteSize(previousValue) : 16;
        case 5 /* TypeOrder.StringValue */:
            // See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Data_structures:
            // "JavaScript's String type is [...] a set of elements of 16-bit unsigned
            // integer values"
            return value.stringValue.length * 2;
        case 6 /* TypeOrder.BlobValue */:
            return normalizeByteString(value.bytesValue).approximateByteSize();
        case 7 /* TypeOrder.RefValue */:
            // References are counted as the length of their resource-path string.
            return value.referenceValue.length;
        case 8 /* TypeOrder.GeoPointValue */:
            // GeoPoints are made up of two distinct numbers (latitude + longitude)
            return 16;
        case 9 /* TypeOrder.ArrayValue */:
            return estimateArrayByteSize(value.arrayValue);
        case 10 /* TypeOrder.ObjectValue */:
            return estimateMapByteSize(value.mapValue);
        default:
            throw fail();
    }
}
/** Sums, over all map entries, the key length plus the value's estimated size. */
function estimateMapByteSize(mapValue) {
    let size = 0;
    forEach(mapValue.fields, (key, val) => {
        size += key.length + estimateByteSize(val);
    });
    return size;
}
/** Sums the estimated sizes of all array elements. */
function estimateArrayByteSize(arrayValue) {
    let total = 0;
    for (const value of arrayValue.values || []) {
        total += estimateByteSize(value);
    }
    return total;
}
/** Returns a reference value for the provided database and key. */
function refValue(databaseId, key) {
    const path = `projects/${databaseId.projectId}/databases/${databaseId.database}` +
        `/documents/${key.path.canonicalString()}`;
    return { referenceValue: path };
}
/** Returns true if `value` is an IntegerValue . */
function isInteger(value) {
    if (!value) {
        return false;
    }
    return 'integerValue' in value;
}
/** Returns true if `value` is a DoubleValue. */
function isDouble(value) {
    return value ? 'doubleValue' in value : false;
}
/** Returns true if `value` is either an IntegerValue or a DoubleValue. */
function isNumber(value) {
    // Inlines isInteger/isDouble: a number is either proto representation.
    return !!value && ('integerValue' in value || 'doubleValue' in value);
}
/** Returns true if `value` is an ArrayValue. */
function isArray(value) {
    return Boolean(value) && 'arrayValue' in value;
}
/** Returns true if `value` is a NullValue. */
function isNullValue(value) {
    if (!value) {
        return false;
    }
    return 'nullValue' in value;
}
/** Returns true if `value` is NaN. */
function isNanValue(value) {
    // Number.isNaN on the already-coerced value matches the global isNaN here.
    return !!value && 'doubleValue' in value && Number.isNaN(Number(value.doubleValue));
}
/** Returns true if `value` is a MapValue. */
function isMapValue(value) {
    return value ? 'mapValue' in value : false;
}
/** Creates a deep copy of `source`. */
function deepClone(source) {
    if (source.geoPointValue) {
        return { geoPointValue: Object.assign({}, source.geoPointValue) };
    }
    if (source.timestampValue && typeof source.timestampValue === 'object') {
        // Object-form timestamps ({seconds, nanos}) need their own copy;
        // string-form timestamps fall through to the scalar case below.
        return { timestampValue: Object.assign({}, source.timestampValue) };
    }
    if (source.mapValue) {
        const target = { mapValue: { fields: {} } };
        forEach(source.mapValue.fields, (key, val) => (target.mapValue.fields[key] = deepClone(val)));
        return target;
    }
    if (source.arrayValue) {
        const values = (source.arrayValue.values || []).map(v => deepClone(v));
        return { arrayValue: { values } };
    }
    // Remaining value types hold scalars, so a shallow copy is a deep copy.
    return Object.assign({}, source);
}
/** Returns true if the Value represents the canonical {@link #MAX_VALUE} . */
function isMaxValue(value) {
    const fields = (value.mapValue && value.mapValue.fields) || {};
    const typeField = fields['__type__'] || {};
    return typeField.stringValue === MAX_VALUE_TYPE;
}
/** Returns the lowest value for the given value type (inclusive). */
function valuesGetLowerBound(value) {
    if ('nullValue' in value) {
        return MIN_VALUE;
    }
    if ('booleanValue' in value) {
        return { booleanValue: false };
    }
    if ('integerValue' in value || 'doubleValue' in value) {
        // NaN sorts before every other number (see compareNumbers).
        return { doubleValue: NaN };
    }
    if ('timestampValue' in value) {
        return { timestampValue: { seconds: Number.MIN_SAFE_INTEGER } };
    }
    if ('stringValue' in value) {
        return { stringValue: '' };
    }
    if ('bytesValue' in value) {
        return { bytesValue: '' };
    }
    if ('referenceValue' in value) {
        return refValue(DatabaseId.empty(), DocumentKey.empty());
    }
    if ('geoPointValue' in value) {
        return { geoPointValue: { latitude: -90, longitude: -180 } };
    }
    if ('arrayValue' in value) {
        return { arrayValue: {} };
    }
    if ('mapValue' in value) {
        return { mapValue: {} };
    }
    return fail();
}
/** Returns the largest value for the given value type (exclusive). */
function valuesGetUpperBound(value) {
    // The exclusive upper bound of each type is the smallest value of the
    // next type in the type order.
    if ('nullValue' in value) {
        return { booleanValue: false };
    }
    if ('booleanValue' in value) {
        return { doubleValue: NaN };
    }
    if ('integerValue' in value || 'doubleValue' in value) {
        return { timestampValue: { seconds: Number.MIN_SAFE_INTEGER } };
    }
    if ('timestampValue' in value) {
        return { stringValue: '' };
    }
    if ('stringValue' in value) {
        return { bytesValue: '' };
    }
    if ('bytesValue' in value) {
        return refValue(DatabaseId.empty(), DocumentKey.empty());
    }
    if ('referenceValue' in value) {
        return { geoPointValue: { latitude: -90, longitude: -180 } };
    }
    if ('geoPointValue' in value) {
        return { arrayValue: {} };
    }
    if ('arrayValue' in value) {
        return { mapValue: {} };
    }
    if ('mapValue' in value) {
        return MAX_VALUE;
    }
    return fail();
}
/**
 * Orders two lower bounds: smaller values first; for equal values, an
 * inclusive bound sorts before an exclusive one.
 */
function lowerBoundCompare(left, right) {
    const cmp = valueCompare(left.value, right.value);
    if (cmp !== 0) {
        return cmp;
    }
    // Coerce to booleans so truthy non-boolean flags behave consistently.
    const leftInclusive = !!left.inclusive;
    const rightInclusive = !!right.inclusive;
    if (leftInclusive === rightInclusive) {
        return 0;
    }
    return leftInclusive ? -1 : 1;
}
/**
 * Orders two upper bounds: smaller values first; for equal values, an
 * exclusive bound sorts before an inclusive one.
 */
function upperBoundCompare(left, right) {
    const cmp = valueCompare(left.value, right.value);
    if (cmp !== 0) {
        return cmp;
    }
    // Coerce to booleans so truthy non-boolean flags behave consistently.
    const leftInclusive = !!left.inclusive;
    const rightInclusive = !!right.inclusive;
    if (leftInclusive === rightInclusive) {
        return 0;
    }
    return leftInclusive ? 1 : -1;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * An ObjectValue represents a MapValue in the Firestore Proto and offers the
 * ability to add and remove fields (via the ObjectValueBuilder).
 */
class ObjectValue {
    constructor(value) {
        // The underlying proto Value (a mapValue); mutated in place by
        // `set`, `setAll` and `delete`.
        this.value = value;
    }
    /** Returns an ObjectValue wrapping an empty map. */
    static empty() {
        return new ObjectValue({ mapValue: {} });
    }
    /**
     * Returns the value at the given path or null.
     *
     * @param path - the path to search
     * @returns The value at the path or null if the path is not set.
     */
    field(path) {
        if (path.isEmpty()) {
            return this.value;
        }
        else {
            let currentLevel = this.value;
            // Descend through all but the last segment; abort if any
            // intermediate node is missing or not a map.
            for (let i = 0; i < path.length - 1; ++i) {
                currentLevel = (currentLevel.mapValue.fields || {})[path.get(i)];
                if (!isMapValue(currentLevel)) {
                    return null;
                }
            }
            currentLevel = (currentLevel.mapValue.fields || {})[path.lastSegment()];
            return currentLevel || null;
        }
    }
    /**
     * Sets the field to the provided value.
     *
     * @param path - The field path to set.
     * @param value - The value to set.
     */
    set(path, value) {
        const fieldsMap = this.getFieldsMap(path.popLast());
        // Clone so later mutations of the caller's value don't leak in.
        fieldsMap[path.lastSegment()] = deepClone(value);
    }
    /**
     * Sets the provided fields to the provided values.
     *
     * @param data - A map of fields to values (or null for deletes).
     */
    setAll(data) {
        let parent = FieldPath$1.emptyPath();
        // Changes are buffered per parent path and flushed whenever the
        // iteration moves on to a different parent.
        let upserts = {};
        let deletes = [];
        data.forEach((value, path) => {
            if (!parent.isImmediateParentOf(path)) {
                // Insert the accumulated changes at this parent location
                const fieldsMap = this.getFieldsMap(parent);
                this.applyChanges(fieldsMap, upserts, deletes);
                upserts = {};
                deletes = [];
                parent = path.popLast();
            }
            if (value) {
                upserts[path.lastSegment()] = deepClone(value);
            }
            else {
                deletes.push(path.lastSegment());
            }
        });
        // Flush the changes accumulated for the final parent path.
        const fieldsMap = this.getFieldsMap(parent);
        this.applyChanges(fieldsMap, upserts, deletes);
    }
    /**
     * Removes the field at the specified path. If there is no field at the
     * specified path, nothing is changed.
     *
     * @param path - The field path to remove.
     */
    delete(path) {
        const nestedValue = this.field(path.popLast());
        if (isMapValue(nestedValue) && nestedValue.mapValue.fields) {
            delete nestedValue.mapValue.fields[path.lastSegment()];
        }
    }
    /** Deep equality on the underlying proto values (backend semantics). */
    isEqual(other) {
        return valueEquals(this.value, other.value);
    }
    /**
     * Returns the map that contains the leaf element of `path`. If the parent
     * entry does not yet exist, or if it is not a map, a new map will be created.
     */
    getFieldsMap(path) {
        let current = this.value;
        if (!current.mapValue.fields) {
            current.mapValue = { fields: {} };
        }
        for (let i = 0; i < path.length; ++i) {
            let next = current.mapValue.fields[path.get(i)];
            // Non-map intermediates are overwritten so the path can be created.
            if (!isMapValue(next) || !next.mapValue.fields) {
                next = { mapValue: { fields: {} } };
                current.mapValue.fields[path.get(i)] = next;
            }
            current = next;
        }
        return current.mapValue.fields;
    }
    /**
     * Modifies `fieldsMap` by adding, replacing or deleting the specified
     * entries.
     */
    applyChanges(fieldsMap, inserts, deletes) {
        forEach(inserts, (key, val) => (fieldsMap[key] = val));
        for (const field of deletes) {
            delete fieldsMap[field];
        }
    }
    /** Returns a deep copy that shares no mutable state with this instance. */
    clone() {
        return new ObjectValue(deepClone(this.value));
    }
}
/**
 * Returns a FieldMask built from all fields in a MapValue.
 */
function extractFieldMask(value) {
    const fields = [];
    forEach(value.fields, (key, value) => {
        const currentPath = new FieldPath$1([key]);
        if (isMapValue(value)) {
            const nestedMask = extractFieldMask(value.mapValue);
            const nestedFields = nestedMask.fields;
            if (nestedFields.length === 0) {
                // Preserve the empty map by adding it to the FieldMask.
                fields.push(currentPath);
            }
            else {
                // For nested and non-empty ObjectValues, add the FieldPath of the
                // leaf nodes.
                for (const nestedPath of nestedFields) {
                    fields.push(currentPath.child(nestedPath));
                }
            }
        }
        else {
            // For leaf (non-map) values, add the field's own path directly.
            fields.push(currentPath);
        }
    });
    return new FieldMask(fields);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Represents a document in Firestore with a key, version, data and whether it
* has local mutations applied to it.
*
* Documents can transition between states via `convertToFoundDocument()`,
* `convertToNoDocument()` and `convertToUnknownDocument()`. If a document does
* not transition to one of these states even after all mutations have been
* applied, `isValidDocument()` returns false and the document should be removed
* from all views.
*/
class MutableDocument {
    constructor(key, documentType, version, readTime, createTime, data, documentState) {
        this.key = key;
        // One of INVALID (0) / FOUND_DOCUMENT (1) / NO_DOCUMENT (2) /
        // UNKNOWN_DOCUMENT (3) — see the static factory methods.
        this.documentType = documentType;
        // The snapshot version at which the current document state was reached.
        this.version = version;
        // Read time; SnapshotVersion.min() until updated via setReadTime().
        this.readTime = readTime;
        // Best-effort creation time; SnapshotVersion.min() when unknown.
        this.createTime = createTime;
        // The document's field data (an ObjectValue).
        this.data = data;
        // One of SYNCED (0) / HAS_LOCAL_MUTATIONS (1) /
        // HAS_COMMITTED_MUTATIONS (2).
        this.documentState = documentState;
    }
    /**
     * Creates a document with no known version or data, but which can serve as
     * base document for mutations.
     */
    static newInvalidDocument(documentKey) {
        return new MutableDocument(documentKey, 0 /* DocumentType.INVALID */,
        /* version */ SnapshotVersion.min(),
        /* readTime */ SnapshotVersion.min(),
        /* createTime */ SnapshotVersion.min(), ObjectValue.empty(), 0 /* DocumentState.SYNCED */);
    }
    /**
     * Creates a new document that is known to exist with the given data at the
     * given version.
     */
    static newFoundDocument(documentKey, version, createTime, value) {
        return new MutableDocument(documentKey, 1 /* DocumentType.FOUND_DOCUMENT */,
        /* version */ version,
        /* readTime */ SnapshotVersion.min(),
        /* createTime */ createTime, value, 0 /* DocumentState.SYNCED */);
    }
    /** Creates a new document that is known to not exist at the given version. */
    static newNoDocument(documentKey, version) {
        return new MutableDocument(documentKey, 2 /* DocumentType.NO_DOCUMENT */,
        /* version */ version,
        /* readTime */ SnapshotVersion.min(),
        /* createTime */ SnapshotVersion.min(), ObjectValue.empty(), 0 /* DocumentState.SYNCED */);
    }
    /**
     * Creates a new document that is known to exist at the given version but
     * whose data is not known (e.g. a document that was updated without a known
     * base document).
     *
     * Note: starts in HAS_COMMITTED_MUTATIONS state rather than SYNCED.
     */
    static newUnknownDocument(documentKey, version) {
        return new MutableDocument(documentKey, 3 /* DocumentType.UNKNOWN_DOCUMENT */,
        /* version */ version,
        /* readTime */ SnapshotVersion.min(),
        /* createTime */ SnapshotVersion.min(), ObjectValue.empty(), 2 /* DocumentState.HAS_COMMITTED_MUTATIONS */);
    }
/**
* Changes the document type to indicate that it exists and that its version
* and data are known.
*/
    convertToFoundDocument(version, value) {
        // Mutates this document in place and returns `this` for chaining.
        // If a document is switching state from being an invalid or deleted
        // document to a valid (FOUND_DOCUMENT) document, either due to receiving an
        // update from Watch or due to applying a local set mutation on top
        // of a deleted document, our best guess about its createTime would be the
        // version at which the document transitioned to a FOUND_DOCUMENT.
        if (this.createTime.isEqual(SnapshotVersion.min()) &&
            (this.documentType === 2 /* DocumentType.NO_DOCUMENT */ ||
                this.documentType === 0 /* DocumentType.INVALID */)) {
            this.createTime = version;
        }
        this.version = version;
        this.documentType = 1 /* DocumentType.FOUND_DOCUMENT */;
        this.data = value;
        // The document is now in sync: no pending local/committed mutation state.
        this.documentState = 0 /* DocumentState.SYNCED */;
        return this;
    }
/**
* Changes the document type to indicate that it doesn't exist at the given
* version.
*/
    convertToNoDocument(version) {
        // Turns this into a tombstone at `version`: data is cleared and the
        // state is reset to SYNCED. Returns `this` for chaining.
        this.version = version;
        this.documentType = 2 /* DocumentType.NO_DOCUMENT */;
        this.data = ObjectValue.empty();
        this.documentState = 0 /* DocumentState.SYNCED */;
        return this;
    }
/**
* Changes the document type to indicate that it exists at a given version but
* that its data is not known (e.g. a document that was updated without a known
* base document).
*/
    convertToUnknownDocument(version) {
        // Marks the document as existing at `version` with unknown contents;
        // data is cleared and committed mutations remain pending.
        // Returns `this` for chaining.
        this.version = version;
        this.documentType = 3 /* DocumentType.UNKNOWN_DOCUMENT */;
        this.data = ObjectValue.empty();
        this.documentState = 2 /* DocumentState.HAS_COMMITTED_MUTATIONS */;
        return this;
    }
    /** Marks this document as having mutations committed on the backend. */
    setHasCommittedMutations() {
        this.documentState = 2 /* DocumentState.HAS_COMMITTED_MUTATIONS */;
        return this;
    }
    /**
     * Marks this document as having pending local mutations. Note that the
     * version is reset to min() as well.
     */
    setHasLocalMutations() {
        this.documentState = 1 /* DocumentState.HAS_LOCAL_MUTATIONS */;
        this.version = SnapshotVersion.min();
        return this;
    }
    /** Sets the time this document was read from the backend/cache. */
    setReadTime(readTime) {
        this.readTime = readTime;
        return this;
    }
    /** Whether local (not yet acknowledged) mutations apply to this document. */
    get hasLocalMutations() {
        return this.documentState === 1 /* DocumentState.HAS_LOCAL_MUTATIONS */;
    }
    /** Whether mutations were committed but not yet reflected by Watch. */
    get hasCommittedMutations() {
        return this.documentState === 2 /* DocumentState.HAS_COMMITTED_MUTATIONS */;
    }
    /** Whether any kind of mutation (local or committed) is pending. */
    get hasPendingWrites() {
        return this.hasLocalMutations || this.hasCommittedMutations;
    }
    /** True unless this document is in the INVALID (unknown) state. */
    isValidDocument() {
        return this.documentType !== 0 /* DocumentType.INVALID */;
    }
    /** True if the document exists and its data is known. */
    isFoundDocument() {
        return this.documentType === 1 /* DocumentType.FOUND_DOCUMENT */;
    }
    /** True if the document is known to not exist (tombstone). */
    isNoDocument() {
        return this.documentType === 2 /* DocumentType.NO_DOCUMENT */;
    }
    /** True if the document exists but its data is unknown. */
    isUnknownDocument() {
        return this.documentType === 3 /* DocumentType.UNKNOWN_DOCUMENT */;
    }
    isEqual(other) {
        // Equality ignores readTime and createTime; only key, version,
        // document type, document state and data participate.
        return (other instanceof MutableDocument &&
            this.key.isEqual(other.key) &&
            this.version.isEqual(other.version) &&
            this.documentType === other.documentType &&
            this.documentState === other.documentState &&
            this.data.isEqual(other.data));
    }
    /** Returns a copy of this document; `data` is cloned so the copy can be mutated independently. */
    mutableCopy() {
        return new MutableDocument(this.key, this.documentType, this.version, this.readTime, this.createTime, this.data.clone(), this.documentState);
    }
toString() {
return (`Document(${this.key}, ${this.version}, ${JSON.stringify(this.data.value)}, ` +
`{createTime: ${this.createTime}}), ` +
`{documentType: ${this.documentType}}), ` +
`{documentState: ${this.documentState}})`);
}
}
/**
* Compares the value for field `field` in the provided documents. Throws if
* the field does not exist in both documents.
*/
function compareDocumentsByField(field, d1, d2) {
    // Both documents must contain `field`; a missing value is a hard failure.
    const v1 = d1.data.field(field);
    const v2 = d2.data.field(field);
    if (v1 === null || v2 === null) {
        return fail();
    }
    return valueCompare(v1, v2);
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Represents a bound of a query.
*
* The bound is specified with the given components representing a position and
* whether it's just before or just after the position (relative to whatever the
* query order is).
*
* The position represents a logical index position for a query. It's a prefix
* of values for the (potentially implicit) order by clauses of a query.
*
* Bound provides a function to determine whether a document comes before or
* after a bound. This is influenced by whether the position is just before or
* just after the provided values.
*/
class Bound {
    /**
     * @param position - Cursor values, one per (implicit or explicit) order-by.
     * @param inclusive - Whether the position itself is part of the result set.
     */
    constructor(position, inclusive) {
        Object.assign(this, { position, inclusive });
    }
}
function boundCompareToDocument(bound, orderBy, doc) {
    // Compares the bound's position to `doc` component-by-component, following
    // the query's order-by. Returns <0 / 0 / >0 like a regular comparator.
    let comparison = 0;
    for (let i = 0; i < bound.position.length; i++) {
        const orderByComponent = orderBy[i];
        const component = bound.position[i];
        if (orderByComponent.field.isKeyField()) {
            // Key components are stored as reference values; compare as keys.
            comparison = DocumentKey.comparator(DocumentKey.fromName(component.referenceValue), doc.key);
        }
        else {
            const docValue = doc.data.field(orderByComponent.field);
            comparison = valueCompare(component, docValue);
        }
        // A descending order-by inverts the comparison for this component.
        if (orderByComponent.dir === "desc" /* Direction.DESCENDING */) {
            comparison = comparison * -1;
        }
        // First non-equal component decides the ordering.
        if (comparison !== 0) {
            break;
        }
    }
    return comparison;
}
/**
* Returns true if a document sorts after a bound using the provided sort
* order.
*/
function boundSortsAfterDocument(bound, orderBy, doc) {
    // Inclusive bounds also accept ties (comparison === 0).
    const cmp = boundCompareToDocument(bound, orderBy, doc);
    if (bound.inclusive) {
        return cmp >= 0;
    }
    return cmp > 0;
}
/**
* Returns true if a document sorts before a bound using the provided sort
* order.
*/
function boundSortsBeforeDocument(bound, orderBy, doc) {
    // Inclusive bounds also accept ties (comparison === 0).
    const cmp = boundCompareToDocument(bound, orderBy, doc);
    if (bound.inclusive) {
        return cmp <= 0;
    }
    return cmp < 0;
}
function boundEquals(left, right) {
    // Two null bounds are equal; a null and a non-null bound are not.
    if (left === null || right === null) {
        return left === right;
    }
    if (left.inclusive !== right.inclusive) {
        return false;
    }
    if (left.position.length !== right.position.length) {
        return false;
    }
    // Positions must match component-by-component.
    return left.position.every((component, i) => valueEquals(component, right.position[i]));
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An ordering on a field, in some Direction. Direction defaults to ASCENDING.
*/
class OrderBy {
    /** An ordering on `field`; `dir` defaults to ascending. */
    constructor(field, dir = "asc" /* Direction.ASCENDING */) {
        Object.assign(this, { field, dir });
    }
}
function canonifyOrderBy(orderBy) {
    // TODO(b/29183165): Make this collision robust.
    return `${orderBy.field.canonicalString()}${orderBy.dir}`;
}
function stringifyOrderBy(orderBy) {
    // Debug-friendly rendering, e.g. "a.b (asc)".
    return orderBy.field.canonicalString() + ' (' + orderBy.dir + ')';
}
function orderByEquals(left, right) {
    // Directions are compared first (cheap), then the field paths.
    if (left.dir !== right.dir) {
        return false;
    }
    return left.field.isEqual(right.field);
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Abstract base class for query filters; see FieldFilter and CompositeFilter. */
class Filter {
}
class FieldFilter extends Filter {
constructor(field, op, value) {
super();
this.field = field;
this.op = op;
this.value = value;
}
/**
* Creates a filter based on the provided arguments.
*/
static create(field, op, value) {
if (field.isKeyField()) {
if (op === "in" /* Operator.IN */ || op === "not-in" /* Operator.NOT_IN */) {
return this.createKeyFieldInFilter(field, op, value);
}
else {
return new KeyFieldFilter(field, op, value);
}
}
else if (op === "array-contains" /* Operator.ARRAY_CONTAINS */) {
return new ArrayContainsFilter(field, value);
}
else if (op === "in" /* Operator.IN */) {
return new InFilter(field, value);
}
else if (op === "not-in" /* Operator.NOT_IN */) {
return new NotInFilter(field, value);
}
else if (op === "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */) {
return new ArrayContainsAnyFilter(field, value);
}
else {
return new FieldFilter(field, op, value);
}
}
static createKeyFieldInFilter(field, op, value) {
return op === "in" /* Operator.IN */
? new KeyFieldInFilter(field, value)
: new KeyFieldNotInFilter(field, value);
}
matches(doc) {
const other = doc.data.field(this.field);
// Types do not have to match in NOT_EQUAL filters.
if (this.op === "!=" /* Operator.NOT_EQUAL */) {
return (other !== null &&
this.matchesComparison(valueCompare(other, this.value)));
}
// Only compare types with matching backend order (such as double and int).
return (other !== null &&
typeOrder(this.value) === typeOrder(other) &&
this.matchesComparison(valueCompare(other, this.value)));
}
matchesComparison(comparison) {
switch (this.op) {
case "<" /* Operator.LESS_THAN */:
return comparison < 0;
case "<=" /* Operator.LESS_THAN_OR_EQUAL */:
return comparison <= 0;
case "==" /* Operator.EQUAL */:
return comparison === 0;
case "!=" /* Operator.NOT_EQUAL */:
return comparison !== 0;
case ">" /* Operator.GREATER_THAN */:
return comparison > 0;
case ">=" /* Operator.GREATER_THAN_OR_EQUAL */:
return comparison >= 0;
default:
return fail();
}
}
isInequality() {
return ([
"<" /* Operator.LESS_THAN */,
"<=" /* Operator.LESS_THAN_OR_EQUAL */,
">" /* Operator.GREATER_THAN */,
">=" /* Operator.GREATER_THAN_OR_EQUAL */,
"!=" /* Operator.NOT_EQUAL */,
"not-in" /* Operator.NOT_IN */
].indexOf(this.op) >= 0);
}
getFlattenedFilters() {
return [this];
}
getFilters() {
return [this];
}
}
/** A filter combining several sub-filters with AND or OR. */
class CompositeFilter extends Filter {
    constructor(filters, op) {
        super();
        this.filters = filters;
        this.op = op;
        // Lazily computed by getFlattenedFilters().
        this.memoizedFlattenedFilters = null;
    }
    /**
     * Creates a filter based on the provided arguments.
     */
    static create(filters, op) {
        return new CompositeFilter(filters, op);
    }
    matches(doc) {
        // AND: every sub-filter must match. OR: at least one must match.
        if (compositeFilterIsConjunction(this)) {
            return this.filters.every(filter => filter.matches(doc));
        }
        return this.filters.some(filter => filter.matches(doc));
    }
    getFlattenedFilters() {
        // Flatten nested composites into a single list of field filters,
        // memoizing the result for repeated calls.
        if (this.memoizedFlattenedFilters === null) {
            let flattened = [];
            for (const subfilter of this.filters) {
                flattened = flattened.concat(subfilter.getFlattenedFilters());
            }
            this.memoizedFlattenedFilters = flattened;
        }
        return this.memoizedFlattenedFilters;
    }
    // Returns a mutable copy of `this.filters`
    getFilters() {
        return this.filters.slice();
    }
}
function compositeFilterIsConjunction(compositeFilter) {
    // "and" composites require every sub-filter to match.
    const AND = "and" /* CompositeOperator.AND */;
    return compositeFilter.op === AND;
}
function compositeFilterIsDisjunction(compositeFilter) {
    // "or" composites require at least one sub-filter to match.
    const OR = "or" /* CompositeOperator.OR */;
    return compositeFilter.op === OR;
}
/**
* Returns true if this filter is a conjunction of field filters only. Returns false otherwise.
*/
function compositeFilterIsFlatConjunction(compositeFilter) {
    // A flat conjunction is an AND whose sub-filters contain no composites.
    if (!compositeFilterIsFlat(compositeFilter)) {
        return false;
    }
    return compositeFilterIsConjunction(compositeFilter);
}
/**
* Returns true if this filter does not contain any composite filters. Returns false otherwise.
*/
function compositeFilterIsFlat(compositeFilter) {
    // Flat means no sub-filter is itself a composite.
    return compositeFilter.filters.every(filter => !(filter instanceof CompositeFilter));
}
function canonifyFilter(filter) {
if (filter instanceof FieldFilter) {
// TODO(b/29183165): Technically, this won't be unique if two values have
// the same description, such as the int 3 and the string "3". So we should
// add the types in here somehow, too.
return (filter.field.canonicalString() +
filter.op.toString() +
canonicalId(filter.value));
}
else if (compositeFilterIsFlatConjunction(filter)) {
// Older SDK versions use an implicit AND operation between their filters.
// In the new SDK versions, the developer may use an explicit AND filter.
// To stay consistent with the old usages, we add a special case to ensure
// the canonical ID for these two are the same. For example:
// `col.whereEquals("a", 1).whereEquals("b", 2)` should have the same
// canonical ID as `col.where(and(equals("a",1), equals("b",2)))`.
return filter.filters.map(filter => canonifyFilter(filter)).join(',');
}
else {
// filter instanceof CompositeFilter
const canonicalIdsString = filter.filters
.map(filter => canonifyFilter(filter))
.join(',');
return `${filter.op}(${canonicalIdsString})`;
}
}
function filterEquals(f1, f2) {
    // Dispatch on the concrete filter type of `f1`.
    if (f1 instanceof FieldFilter) {
        return fieldFilterEquals(f1, f2);
    }
    if (f1 instanceof CompositeFilter) {
        return compositeFilterEquals(f1, f2);
    }
    // Unknown Filter subclass: hard failure.
    fail();
}
function fieldFilterEquals(f1, f2) {
    // Field filters match when operator, field path and value all agree.
    if (!(f2 instanceof FieldFilter)) {
        return false;
    }
    return (f1.op === f2.op &&
        f1.field.isEqual(f2.field) &&
        valueEquals(f1.value, f2.value));
}
function compositeFilterEquals(f1, f2) {
    // Composite filters are equal when the operators match and the
    // sub-filters are pairwise equal (order matters).
    if (!(f2 instanceof CompositeFilter) ||
        f1.op !== f2.op ||
        f1.filters.length !== f2.filters.length) {
        return false;
    }
    return f1.filters.every((subFilter, index) => filterEquals(subFilter, f2.filters[index]));
}
/**
* Returns a new composite filter that contains all filter from
* `compositeFilter` plus all the given filters in `otherFilters`.
*/
function compositeFilterWithAddedFilters(compositeFilter, otherFilters) {
    // Keep the original operator; append the extra filters at the end.
    const mergedFilters = [...compositeFilter.filters, ...otherFilters];
    return CompositeFilter.create(mergedFilters, compositeFilter.op);
}
/** Returns a debug description for `filter`. */
function stringifyFilter(filter) {
    // Dispatch to the concrete debug renderer; fall back to a generic label.
    if (filter instanceof FieldFilter) {
        return stringifyFieldFilter(filter);
    }
    if (filter instanceof CompositeFilter) {
        return stringifyCompositeFilter(filter);
    }
    return 'Filter';
}
function stringifyCompositeFilter(filter) {
    // Renders e.g. `and {a == 1 ,b == 2}`.
    const parts = filter.getFilters().map(stringifyFilter);
    return `${filter.op.toString()} {${parts.join(' ,')}}`;
}
function stringifyFieldFilter(filter) {
    // Renders e.g. `a.b == 1`.
    return (filter.field.canonicalString() +
        ' ' +
        filter.op +
        ' ' +
        canonicalId(filter.value));
}
/** Filter that matches on key fields (i.e. '__name__'). */
class KeyFieldFilter extends FieldFilter {
    constructor(field, op, value) {
        super(field, op, value);
        // Pre-parse the reference value into a DocumentKey for fast matching.
        this.key = DocumentKey.fromName(value.referenceValue);
    }
    matches(doc) {
        // Compare document keys directly instead of going through field values.
        return this.matchesComparison(DocumentKey.comparator(doc.key, this.key));
    }
}
/** Filter that matches on key fields within an array. */
class KeyFieldInFilter extends FieldFilter {
    constructor(field, value) {
        super(field, "in" /* Operator.IN */, value);
        // Pre-parse the array of reference values into DocumentKeys.
        this.keys = extractDocumentKeysFromArrayValue("in" /* Operator.IN */, value);
    }
    matches(doc) {
        // Matches when the document's key equals any key in the filter value.
        for (const key of this.keys) {
            if (key.isEqual(doc.key)) {
                return true;
            }
        }
        return false;
    }
}
/** Filter that matches on key fields not present within an array. */
class KeyFieldNotInFilter extends FieldFilter {
    constructor(field, value) {
        super(field, "not-in" /* Operator.NOT_IN */, value);
        // Pre-parse the array of reference values into DocumentKeys.
        this.keys = extractDocumentKeysFromArrayValue("not-in" /* Operator.NOT_IN */, value);
    }
    matches(doc) {
        // Matches when the document's key equals none of the filter keys.
        return this.keys.every(key => !key.isEqual(doc.key));
    }
}
function extractDocumentKeysFromArrayValue(op, value) {
    // `op` is unused here but kept for signature compatibility with callers.
    const arrayValue = value.arrayValue;
    const values = (arrayValue && arrayValue.values) || [];
    return values.map(v => DocumentKey.fromName(v.referenceValue));
}
/** A Filter that implements the array-contains operator. */
class ArrayContainsFilter extends FieldFilter {
    constructor(field, value) {
        super(field, "array-contains" /* Operator.ARRAY_CONTAINS */, value);
    }
    matches(doc) {
        // The document field must be an array value containing the filter value.
        const fieldValue = doc.data.field(this.field);
        if (!isArray(fieldValue)) {
            return false;
        }
        return arrayValueContains(fieldValue.arrayValue, this.value);
    }
}
/** A Filter that implements the IN operator. */
class InFilter extends FieldFilter {
    constructor(field, value) {
        super(field, "in" /* Operator.IN */, value);
    }
    matches(doc) {
        // The filter value is an array; match when it contains the field value.
        const fieldValue = doc.data.field(this.field);
        if (fieldValue === null) {
            return false;
        }
        return arrayValueContains(this.value.arrayValue, fieldValue);
    }
}
/** A Filter that implements the not-in operator. */
class NotInFilter extends FieldFilter {
    constructor(field, value) {
        super(field, "not-in" /* Operator.NOT_IN */, value);
    }
    matches(doc) {
        // not-in never matches when the filter array itself contains NULL.
        if (arrayValueContains(this.value.arrayValue, { nullValue: 'NULL_VALUE' })) {
            return false;
        }
        // A missing field never matches.
        const fieldValue = doc.data.field(this.field);
        if (fieldValue === null) {
            return false;
        }
        return !arrayValueContains(this.value.arrayValue, fieldValue);
    }
}
/** A Filter that implements the array-contains-any operator. */
class ArrayContainsAnyFilter extends FieldFilter {
    constructor(field, value) {
        super(field, "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */, value);
    }
    matches(doc) {
        // Match when any element of the document's array field appears in the
        // filter's array value.
        const fieldValue = doc.data.field(this.field);
        if (!isArray(fieldValue) || !fieldValue.arrayValue.values) {
            return false;
        }
        return fieldValue.arrayValue.values.some(val => arrayValueContains(this.value.arrayValue, val));
    }
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Visible for testing
class TargetImpl {
    /**
     * A concrete query target. Every constraint parameter is optional and
     * defaults to "no constraint".
     */
    constructor(path, collectionGroup = null, orderBy = [], filters = [], limit = null, startAt = null, endAt = null) {
        Object.assign(this, {
            path,
            collectionGroup,
            orderBy,
            filters,
            limit,
            startAt,
            endAt
        });
        // Cache for canonifyTarget(); computed on first use.
        this.memoizedCanonicalId = null;
    }
}
/**
* Initializes a Target with a path and optional additional query constraints.
* Path must currently be empty if this is a collection group query.
*
* NOTE: you should always construct `Target` from `Query.toTarget` instead of
* using this factory method, because `Query` provides an implicit `orderBy`
* property.
*/
function newTarget(path, collectionGroup = null, orderBy = [], filters = [], limit = null, startAt = null, endAt = null) {
    // Thin factory over TargetImpl; see the caveat above about preferring
    // Query.toTarget, which supplies the implicit order-by.
    return new TargetImpl(path, collectionGroup, orderBy, filters, limit, startAt, endAt);
}
/**
 * Returns a canonical string id for `target`, memoized on the TargetImpl.
 * Two targets with the same canonical id are treated as the same target.
 */
function canonifyTarget(target) {
    const targetImpl = debugCast(target);
    if (targetImpl.memoizedCanonicalId === null) {
        let str = targetImpl.path.canonicalString();
        if (targetImpl.collectionGroup !== null) {
            str += '|cg:' + targetImpl.collectionGroup;
        }
        str += '|f:';
        str += targetImpl.filters.map(f => canonifyFilter(f)).join(',');
        str += '|ob:';
        str += targetImpl.orderBy.map(o => canonifyOrderBy(o)).join(',');
        if (!isNullOrUndefined(targetImpl.limit)) {
            str += '|l:';
            str += targetImpl.limit;
        }
        // Bounds encode inclusiveness as 'b:' (before) / 'a:' (after).
        if (targetImpl.startAt) {
            str += '|lb:';
            str += targetImpl.startAt.inclusive ? 'b:' : 'a:';
            str += targetImpl.startAt.position.map(p => canonicalId(p)).join(',');
        }
        if (targetImpl.endAt) {
            str += '|ub:';
            str += targetImpl.endAt.inclusive ? 'a:' : 'b:';
            str += targetImpl.endAt.position.map(p => canonicalId(p)).join(',');
        }
        targetImpl.memoizedCanonicalId = str;
    }
    return targetImpl.memoizedCanonicalId;
}
function stringifyTarget(target) {
let str = target.path.canonicalString();
if (target.collectionGroup !== null) {
str += ' collectionGroup=' + target.collectionGroup;
}
if (target.filters.length > 0) {
str += `, filters: [${target.filters
.map(f => stringifyFilter(f))
.join(', ')}]`;
}
if (!isNullOrUndefined(target.limit)) {
str += ', limit: ' + target.limit;
}
if (target.orderBy.length > 0) {
str += `, orderBy: [${target.orderBy
.map(o => stringifyOrderBy(o))
.join(', ')}]`;
}
if (target.startAt) {
str += ', startAt: ';
str += target.startAt.inclusive ? 'b:' : 'a:';
str += target.startAt.position.map(p => canonicalId(p)).join(',');
}
if (target.endAt) {
str += ', endAt: ';
str += target.endAt.inclusive ? 'a:' : 'b:';
str += target.endAt.position.map(p => canonicalId(p)).join(',');
}
return `Target(${str})`;
}
function targetEquals(left, right) {
    // Cheap scalar and length comparisons first, then element-wise checks.
    if (left.limit !== right.limit ||
        left.orderBy.length !== right.orderBy.length ||
        left.filters.length !== right.filters.length ||
        left.collectionGroup !== right.collectionGroup) {
        return false;
    }
    for (let i = 0; i < left.orderBy.length; i++) {
        if (!orderByEquals(left.orderBy[i], right.orderBy[i])) {
            return false;
        }
    }
    for (let i = 0; i < left.filters.length; i++) {
        if (!filterEquals(left.filters[i], right.filters[i])) {
            return false;
        }
    }
    if (!left.path.isEqual(right.path)) {
        return false;
    }
    if (!boundEquals(left.startAt, right.startAt)) {
        return false;
    }
    return boundEquals(left.endAt, right.endAt);
}
function targetIsDocumentTarget(target) {
    // A document target addresses exactly one document: a document-key path,
    // no collection group, and no filters.
    if (target.collectionGroup !== null || target.filters.length !== 0) {
        return false;
    }
    return DocumentKey.isDocumentKey(target.path);
}
/** Returns the field filters that target the given field path. */
function targetGetFieldFiltersForPath(target, path) {
    // Collect only the top-level FieldFilters whose field matches `path`.
    const result = [];
    for (const filter of target.filters) {
        if (filter instanceof FieldFilter && filter.field.isEqual(path)) {
            result.push(filter);
        }
    }
    return result;
}
/**
* Returns the values that are used in ARRAY_CONTAINS or ARRAY_CONTAINS_ANY
* filters. Returns `null` if there are no such filters.
*/
function targetGetArrayValues(target, fieldIndex) {
    // Only indexes with an array segment can serve array filters.
    const segment = fieldIndexGetArraySegment(fieldIndex);
    if (segment === undefined) {
        return null;
    }
    for (const fieldFilter of targetGetFieldFiltersForPath(target, segment.fieldPath)) {
        switch (fieldFilter.op) {
            case "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */:
                // The filter value is an array; each element is a candidate.
                return fieldFilter.value.arrayValue.values || [];
            case "array-contains" /* Operator.ARRAY_CONTAINS */:
                // A single candidate value.
                return [fieldFilter.value];
            // Remaining filters are not array filters.
        }
    }
    return null;
}
/**
* Returns the list of values that are used in != or NOT_IN filters. Returns
* `null` if there are no such filters.
*/
function targetGetNotInValues(target, fieldIndex) {
    // Maps field-path canonical string -> filter value, so a later filter on
    // the same path overwrites an earlier one.
    const values = new Map();
    for (const segment of fieldIndexGetDirectionalSegments(fieldIndex)) {
        for (const fieldFilter of targetGetFieldFiltersForPath(target, segment.fieldPath)) {
            switch (fieldFilter.op) {
                case "==" /* Operator.EQUAL */:
                case "in" /* Operator.IN */:
                    // Encode equality prefix, which is encoded in the index value before
                    // the inequality (e.g. `a == 'a' && b != 'b'` is encoded to
                    // `value != 'ab'`).
                    values.set(segment.fieldPath.canonicalString(), fieldFilter.value);
                    break;
                case "not-in" /* Operator.NOT_IN */:
                case "!=" /* Operator.NOT_EQUAL */:
                    // NotIn/NotEqual is always a suffix. There cannot be any remaining
                    // segments and hence we can return early here.
                    values.set(segment.fieldPath.canonicalString(), fieldFilter.value);
                    return Array.from(values.values());
                // Remaining filters cannot be used as notIn bounds.
            }
        }
    }
    // No not-in / != filter found: this index cannot serve a notIn bound.
    return null;
}
/**
* Returns a lower bound of field values that can be used as a starting point to
* scan the index defined by `fieldIndex`. Returns `MIN_VALUE` if no lower bound
* exists.
*/
function targetGetLowerBound(target, fieldIndex) {
    const values = [];
    let inclusive = true;
    // Collect one lower-bound component per indexed segment, taken from a
    // suitable filter or the query's startAt cursor. Ascending segments use
    // the ascending bound; descending segments use the descending bound.
    for (const segment of fieldIndexGetDirectionalSegments(fieldIndex)) {
        const isAscending = segment.kind === 0 /* IndexKind.ASCENDING */;
        const segmentBound = isAscending
            ? targetGetAscendingBound(target, segment.fieldPath, target.startAt)
            : targetGetDescendingBound(target, segment.fieldPath, target.startAt);
        values.push(segmentBound.value);
        // The overall bound is inclusive only if every component is inclusive.
        inclusive = inclusive && segmentBound.inclusive;
    }
    return new Bound(values, inclusive);
}
/**
* Returns an upper bound of field values that can be used as an ending point
* when scanning the index defined by `fieldIndex`. Returns `MAX_VALUE` if no
* upper bound exists.
*/
function targetGetUpperBound(target, fieldIndex) {
    const values = [];
    let inclusive = true;
    // Collect one upper-bound component per indexed segment, taken from a
    // suitable filter or the query's endAt cursor. Note the bound direction is
    // flipped relative to targetGetLowerBound.
    for (const segment of fieldIndexGetDirectionalSegments(fieldIndex)) {
        const isAscending = segment.kind === 0 /* IndexKind.ASCENDING */;
        const segmentBound = isAscending
            ? targetGetDescendingBound(target, segment.fieldPath, target.endAt)
            : targetGetAscendingBound(target, segment.fieldPath, target.endAt);
        values.push(segmentBound.value);
        // The overall bound is inclusive only if every component is inclusive.
        inclusive = inclusive && segmentBound.inclusive;
    }
    return new Bound(values, inclusive);
}
/**
* Returns the value to use as the lower bound for ascending index segment at
* the provided `fieldPath` (or the upper bound for an descending segment).
*/
function targetGetAscendingBound(target, fieldPath, bound) {
    // Start with the loosest possible bound and tighten it with each filter.
    let value = MIN_VALUE;
    let inclusive = true;
    // Process all filters to find a value for the current field segment
    for (const fieldFilter of targetGetFieldFiltersForPath(target, fieldPath)) {
        let filterValue = MIN_VALUE;
        let filterInclusive = true;
        switch (fieldFilter.op) {
            case "<" /* Operator.LESS_THAN */:
            case "<=" /* Operator.LESS_THAN_OR_EQUAL */:
                // Upper-bound filters only pin the type's lower bound here.
                filterValue = valuesGetLowerBound(fieldFilter.value);
                break;
            case "==" /* Operator.EQUAL */:
            case "in" /* Operator.IN */:
            case ">=" /* Operator.GREATER_THAN_OR_EQUAL */:
                filterValue = fieldFilter.value;
                break;
            case ">" /* Operator.GREATER_THAN */:
                // Strictly greater: the filter value itself is excluded.
                filterValue = fieldFilter.value;
                filterInclusive = false;
                break;
            case "!=" /* Operator.NOT_EQUAL */:
            case "not-in" /* Operator.NOT_IN */:
                filterValue = MIN_VALUE;
                break;
            // Remaining filters cannot be used as lower bounds.
        }
        // Keep the tighter (larger) of the current and the filter-derived bound.
        if (lowerBoundCompare({ value, inclusive }, { value: filterValue, inclusive: filterInclusive }) < 0) {
            value = filterValue;
            inclusive = filterInclusive;
        }
    }
    // If there is an additional bound, compare the values against the existing
    // range to see if we can narrow the scope.
    if (bound !== null) {
        for (let i = 0; i < target.orderBy.length; ++i) {
            const orderBy = target.orderBy[i];
            if (orderBy.field.isEqual(fieldPath)) {
                // Cursor positions are aligned with the order-by components.
                const cursorValue = bound.position[i];
                if (lowerBoundCompare({ value, inclusive }, { value: cursorValue, inclusive: bound.inclusive }) < 0) {
                    value = cursorValue;
                    inclusive = bound.inclusive;
                }
                break;
            }
        }
    }
    return { value, inclusive };
}
/**
* Returns the value to use as the upper bound for ascending index segment at
* the provided `fieldPath` (or the lower bound for a descending segment).
*/
function targetGetDescendingBound(target, fieldPath, bound) {
    // Start with the loosest possible bound and tighten it with each filter.
    let value = MAX_VALUE;
    let inclusive = true;
    // Process all filters to find a value for the current field segment
    for (const fieldFilter of targetGetFieldFiltersForPath(target, fieldPath)) {
        let filterValue = MAX_VALUE;
        let filterInclusive = true;
        switch (fieldFilter.op) {
            case ">=" /* Operator.GREATER_THAN_OR_EQUAL */:
            case ">" /* Operator.GREATER_THAN */:
                // Lower-bound filters only pin the type's upper bound here
                // (exclusive, since the next type starts right after it).
                filterValue = valuesGetUpperBound(fieldFilter.value);
                filterInclusive = false;
                break;
            case "==" /* Operator.EQUAL */:
            case "in" /* Operator.IN */:
            case "<=" /* Operator.LESS_THAN_OR_EQUAL */:
                filterValue = fieldFilter.value;
                break;
            case "<" /* Operator.LESS_THAN */:
                // Strictly less: the filter value itself is excluded.
                filterValue = fieldFilter.value;
                filterInclusive = false;
                break;
            case "!=" /* Operator.NOT_EQUAL */:
            case "not-in" /* Operator.NOT_IN */:
                filterValue = MAX_VALUE;
                break;
            // Remaining filters cannot be used as upper bounds.
        }
        // Keep the tighter (smaller) of the current and the filter-derived bound.
        if (upperBoundCompare({ value, inclusive }, { value: filterValue, inclusive: filterInclusive }) > 0) {
            value = filterValue;
            inclusive = filterInclusive;
        }
    }
    // If there is an additional bound, compare the values against the existing
    // range to see if we can narrow the scope.
    if (bound !== null) {
        for (let i = 0; i < target.orderBy.length; ++i) {
            const orderBy = target.orderBy[i];
            if (orderBy.field.isEqual(fieldPath)) {
                // Cursor positions are aligned with the order-by components.
                const cursorValue = bound.position[i];
                if (upperBoundCompare({ value, inclusive }, { value: cursorValue, inclusive: bound.inclusive }) > 0) {
                    value = cursorValue;
                    inclusive = bound.inclusive;
                }
                break;
            }
        }
    }
    return { value, inclusive };
}
/** Returns the number of segments of a perfect index for this target. */
function targetGetSegmentCount(target) {
    // Distinct non-key fields referenced by filters and order-bys, plus at
    // most one array segment.
    let fields = new SortedSet(FieldPath$1.comparator);
    let hasArraySegment = false;
    for (const filter of target.filters) {
        for (const subFilter of filter.getFlattenedFilters()) {
            // __name__ is not an explicit segment of any index, so we don't need to
            // count it.
            if (subFilter.field.isKeyField()) {
                continue;
            }
            // ARRAY_CONTAINS or ARRAY_CONTAINS_ANY filters must be counted separately.
            // For instance, it is possible to have an index for "a ARRAY a ASC". Even
            // though these are on the same field, they should be counted as two
            // separate segments in an index.
            if (subFilter.op === "array-contains" /* Operator.ARRAY_CONTAINS */ ||
                subFilter.op === "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */) {
                hasArraySegment = true;
            }
            else {
                fields = fields.add(subFilter.field);
            }
        }
    }
    for (const orderBy of target.orderBy) {
        // __name__ is not an explicit segment of any index, so we don't need to
        // count it.
        if (!orderBy.field.isKeyField()) {
            fields = fields.add(orderBy.field);
        }
    }
    return fields.size + (hasArraySegment ? 1 : 0);
}
function targetHasLimit(target) {
    // A null limit means "no limit".
    const { limit } = target;
    return limit !== null;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Query encapsulates all the query attributes we support in the SDK. It can
* be run against the LocalStore, as well as be converted to a `Target` to
* query the RemoteStore results.
*
* Visible for testing.
*/
class QueryImpl {
    /**
     * Initializes a Query with a path and optional additional query constraints.
     * Path must currently be empty if this is a collection group query.
     */
    constructor(path, collectionGroup = null, explicitOrderBy = [], filters = [], limit = null, limitType = "F" /* LimitType.First */, startAt = null, endAt = null) {
        this.path = path;
        this.collectionGroup = collectionGroup;
        this.explicitOrderBy = explicitOrderBy;
        this.filters = filters;
        this.limit = limit;
        this.limitType = limitType;
        this.startAt = startAt;
        this.endAt = endAt;
        // Lazily computed by queryNormalizedOrderBy().
        this.memoizedNormalizedOrderBy = null;
        // The corresponding `Target` of this `Query` instance, for use with
        // non-aggregate queries.
        this.memoizedTarget = null;
        // The corresponding `Target` of this `Query` instance, for use with
        // aggregate queries. Unlike targets for non-aggregate queries,
        // aggregate query targets do not contain normalized order-bys, they only
        // contain explicit order-bys.
        this.memoizedAggregateTarget = null;
        // NOTE(review): the empty statements below are artifacts of the build
        // (presumably stripped debug assertions) and have no runtime effect.
        if (this.startAt) ;
        if (this.endAt) ;
    }
}
/** Creates a new Query instance with the options provided. */
function newQuery(path, collectionGroup, explicitOrderBy, filters, limit, limitType, startAt, endAt) {
    // Thin factory over QueryImpl; all arguments are forwarded unchanged.
    return new QueryImpl(path, collectionGroup, explicitOrderBy, filters, limit, limitType, startAt, endAt);
}
/** Creates a new Query for a query that matches all documents at `path` */
function newQueryForPath(path) {
    // All other constraints take QueryImpl's defaults (no filters, no limit).
    return new QueryImpl(path);
}
/**
 * Helper to convert a collection group query into a collection query at a
 * specific path. This is used when executing collection group queries, since
 * we have to split the query into a set of collection queries at multiple
 * paths.
 */
function asCollectionQueryAtPath(query, path) {
    // Drop the collection group and re-root every other constraint at `path`.
    return new QueryImpl(path, /*collectionGroup=*/ null, [...query.explicitOrderBy], [...query.filters], query.limit, query.limitType, query.startAt, query.endAt);
}
/**
 * Returns true if this query does not specify any query constraints that
 * could remove results.
 */
function queryMatchesAllDocuments(query) {
    if (query.filters.length > 0 || query.limit !== null) {
        return false;
    }
    if (query.startAt != null || query.endAt != null) {
        return false;
    }
    const orderBys = query.explicitOrderBy;
    if (orderBys.length === 0) {
        return true;
    }
    // A single order-by on the key field cannot exclude any document either.
    return orderBys.length === 1 && orderBys[0].field.isKeyField();
}
// Returns the sorted set of inequality filter fields used in this query.
function getInequalityFilterFields(query) {
    let fields = new SortedSet(FieldPath$1.comparator);
    query.filters.forEach(filter => {
        // Composite filters are flattened so nested inequalities are found.
        filter.getFlattenedFilters().forEach(subFilter => {
            if (subFilter.isInequality()) {
                fields = fields.add(subFilter.field);
            }
        });
    });
    return fields;
}
/**
 * Creates a new Query for a collection group query that matches all documents
 * within the provided collection group.
 */
function newQueryForCollectionGroup(collectionId) {
    const rootPath = ResourcePath.emptyPath();
    return new QueryImpl(rootPath, collectionId);
}
/**
 * Returns whether the query matches a single document by path (rather than a
 * collection).
 */
function isDocumentQuery$1(query) {
    if (!DocumentKey.isDocumentKey(query.path)) {
        return false;
    }
    // Document lookups carry no collection group and no filters.
    return query.collectionGroup === null && query.filters.length === 0;
}
/**
 * Returns whether the query matches a collection group rather than a specific
 * collection.
 */
function isCollectionGroupQuery(query) {
    const { collectionGroup } = query;
    return collectionGroup !== null;
}
/**
 * Returns the normalized order-by constraint that is used to execute the Query,
 * which can be different from the order-by constraints the user provided (e.g.
 * the SDK and backend always orders by `__name__`). The normalized order-by
 * includes implicit order-bys in addition to the explicit user provided
 * order-bys.
 */
function queryNormalizedOrderBy(query) {
    const queryImpl = debugCast(query);
    // The computed list is memoized on the QueryImpl instance and reused on
    // subsequent calls.
    if (queryImpl.memoizedNormalizedOrderBy === null) {
        queryImpl.memoizedNormalizedOrderBy = [];
        // Tracks canonical field strings already present, to avoid duplicates.
        const fieldsNormalized = new Set();
        // Any explicit order by fields should be added as is.
        for (const orderBy of queryImpl.explicitOrderBy) {
            queryImpl.memoizedNormalizedOrderBy.push(orderBy);
            fieldsNormalized.add(orderBy.field.canonicalString());
        }
        // The order of the implicit ordering always matches the last explicit order by.
        const lastDirection = queryImpl.explicitOrderBy.length > 0
            ? queryImpl.explicitOrderBy[queryImpl.explicitOrderBy.length - 1].dir
            : "asc" /* Direction.ASCENDING */;
        // Any inequality fields not explicitly ordered should be implicitly ordered in a lexicographical
        // order. When there are multiple inequality filters on the same field, the field should be added
        // only once.
        // Note: `SortedSet<FieldPath>` sorts the key field before other fields. However, we want the key
        // field to be sorted last.
        const inequalityFields = getInequalityFilterFields(queryImpl);
        inequalityFields.forEach(field => {
            if (!fieldsNormalized.has(field.canonicalString()) &&
                !field.isKeyField()) {
                queryImpl.memoizedNormalizedOrderBy.push(new OrderBy(field, lastDirection));
            }
        });
        // Add the document key field to the last if it is not explicitly ordered.
        if (!fieldsNormalized.has(FieldPath$1.keyField().canonicalString())) {
            queryImpl.memoizedNormalizedOrderBy.push(new OrderBy(FieldPath$1.keyField(), lastDirection));
        }
    }
    return queryImpl.memoizedNormalizedOrderBy;
}
/**
 * Converts this `Query` instance to its corresponding `Target`
 * representation. The result is memoized on the query instance.
 */
function queryToTarget(query) {
    const queryImpl = debugCast(query);
    if (queryImpl.memoizedTarget === null) {
        // Non-aggregate targets are built from the full normalized order-by.
        queryImpl.memoizedTarget = _queryToTarget(queryImpl, queryNormalizedOrderBy(query));
    }
    return queryImpl.memoizedTarget;
}
/**
 * Converts this `Query` instance to its corresponding `Target` representation,
 * for use within an aggregate query. Unlike targets for non-aggregate queries,
 * aggregate query targets do not contain normalized order-bys, they only
 * contain explicit order-bys. The result is memoized on the query instance.
 */
function queryToAggregateTarget(query) {
    const queryImpl = debugCast(query);
    if (queryImpl.memoizedAggregateTarget === null) {
        // Do not include implicit order-bys for aggregate queries.
        queryImpl.memoizedAggregateTarget = _queryToTarget(queryImpl, query.explicitOrderBy);
    }
    return queryImpl.memoizedAggregateTarget;
}
// Builds the wire-level Target for `queryImpl` using the supplied order-bys.
function _queryToTarget(queryImpl, orderBys) {
    if (queryImpl.limitType === "F" /* LimitType.First */) {
        return newTarget(queryImpl.path, queryImpl.collectionGroup, orderBys, queryImpl.filters, queryImpl.limit, queryImpl.startAt, queryImpl.endAt);
    }
    // Limit-to-last: flip the orderBy directions since we want the last
    // results.
    const flippedOrderBys = orderBys.map(orderBy => {
        const dir = orderBy.dir === "desc" /* Direction.DESCENDING */
            ? "asc" /* Direction.ASCENDING */
            : "desc" /* Direction.DESCENDING */;
        return new OrderBy(orderBy.field, dir);
    });
    // We need to swap the cursors to match the now-flipped query ordering.
    const flippedStartAt = queryImpl.endAt
        ? new Bound(queryImpl.endAt.position, queryImpl.endAt.inclusive)
        : null;
    const flippedEndAt = queryImpl.startAt
        ? new Bound(queryImpl.startAt.position, queryImpl.startAt.inclusive)
        : null;
    // Now return as a LimitType.First query.
    return newTarget(queryImpl.path, queryImpl.collectionGroup, flippedOrderBys, queryImpl.filters, queryImpl.limit, flippedStartAt, flippedEndAt);
}
/** Returns a copy of `query` with `filter` appended to its filter list. */
function queryWithAddedFilter(query, filter) {
    return new QueryImpl(query.path, query.collectionGroup, [...query.explicitOrderBy], [...query.filters, filter], query.limit, query.limitType, query.startAt, query.endAt);
}
/** Returns a copy of `query` with `orderBy` appended to its explicit order-bys. */
function queryWithAddedOrderBy(query, orderBy) {
    // TODO(dimond): validate that orderBy does not list the same key twice.
    return new QueryImpl(query.path, query.collectionGroup, [...query.explicitOrderBy, orderBy], [...query.filters], query.limit, query.limitType, query.startAt, query.endAt);
}
/** Returns a copy of `query` with the given limit and limit type applied. */
function queryWithLimit(query, limit, limitType) {
    return new QueryImpl(query.path, query.collectionGroup, [...query.explicitOrderBy], [...query.filters], limit, limitType, query.startAt, query.endAt);
}
/** Returns a copy of `query` with `bound` as its start-at cursor. */
function queryWithStartAt(query, bound) {
    return new QueryImpl(query.path, query.collectionGroup, [...query.explicitOrderBy], [...query.filters], query.limit, query.limitType, bound, query.endAt);
}
/** Returns a copy of `query` with `bound` as its end-at cursor. */
function queryWithEndAt(query, bound) {
    return new QueryImpl(query.path, query.collectionGroup, [...query.explicitOrderBy], [...query.filters], query.limit, query.limitType, query.startAt, bound);
}
/** Returns whether two queries are equivalent (same target and limit type). */
function queryEquals(left, right) {
    // The limit type is not part of the target, so compare it separately.
    const sameTarget = targetEquals(queryToTarget(left), queryToTarget(right));
    return sameTarget && left.limitType === right.limitType;
}
// TODO(b/29183165): This is used to get a unique string from a query to, for
// example, use as a dictionary key, but the implementation is subject to
// collisions. Make it collision-free.
function canonifyQuery(query) {
    const canonicalTarget = canonifyTarget(queryToTarget(query));
    return `${canonicalTarget}|lt:${query.limitType}`;
}
/** Returns a human-readable description of `query` (for logging/debugging). */
function stringifyQuery(query) {
    const target = stringifyTarget(queryToTarget(query));
    return `Query(target=${target}; limitType=${query.limitType})`;
}
/** Returns whether `doc` matches the constraints of `query`. */
function queryMatches(query, doc) {
    // Only found documents can match; missing/invalid documents never do.
    if (!doc.isFoundDocument()) {
        return false;
    }
    return (queryMatchesPathAndCollectionGroup(query, doc) &&
        queryMatchesOrderBy(query, doc) &&
        queryMatchesFilters(query, doc) &&
        queryMatchesBounds(query, doc));
}
/** Returns whether `doc` lives at a path/collection the query targets. */
function queryMatchesPathAndCollectionGroup(query, doc) {
    const documentPath = doc.key.path;
    if (query.collectionGroup !== null) {
        // NOTE: query.path is currently always empty since we don't expose
        // Collection Group queries rooted at a document path yet.
        return (doc.key.hasCollectionId(query.collectionGroup) &&
            query.path.isPrefixOf(documentPath));
    }
    if (DocumentKey.isDocumentKey(query.path)) {
        // Exact match for document queries.
        return query.path.isEqual(documentPath);
    }
    // Shallow ancestor queries by default.
    return query.path.isImmediateParentOf(documentPath);
}
/**
 * A document must have a value for every ordering clause in order to show up
 * in the results.
 */
function queryMatchesOrderBy(query, doc) {
    // We must use `queryNormalizedOrderBy()` to get the list of all orderBys
    // (both implicit and explicit). Note that for OR queries, orderBy applies
    // to all disjunction terms and implicit orderBys must be taken into
    // account. For example, the query "a > 1 || b==1" has an implicit
    // "orderBy a" due to the inequality, and is evaluated as
    // "a > 1 orderBy a || b==1 orderBy a". A document with content of {b:1}
    // matches the filters, but does not match the orderBy because it's
    // missing the field 'a'.
    return queryNormalizedOrderBy(query).every(orderBy =>
        // The key field is always present, so order-by-key always matches.
        orderBy.field.isKeyField() || doc.data.field(orderBy.field) !== null);
}
/** Returns whether `doc` satisfies every filter of `query`. */
function queryMatchesFilters(query, doc) {
    return query.filters.every(filter => filter.matches(doc));
}
/** Makes sure a document is within the bounds, if provided. */
function queryMatchesBounds(query, doc) {
    const orderBys = queryNormalizedOrderBy(query);
    if (query.startAt && !boundSortsBeforeDocument(query.startAt, orderBys, doc)) {
        return false;
    }
    if (query.endAt && !boundSortsAfterDocument(query.endAt, orderBys, doc)) {
        return false;
    }
    return true;
}
/**
 * Returns the collection group that this query targets.
 *
 * PORTING NOTE: This is only used in the Web SDK to facilitate multi-tab
 * synchronization for query results.
 */
function queryCollectionGroup(query) {
    if (query.collectionGroup) {
        return query.collectionGroup;
    }
    const path = query.path;
    // Odd-length paths end in a collection ID; even-length paths end in a
    // document ID, so the collection ID is the second-to-last segment.
    return path.length % 2 === 1 ? path.lastSegment() : path.get(path.length - 2);
}
/**
 * Returns a new comparator function that can be used to compare two documents
 * based on the Query's ordering constraint.
 */
function newQueryComparator(query) {
    return (lhs, rhs) => {
        let comparedOnKeyField = false;
        for (const orderBy of queryNormalizedOrderBy(query)) {
            const comparison = compareDocs(orderBy, lhs, rhs);
            if (comparison !== 0) {
                return comparison;
            }
            // Track whether the key field participated; the normalized
            // order-by always ends with the key field, which breaks all ties.
            comparedOnKeyField = comparedOnKeyField || orderBy.field.isKeyField();
        }
        return 0;
    };
}
/** Compares two documents on a single order-by clause. */
function compareDocs(orderBy, d1, d2) {
    let comparison;
    if (orderBy.field.isKeyField()) {
        comparison = DocumentKey.comparator(d1.key, d2.key);
    }
    else {
        comparison = compareDocumentsByField(orderBy.field, d1, d2);
    }
    if (orderBy.dir === "asc" /* Direction.ASCENDING */) {
        return comparison;
    }
    if (orderBy.dir === "desc" /* Direction.DESCENDING */) {
        return -1 * comparison;
    }
    // Unknown direction: hard failure (should be unreachable).
    return fail();
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * A map implementation that uses objects as keys. Objects must have an
 * associated equals function and must be immutable. Entries in the map are
 * stored together with the key being produced from the mapKeyFn. This map
 * automatically handles collisions of keys.
 */
class ObjectMap {
    constructor(mapKeyFn, equalsFn) {
        this.mapKeyFn = mapKeyFn;
        this.equalsFn = equalsFn;
        /**
         * The inner map for a key/value pair. Due to the possibility of
         * collisions we keep a list of entries that we do a linear search
         * through to find an actual match. Note that collisions should be
         * rare, so we still expect near constant time lookups in practice.
         */
        this.inner = {};
        /** The number of entries stored in the map */
        this.innerSize = 0;
    }
    /** Get a value for this key, or undefined if it does not exist. */
    get(key) {
        const bucket = this.inner[this.mapKeyFn(key)];
        if (bucket === undefined) {
            return undefined;
        }
        const entry = bucket.find(([otherKey]) => this.equalsFn(otherKey, key));
        return entry === undefined ? undefined : entry[1];
    }
    /** Returns whether an entry for this key exists. */
    has(key) {
        return this.get(key) !== undefined;
    }
    /** Put this key and value in the map. */
    set(key, value) {
        const id = this.mapKeyFn(key);
        const bucket = this.inner[id];
        if (bucket === undefined) {
            this.inner[id] = [[key, value]];
            this.innerSize++;
            return;
        }
        const index = bucket.findIndex(([otherKey]) => this.equalsFn(otherKey, key));
        if (index >= 0) {
            // Updating an existing entry does not increase `innerSize`.
            bucket[index] = [key, value];
            return;
        }
        bucket.push([key, value]);
        this.innerSize++;
    }
    /**
     * Remove this key from the map. Returns a boolean if anything was deleted.
     */
    delete(key) {
        const id = this.mapKeyFn(key);
        const bucket = this.inner[id];
        if (bucket === undefined) {
            return false;
        }
        const index = bucket.findIndex(([otherKey]) => this.equalsFn(otherKey, key));
        if (index < 0) {
            return false;
        }
        if (bucket.length === 1) {
            // Last entry under this id: drop the whole bucket.
            delete this.inner[id];
        }
        else {
            bucket.splice(index, 1);
        }
        this.innerSize--;
        return true;
    }
    /** Invokes `fn` once per entry, in bucket order. */
    forEach(fn) {
        forEach(this.inner, (_, entries) => {
            for (const [k, v] of entries) {
                fn(k, v);
            }
        });
    }
    /** Returns whether the map has no entries. */
    isEmpty() {
        return isEmpty(this.inner);
    }
    /** Returns the number of entries in the map. */
    size() {
        return this.innerSize;
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Shared empty SortedMap keyed by DocumentKey (mutable documents). */
const EMPTY_MUTABLE_DOCUMENT_MAP = new SortedMap(DocumentKey.comparator);
/** Returns the canonical empty map of mutable documents. */
function mutableDocumentMap() {
    return EMPTY_MUTABLE_DOCUMENT_MAP;
}
/** Shared empty SortedMap keyed by DocumentKey (immutable documents). */
const EMPTY_DOCUMENT_MAP = new SortedMap(DocumentKey.comparator);
/** Builds an immutable document map containing the given documents. */
function documentMap(...docs) {
    return docs.reduce((map, doc) => map.insert(doc.key, doc), EMPTY_DOCUMENT_MAP);
}
/** Creates an empty document-key-keyed map for overlayed documents. */
function newOverlayedDocumentMap() {
    return newDocumentKeyMap();
}
/**
 * Converts a map of overlayed documents into a plain document map by
 * extracting each entry's `overlayedDocument`.
 */
function convertOverlayedDocumentMapToDocumentMap(collection) {
    let documents = EMPTY_DOCUMENT_MAP;
    collection.forEach((key, overlayed) => {
        documents = documents.insert(key, overlayed.overlayedDocument);
    });
    return documents;
}
/** Creates an empty document-key-keyed map for overlays. */
function newOverlayMap() {
    return newDocumentKeyMap();
}
/** Creates an empty document-key-keyed map for mutations. */
function newMutationMap() {
    return newDocumentKeyMap();
}
/** Creates an ObjectMap keyed by a document key's string form. */
function newDocumentKeyMap() {
    const keyFn = key => key.toString();
    const equalityFn = (l, r) => l.isEqual(r);
    return new ObjectMap(keyFn, equalityFn);
}
/** Shared empty SortedMap from DocumentKey to document version. */
const EMPTY_DOCUMENT_VERSION_MAP = new SortedMap(DocumentKey.comparator);
/** Returns the canonical empty document-version map. */
function documentVersionMap() {
    return EMPTY_DOCUMENT_VERSION_MAP;
}
/** Shared empty SortedSet of DocumentKeys. */
const EMPTY_DOCUMENT_KEY_SET = new SortedSet(DocumentKey.comparator);
/** Builds an immutable set containing the given document keys. */
function documentKeySet(...keys) {
    return keys.reduce((set, key) => set.add(key), EMPTY_DOCUMENT_KEY_SET);
}
/** Shared empty SortedSet of target IDs (primitive-ordered). */
const EMPTY_TARGET_ID_SET = new SortedSet(primitiveComparator);
/** Returns the canonical empty target-ID set. */
function targetIdSet() {
    return EMPTY_TARGET_ID_SET;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Returns an DoubleValue for `value` that is encoded based the serializer's
 * `useProto3Json` setting.
 */
function toDouble(serializer, value) {
    if (serializer.useProto3Json) {
        // Proto3 JSON encodes non-finite doubles as special strings.
        if (isNaN(value)) {
            return { doubleValue: 'NaN' };
        }
        if (value === Infinity) {
            return { doubleValue: 'Infinity' };
        }
        if (value === -Infinity) {
            return { doubleValue: '-Infinity' };
        }
    }
    // Negative zero is encoded as the string '-0' so it survives the wire.
    return { doubleValue: isNegativeZero(value) ? '-0' : value };
}
/**
 * Returns an IntegerValue for `value` (serialized as a decimal string).
 */
function toInteger(value) {
    return { integerValue: `${value}` };
}
/**
 * Returns a value for a number that's appropriate to put into a proto.
 * The return value is an IntegerValue if it can safely represent the value,
 * otherwise a DoubleValue is returned.
 */
function toNumber(serializer, value) {
    if (isSafeInteger(value)) {
        return toInteger(value);
    }
    return toDouble(serializer, value);
}
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Used to represent a field transform on a mutation. */
class TransformOperation {
    constructor() {
        // Make sure that the structural type of `TransformOperation` is
        // unique. See https://github.com/microsoft/TypeScript/issues/5451
        this._ = void 0;
    }
}
/**
 * Computes the local transform result against the provided `previousValue`,
 * optionally using the provided localWriteTime.
 */
function applyTransformOperationToLocalView(transform, previousValue, localWriteTime) {
    if (transform instanceof ServerTimestampTransform) {
        return serverTimestamp$1(localWriteTime, previousValue);
    }
    if (transform instanceof ArrayUnionTransformOperation) {
        return applyArrayUnionTransformOperation(transform, previousValue);
    }
    if (transform instanceof ArrayRemoveTransformOperation) {
        return applyArrayRemoveTransformOperation(transform, previousValue);
    }
    // Numeric increment is the only remaining transform kind.
    return applyNumericIncrementTransformOperationToLocalView(transform, previousValue);
}
/**
 * Computes a final transform result after the transform has been acknowledged
 * by the server, potentially using the server-provided transformResult.
 */
function applyTransformOperationToRemoteDocument(transform, previousValue, transformResult) {
    // The server just sends null as the transform result for array
    // operations, so we have to calculate a result the same as we do for
    // local applications.
    if (transform instanceof ArrayUnionTransformOperation) {
        return applyArrayUnionTransformOperation(transform, previousValue);
    }
    if (transform instanceof ArrayRemoveTransformOperation) {
        return applyArrayRemoveTransformOperation(transform, previousValue);
    }
    return transformResult;
}
/**
 * If this transform operation is not idempotent, returns the base value to
 * persist for this transform. If a base value is returned, the transform
 * operation is always applied to this base value, even if document has
 * already been updated.
 *
 * Base values provide consistent behavior for non-idempotent transforms and
 * allow us to return the same latency-compensated value even if the backend
 * has already applied the transform operation. The base value is null for
 * idempotent transforms, as they can be re-played even if the backend has
 * already applied them.
 *
 * @returns a base value to store along with the mutation, or null for
 * idempotent transforms.
 */
function computeTransformOperationBaseValue(transform, previousValue) {
    // Only numeric increments are non-idempotent.
    if (!(transform instanceof NumericIncrementTransformOperation)) {
        return null;
    }
    // Increment from the current numeric value, or from zero when the field
    // is missing or non-numeric.
    return isNumber(previousValue) ? previousValue : { integerValue: 0 };
}
/** Structural equality between two transform operations. */
function transformOperationEquals(left, right) {
    if (left instanceof ArrayUnionTransformOperation) {
        return (right instanceof ArrayUnionTransformOperation &&
            arrayEquals(left.elements, right.elements, valueEquals));
    }
    if (left instanceof ArrayRemoveTransformOperation) {
        return (right instanceof ArrayRemoveTransformOperation &&
            arrayEquals(left.elements, right.elements, valueEquals));
    }
    if (left instanceof NumericIncrementTransformOperation) {
        return (right instanceof NumericIncrementTransformOperation &&
            valueEquals(left.operand, right.operand));
    }
    // Server timestamps carry no payload, so type equality suffices.
    return (left instanceof ServerTimestampTransform &&
        right instanceof ServerTimestampTransform);
}
/**
 * Transforms a value into a server-generated timestamp. Carries no payload;
 * the local view resolves it via `serverTimestamp$1(localWriteTime, ...)`
 * (see applyTransformOperationToLocalView).
 */
class ServerTimestampTransform extends TransformOperation {
}
/** Transforms an array value via a union operation. */
class ArrayUnionTransformOperation extends TransformOperation {
    constructor(elements) {
        super();
        // The elements to add; deduplicated by valueEquals in
        // applyArrayUnionTransformOperation.
        this.elements = elements;
    }
}
/** Unions `transform.elements` into `previousValue`, skipping duplicates. */
function applyArrayUnionTransformOperation(transform, previousValue) {
    const values = coercedFieldValuesArray(previousValue);
    transform.elements.forEach(toUnion => {
        const alreadyPresent = values.some(element => valueEquals(element, toUnion));
        if (!alreadyPresent) {
            values.push(toUnion);
        }
    });
    return { arrayValue: { values } };
}
/** Transforms an array value via a remove operation. */
class ArrayRemoveTransformOperation extends TransformOperation {
    constructor(elements) {
        super();
        // The elements to strip; matched by valueEquals in
        // applyArrayRemoveTransformOperation.
        this.elements = elements;
    }
}
/** Removes every element of `transform.elements` from `previousValue`. */
function applyArrayRemoveTransformOperation(transform, previousValue) {
    const remaining = coercedFieldValuesArray(previousValue).filter(element => !transform.elements.some(toRemove => valueEquals(element, toRemove)));
    return { arrayValue: { values: remaining } };
}
/**
 * Implements the backend semantics for locally computed NUMERIC_ADD (increment)
 * transforms. Converts all field values to integers or doubles, but unlike the
 * backend does not cap integer values at 2^63. Instead, JavaScript number
 * arithmetic is used and precision loss can occur for values greater than 2^53.
 */
class NumericIncrementTransformOperation extends TransformOperation {
    constructor(serializer, operand) {
        super();
        // Serializer used to re-encode the computed sum as a proto double
        // (see applyNumericIncrementTransformOperationToLocalView).
        this.serializer = serializer;
        // The proto number value to add to the current field value.
        this.operand = operand;
    }
}
/** Computes the latency-compensated result of a numeric increment. */
function applyNumericIncrementTransformOperationToLocalView(transform, previousValue) {
    // PORTING NOTE: Since JavaScript's integer arithmetic is limited to 53
    // bit precision and resolves overflows by reducing precision, we do not
    // manually cap overflows at 2^63.
    const baseValue = computeTransformOperationBaseValue(transform, previousValue);
    const total = asNumber(baseValue) + asNumber(transform.operand);
    // The sum stays an integer only when both inputs were integers.
    const bothIntegers = isInteger(baseValue) && isInteger(transform.operand);
    return bothIntegers ? toInteger(total) : toDouble(transform.serializer, total);
}
// Extracts the numeric payload of an integer/double proto value.
// NOTE(review): uses `||`, so a numeric `integerValue` of 0 would fall
// through to `doubleValue` — presumably these payloads are encoded strings
// here; verify against the serializer.
function asNumber(value) {
    const raw = value.integerValue || value.doubleValue;
    return normalizeNumber(raw);
}
// Returns a mutable copy of the proto value's array elements, or an empty
// array when the value is not an array value (or has no `values`).
function coercedFieldValuesArray(value) {
    if (isArray(value) && value.arrayValue.values) {
        return value.arrayValue.values.slice();
    }
    return [];
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** A field path and the TransformOperation to perform upon it. */
class FieldTransform {
    constructor(field, transform) {
        // The field path the transform targets.
        this.field = field;
        // The TransformOperation applied to that field.
        this.transform = transform;
    }
}
/** Equality on both the field path and the transform operation. */
function fieldTransformEquals(left, right) {
    if (!left.field.isEqual(right.field)) {
        return false;
    }
    return transformOperationEquals(left.transform, right.transform);
}
/** Element-wise equality of two (possibly undefined) transform lists. */
function fieldTransformsAreEqual(left, right) {
    if (left === undefined && right === undefined) {
        return true;
    }
    if (!left || !right) {
        // Exactly one side is missing.
        return false;
    }
    return arrayEquals(left, right, (l, r) => fieldTransformEquals(l, r));
}
/** The result of successfully applying a mutation to the backend. */
class MutationResult {
    constructor(
    /**
     * The version at which the mutation was committed:
     *
     * - For most operations, this is the updateTime in the WriteResult.
     * - For deletes, the commitTime of the WriteResponse (because deletes are
     * not stored and have no updateTime).
     *
     * Note that these versions can be different: No-op writes will not change
     * the updateTime even though the commitTime advances.
     */
    version, 
    /**
     * The resulting fields returned from the backend after a mutation
     * containing field transforms has been committed. Contains one FieldValue
     * for each FieldTransform that was in the mutation.
     *
     * Will be empty if the mutation did not contain any field transforms.
     */
    transformResults) {
        this.version = version;
        this.transformResults = transformResults;
    }
}
/**
 * Encodes a precondition for a mutation. This follows the model that the
 * backend accepts with the special case of an explicit "empty" precondition
 * (meaning no precondition).
 */
class Precondition {
    constructor(updateTime, exists) {
        this.updateTime = updateTime;
        this.exists = exists;
    }
    /** Creates a new empty Precondition. */
    static none() {
        return new Precondition();
    }
    /** Creates a new Precondition with an exists flag. */
    static exists(exists) {
        return new Precondition(undefined, exists);
    }
    /** Creates a new Precondition based on a version a document exists at. */
    static updateTime(version) {
        return new Precondition(version);
    }
    /** Returns whether this Precondition is empty. */
    get isNone() {
        return this.updateTime === undefined && this.exists === undefined;
    }
    /** Structural equality on the exists flag and the update time. */
    isEqual(other) {
        if (this.exists !== other.exists) {
            return false;
        }
        return this.updateTime
            ? !!other.updateTime && this.updateTime.isEqual(other.updateTime)
            : !other.updateTime;
    }
}
/** Returns true if the preconditions is valid for the given document. */
function preconditionIsValidForDocument(precondition, document) {
    if (precondition.updateTime !== undefined) {
        // Version preconditions require an existing document at that version.
        return (document.isFoundDocument() &&
            document.version.isEqual(precondition.updateTime));
    }
    if (precondition.exists !== undefined) {
        return precondition.exists === document.isFoundDocument();
    }
    // An empty precondition always holds.
    return true;
}
/**
* A mutation describes a self-contained change to a document. Mutations can
* create, replace, delete, and update subsets of documents.
*
* Mutations not only act on the value of the document but also its version.
*
* For local mutations (mutations that haven't been committed yet), we preserve
* the existing version for Set and Patch mutations. For Delete mutations, we
* reset the version to 0.
*
* Here's the expected transition table.
*
* MUTATION APPLIED TO RESULTS IN
*
* SetMutation Document(v3) Document(v3)
* SetMutation NoDocument(v3) Document(v0)
* SetMutation InvalidDocument(v0) Document(v0)
* PatchMutation Document(v3) Document(v3)
* PatchMutation NoDocument(v3) NoDocument(v3)
* PatchMutation InvalidDocument(v0) UnknownDocument(v3)
* DeleteMutation Document(v3) NoDocument(v0)
* DeleteMutation NoDocument(v3) NoDocument(v0)
* DeleteMutation InvalidDocument(v0) NoDocument(v0)
*
* For acknowledged mutations, we use the updateTime of the WriteResponse as
* the resulting version for Set and Patch mutations. As deletes have no
* explicit update time, we use the commitTime of the WriteResponse for
* Delete mutations.
*
* If a mutation is acknowledged by the backend but fails the precondition check
* locally, we transition to an `UnknownDocument` and rely on Watch to send us
* the updated version.
*
* Field transforms are used only with Patch and Set Mutations. We use the
* `updateTransforms` message to store transforms, rather than the `transforms`s
* messages.
*
* ## Subclassing Notes
*
* Every type of mutation needs to implement its own applyToRemoteDocument() and
* applyToLocalView() to implement the actual behavior of applying the mutation
* to some source document (see `setMutationApplyToRemoteDocument()` for an
* example).
*/
/**
 * Abstract base class for all mutation kinds (see SetMutation, PatchMutation
 * and DeleteMutation, and the transition table documented above).
 */
class Mutation {
}
/**
 * A utility method to calculate a `Mutation` representing the overlay from the
 * final state of the document, and a `FieldMask` representing the fields that
 * are mutated by the local mutations.
 *
 * Returns null when the document has no local mutations (or when an explicit
 * empty mask is given).
 */
function calculateOverlayMutation(doc, mask) {
    if (!doc.hasLocalMutations || (mask && mask.fields.length === 0)) {
        return null;
    }
    // mask is null when sets or deletes are applied to the current document.
    if (mask === null) {
        if (doc.isNoDocument()) {
            // A locally deleted document overlays as an unconditional delete.
            return new DeleteMutation(doc.key, Precondition.none());
        }
        else {
            // Otherwise the full current contents become a set overlay.
            return new SetMutation(doc.key, doc.data, Precondition.none());
        }
    }
    else {
        const docValue = doc.data;
        const patchValue = ObjectValue.empty();
        // Tracks field paths already processed so each appears at most once.
        let maskSet = new SortedSet(FieldPath$1.comparator);
        for (let path of mask.fields) {
            if (!maskSet.has(path)) {
                let value = docValue.field(path);
                // If we are deleting a nested field, we take the immediate parent as
                // the mask used to construct the resulting mutation.
                // Justification: Nested fields can create parent fields implicitly. If
                // only a leaf entry is deleted in later mutations, the parent field
                // should still remain, but we may have lost this information.
                // Consider mutation (foo.bar 1), then mutation (foo.bar delete()).
                // This leaves the final result (foo, {}). Despite the fact that `doc`
                // has the correct result, `foo` is not in `mask`, and the resulting
                // mutation would miss `foo`.
                if (value === null && path.length > 1) {
                    path = path.popLast();
                    value = docValue.field(path);
                }
                if (value === null) {
                    patchValue.delete(path);
                }
                else {
                    patchValue.set(path, value);
                }
                maskSet = maskSet.add(path);
            }
        }
        return new PatchMutation(doc.key, patchValue, new FieldMask(maskSet.toArray()), Precondition.none());
    }
}
/**
 * Applies this mutation to the given document for the purposes of computing a
 * new remote document. If the input document doesn't match the expected state
 * (e.g. it is invalid or outdated), the document type may transition to
 * unknown.
 *
 * @param mutation - The mutation to apply.
 * @param document - The document to mutate. The input document can be an
 * invalid document if the client has no knowledge of the pre-mutation state
 * of the document.
 * @param mutationResult - The result of applying the mutation from the backend.
 */
function mutationApplyToRemoteDocument(mutation, document, mutationResult) {
    if (mutation instanceof SetMutation) {
        setMutationApplyToRemoteDocument(mutation, document, mutationResult);
        return;
    }
    if (mutation instanceof PatchMutation) {
        patchMutationApplyToRemoteDocument(mutation, document, mutationResult);
        return;
    }
    // Anything else is a delete mutation.
    deleteMutationApplyToRemoteDocument(mutation, document, mutationResult);
}
/**
 * Applies this mutation to the given document for the purposes of computing
 * the new local view of a document. If the input document doesn't match the
 * expected state, the document is not modified.
 *
 * @param mutation - The mutation to apply.
 * @param document - The document to mutate. The input document can be an
 * invalid document if the client has no knowledge of the pre-mutation state
 * of the document.
 * @param previousMask - The fields that have been updated before applying this mutation.
 * @param localWriteTime - A timestamp indicating the local write time of the
 * batch this mutation is a part of.
 * @returns A `FieldMask` representing the fields that are changed by applying this mutation.
 */
function mutationApplyToLocalView(mutation, document, previousMask, localWriteTime) {
    if (mutation instanceof SetMutation) {
        return setMutationApplyToLocalView(mutation, document, previousMask, localWriteTime);
    }
    if (mutation instanceof PatchMutation) {
        return patchMutationApplyToLocalView(mutation, document, previousMask, localWriteTime);
    }
    // Anything else is a delete mutation (no localWriteTime needed).
    return deleteMutationApplyToLocalView(mutation, document, previousMask);
}
/**
* If this mutation is not idempotent, returns the base value to persist with
* this mutation. If a base value is returned, the mutation is always applied
* to this base value, even if document has already been updated.
*
* The base value is a sparse object that consists of only the document
* fields for which this mutation contains a non-idempotent transformation
* (e.g. a numeric increment). The provided value guarantees consistent
* behavior for non-idempotent transforms and allow us to return the same
* latency-compensated value even if the backend has already applied the
* mutation. The base value is null for idempotent mutations, as they can be
* re-played even if the backend has already applied them.
*
* @returns a base value to store along with the mutation, or null for
* idempotent mutations.
*/
function mutationExtractBaseValue(mutation, document) {
    let baseObject = null;
    for (const fieldTransform of mutation.fieldTransforms) {
        const existingValue = document.data.field(fieldTransform.field);
        const coercedValue = computeTransformOperationBaseValue(fieldTransform.transform, existingValue || null);
        if (coercedValue != null) {
            if (baseObject === null) {
                baseObject = ObjectValue.empty();
            }
            baseObject.set(fieldTransform.field, coercedValue);
        }
    }
    // `baseObject` is already either a populated ObjectValue or null; the
    // former `baseObject ? baseObject : null` ternary was redundant (an
    // ObjectValue instance is always truthy).
    return baseObject;
}
function mutationEquals(left, right) {
    // Shared structure first: type, key, precondition and transforms must all
    // agree before any type-specific payload is compared. The && chain
    // short-circuits exactly like the original sequence of early returns.
    const sameShape = left.type === right.type &&
        left.key.isEqual(right.key) &&
        left.precondition.isEqual(right.precondition) &&
        fieldTransformsAreEqual(left.fieldTransforms, right.fieldTransforms);
    if (!sameShape) {
        return false;
    }
    switch (left.type) {
        case 0 /* MutationType.Set */:
            return left.value.isEqual(right.value);
        case 1 /* MutationType.Patch */:
            return (left.data.isEqual(right.data) &&
                left.fieldMask.isEqual(right.fieldMask));
        default:
            // Delete and Verify mutations carry no extra payload.
            return true;
    }
}
/**
* A mutation that creates or replaces the document at the given key with the
* object value contents.
*/
class SetMutation extends Mutation {
    /**
     * @param documentKey - Key of the document to create or replace.
     * @param objectValue - The full object value to write.
     * @param precondition - Precondition that must hold for the write to apply.
     * @param transforms - Server-side field transforms to run with the write.
     */
    constructor(documentKey, objectValue, precondition, transforms = []) {
        super();
        this.key = documentKey;
        this.value = objectValue;
        this.precondition = precondition;
        this.fieldTransforms = transforms;
        this.type = 0 /* MutationType.Set */;
    }
    /** A set replaces the entire document, so there is no partial field mask. */
    getFieldMask() {
        return null;
    }
}
function setMutationApplyToRemoteDocument(mutation, document, mutationResult) {
    // Unlike setMutationApplyToLocalView, no precondition check is needed
    // here: the server accepted the mutation, so the precondition held.
    const data = mutation.value.clone();
    data.setAll(serverTransformResults(mutation.fieldTransforms, document, mutationResult.transformResults));
    document
        .convertToFoundDocument(mutationResult.version, data)
        .setHasCommittedMutations();
}
function setMutationApplyToLocalView(mutation, document, previousMask, localWriteTime) {
    if (!preconditionIsValidForDocument(mutation.precondition, document)) {
        // The mutation failed to apply (e.g. a document ID created with add()
        // caused a name collision); leave the document untouched.
        return previousMask;
    }
    const data = mutation.value.clone();
    data.setAll(localTransformResults(mutation.fieldTransforms, localWriteTime, document));
    document
        .convertToFoundDocument(document.version, data)
        .setHasLocalMutations();
    // A set overwrites every field, so no residual mask remains.
    return null;
}
/**
* A mutation that modifies fields of the document at the given key with the
* given values. The values are applied through a field mask:
*
* * When a field is in both the mask and the values, the corresponding field
* is updated.
* * When a field is in neither the mask nor the values, the corresponding
* field is unmodified.
* * When a field is in the mask but not in the values, the corresponding field
* is deleted.
* * When a field is not in the mask but is in the values, the values map is
* ignored.
*/
class PatchMutation extends Mutation {
    /**
     * @param documentKey - Key of the document to patch.
     * @param objectValue - Field values to apply, interpreted through the mask.
     * @param fieldMask - Fields that this mutation updates or deletes.
     * @param precondition - Precondition that must hold for the patch to apply.
     * @param transforms - Server-side field transforms to run with the patch.
     */
    constructor(documentKey, objectValue, fieldMask, precondition, transforms = []) {
        super();
        this.key = documentKey;
        this.data = objectValue;
        this.fieldMask = fieldMask;
        this.precondition = precondition;
        this.fieldTransforms = transforms;
        this.type = 1 /* MutationType.Patch */;
    }
    /** The set of fields this patch touches. */
    getFieldMask() {
        return this.fieldMask;
    }
}
function patchMutationApplyToRemoteDocument(mutation, document, mutationResult) {
    if (!preconditionIsValidForDocument(mutation.precondition, document)) {
        // The backend did not reject the mutation, so the precondition matched
        // there. Our cached copy must therefore be stale: drop it and keep
        // only the authoritative updateTime as an UnknownDocument.
        document.convertToUnknownDocument(mutationResult.version);
        return;
    }
    const transformResults = serverTransformResults(mutation.fieldTransforms, document, mutationResult.transformResults);
    const data = document.data;
    // Patch fields first, then layer the transform results on top.
    data.setAll(getPatch(mutation));
    data.setAll(transformResults);
    document
        .convertToFoundDocument(mutationResult.version, data)
        .setHasCommittedMutations();
}
function patchMutationApplyToLocalView(mutation, document, previousMask, localWriteTime) {
    if (!preconditionIsValidForDocument(mutation.precondition, document)) {
        // Precondition failed: the patch does not apply locally.
        return previousMask;
    }
    const transformResults = localTransformResults(mutation.fieldTransforms, localWriteTime, document);
    const data = document.data;
    // Patch fields first, then layer the transform results on top.
    data.setAll(getPatch(mutation));
    data.setAll(transformResults);
    document
        .convertToFoundDocument(document.version, data)
        .setHasLocalMutations();
    // A null incoming mask means "everything already mutated" and stays null;
    // otherwise extend it with the fields this patch touched.
    return previousMask === null
        ? null
        : previousMask
            .unionWith(mutation.fieldMask.fields)
            .unionWith(mutation.fieldTransforms.map(transform => transform.field));
}
/**
* Returns a FieldPath/Value map with the content of the PatchMutation.
*/
function getPatch(mutation) {
    const patch = new Map();
    mutation.fieldMask.fields.forEach(fieldPath => {
        if (fieldPath.isEmpty()) {
            // The empty path cannot carry a patched value.
            return;
        }
        patch.set(fieldPath, mutation.data.field(fieldPath));
    });
    return patch;
}
/**
* Creates a list of "transform results" (a transform result is a field value
* representing the result of applying a transform) for use after a mutation
* containing transforms has been acknowledged by the server.
*
* @param fieldTransforms - The field transforms to apply the result to.
* @param mutableDocument - The current state of the document after applying all
* previous mutations.
* @param serverTransformResults - The transform results received by the server.
* @returns The transform results list.
*/
function serverTransformResults(fieldTransforms, mutableDocument, serverTransformResults) {
    const results = new Map();
    // One server result is expected per local transform.
    hardAssert(fieldTransforms.length === serverTransformResults.length);
    serverTransformResults.forEach((serverResult, i) => {
        const { field, transform } = fieldTransforms[i];
        const previousValue = mutableDocument.data.field(field);
        results.set(field, applyTransformOperationToRemoteDocument(transform, previousValue, serverResult));
    });
    return results;
}
/**
* Creates a list of "transform results" (a transform result is a field value
* representing the result of applying a transform) for use when applying a
* transform locally.
*
* @param fieldTransforms - The field transforms to apply the result to.
* @param localWriteTime - The local time of the mutation (used to
* generate ServerTimestampValues).
* @param mutableDocument - The document to apply transforms on.
* @returns The transform results list.
*/
function localTransformResults(fieldTransforms, localWriteTime, mutableDocument) {
    const results = new Map();
    fieldTransforms.forEach(({ field, transform }) => {
        const previousValue = mutableDocument.data.field(field);
        results.set(field, applyTransformOperationToLocalView(transform, previousValue, localWriteTime));
    });
    return results;
}
/** A mutation that deletes the document at the given key. */
class DeleteMutation extends Mutation {
    /**
     * @param documentKey - Key of the document to delete.
     * @param precondition - Precondition that must hold for the delete to apply.
     */
    constructor(documentKey, precondition) {
        super();
        this.key = documentKey;
        this.precondition = precondition;
        this.type = 2 /* MutationType.Delete */;
        // Deletes never carry field transforms.
        this.fieldTransforms = [];
    }
    /** A delete affects the whole document, so there is no field mask. */
    getFieldMask() {
        return null;
    }
}
function deleteMutationApplyToRemoteDocument(mutation, document, mutationResult) {
    // Unlike applyToLocalView, no precondition check is needed here: the
    // server accepted the mutation, so the precondition must have held.
    const tombstone = document.convertToNoDocument(mutationResult.version);
    tombstone.setHasCommittedMutations();
}
function deleteMutationApplyToLocalView(mutation, document, previousMask) {
    if (!preconditionIsValidForDocument(mutation.precondition, document)) {
        // Precondition failed: the delete does not apply locally.
        return previousMask;
    }
    document.convertToNoDocument(document.version).setHasLocalMutations();
    // A delete removes every field, so no residual mask remains.
    return null;
}
/**
* A mutation that verifies the existence of the document at the given key with
* the provided precondition.
*
* The `verify` operation is only used in Transactions, and this class serves
* primarily to facilitate serialization into protos.
*/
class VerifyMutation extends Mutation {
    /**
     * @param documentKey - Key of the document whose existence is verified.
     * @param precondition - Precondition the document must satisfy.
     */
    constructor(documentKey, precondition) {
        super();
        this.key = documentKey;
        this.precondition = precondition;
        this.type = 3 /* MutationType.Verify */;
        // Verifies never carry field transforms.
        this.fieldTransforms = [];
    }
    /** A verify writes nothing, so there is no field mask. */
    getFieldMask() {
        return null;
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A batch of mutations that will be sent as one unit to the backend.
*/
class MutationBatch {
    /**
     * @param batchId - The unique ID of this mutation batch.
     * @param localWriteTime - The original write time of this mutation.
     * @param baseMutations - Mutations used to populate base values when the
     * batch is applied locally; they can locally overwrite values persisted in
     * the remote document cache and are never sent to the backend.
     * @param mutations - The user-provided mutations in this batch, applied
     * both locally and remotely.
     */
    constructor(batchId, localWriteTime, baseMutations, mutations) {
        this.batchId = batchId;
        this.localWriteTime = localWriteTime;
        this.baseMutations = baseMutations;
        this.mutations = mutations;
    }
    /**
     * Applies all the mutations in this MutationBatch to the specified document
     * to compute the state of the remote document.
     *
     * @param document - The document to apply mutations to.
     * @param batchResult - The result of applying the MutationBatch to the
     * backend.
     */
    applyToRemoteDocument(document, batchResult) {
        const results = batchResult.mutationResults;
        this.mutations.forEach((mutation, i) => {
            if (mutation.key.isEqual(document.key)) {
                mutationApplyToRemoteDocument(mutation, document, results[i]);
            }
        });
    }
    /**
     * Computes the local view of a document given all the mutations in this
     * batch.
     *
     * @param document - The document to apply mutations to.
     * @param mutatedFields - Fields updated before applying this batch.
     * @returns A `FieldMask` representing all the fields that are mutated.
     */
    applyToLocalView(document, mutatedFields) {
        const applyAll = mutationList => {
            for (const mutation of mutationList) {
                if (mutation.key.isEqual(document.key)) {
                    mutatedFields = mutationApplyToLocalView(mutation, document, mutatedFields, this.localWriteTime);
                }
            }
        };
        // Base state first, so non-idempotent transforms see a consistent set
        // of values; user-provided mutations are then layered on top.
        applyAll(this.baseMutations);
        applyAll(this.mutations);
        return mutatedFields;
    }
    /**
     * Computes the local view for all provided documents given the mutations in
     * this batch. Returns a `DocumentKey` to `Mutation` map which can be used
     * to replace all the mutation applications.
     */
    applyToLocalDocumentSet(documentMap, documentsWithoutRemoteVersion) {
        // TODO(mrschmidt): This implementation is O(n^2). If we apply the
        // mutations directly (as done in `applyToLocalView()`), we can reduce
        // the complexity to O(n).
        const overlays = newMutationMap();
        for (const m of this.mutations) {
            const overlayedDocument = documentMap.get(m.key);
            // TODO(mutabledocuments): This method should take a
            // MutableDocumentMap and we should remove this cast.
            const mutableDocument = overlayedDocument.overlayedDocument;
            let mutatedFields = this.applyToLocalView(mutableDocument, overlayedDocument.mutatedFields);
            if (documentsWithoutRemoteVersion.has(m.key)) {
                // The document only exists locally: emit a Set or Delete
                // mutation as the overlay instead of a patch.
                mutatedFields = null;
            }
            const overlay = calculateOverlayMutation(mutableDocument, mutatedFields);
            if (overlay !== null) {
                overlays.set(m.key, overlay);
            }
            if (!mutableDocument.isValidDocument()) {
                mutableDocument.convertToNoDocument(SnapshotVersion.min());
            }
        }
        return overlays;
    }
    /** The set of keys touched by the user-provided mutations in this batch. */
    keys() {
        let keys = documentKeySet();
        for (const m of this.mutations) {
            keys = keys.add(m.key);
        }
        return keys;
    }
    isEqual(other) {
        return (this.batchId === other.batchId &&
            arrayEquals(this.mutations, other.mutations, mutationEquals) &&
            arrayEquals(this.baseMutations, other.baseMutations, mutationEquals));
    }
}
/** The result of applying a mutation batch to the backend. */
class MutationBatchResult {
    /**
     * @param batch - The batch that was committed.
     * @param commitVersion - The version at which the backend committed it.
     * @param mutationResults - Per-mutation results from the backend.
     * @param docVersions - A pre-computed mapping from each mutated document
     * to the resulting version.
     */
    constructor(batch, commitVersion, mutationResults, docVersions) {
        this.batch = batch;
        this.commitVersion = commitVersion;
        this.mutationResults = mutationResults;
        this.docVersions = docVersions;
    }
    /**
     * Creates a new MutationBatchResult for the given batch and results. There
     * must be one result for each mutation in the batch. This static factory
     * caches a document=&gt;version mapping (docVersions).
     */
    static from(batch, commitVersion, results) {
        hardAssert(batch.mutations.length === results.length);
        let versionMap = documentVersionMap();
        batch.mutations.forEach((mutation, i) => {
            versionMap = versionMap.insert(mutation.key, results[i].version);
        });
        return new MutationBatchResult(batch, commitVersion, results, versionMap);
    }
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Representation of an overlay computed by Firestore.
*
* Holds information about a mutation and the largest batch id in Firestore when
* the mutation was created.
*/
class Overlay {
    /**
     * @param largestBatchId - The largest batch id in Firestore when the
     * mutation was created.
     * @param mutation - The overlay mutation itself.
     */
    constructor(largestBatchId, mutation) {
        this.largestBatchId = largestBatchId;
        this.mutation = mutation;
    }
    /** The key of the document this overlay applies to. */
    getKey() {
        return this.mutation.key;
    }
    /** Identity comparison: overlays are equal iff they wrap the same mutation. */
    isEqual(other) {
        if (other === null) {
            return false;
        }
        return this.mutation === other.mutation;
    }
    toString() {
        return `Overlay{
      largestBatchId: ${this.largestBatchId},
      mutation: ${this.mutation.toString()}
    }`;
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class ExistenceFilter {
    /**
     * @param count - The number of documents the backend reports as matching
     * the target.
     * @param unchangedNames - Optional bloom filter payload describing
     * unchanged document names.
     */
    constructor(documentCount, unchangedNames) {
        this.count = documentCount;
        this.unchangedNames = unchangedNames;
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Error Codes describing the different ways GRPC can fail. These are copied
* directly from GRPC's sources here:
*
* https://github.com/grpc/grpc/blob/bceec94ea4fc5f0085d81235d8e1c06798dc341a/include/grpc%2B%2B/impl/codegen/status_code_enum.h
*
* Important! The names of these identifiers matter because the string forms
* are used for reverse lookups from the webchannel stream. Do NOT change the
* names of these identifiers or change this into a const enum.
*/
var RpcCode;
(function (RpcCode) {
    // Emit both the forward (name -> number) and reverse (number -> name)
    // mappings, exactly mirroring TypeScript's numeric-enum output. The names
    // and values must not change: the string forms are used for reverse
    // lookups from the webchannel stream.
    const entries = [
        ['OK', 0], ['CANCELLED', 1], ['UNKNOWN', 2], ['INVALID_ARGUMENT', 3],
        ['DEADLINE_EXCEEDED', 4], ['NOT_FOUND', 5], ['ALREADY_EXISTS', 6],
        ['PERMISSION_DENIED', 7], ['UNAUTHENTICATED', 16],
        ['RESOURCE_EXHAUSTED', 8], ['FAILED_PRECONDITION', 9], ['ABORTED', 10],
        ['OUT_OF_RANGE', 11], ['UNIMPLEMENTED', 12], ['INTERNAL', 13],
        ['UNAVAILABLE', 14], ['DATA_LOSS', 15]
    ];
    for (const [codeName, codeValue] of entries) {
        RpcCode[(RpcCode[codeName] = codeValue)] = codeName;
    }
})(RpcCode || (RpcCode = {}));
/**
* Determines whether an error code represents a permanent error when received
* in response to a non-write operation.
*
* See isPermanentWriteError for classifying write errors.
*/
function isPermanentError(code) {
    if (code === Code.OK) {
        // 'OK' never accompanies an error.
        return fail();
    }
    // Transient failures that may be retried. Unauthenticated means something
    // went wrong with our token; retrying with new credentials happens
    // automatically.
    const retryable = [
        Code.CANCELLED, Code.UNKNOWN, Code.DEADLINE_EXCEEDED,
        Code.RESOURCE_EXHAUSTED, Code.INTERNAL, Code.UNAVAILABLE,
        Code.UNAUTHENTICATED
    ];
    if (retryable.includes(code)) {
        return false;
    }
    // Permanent failures. Aborted might be retried in some scenarios, but that
    // is dependent on the context and should be handled individually by the
    // calling code. See https://cloud.google.com/apis/design/errors.
    const permanent = [
        Code.INVALID_ARGUMENT, Code.NOT_FOUND, Code.ALREADY_EXISTS,
        Code.PERMISSION_DENIED, Code.FAILED_PRECONDITION, Code.ABORTED,
        Code.OUT_OF_RANGE, Code.UNIMPLEMENTED, Code.DATA_LOSS
    ];
    if (permanent.includes(code)) {
        return true;
    }
    return fail();
}
/**
* Determines whether an error code represents a permanent error when received
* in response to a write operation.
*
* Write operations must be handled specially because as of b/119437764, ABORTED
* errors on the write stream should be retried too (even though ABORTED errors
* are not generally retryable).
*
* Note that during the initial handshake on the write stream an ABORTED error
* signals that we should discard our stream token (i.e. it is permanent). This
* means a handshake error should be classified with isPermanentError, above.
*/
function isPermanentWriteError(code) {
    if (code === Code.ABORTED) {
        // ABORTED is retried on the write stream (see b/119437764), even
        // though it is generally a permanent error.
        return false;
    }
    return isPermanentError(code);
}
/**
* Maps an error Code from GRPC status code number, like 0, 1, or 14. These
* are not the same as HTTP status codes.
*
* @returns The Code equivalent to the given GRPC status code. Fails if there
* is no match.
*/
function mapCodeFromRpcCode(code) {
    if (code === undefined) {
        // This shouldn't normally happen, but in certain error cases (like
        // trying to send invalid proto messages) we may get an error with no
        // GRPC code.
        logError('GRPC error has no .code');
        return Code.UNKNOWN;
    }
    // Table-driven translation from GRPC status numbers to Firestore codes.
    // Map.get uses SameValueZero key comparison, matching the original
    // switch's strict-equality semantics.
    const mapping = new Map([
        [RpcCode.OK, Code.OK],
        [RpcCode.CANCELLED, Code.CANCELLED],
        [RpcCode.UNKNOWN, Code.UNKNOWN],
        [RpcCode.DEADLINE_EXCEEDED, Code.DEADLINE_EXCEEDED],
        [RpcCode.RESOURCE_EXHAUSTED, Code.RESOURCE_EXHAUSTED],
        [RpcCode.INTERNAL, Code.INTERNAL],
        [RpcCode.UNAVAILABLE, Code.UNAVAILABLE],
        [RpcCode.UNAUTHENTICATED, Code.UNAUTHENTICATED],
        [RpcCode.INVALID_ARGUMENT, Code.INVALID_ARGUMENT],
        [RpcCode.NOT_FOUND, Code.NOT_FOUND],
        [RpcCode.ALREADY_EXISTS, Code.ALREADY_EXISTS],
        [RpcCode.PERMISSION_DENIED, Code.PERMISSION_DENIED],
        [RpcCode.FAILED_PRECONDITION, Code.FAILED_PRECONDITION],
        [RpcCode.ABORTED, Code.ABORTED],
        [RpcCode.OUT_OF_RANGE, Code.OUT_OF_RANGE],
        [RpcCode.UNIMPLEMENTED, Code.UNIMPLEMENTED],
        [RpcCode.DATA_LOSS, Code.DATA_LOSS]
    ]);
    const mapped = mapping.get(code);
    // Fail on unrecognized codes, mirroring the original exhaustive switch.
    return mapped !== undefined ? mapped : fail();
}
/**
* @license
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An error encountered while decoding base64 string.
*/
class Base64DecodeError extends Error {
    constructor(...args) {
        super(...args);
        // Named so callers can distinguish decode failures from other Errors.
        this.name = 'Base64DecodeError';
    }
}
/**
* @license
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The global, singleton instance of TestingHooksSpi.
*
* This variable will be `null` in all cases _except_ when running from
* integration tests that have registered callbacks to be notified of events
* that happen during the test execution.
*/
let testingHooksSpi = null;
/**
 * Registers the global `testingHooksSpi` instance.
 *
 * Only one instance may ever be registered; a second call throws.
 * @param instance the instance to set.
 */
function setTestingHooksSpi(instance) {
    if (!testingHooksSpi) {
        testingHooksSpi = instance;
        return;
    }
    throw new Error('a TestingHooksSpi instance is already set');
}
/**
* @license
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An instance of the Platform's 'TextEncoder' implementation.
*/
function newTextEncoder() {
    // TextEncoder is imported from 'util' at the top of this module.
    const encoder = new TextEncoder();
    return encoder;
}
/**
* An instance of the Platform's 'TextDecoder' implementation.
*/
function newTextDecoder() {
    // Always decode as UTF-8, regardless of platform default.
    const decoder = new TextDecoder('utf-8');
    return decoder;
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The largest value representable in an unsigned 64-bit integer (2^64 - 1),
// stored as two 32-bit halves; used by BloomFilter.getBitIndex() to detect
// and wrap hash-value overflow.
const MAX_64_BIT_UNSIGNED_INTEGER = new Integer([0xffffffff, 0xffffffff], 0);
// Hash a string using md5 hashing algorithm.
function getMd5HashValue(value) {
    // UTF-8 encode the string, then digest it to a 16-byte MD5 hash.
    const md5 = new Md5();
    md5.update(newTextEncoder().encode(value));
    return new Uint8Array(md5.digest());
}
// Interpret the 16 bytes array as two 64-bit unsigned integers, encoded using
// 2s complement using little endian.
function get64BitUints(bytes) {
    // Pass the view's byteOffset/byteLength explicitly: reading from
    // `bytes.buffer` alone starts at offset 0, which is wrong whenever the
    // Uint8Array is a subarray view into a larger buffer. (Current callers
    // pass freshly-allocated arrays, so behavior is unchanged for them.)
    const dataView = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength);
    const chunk1 = dataView.getUint32(0, /* littleEndian= */ true);
    const chunk2 = dataView.getUint32(4, /* littleEndian= */ true);
    const chunk3 = dataView.getUint32(8, /* littleEndian= */ true);
    const chunk4 = dataView.getUint32(12, /* littleEndian= */ true);
    const integer1 = new Integer([chunk1, chunk2], 0);
    const integer2 = new Integer([chunk3, chunk4], 0);
    return [integer1, integer2];
}
class BloomFilter {
    /**
     * @param bitmap - The raw filter bytes.
     * @param padding - Number of unused trailing bits in the last byte (0-7).
     * @param hashCount - Number of hash probes per membership check.
     * @throws BloomFilterError if the arguments are inconsistent.
     */
    constructor(bitmap, padding, hashCount) {
        this.bitmap = bitmap;
        this.padding = padding;
        this.hashCount = hashCount;
        if (padding < 0 || padding >= 8) {
            throw new BloomFilterError(`Invalid padding: ${padding}`);
        }
        if (hashCount < 0) {
            throw new BloomFilterError(`Invalid hash count: ${hashCount}`);
        }
        if (bitmap.length > 0 && this.hashCount === 0) {
            // Only empty bloom filter can have 0 hash count.
            throw new BloomFilterError(`Invalid hash count: ${hashCount}`);
        }
        if (bitmap.length === 0 && padding !== 0) {
            // Empty bloom filter should have 0 padding.
            throw new BloomFilterError(`Invalid padding when bitmap length is 0: ${padding}`);
        }
        this.bitCount = bitmap.length * 8 - padding;
        // Pre-computed Integer form of bitCount, so probes don't recreate it.
        this.bitCountInInteger = Integer.fromNumber(this.bitCount);
    }
    // Calculate the ith hash value h(i) = h1 + (i * h2) and map it to a bit
    // index within the bitmap.
    getBitIndex(num1, num2, hashIndex) {
        let hashValue = num1.add(num2.multiply(Integer.fromNumber(hashIndex)));
        // Emulate unsigned 64-bit wrap-around on overflow.
        if (hashValue.compare(MAX_64_BIT_UNSIGNED_INTEGER) === 1) {
            hashValue = new Integer([hashValue.getBits(0), hashValue.getBits(1)], 0);
        }
        return hashValue.modulo(this.bitCountInInteger).toNumber();
    }
    // Whether bit n of the bitmap is 1: bitmap[n / 8] & (0x01 << (n % 8)).
    isBitSet(index) {
        const offset = index % 8;
        const byte = this.bitmap[(index - offset) / 8];
        return (byte & (0x01 << offset)) !== 0;
    }
    mightContain(value) {
        // An empty bitmap can contain nothing.
        if (this.bitCount === 0) {
            return false;
        }
        const [hash1, hash2] = get64BitUints(getMd5HashValue(value));
        for (let i = 0; i < this.hashCount; i++) {
            if (!this.isBitSet(this.getBitIndex(hash1, hash2, i))) {
                return false;
            }
        }
        return true;
    }
    /** Create bloom filter for testing purposes only. */
    static create(bitCount, hashCount, contains) {
        const padding = (8 - (bitCount % 8)) % 8;
        const filter = new BloomFilter(new Uint8Array(Math.ceil(bitCount / 8)), padding, hashCount);
        contains.forEach(item => filter.insert(item));
        return filter;
    }
    insert(value) {
        if (this.bitCount === 0) {
            return;
        }
        const [hash1, hash2] = get64BitUints(getMd5HashValue(value));
        for (let i = 0; i < this.hashCount; i++) {
            this.setBit(this.getBitIndex(hash1, hash2, i));
        }
    }
    setBit(index) {
        const offset = index % 8;
        this.bitmap[(index - offset) / 8] |= 0x01 << offset;
    }
}
class BloomFilterError extends Error {
    constructor(...args) {
        super(...args);
        // Named so callers can distinguish filter-validation failures.
        this.name = 'BloomFilterError';
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An event from the RemoteStore. It is split into targetChanges (changes to the
* state or the set of documents in our watched targets) and documentUpdates
* (changes to the actual documents).
*/
class RemoteEvent {
    /**
     * @param snapshotVersion - The snapshot version this event brings us up
     * to, or MIN if not set.
     * @param targetChanges - A map from target to changes to the target. See
     * TargetChange.
     * @param targetMismatches - A map of targets that is known to be
     * inconsistent, and the purpose for re-listening. Listens for these
     * targets should be re-established without resume tokens.
     * @param documentUpdates - A set of which documents have changed or been
     * deleted, along with the doc's new values (if not deleted).
     * @param resolvedLimboDocuments - A set of which document updates are due
     * only to limbo resolution targets.
     */
    constructor(snapshotVersion, targetChanges, targetMismatches, documentUpdates, resolvedLimboDocuments) {
        this.snapshotVersion = snapshotVersion;
        this.targetChanges = targetChanges;
        this.targetMismatches = targetMismatches;
        this.documentUpdates = documentUpdates;
        this.resolvedLimboDocuments = resolvedLimboDocuments;
    }
    /**
     * HACK: Views require RemoteEvents in order to determine whether the view
     * is CURRENT, but secondary tabs don't receive remote events. So this
     * method is used to create a synthesized RemoteEvent that can be used to
     * apply a CURRENT status change to a View, for queries executed in a
     * different tab.
     */
    // PORTING NOTE: Multi-tab only
    static createSynthesizedRemoteEventForCurrentChange(targetId, current, resumeToken) {
        const targetChanges = new Map([
            [
                targetId,
                TargetChange.createSynthesizedTargetChangeForCurrentChange(targetId, current, resumeToken)
            ]
        ]);
        return new RemoteEvent(SnapshotVersion.min(), targetChanges, new SortedMap(primitiveComparator), mutableDocumentMap(), documentKeySet());
    }
}
/**
* A TargetChange specifies the set of changes for a specific target as part of
* a RemoteEvent. These changes track which documents are added, modified or
* removed, as well as the target's resume token and whether the target is
* marked CURRENT.
* The actual changes *to* documents are not part of the TargetChange since
* documents may be part of multiple targets.
*/
class TargetChange {
    /**
     * @param resumeToken - An opaque, server-assigned token that allows
     * watching a query to be resumed after disconnecting without
     * retransmitting all the data that matches the query. The resume token
     * essentially identifies a point in time from which the server should
     * resume sending results.
     * @param current - The "current" (synced) status of this target. Note
     * that "current" has special meaning in the RPC protocol that implies
     * that a target is both up-to-date and consistent with the rest of the
     * watch stream.
     * @param addedDocuments - The set of documents that were newly assigned
     * to this target as part of this remote event.
     * @param modifiedDocuments - The set of documents that were already
     * assigned to this target but received an update during this remote
     * event.
     * @param removedDocuments - The set of documents that were removed from
     * this target as part of this remote event.
     */
    constructor(resumeToken, current, addedDocuments, modifiedDocuments, removedDocuments) {
        this.resumeToken = resumeToken;
        this.current = current;
        this.addedDocuments = addedDocuments;
        this.modifiedDocuments = modifiedDocuments;
        this.removedDocuments = removedDocuments;
    }
    /**
     * This method is used to create a synthesized TargetChanges that can be
     * used to apply a CURRENT status change to a View (for queries executed
     * in a different tab) or for new queries (to raise snapshots with correct
     * CURRENT status).
     */
    static createSynthesizedTargetChangeForCurrentChange(targetId, current, resumeToken) {
        return new TargetChange(resumeToken, current, documentKeySet(), documentKeySet(), documentKeySet());
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Represents a changed document and a list of target ids to which this change
* applies.
*
* If document has been deleted NoDocument will be provided.
*/
class DocumentWatchChange {
    /**
     * @param updatedTargetIds - Targets to which the new document applies.
     * @param removedTargetIds - Targets from which the document was removed.
     * @param key - The key of the document for this change.
     * @param newDoc - The new document, NoDocument if it was deleted, or null
     * if the document went out of view without the server sending a new
     * document.
     */
    constructor(updatedTargetIds, removedTargetIds, key, newDoc) {
        this.updatedTargetIds = updatedTargetIds;
        this.removedTargetIds = removedTargetIds;
        this.key = key;
        this.newDoc = newDoc;
    }
}
/** Pairs a target ID with the existence filter the backend sent for it. */
class ExistenceFilterChange {
    constructor(targetId, existenceFilter) {
        this.targetId = targetId;
        this.existenceFilter = existenceFilter;
    }
}
/** Describes a change to one or more watch targets reported by the backend. */
class WatchTargetChange {
    /**
     * @param state - What kind of change occurred to the watch target.
     * @param targetIds - The target IDs that were added/removed/set.
     * @param resumeToken - Opaque, server-assigned token that allows watching
     * a target to be resumed after disconnecting without retransmitting all
     * the data that matches the target; identifies a point in time from which
     * the server should resume sending results.
     * @param cause - An RPC error indicating why the watch failed, if any.
     */
    constructor(state, targetIds, resumeToken = ByteString.EMPTY_BYTE_STRING, cause = null) {
        this.state = state;
        this.targetIds = targetIds;
        this.resumeToken = resumeToken;
        this.cause = cause;
    }
}
/** Tracks the internal state of a Watch target. */
class TargetState {
    constructor() {
        // Number of outstanding target adds/removes we still expect acks for.
        // Only targets with zero pending responses are considered active.
        this.pendingResponses = 0;
        // Per-document changes accumulated since the last raised snapshot.
        // Continuously updated as document updates arrive; always reflects the
        // current delta against the last issued snapshot.
        this.documentChanges = snapshotChangesMap();
        // Backing fields for the public getters below.
        this._resumeToken = ByteString.EMPTY_BYTE_STRING;
        this._current = false;
        // Starts out true so that newly-added targets are included in the
        // next RemoteEvent.
        this._hasPendingChanges = true;
    }
    /**
     * Whether this target has been marked 'current'. 'Current' has special
     * meaning in the RPC protocol: the Watch backend has sent all changes up
     * to the point at which the target was added, and the target is
     * consistent with the rest of the watch stream.
     */
    get current() {
        return this._current;
    }
    /** The last resume token sent to us for this target. */
    get resumeToken() {
        return this._resumeToken;
    }
    /** Whether this target has pending target adds or target removes. */
    get isPending() {
        return this.pendingResponses !== 0;
    }
    /** Whether we have modified any state that should trigger a snapshot. */
    get hasPendingChanges() {
        return this._hasPendingChanges;
    }
    /**
     * Applies the resume token, but only when it has a new value; empty
     * resume tokens are discarded. A non-empty token also flags pending
     * changes.
     */
    updateResumeToken(resumeToken) {
        if (resumeToken.approximateByteSize() > 0) {
            this._resumeToken = resumeToken;
            this._hasPendingChanges = true;
        }
    }
    /**
     * Creates a target change from the current set of changes. To reset the
     * accumulated document changes after raising the snapshot, call
     * `clearPendingChanges()`.
     */
    toTargetChange() {
        let added = documentKeySet();
        let modified = documentKeySet();
        let removed = documentKeySet();
        this.documentChanges.forEach((key, changeType) => {
            if (changeType === 0 /* ChangeType.Added */) {
                added = added.add(key);
            }
            else if (changeType === 2 /* ChangeType.Modified */) {
                modified = modified.add(key);
            }
            else if (changeType === 1 /* ChangeType.Removed */) {
                removed = removed.add(key);
            }
            else {
                fail();
            }
        });
        return new TargetChange(this._resumeToken, this._current, added, modified, removed);
    }
    /** Resets the document changes and sets `hasPendingChanges` to false. */
    clearPendingChanges() {
        this._hasPendingChanges = false;
        this.documentChanges = snapshotChangesMap();
    }
    /** Records an added/modified/removed change for `key`. */
    addDocumentChange(key, changeType) {
        this._hasPendingChanges = true;
        this.documentChanges = this.documentChanges.insert(key, changeType);
    }
    /** Drops any accumulated change for `key`. */
    removeDocumentChange(key) {
        this._hasPendingChanges = true;
        this.documentChanges = this.documentChanges.remove(key);
    }
    /** Notes that a target request was sent and an ack is now expected. */
    recordPendingTargetRequest() {
        this.pendingResponses += 1;
    }
    /** Consumes one expected ack from watch; must never go negative. */
    recordTargetResponse() {
        this.pendingResponses -= 1;
        hardAssert(this.pendingResponses >= 0);
    }
    /** Marks the target 'current' and flags pending changes. */
    markCurrent() {
        this._hasPendingChanges = true;
        this._current = true;
    }
}
/** Log tag used for WatchChangeAggregator debug output. */
const LOG_TAG$g = 'WatchChangeAggregator';
/**
* A helper class to accumulate watch changes into a RemoteEvent.
*/
class WatchChangeAggregator {
    /**
     * @param metadataProvider - Supplies target metadata to the aggregator;
     * only `getDatabaseId()`, `getRemoteKeysForTarget()` and
     * `getTargetDataForTarget()` are used here.
     */
    constructor(metadataProvider) {
        this.metadataProvider = metadataProvider;
        /** The internal state of all tracked targets. */
        this.targetStates = new Map();
        /** Keeps track of the documents to update since the last raised snapshot. */
        this.pendingDocumentUpdates = mutableDocumentMap();
        /** A mapping of document keys to their set of target IDs. */
        this.pendingDocumentTargetMapping = documentTargetMap();
        /**
         * A map of targets with existence filter mismatches. These targets are
         * known to be inconsistent and their listens needs to be re-established by
         * RemoteStore.
         */
        this.pendingTargetResets = new SortedMap(primitiveComparator);
    }
    /**
     * Processes and adds the DocumentWatchChange to the current set of changes.
     */
    handleDocumentChange(docChange) {
        for (const targetId of docChange.updatedTargetIds) {
            // A missing or non-found newDoc means the document is no longer
            // visible to this target, so treat the "update" as a removal.
            if (docChange.newDoc && docChange.newDoc.isFoundDocument()) {
                this.addDocumentToTarget(targetId, docChange.newDoc);
            }
            else {
                this.removeDocumentFromTarget(targetId, docChange.key, docChange.newDoc);
            }
        }
        for (const targetId of docChange.removedTargetIds) {
            this.removeDocumentFromTarget(targetId, docChange.key, docChange.newDoc);
        }
    }
    /** Processes and adds the WatchTargetChange to the current set of changes. */
    handleTargetChange(targetChange) {
        this.forEachTarget(targetChange, targetId => {
            const targetState = this.ensureTargetState(targetId);
            switch (targetChange.state) {
                case 0 /* WatchTargetChangeState.NoChange */:
                    if (this.isActiveTarget(targetId)) {
                        targetState.updateResumeToken(targetChange.resumeToken);
                    }
                    break;
                case 1 /* WatchTargetChangeState.Added */:
                    // We need to decrement the number of pending acks needed from watch
                    // for this targetId.
                    targetState.recordTargetResponse();
                    if (!targetState.isPending) {
                        // We have a freshly added target, so we need to reset any state
                        // that we had previously. This can happen e.g. when remove and add
                        // back a target for existence filter mismatches.
                        targetState.clearPendingChanges();
                    }
                    targetState.updateResumeToken(targetChange.resumeToken);
                    break;
                case 2 /* WatchTargetChangeState.Removed */:
                    // We need to keep track of removed targets to we can post-filter and
                    // remove any target changes.
                    // We need to decrement the number of pending acks needed from watch
                    // for this targetId.
                    targetState.recordTargetResponse();
                    if (!targetState.isPending) {
                        this.removeTarget(targetId);
                    }
                    break;
                case 3 /* WatchTargetChangeState.Current */:
                    if (this.isActiveTarget(targetId)) {
                        targetState.markCurrent();
                        targetState.updateResumeToken(targetChange.resumeToken);
                    }
                    break;
                case 4 /* WatchTargetChangeState.Reset */:
                    if (this.isActiveTarget(targetId)) {
                        // Reset the target and synthesizes removes for all existing
                        // documents. The backend will re-add any documents that still
                        // match the target before it sends the next global snapshot.
                        this.resetTarget(targetId);
                        targetState.updateResumeToken(targetChange.resumeToken);
                    }
                    break;
                default:
                    fail();
            }
        });
    }
    /**
     * Iterates over all targetIds that the watch change applies to: either the
     * targetIds explicitly listed in the change or the targetIds of all currently
     * active targets.
     */
    forEachTarget(targetChange, fn) {
        if (targetChange.targetIds.length > 0) {
            targetChange.targetIds.forEach(fn);
        }
        else {
            this.targetStates.forEach((_, targetId) => {
                if (this.isActiveTarget(targetId)) {
                    fn(targetId);
                }
            });
        }
    }
    /**
     * Handles existence filters and synthesizes deletes for filter mismatches.
     * Targets that are invalidated by filter mismatches are added to
     * `pendingTargetResets`.
     */
    handleExistenceFilter(watchChange) {
        const targetId = watchChange.targetId;
        const expectedCount = watchChange.existenceFilter.count;
        const targetData = this.targetDataForActiveTarget(targetId);
        if (targetData) {
            const target = targetData.target;
            if (targetIsDocumentTarget(target)) {
                if (expectedCount === 0) {
                    // The existence filter told us the document does not exist. We deduce
                    // that this document does not exist and apply a deleted document to
                    // our updates. Without applying this deleted document there might be
                    // another query that will raise this document as part of a snapshot
                    // until it is resolved, essentially exposing inconsistency between
                    // queries.
                    const key = new DocumentKey(target.path);
                    this.removeDocumentFromTarget(targetId, key, MutableDocument.newNoDocument(key, SnapshotVersion.min()));
                }
                else {
                    // A document target can match at most one document.
                    hardAssert(expectedCount === 1);
                }
            }
            else {
                const currentSize = this.getCurrentDocumentCountForTarget(targetId);
                // Existence filter mismatch. Mark the documents as being in limbo, and
                // raise a snapshot with `isFromCache:true`.
                if (currentSize !== expectedCount) {
                    // Apply bloom filter to identify and mark removed documents.
                    const bloomFilter = this.parseBloomFilter(watchChange);
                    const status = bloomFilter
                        ? this.applyBloomFilter(bloomFilter, watchChange, currentSize)
                        : 1 /* BloomFilterApplicationStatus.Skipped */;
                    if (status !== 0 /* BloomFilterApplicationStatus.Success */) {
                        // If bloom filter application fails, we reset the mapping and
                        // trigger re-run of the query.
                        this.resetTarget(targetId);
                        const purpose = status === 2 /* BloomFilterApplicationStatus.FalsePositive */
                            ? "TargetPurposeExistenceFilterMismatchBloom" /* TargetPurpose.ExistenceFilterMismatchBloom */
                            : "TargetPurposeExistenceFilterMismatch" /* TargetPurpose.ExistenceFilterMismatch */;
                        this.pendingTargetResets = this.pendingTargetResets.insert(targetId, purpose);
                    }
                    // Report the mismatch to testing hooks, when installed.
                    testingHooksSpi === null || testingHooksSpi === void 0 ? void 0 : testingHooksSpi.notifyOnExistenceFilterMismatch(createExistenceFilterMismatchInfoForTestingHooks(currentSize, watchChange.existenceFilter, this.metadataProvider.getDatabaseId(), bloomFilter, status));
                }
            }
        }
    }
    /**
     * Parse the bloom filter from the "unchanged_names" field of an existence
     * filter. Returns null (falling back to a full re-query) when the field is
     * absent, the bitmap cannot be base64-decoded, the filter parameters are
     * invalid, or the filter is empty.
     */
    parseBloomFilter(watchChange) {
        const unchangedNames = watchChange.existenceFilter.unchangedNames;
        if (!unchangedNames || !unchangedNames.bits) {
            return null;
        }
        const { bits: { bitmap = '', padding = 0 }, hashCount = 0 } = unchangedNames;
        let normalizedBitmap;
        try {
            normalizedBitmap = normalizeByteString(bitmap).toUint8Array();
        }
        catch (err) {
            if (err instanceof Base64DecodeError) {
                logWarn('Decoding the base64 bloom filter in existence filter failed (' +
                    err.message +
                    '); ignoring the bloom filter and falling back to full re-query.');
                return null;
            }
            else {
                throw err;
            }
        }
        let bloomFilter;
        try {
            // BloomFilter throws error if the inputs are invalid.
            bloomFilter = new BloomFilter(normalizedBitmap, padding, hashCount);
        }
        catch (err) {
            if (err instanceof BloomFilterError) {
                logWarn('BloomFilter error: ', err);
            }
            else {
                logWarn('Applying bloom filter failed: ', err);
            }
            return null;
        }
        if (bloomFilter.bitCount === 0) {
            return null;
        }
        return bloomFilter;
    }
    /**
     * Apply bloom filter to remove the deleted documents, and return the
     * application status. Success means the expected count matches the local
     * count after removals; otherwise the filter produced a false positive.
     */
    applyBloomFilter(bloomFilter, watchChange, currentCount) {
        const expectedCount = watchChange.existenceFilter.count;
        const removedDocumentCount = this.filterRemovedDocuments(bloomFilter, watchChange.targetId);
        return expectedCount === currentCount - removedDocumentCount
            ? 0 /* BloomFilterApplicationStatus.Success */
            : 2 /* BloomFilterApplicationStatus.FalsePositive */;
    }
    /**
     * Filter out removed documents based on bloom filter membership result and
     * return number of documents removed. A document whose full resource path
     * is not (possibly) contained in the filter is known to be deleted.
     */
    filterRemovedDocuments(bloomFilter, targetId) {
        const existingKeys = this.metadataProvider.getRemoteKeysForTarget(targetId);
        let removalCount = 0;
        existingKeys.forEach(key => {
            const databaseId = this.metadataProvider.getDatabaseId();
            const documentPath = `projects/${databaseId.projectId}` +
                `/databases/${databaseId.database}` +
                `/documents/${key.path.canonicalString()}`;
            if (!bloomFilter.mightContain(documentPath)) {
                this.removeDocumentFromTarget(targetId, key, /*updatedDocument=*/ null);
                removalCount++;
            }
        });
        return removalCount;
    }
    /**
     * Converts the currently accumulated state into a remote event at the
     * provided snapshot version. Resets the accumulated changes before returning.
     */
    createRemoteEvent(snapshotVersion) {
        const targetChanges = new Map();
        this.targetStates.forEach((targetState, targetId) => {
            const targetData = this.targetDataForActiveTarget(targetId);
            if (targetData) {
                if (targetState.current && targetIsDocumentTarget(targetData.target)) {
                    // Document queries for document that don't exist can produce an empty
                    // result set. To update our local cache, we synthesize a document
                    // delete if we have not previously received the document. This
                    // resolves the limbo state of the document, removing it from
                    // limboDocumentRefs.
                    //
                    // TODO(dimond): Ideally we would have an explicit lookup target
                    // instead resulting in an explicit delete message and we could
                    // remove this special logic.
                    const key = new DocumentKey(targetData.target.path);
                    if (this.pendingDocumentUpdates.get(key) === null &&
                        !this.targetContainsDocument(targetId, key)) {
                        this.removeDocumentFromTarget(targetId, key, MutableDocument.newNoDocument(key, snapshotVersion));
                    }
                }
                if (targetState.hasPendingChanges) {
                    targetChanges.set(targetId, targetState.toTargetChange());
                    targetState.clearPendingChanges();
                }
            }
        });
        let resolvedLimboDocuments = documentKeySet();
        // We extract the set of limbo-only document updates as the GC logic
        // special-cases documents that do not appear in the target cache.
        //
        // TODO(gsoltis): Expand on this comment once GC is available in the JS
        // client.
        this.pendingDocumentTargetMapping.forEach((key, targets) => {
            let isOnlyLimboTarget = true;
            targets.forEachWhile(targetId => {
                const targetData = this.targetDataForActiveTarget(targetId);
                if (targetData &&
                    targetData.purpose !== "TargetPurposeLimboResolution" /* TargetPurpose.LimboResolution */) {
                    isOnlyLimboTarget = false;
                    return false;
                }
                return true;
            });
            if (isOnlyLimboTarget) {
                resolvedLimboDocuments = resolvedLimboDocuments.add(key);
            }
        });
        // Stamp every pending document update with the snapshot's read time.
        this.pendingDocumentUpdates.forEach((_, doc) => doc.setReadTime(snapshotVersion));
        const remoteEvent = new RemoteEvent(snapshotVersion, targetChanges, this.pendingTargetResets, this.pendingDocumentUpdates, resolvedLimboDocuments);
        // Reset all per-snapshot accumulators for the next aggregation cycle.
        this.pendingDocumentUpdates = mutableDocumentMap();
        this.pendingDocumentTargetMapping = documentTargetMap();
        this.pendingTargetResets = new SortedMap(primitiveComparator);
        return remoteEvent;
    }
    /**
     * Adds the provided document to the internal list of document updates and
     * its document key to the given target's mapping.
     */
    // Visible for testing.
    addDocumentToTarget(targetId, document) {
        if (!this.isActiveTarget(targetId)) {
            return;
        }
        // A document already known to the target is a modification; otherwise
        // it is an addition.
        const changeType = this.targetContainsDocument(targetId, document.key)
            ? 2 /* ChangeType.Modified */
            : 0 /* ChangeType.Added */;
        const targetState = this.ensureTargetState(targetId);
        targetState.addDocumentChange(document.key, changeType);
        this.pendingDocumentUpdates = this.pendingDocumentUpdates.insert(document.key, document);
        this.pendingDocumentTargetMapping =
            this.pendingDocumentTargetMapping.insert(document.key, this.ensureDocumentTargetMapping(document.key).add(targetId));
    }
    /**
     * Removes the provided document from the target mapping. If the
     * document no longer matches the target, but the document's state is still
     * known (e.g. we know that the document was deleted or we received the change
     * that caused the filter mismatch), the new document can be provided
     * to update the remote document cache.
     */
    // Visible for testing.
    removeDocumentFromTarget(targetId, key, updatedDocument) {
        if (!this.isActiveTarget(targetId)) {
            return;
        }
        const targetState = this.ensureTargetState(targetId);
        if (this.targetContainsDocument(targetId, key)) {
            targetState.addDocumentChange(key, 1 /* ChangeType.Removed */);
        }
        else {
            // The document may have entered and left the target before we raised a
            // snapshot, so we can just ignore the change.
            targetState.removeDocumentChange(key);
        }
        this.pendingDocumentTargetMapping =
            this.pendingDocumentTargetMapping.insert(key, this.ensureDocumentTargetMapping(key).delete(targetId));
        if (updatedDocument) {
            this.pendingDocumentUpdates = this.pendingDocumentUpdates.insert(key, updatedDocument);
        }
    }
    /** Stops tracking state for the given target. */
    removeTarget(targetId) {
        this.targetStates.delete(targetId);
    }
    /**
     * Returns the current count of documents in the target. This includes both
     * the number of documents that the LocalStore considers to be part of the
     * target as well as any accumulated changes.
     */
    getCurrentDocumentCountForTarget(targetId) {
        const targetState = this.ensureTargetState(targetId);
        const targetChange = targetState.toTargetChange();
        return (this.metadataProvider.getRemoteKeysForTarget(targetId).size +
            targetChange.addedDocuments.size -
            targetChange.removedDocuments.size);
    }
    /**
     * Increment the number of acks needed from watch before we can consider the
     * server to be 'in-sync' with the client's active targets.
     */
    recordPendingTargetRequest(targetId) {
        // For each request we get we need to record we need a response for it.
        const targetState = this.ensureTargetState(targetId);
        targetState.recordPendingTargetRequest();
    }
    /**
     * Returns the TargetState for `targetId`, creating and registering a fresh
     * one on first access.
     */
    ensureTargetState(targetId) {
        let result = this.targetStates.get(targetId);
        if (!result) {
            result = new TargetState();
            this.targetStates.set(targetId, result);
        }
        return result;
    }
    /**
     * Returns the pending set of target IDs for `key`, registering an empty
     * set on first access.
     */
    ensureDocumentTargetMapping(key) {
        let targetMapping = this.pendingDocumentTargetMapping.get(key);
        if (!targetMapping) {
            targetMapping = new SortedSet(primitiveComparator);
            this.pendingDocumentTargetMapping =
                this.pendingDocumentTargetMapping.insert(key, targetMapping);
        }
        return targetMapping;
    }
    /**
     * Verifies that the user is still interested in this target (by calling
     * `getTargetDataForTarget()`) and that we are not waiting for pending ADDs
     * from watch.
     */
    isActiveTarget(targetId) {
        const targetActive = this.targetDataForActiveTarget(targetId) !== null;
        if (!targetActive) {
            logDebug(LOG_TAG$g, 'Detected inactive target', targetId);
        }
        return targetActive;
    }
    /**
     * Returns the TargetData for an active target (i.e. a target that the user
     * is still interested in that has no outstanding target change requests).
     */
    targetDataForActiveTarget(targetId) {
        const targetState = this.targetStates.get(targetId);
        return targetState && targetState.isPending
            ? null
            : this.metadataProvider.getTargetDataForTarget(targetId);
    }
    /**
     * Resets the state of a Watch target to its initial state (e.g. sets
     * 'current' to false, clears the resume token and removes its target mapping
     * from all documents).
     */
    resetTarget(targetId) {
        this.targetStates.set(targetId, new TargetState());
        // Trigger removal for any documents currently mapped to this target.
        // These removals will be part of the initial snapshot if Watch does not
        // resend these documents.
        const existingKeys = this.metadataProvider.getRemoteKeysForTarget(targetId);
        existingKeys.forEach(key => {
            this.removeDocumentFromTarget(targetId, key, /*updatedDocument=*/ null);
        });
    }
    /**
     * Returns whether the LocalStore considers the document to be part of the
     * specified target.
     */
    targetContainsDocument(targetId, key) {
        const existingKeys = this.metadataProvider.getRemoteKeysForTarget(targetId);
        return existingKeys.has(key);
    }
}
/** Builds an empty map from document keys to their sets of target IDs. */
function documentTargetMap() {
    const emptyMapping = new SortedMap(DocumentKey.comparator);
    return emptyMapping;
}
/** Builds an empty map from document keys to their pending change types. */
function snapshotChangesMap() {
    const emptyChanges = new SortedMap(DocumentKey.comparator);
    return emptyChanges;
}
/**
 * Assembles the diagnostic payload reported to testing hooks when an
 * existence filter mismatch is detected. The `bloomFilter` section is only
 * present when the existence filter carried `unchangedNames`.
 */
function createExistenceFilterMismatchInfoForTestingHooks(localCacheCount, existenceFilter, databaseId, bloomFilter, bloomFilterStatus) {
    const result = {
        localCacheCount,
        existenceFilterCount: existenceFilter.count,
        databaseId: databaseId.database,
        projectId: databaseId.projectId
    };
    const unchangedNames = existenceFilter.unchangedNames;
    if (unchangedNames) {
        const bits = unchangedNames.bits;
        const bitmap = bits ? bits.bitmap : undefined;
        const padding = bits ? bits.padding : undefined;
        const bitmapLength = bitmap != null && bitmap.length != null ? bitmap.length : 0;
        result.bloomFilter = {
            applied: bloomFilterStatus === 0 /* BloomFilterApplicationStatus.Success */,
            hashCount: unchangedNames.hashCount != null ? unchangedNames.hashCount : 0,
            bitmapLength,
            padding: padding != null ? padding : 0,
            mightContain: (value) => {
                // Fall back to `false` when no bloom filter was parsed.
                const contained = bloomFilter ? bloomFilter.mightContain(value) : undefined;
                return contained != null ? contained : false;
            }
        };
    }
    return result;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Maps client sort directions to their RPC enum names. */
const DIRECTIONS = {
    ["asc" /* Direction.ASCENDING */]: 'ASCENDING',
    ["desc" /* Direction.DESCENDING */]: 'DESCENDING'
};
/** Maps client filter operators to their RPC enum names. */
const OPERATORS = {
    ["<" /* Operator.LESS_THAN */]: 'LESS_THAN',
    ["<=" /* Operator.LESS_THAN_OR_EQUAL */]: 'LESS_THAN_OR_EQUAL',
    [">" /* Operator.GREATER_THAN */]: 'GREATER_THAN',
    [">=" /* Operator.GREATER_THAN_OR_EQUAL */]: 'GREATER_THAN_OR_EQUAL',
    ["==" /* Operator.EQUAL */]: 'EQUAL',
    ["!=" /* Operator.NOT_EQUAL */]: 'NOT_EQUAL',
    ["array-contains" /* Operator.ARRAY_CONTAINS */]: 'ARRAY_CONTAINS',
    ["in" /* Operator.IN */]: 'IN',
    ["not-in" /* Operator.NOT_IN */]: 'NOT_IN',
    ["array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */]: 'ARRAY_CONTAINS_ANY'
};
/** Maps client composite-filter operators to their RPC enum names. */
const COMPOSITE_OPERATORS = {
    ["and" /* CompositeOperator.AND */]: 'AND',
    ["or" /* CompositeOperator.OR */]: 'OR'
};
// NOTE(review): the body is empty in this compiled release build —
// presumably a debug-only presence assertion that was stripped out at build
// time, leaving a deliberate no-op. Verify against the SDK's source build.
function assertPresent(value, description) {
}
/**
* This class generates JsonObject values for the Datastore API suitable for
* sending to either GRPC stub methods or via the JSON/HTTP REST API.
*
* The serializer supports both Protobuf.js and Proto3 JSON formats. By
* setting `useProto3Json` to true, the serializer will use the Proto3 JSON
* format.
*
* For a description of the Proto3 JSON format check
* https://developers.google.com/protocol-buffers/docs/proto3#json
*
* TODO(klimt): We can remove the databaseId argument if we keep the full
* resource name in documents.
*/
class JsonProtoSerializer {
    /**
     * @param databaseId - Database against which resource names are encoded
     * and decoded.
     * @param useProto3Json - When true, serialize using the Proto3 JSON
     * format instead of the Protobuf.js format.
     */
    constructor(databaseId, useProto3Json) {
        this.databaseId = databaseId;
        this.useProto3Json = useProto3Json;
    }
}
/** Converts a google.rpc.Status proto into a FirestoreError. */
function fromRpcStatus(status) {
    let code;
    if (status.code === undefined) {
        // proto3 omits default (zero) values; treat a missing code as UNKNOWN.
        code = Code.UNKNOWN;
    }
    else {
        code = mapCodeFromRpcCode(status.code);
    }
    return new FirestoreError(code, status.message || '');
}
/**
* Returns a value for a number (or null) that's appropriate to put into
* a google.protobuf.Int32Value proto.
* DO NOT USE THIS FOR ANYTHING ELSE.
* This method cheats. It's typed as returning "number" because that's what
* our generated proto interfaces say Int32Value must be. But GRPC actually
* expects a { value: <number> } struct.
*/
function toInt32Proto(serializer, val) {
    // Proto3 JSON (and absent values) pass through unchanged; GRPC expects
    // the `{ value: <number> }` wrapper struct.
    if (!serializer.useProto3Json && !isNullOrUndefined(val)) {
        return { value: val };
    }
    return val;
}
/**
* Returns a number (or null) from a google.protobuf.Int32Value proto.
*/
function fromInt32Proto(val) {
    let result;
    // Guard against `typeof null === 'object'`: without the null check,
    // reading `.value` off a null input would throw a TypeError. A null
    // input is treated the same as an absent value.
    if (val !== null && typeof val === 'object') {
        result = val.value;
    }
    else {
        result = val;
    }
    // Normalize absent values to null, matching the documented contract.
    return isNullOrUndefined(result) ? null : result;
}
/**
* Returns a value for a Date that's appropriate to put into a proto.
*/
function toTimestamp(serializer, timestamp) {
    if (!serializer.useProto3Json) {
        // Protobuf.js format: seconds as a decimal string, nanos as a number.
        return {
            seconds: '' + timestamp.seconds,
            nanos: timestamp.nanoseconds
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
        };
    }
    // Proto3 JSON format: ISO-8601 with full nanosecond resolution. JS Date
    // only carries millis, so use it for the seconds portion and append the
    // nanosecond fraction manually.
    const isoString = new Date(timestamp.seconds * 1000).toISOString();
    // Drop the `.xxx` millisecond fraction and the trailing 'Z'.
    const secondsPart = isoString.replace(/\.\d*/, '').replace('Z', '');
    // Left-pad the nanoseconds out to 9 digits.
    const nanosPart = ('000000000' + timestamp.nanoseconds).slice(-9);
    return `${secondsPart}.${nanosPart}Z`;
}
/** Decodes a timestamp proto (string or struct form) into a Timestamp. */
function fromTimestamp(date) {
    const { seconds, nanos } = normalizeTimestamp(date);
    return new Timestamp(seconds, nanos);
}
/**
* Returns a value for bytes that's appropriate to put in a proto.
*
* Visible for testing.
*/
function toBytes(serializer, bytes) {
    // Proto3 JSON carries bytes as a base64 string; Protobuf.js uses a raw
    // Uint8Array.
    return serializer.useProto3Json ? bytes.toBase64() : bytes.toUint8Array();
}
/**
* Returns a ByteString based on the proto string value.
*/
function fromBytes(serializer, value) {
    if (serializer.useProto3Json) {
        // Proto3 JSON: bytes arrive base64-encoded (or are absent).
        hardAssert(value === undefined || typeof value === 'string');
        return ByteString.fromBase64String(value || '');
    }
    // Protobuf.js: bytes arrive as a raw Uint8Array (or are absent).
    hardAssert(value === undefined || value instanceof Uint8Array);
    return ByteString.fromUint8Array(value || new Uint8Array());
}
/** Serializes a SnapshotVersion as a timestamp proto. */
function toVersion(serializer, version) {
    const timestamp = version.toTimestamp();
    return toTimestamp(serializer, timestamp);
}
/** Deserializes a timestamp proto into a SnapshotVersion; must be present. */
function fromVersion(version) {
    hardAssert(!!version);
    const timestamp = fromTimestamp(version);
    return SnapshotVersion.fromTimestamp(timestamp);
}
/** Encodes a database ID and path into a canonical resource name string. */
function toResourceName(databaseId, path) {
    const resourcePath = toResourcePath(databaseId, path);
    return resourcePath.canonicalString();
}
/**
 * Builds the ".../documents" resource path for the database, extended with
 * `path` when one is given.
 */
function toResourcePath(databaseId, path) {
    const documentsRoot = fullyQualifiedPrefixPath(databaseId).child('documents');
    if (path === undefined) {
        return documentsRoot;
    }
    return documentsRoot.child(path);
}
/** Parses a resource name string, asserting that it is well-formed. */
function fromResourceName(name) {
    const resourcePath = ResourcePath.fromString(name);
    hardAssert(isValidResourceName(resourcePath));
    return resourcePath;
}
/** Encodes a document key as a fully qualified resource name. */
function toName(serializer, key) {
    const { databaseId } = serializer;
    return toResourceName(databaseId, key.path);
}
/**
 * Decodes a fully qualified resource name into a DocumentKey, verifying that
 * it belongs to the serializer's project and database.
 */
function fromName(serializer, name) {
    const resource = fromResourceName(name);
    const { projectId, database } = serializer.databaseId;
    // Resource names are "projects/<id>/databases/<db>/documents/...", so
    // segment 1 is the project and segment 3 is the database.
    if (resource.get(1) !== projectId) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, 'Tried to deserialize key from different project: ' +
            resource.get(1) +
            ' vs ' +
            projectId);
    }
    if (resource.get(3) !== database) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, 'Tried to deserialize key from different database: ' +
            resource.get(3) +
            ' vs ' +
            database);
    }
    return new DocumentKey(extractLocalPathFromResourceName(resource));
}
/** Encodes a query path as a fully qualified resource name. */
function toQueryPath(serializer, path) {
    const { databaseId } = serializer;
    return toResourceName(databaseId, path);
}
/** Decodes a resource name into the local query path. */
function fromQueryPath(name) {
    const resourceName = fromResourceName(name);
    // In v1beta1 queries for collections at the root did not have a trailing
    // "/documents" (length 4: projects/<p>/databases/<d>). In v1 all resource
    // paths contain "/documents". Preserve the ability to read the v1beta1
    // form for compatibility with queries persisted in the local target cache.
    return resourceName.length === 4
        ? ResourcePath.emptyPath()
        : extractLocalPathFromResourceName(resourceName);
}
/**
 * Returns the canonical "projects/<id>/databases/<db>" string for the
 * serializer's database.
 */
function getEncodedDatabaseId(serializer) {
    // Delegate to fullyQualifiedPrefixPath so the prefix shape is defined in
    // exactly one place instead of being duplicated here.
    return fullyQualifiedPrefixPath(serializer.databaseId).canonicalString();
}
/** Builds the "projects/<id>/databases/<db>" resource path prefix. */
function fullyQualifiedPrefixPath(databaseId) {
    const segments = [
        'projects',
        databaseId.projectId,
        'databases',
        databaseId.database
    ];
    return new ResourcePath(segments);
}
/**
 * Strips the "projects/<p>/databases/<d>/documents" prefix (five segments)
 * from a resource name, asserting that segment 4 is 'documents'.
 */
function extractLocalPathFromResourceName(resourceName) {
    hardAssert(resourceName.length > 4 && resourceName.get(4) === 'documents');
    const localPath = resourceName.popFirst(5);
    return localPath;
}
/** Creates a Document proto from key and fields (but no create/update time) */
function toMutationDocument(serializer, key, fields) {
    const name = toName(serializer, key);
    return {
        name,
        fields: fields.value.mapValue.fields
    };
}
/** Serializes a document (name, fields, update/create times) into a Document proto. */
function toDocument(serializer, document) {
    const updateTime = toTimestamp(serializer, document.version.toTimestamp());
    const createTime = toTimestamp(serializer, document.createTime.toTimestamp());
    return {
        name: toName(serializer, document.key),
        fields: document.data.value.mapValue.fields,
        updateTime,
        createTime
    };
}
/**
 * Deserializes a Document proto into a found MutableDocument.
 *
 * @param hasCommittedMutations - When true, flags the resulting document as
 * carrying committed mutations.
 */
function fromDocument(serializer, document, hasCommittedMutations) {
    const key = fromName(serializer, document.name);
    const version = fromVersion(document.updateTime);
    // If we read a document from persistence that is missing createTime, it's due
    // to older SDK versions not storing this information. In such cases, we'll
    // set the createTime to zero. This can be removed in the long term.
    const createTime = document.createTime
        ? fromVersion(document.createTime)
        : SnapshotVersion.min();
    const data = new ObjectValue({ mapValue: { fields: document.fields } });
    const result = MutableDocument.newFoundDocument(key, version, createTime, data);
    if (hasCommittedMutations) {
        // The previous code invoked setHasCommittedMutations() twice (once
        // here and once more inside the return expression); a single call
        // suffices since it mutates `result` in place.
        result.setHasCommittedMutations();
    }
    return result;
}
/** Deserializes a BatchGetDocuments 'found' result into a found MutableDocument. */
function fromFound(serializer, doc) {
    hardAssert(!!doc.found);
    assertPresent(doc.found.name);
    assertPresent(doc.found.updateTime);
    const found = doc.found;
    const key = fromName(serializer, found.name);
    const version = fromVersion(found.updateTime);
    // Missing createTime comes from older persisted data; fall back to zero.
    const createTime = found.createTime
        ? fromVersion(found.createTime)
        : SnapshotVersion.min();
    const data = new ObjectValue({ mapValue: { fields: found.fields } });
    return MutableDocument.newFoundDocument(key, version, createTime, data);
}
/** Deserializes a BatchGetDocuments 'missing' result into a no-document tombstone. */
function fromMissing(serializer, result) {
    hardAssert(!!result.missing);
    hardAssert(!!result.readTime);
    const missingKey = fromName(serializer, result.missing);
    const readVersion = fromVersion(result.readTime);
    return MutableDocument.newNoDocument(missingKey, readVersion);
}
/** Dispatches a BatchGetDocuments response to the 'found' or 'missing' decoder. */
function fromBatchGetDocumentsResponse(serializer, result) {
    if ('found' in result) {
        return fromFound(serializer, result);
    }
    if ('missing' in result) {
        return fromMissing(serializer, result);
    }
    // Responses must carry exactly one of the two fields.
    return fail();
}
/**
 * Decodes a single Watch stream ListenResponse into the SDK's internal
 * WatchChange model. Exactly one of the oneof fields (targetChange,
 * documentChange, documentDelete, documentRemove, filter) must be set;
 * anything else is a hard failure.
 */
function fromWatchChange(serializer, change) {
    let watchChange;
    if ('targetChange' in change) {
        assertPresent(change.targetChange);
        // proto3 default value is unset in JSON (undefined), so use 'NO_CHANGE'
        // if unset
        const state = fromWatchTargetChangeState(change.targetChange.targetChangeType || 'NO_CHANGE');
        const targetIds = change.targetChange.targetIds || [];
        const resumeToken = fromBytes(serializer, change.targetChange.resumeToken);
        const causeProto = change.targetChange.cause;
        // `cause` is only populated on target removals; normalize to null.
        const cause = causeProto && fromRpcStatus(causeProto);
        watchChange = new WatchTargetChange(state, targetIds, resumeToken, cause || null);
    }
    else if ('documentChange' in change) {
        assertPresent(change.documentChange);
        const entityChange = change.documentChange;
        assertPresent(entityChange.document);
        assertPresent(entityChange.document.name);
        assertPresent(entityChange.document.updateTime);
        const key = fromName(serializer, entityChange.document.name);
        const version = fromVersion(entityChange.document.updateTime);
        // createTime may be absent (older data); fall back to the zero version.
        const createTime = entityChange.document.createTime
            ? fromVersion(entityChange.document.createTime)
            : SnapshotVersion.min();
        const data = new ObjectValue({
            mapValue: { fields: entityChange.document.fields }
        });
        const doc = MutableDocument.newFoundDocument(key, version, createTime, data);
        const updatedTargetIds = entityChange.targetIds || [];
        const removedTargetIds = entityChange.removedTargetIds || [];
        watchChange = new DocumentWatchChange(updatedTargetIds, removedTargetIds, doc.key, doc);
    }
    else if ('documentDelete' in change) {
        assertPresent(change.documentDelete);
        const docDelete = change.documentDelete;
        assertPresent(docDelete.document);
        const key = fromName(serializer, docDelete.document);
        // readTime is optional on deletes.
        const version = docDelete.readTime
            ? fromVersion(docDelete.readTime)
            : SnapshotVersion.min();
        const doc = MutableDocument.newNoDocument(key, version);
        const removedTargetIds = docDelete.removedTargetIds || [];
        watchChange = new DocumentWatchChange([], removedTargetIds, doc.key, doc);
    }
    else if ('documentRemove' in change) {
        assertPresent(change.documentRemove);
        const docRemove = change.documentRemove;
        assertPresent(docRemove.document);
        const key = fromName(serializer, docRemove.document);
        // A remove only detaches the document from targets; no document data.
        const removedTargetIds = docRemove.removedTargetIds || [];
        watchChange = new DocumentWatchChange([], removedTargetIds, key, null);
    }
    else if ('filter' in change) {
        // TODO(dimond): implement existence filter parsing with strategy.
        assertPresent(change.filter);
        const filter = change.filter;
        assertPresent(filter.targetId);
        const { count = 0, unchangedNames } = filter;
        const existenceFilter = new ExistenceFilter(count, unchangedNames);
        const targetId = filter.targetId;
        watchChange = new ExistenceFilterChange(targetId, existenceFilter);
    }
    else {
        return fail();
    }
    return watchChange;
}
/**
 * Decodes the proto TargetChangeType enum name into the internal
 * WatchTargetChangeState numeric constant. Callers normalize an unset
 * value to 'NO_CHANGE' first; unknown names are a hard failure.
 */
function fromWatchTargetChangeState(state) {
    switch (state) {
        case 'NO_CHANGE':
            return 0 /* WatchTargetChangeState.NoChange */;
        case 'ADD':
            return 1 /* WatchTargetChangeState.Added */;
        case 'REMOVE':
            return 2 /* WatchTargetChangeState.Removed */;
        case 'CURRENT':
            return 3 /* WatchTargetChangeState.Current */;
        case 'RESET':
            return 4 /* WatchTargetChangeState.Reset */;
        default:
            return fail();
    }
}
/**
 * Extracts the consistent-snapshot version from a listen response.
 * Only a targetChange with an empty target list and a readTime marks a
 * consistent snapshot for the whole stream; everything else yields
 * SnapshotVersion.min(). The backend is guaranteed to send such responses.
 */
function versionFromListenResponse(change) {
    if (!('targetChange' in change)) {
        return SnapshotVersion.min();
    }
    const targetChange = change.targetChange;
    // A non-empty target list means the readTime is target-specific.
    if (targetChange.targetIds && targetChange.targetIds.length) {
        return SnapshotVersion.min();
    }
    return targetChange.readTime
        ? fromVersion(targetChange.readTime)
        : SnapshotVersion.min();
}
/**
 * Encodes a client Mutation (Set/Delete/Patch/Verify) into a Write proto,
 * attaching field transforms and a precondition when present.
 */
function toMutation(serializer, mutation) {
    let result;
    if (mutation instanceof SetMutation) {
        result = {
            update: toMutationDocument(serializer, mutation.key, mutation.value)
        };
    }
    else if (mutation instanceof DeleteMutation) {
        result = { delete: toName(serializer, mutation.key) };
    }
    else if (mutation instanceof PatchMutation) {
        // A Patch is an `update` plus an updateMask restricting the fields.
        result = {
            update: toMutationDocument(serializer, mutation.key, mutation.data),
            updateMask: toDocumentMask(mutation.fieldMask)
        };
    }
    else if (mutation instanceof VerifyMutation) {
        result = {
            verify: toName(serializer, mutation.key)
        };
    }
    else {
        return fail();
    }
    // Optional fields are omitted entirely when empty (proto3 unset semantics).
    if (mutation.fieldTransforms.length > 0) {
        result.updateTransforms = mutation.fieldTransforms.map(transform => toFieldTransform(serializer, transform));
    }
    if (!mutation.precondition.isNone) {
        result.currentDocument = toPrecondition(serializer, mutation.precondition);
    }
    return result;
}
/**
 * Decodes a Write proto into a client Mutation. An `update` with an
 * updateMask becomes a PatchMutation, without one a SetMutation; `delete`
 * and `verify` map to their respective mutation types.
 */
function fromMutation(serializer, proto) {
    const precondition = proto.currentDocument
        ? fromPrecondition(proto.currentDocument)
        : Precondition.none();
    const fieldTransforms = proto.updateTransforms
        ? proto.updateTransforms.map(transform => fromFieldTransform(serializer, transform))
        : [];
    if (proto.update) {
        assertPresent(proto.update.name);
        const key = fromName(serializer, proto.update.name);
        const value = new ObjectValue({
            mapValue: { fields: proto.update.fields }
        });
        if (proto.updateMask) {
            // The presence of a mask distinguishes a patch from a full set.
            const fieldMask = fromDocumentMask(proto.updateMask);
            return new PatchMutation(key, value, fieldMask, precondition, fieldTransforms);
        }
        else {
            return new SetMutation(key, value, precondition, fieldTransforms);
        }
    }
    else if (proto.delete) {
        const key = fromName(serializer, proto.delete);
        return new DeleteMutation(key, precondition);
    }
    else if (proto.verify) {
        const key = fromName(serializer, proto.verify);
        return new VerifyMutation(key, precondition);
    }
    else {
        return fail();
    }
}
/**
 * Encodes a client Precondition into its proto form. Exactly one of
 * updateTime / exists must be set; callers must not pass a
 * none-precondition (they check isNone first).
 */
function toPrecondition(serializer, precondition) {
    if (precondition.updateTime !== undefined) {
        return { updateTime: toVersion(serializer, precondition.updateTime) };
    }
    if (precondition.exists !== undefined) {
        return { exists: precondition.exists };
    }
    return fail();
}
/**
 * Decodes a proto precondition; when neither updateTime nor exists is
 * present the result is the none-precondition.
 */
function fromPrecondition(precondition) {
    if (precondition.updateTime !== undefined) {
        return Precondition.updateTime(fromVersion(precondition.updateTime));
    }
    if (precondition.exists !== undefined) {
        return Precondition.exists(precondition.exists);
    }
    return Precondition.none();
}
/**
 * Decodes a single WriteResult into a MutationResult. Deletes carry no
 * updateTime, so the batch's commit time is used instead.
 */
function fromWriteResult(proto, commitTime) {
    // NOTE: Deletes don't have an updateTime.
    const commitVersion = () => fromVersion(commitTime);
    let version = proto.updateTime ? fromVersion(proto.updateTime) : commitVersion();
    if (version.isEqual(SnapshotVersion.min())) {
        // The Firestore Emulator currently returns an update time of 0 for
        // deletes of non-existing documents (rather than null). This breaks the
        // test "get deleted doc while offline with source=cache" as NoDocuments
        // with version 0 are filtered by IndexedDb's RemoteDocumentCache.
        // TODO(#2149): Remove this when Emulator is fixed
        version = commitVersion();
    }
    return new MutationResult(version, proto.transformResults || []);
}
/**
 * Decodes a list of WriteResult protos; an absent or empty list yields [].
 * A non-empty list requires the commit time to be present.
 */
function fromWriteResults(protos, commitTime) {
    if (!protos || protos.length === 0) {
        return [];
    }
    hardAssert(commitTime !== undefined);
    return protos.map(proto => fromWriteResult(proto, commitTime));
}
/**
 * Encodes a client FieldTransform into its proto representation.
 *
 * @param serializer - Unused here; kept for signature parity with the other
 *   to* encoders.
 * @param fieldTransform - Transform to encode; its `transform` member must
 *   be one of the four known TransformOperation subclasses.
 * @returns The proto FieldTransform object.
 */
function toFieldTransform(serializer, fieldTransform) {
    const transform = fieldTransform.transform;
    if (transform instanceof ServerTimestampTransform) {
        return {
            fieldPath: fieldTransform.field.canonicalString(),
            setToServerValue: 'REQUEST_TIME'
        };
    }
    else if (transform instanceof ArrayUnionTransformOperation) {
        return {
            fieldPath: fieldTransform.field.canonicalString(),
            appendMissingElements: {
                values: transform.elements
            }
        };
    }
    else if (transform instanceof ArrayRemoveTransformOperation) {
        return {
            fieldPath: fieldTransform.field.canonicalString(),
            removeAllFromArray: {
                values: transform.elements
            }
        };
    }
    else if (transform instanceof NumericIncrementTransformOperation) {
        return {
            fieldPath: fieldTransform.field.canonicalString(),
            increment: transform.operand
        };
    }
    else {
        // fail() always throws; `return fail()` matches the convention used
        // by every other serializer branch in this file (was `throw fail()`).
        return fail();
    }
}
/**
 * Decodes a proto FieldTransform into the client model. Exactly one of the
 * four transform oneof fields must be present.
 *
 * @param serializer - Passed through to NumericIncrementTransformOperation
 *   for value decoding.
 * @param proto - The proto FieldTransform to decode.
 * @returns The decoded FieldTransform.
 */
function fromFieldTransform(serializer, proto) {
    let transform = null;
    if ('setToServerValue' in proto) {
        // REQUEST_TIME is the only supported server value.
        hardAssert(proto.setToServerValue === 'REQUEST_TIME');
        transform = new ServerTimestampTransform();
    }
    else if ('appendMissingElements' in proto) {
        const values = proto.appendMissingElements.values || [];
        transform = new ArrayUnionTransformOperation(values);
    }
    else if ('removeAllFromArray' in proto) {
        const values = proto.removeAllFromArray.values || [];
        transform = new ArrayRemoveTransformOperation(values);
    }
    else if ('increment' in proto) {
        transform = new NumericIncrementTransformOperation(serializer, proto.increment);
    }
    else {
        // Returning here (fail() always throws) matches the other decoders'
        // convention and guarantees we can never fall through and construct
        // a FieldTransform with a null transform (was a bare `fail();`).
        return fail();
    }
    const fieldPath = FieldPath$1.fromServerFormat(proto.fieldPath);
    return new FieldTransform(fieldPath, transform);
}
/** Encodes a single-document target as a DocumentsTarget proto. */
function toDocumentsTarget(serializer, target) {
    const documentPath = toQueryPath(serializer, target.path);
    return { documents: [documentPath] };
}
/**
 * Decodes a DocumentsTarget proto back into a Target. Only single-document
 * targets are supported; multi-document targets hard-fail.
 */
function fromDocumentsTarget(documentsTarget) {
    hardAssert(documentsTarget.documents.length === 1);
    const [name] = documentsTarget.documents;
    return queryToTarget(newQueryForPath(fromQueryPath(name)));
}
/**
 * Encodes a Target into a QueryTarget proto, dissecting the target's path
 * into a parent resource plus a structured query.
 *
 * @returns The QueryTarget proto together with the parent resource path;
 *   the parent is reused by the aggregation-request encoder.
 */
function toQueryTarget(serializer, target) {
    // Dissect the path into parent, collectionId, and optional key filter.
    const queryTarget = { structuredQuery: {} };
    const path = target.path;
    let parent;
    if (target.collectionGroup !== null) {
        // Collection-group query: scan all descendants of the path.
        parent = path;
        queryTarget.structuredQuery.from = [
            {
                collectionId: target.collectionGroup,
                allDescendants: true
            }
        ];
    }
    else {
        // Regular collection query: last segment is the collection id.
        parent = path.popLast();
        queryTarget.structuredQuery.from = [{ collectionId: path.lastSegment() }];
    }
    queryTarget.parent = toQueryPath(serializer, parent);
    // Optional clauses are omitted entirely when empty (proto3 unset).
    const where = toFilters(target.filters);
    if (where) {
        queryTarget.structuredQuery.where = where;
    }
    const orderBy = toOrder(target.orderBy);
    if (orderBy) {
        queryTarget.structuredQuery.orderBy = orderBy;
    }
    const limit = toInt32Proto(serializer, target.limit);
    if (limit !== null) {
        queryTarget.structuredQuery.limit = limit;
    }
    if (target.startAt) {
        queryTarget.structuredQuery.startAt = toStartAtCursor(target.startAt);
    }
    if (target.endAt) {
        queryTarget.structuredQuery.endAt = toEndAtCursor(target.endAt);
    }
    return { queryTarget, parent };
}
/**
 * Builds a RunAggregationQuery request for the given target and aggregate
 * definitions.
 *
 * Client-side aliases may exceed the server's 1500-byte string limit, so
 * each aggregation is assigned a short server alias (`aggregate_N`); the
 * returned aliasMap maps server aliases back to client aliases so the
 * response can be decoded.
 */
function toRunAggregationQueryRequest(serializer, target, aggregates) {
    const { queryTarget, parent } = toQueryTarget(serializer, target);
    const aliasMap = {};
    const aggregations = [];
    let aggregationNum = 0;
    aggregates.forEach(aggregate => {
        // Map all client-side aliases to a unique short-form
        // alias. This avoids issues with client-side aliases that
        // exceed the 1500-byte string size limit.
        const serverAlias = `aggregate_${aggregationNum++}`;
        aliasMap[serverAlias] = aggregate.alias;
        if (aggregate.aggregateType === 'count') {
            aggregations.push({
                alias: serverAlias,
                count: {}
            });
        }
        else if (aggregate.aggregateType === 'avg') {
            aggregations.push({
                alias: serverAlias,
                avg: {
                    field: toFieldPathReference(aggregate.fieldPath)
                }
            });
        }
        else if (aggregate.aggregateType === 'sum') {
            aggregations.push({
                alias: serverAlias,
                sum: {
                    field: toFieldPathReference(aggregate.fieldPath)
                }
            });
        }
    });
    return {
        request: {
            structuredAggregationQuery: {
                aggregations,
                structuredQuery: queryTarget.structuredQuery
            },
            parent: queryTarget.parent
        },
        aliasMap,
        parent
    };
}
/**
 * Decodes a QueryTarget proto into a client Query. A single `from` clause
 * determines whether this is a collection-group query (allDescendants) or
 * a child-collection query; every other clause is optional. The resulting
 * query always uses LimitType.First (limit-to-last exists only client-side).
 */
function convertQueryTargetToQuery(target) {
    let path = fromQueryPath(target.parent);
    const query = target.structuredQuery;
    const fromCount = query.from ? query.from.length : 0;
    let collectionGroup = null;
    if (fromCount > 0) {
        // Only a single `from` clause is supported.
        hardAssert(fromCount === 1);
        const from = query.from[0];
        if (from.allDescendants) {
            collectionGroup = from.collectionId;
        }
        else {
            path = path.child(from.collectionId);
        }
    }
    let filterBy = [];
    if (query.where) {
        filterBy = fromFilters(query.where);
    }
    let orderBy = [];
    if (query.orderBy) {
        orderBy = fromOrder(query.orderBy);
    }
    let limit = null;
    if (query.limit) {
        limit = fromInt32Proto(query.limit);
    }
    let startAt = null;
    if (query.startAt) {
        startAt = fromStartAtCursor(query.startAt);
    }
    let endAt = null;
    if (query.endAt) {
        endAt = fromEndAtCursor(query.endAt);
    }
    return newQuery(path, collectionGroup, orderBy, filterBy, limit, "F" /* LimitType.First */, startAt, endAt);
}
/** Decodes a QueryTarget proto into a Target via the Query model. */
function fromQueryTarget(target) {
    const query = convertQueryTargetToQuery(target);
    return queryToTarget(query);
}
/**
 * Returns the 'goog-listen-tags' labels for a listen request, or null when
 * the target's purpose carries no label.
 */
function toListenRequestLabels(serializer, targetData) {
    const value = toLabel(targetData.purpose);
    return value == null ? null : { 'goog-listen-tags': value };
}
/**
 * Maps a TargetPurpose to the label sent in 'goog-listen-tags'. Plain
 * listens carry no label (null); unknown purposes are a hard failure.
 */
function toLabel(purpose) {
    const LABELS = {
        "TargetPurposeListen": null,
        "TargetPurposeExistenceFilterMismatch": 'existence-filter-mismatch',
        "TargetPurposeExistenceFilterMismatchBloom": 'existence-filter-mismatch-bloom',
        "TargetPurposeLimboResolution": 'limbo-document'
    };
    return Object.hasOwn(LABELS, purpose) ? LABELS[purpose] : fail();
}
/**
 * Encodes TargetData into a listen Target proto. A resuming target
 * attaches either a resumeToken or a readTime (never both), plus the
 * expectedCount used for existence-filter validation when available.
 */
function toTarget(serializer, targetData) {
    let result;
    const target = targetData.target;
    if (targetIsDocumentTarget(target)) {
        result = { documents: toDocumentsTarget(serializer, target) };
    }
    else {
        result = { query: toQueryTarget(serializer, target).queryTarget };
    }
    result.targetId = targetData.targetId;
    if (targetData.resumeToken.approximateByteSize() > 0) {
        // Prefer the resume token over a read time when one exists.
        result.resumeToken = toBytes(serializer, targetData.resumeToken);
        const expectedCount = toInt32Proto(serializer, targetData.expectedCount);
        if (expectedCount !== null) {
            result.expectedCount = expectedCount;
        }
    }
    else if (targetData.snapshotVersion.compareTo(SnapshotVersion.min()) > 0) {
        // TODO(wuandy): Consider removing above check because it is most likely true.
        // Right now, many tests depend on this behaviour though (leaving min() out
        // of serialization).
        result.readTime = toTimestamp(serializer, targetData.snapshotVersion.toTimestamp());
        const expectedCount = toInt32Proto(serializer, targetData.expectedCount);
        if (expectedCount !== null) {
            result.expectedCount = expectedCount;
        }
    }
    return result;
}
/**
 * Encodes a target's filter list as a single proto filter (AND-composed);
 * returns undefined when there are no filters so the clause is omitted.
 */
function toFilters(filters) {
    if (filters.length === 0) {
        return undefined;
    }
    const conjunction = CompositeFilter.create(filters, "and" /* CompositeOperator.AND */);
    return toFilter(conjunction);
}
/**
 * Decodes a proto filter into a list of client filters. A flat AND
 * conjunction is unwrapped into its member filters; anything else becomes
 * a single-element list.
 */
function fromFilters(filter) {
    const decoded = fromFilter(filter);
    const isFlatConjunction = decoded instanceof CompositeFilter &&
        compositeFilterIsFlatConjunction(decoded);
    return isFlatConjunction ? decoded.getFilters() : [decoded];
}
/** Dispatches a proto filter to the unary / field / composite decoder. */
function fromFilter(filter) {
    if (filter.unaryFilter !== undefined) {
        return fromUnaryFilter(filter);
    }
    if (filter.fieldFilter !== undefined) {
        return fromFieldFilter(filter);
    }
    if (filter.compositeFilter !== undefined) {
        return fromCompositeFilter(filter);
    }
    return fail();
}
/**
 * Encodes orderBy constraints; returns undefined when the list is empty so
 * the clause is omitted from the structured query.
 */
function toOrder(orderBys) {
    return orderBys.length === 0
        ? undefined
        : orderBys.map(orderBy => toPropertyOrder(orderBy));
}
/** Decodes a list of proto order clauses into OrderBy objects. */
function fromOrder(orderBys) {
    return orderBys.map(proto => fromPropertyOrder(proto));
}
/** Encodes a startAt bound; `before` mirrors inclusiveness directly. */
function toStartAtCursor(cursor) {
    const { inclusive, position } = cursor;
    return { before: inclusive, values: position };
}
/** Encodes an endAt bound; an inclusive end is NOT 'before'. */
function toEndAtCursor(cursor) {
    const { inclusive, position } = cursor;
    return { before: !inclusive, values: position };
}
/** Decodes a proto startAt cursor into a Bound. */
function fromStartAtCursor(cursor) {
    const position = cursor.values || [];
    const inclusive = !!cursor.before;
    return new Bound(position, inclusive);
}
/** Decodes a proto endAt cursor into a Bound (inclusive iff not 'before'). */
function fromEndAtCursor(cursor) {
    const position = cursor.values || [];
    const inclusive = !cursor.before;
    return new Bound(position, inclusive);
}
// visible for testing
function toDirection(dir) {
return DIRECTIONS[dir];
}
// visible for testing
/**
 * Maps a proto Direction enum name to the client direction constant;
 * unspecified/unknown values yield undefined.
 */
function fromDirection(dir) {
    if (dir === 'ASCENDING') {
        return "asc" /* Direction.ASCENDING */;
    }
    if (dir === 'DESCENDING') {
        return "desc" /* Direction.DESCENDING */;
    }
    return undefined;
}
// visible for testing
/** Maps a client operator constant to its proto enum name via the OPERATORS lookup table. */
function toOperatorName(op) {
    return OPERATORS[op];
}
function toCompositeOperatorName(op) {
return COMPOSITE_OPERATORS[op];
}
/**
 * Decodes a proto FieldFilter operator name into the client operator
 * constant. OPERATOR_UNSPECIFIED and unknown names are a hard failure.
 */
function fromOperatorName(op) {
    const OPERATOR_NAMES = {
        EQUAL: "==" /* Operator.EQUAL */,
        NOT_EQUAL: "!=" /* Operator.NOT_EQUAL */,
        GREATER_THAN: ">" /* Operator.GREATER_THAN */,
        GREATER_THAN_OR_EQUAL: ">=" /* Operator.GREATER_THAN_OR_EQUAL */,
        LESS_THAN: "<" /* Operator.LESS_THAN */,
        LESS_THAN_OR_EQUAL: "<=" /* Operator.LESS_THAN_OR_EQUAL */,
        ARRAY_CONTAINS: "array-contains" /* Operator.ARRAY_CONTAINS */,
        IN: "in" /* Operator.IN */,
        NOT_IN: "not-in" /* Operator.NOT_IN */,
        ARRAY_CONTAINS_ANY: "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */
    };
    return Object.hasOwn(OPERATOR_NAMES, op) ? OPERATOR_NAMES[op] : fail();
}
/** Decodes a proto composite operator name ('AND'/'OR'); others hard-fail. */
function fromCompositeOperatorName(op) {
    if (op === 'AND') {
        return "and" /* CompositeOperator.AND */;
    }
    if (op === 'OR') {
        return "or" /* CompositeOperator.OR */;
    }
    return fail();
}
/** Wraps a field path's canonical string in a FieldReference proto. */
function toFieldPathReference(path) {
    const fieldPath = path.canonicalString();
    return { fieldPath };
}
/** Parses a FieldReference proto back into an internal FieldPath. */
function fromFieldPathReference(fieldReference) {
    const { fieldPath } = fieldReference;
    return FieldPath$1.fromServerFormat(fieldPath);
}
// visible for testing
/** Encodes one OrderBy constraint (field reference + direction). */
function toPropertyOrder(orderBy) {
    const field = toFieldPathReference(orderBy.field);
    const direction = toDirection(orderBy.dir);
    return { field, direction };
}
/** Decodes one proto order clause into an OrderBy. */
function fromPropertyOrder(orderBy) {
    const field = fromFieldPathReference(orderBy.field);
    return new OrderBy(field, fromDirection(orderBy.direction));
}
// visible for testing
/** Dispatches a client filter to the field / composite encoder. */
function toFilter(filter) {
    if (filter instanceof FieldFilter) {
        return toUnaryOrFieldFilter(filter);
    }
    if (filter instanceof CompositeFilter) {
        return toCompositeFilter(filter);
    }
    return fail();
}
/**
 * Encodes a composite filter. A single-member composite collapses to the
 * member's own proto rather than a one-element compositeFilter.
 */
function toCompositeFilter(filter) {
    const encoded = filter.getFilters().map(f => toFilter(f));
    if (encoded.length === 1) {
        return encoded[0];
    }
    return {
        compositeFilter: {
            op: toCompositeOperatorName(filter.op),
            filters: encoded
        }
    };
}
/**
 * Encodes a FieldFilter, special-casing NaN/null (in)equality as unary
 * filters (IS_NAN / IS_NULL / IS_NOT_NAN / IS_NOT_NULL); everything else
 * becomes a plain fieldFilter proto.
 */
function toUnaryOrFieldFilter(filter) {
    const unary = op => ({
        unaryFilter: {
            field: toFieldPathReference(filter.field),
            op
        }
    });
    if (filter.op === "==" /* Operator.EQUAL */) {
        if (isNanValue(filter.value)) {
            return unary('IS_NAN');
        }
        if (isNullValue(filter.value)) {
            return unary('IS_NULL');
        }
    }
    else if (filter.op === "!=" /* Operator.NOT_EQUAL */) {
        if (isNanValue(filter.value)) {
            return unary('IS_NOT_NAN');
        }
        if (isNullValue(filter.value)) {
            return unary('IS_NOT_NULL');
        }
    }
    return {
        fieldFilter: {
            field: toFieldPathReference(filter.field),
            op: toOperatorName(filter.op),
            value: filter.value
        }
    };
}
/**
 * Decodes a unary filter proto into an equivalent FieldFilter:
 * IS_NAN -> == NaN, IS_NULL -> == null, and the IS_NOT_* variants -> !=.
 * OPERATOR_UNSPECIFIED and unknown operators hard-fail.
 */
function fromUnaryFilter(filter) {
    const { op, field } = filter.unaryFilter;
    switch (op) {
        case 'IS_NAN':
            return FieldFilter.create(fromFieldPathReference(field), "==" /* Operator.EQUAL */, {
                doubleValue: NaN
            });
        case 'IS_NULL':
            return FieldFilter.create(fromFieldPathReference(field), "==" /* Operator.EQUAL */, {
                nullValue: 'NULL_VALUE'
            });
        case 'IS_NOT_NAN':
            return FieldFilter.create(fromFieldPathReference(field), "!=" /* Operator.NOT_EQUAL */, {
                doubleValue: NaN
            });
        case 'IS_NOT_NULL':
            return FieldFilter.create(fromFieldPathReference(field), "!=" /* Operator.NOT_EQUAL */, {
                nullValue: 'NULL_VALUE'
            });
        default:
            return fail();
    }
}
/** Decodes a fieldFilter proto into a FieldFilter. */
function fromFieldFilter(filter) {
    const { field, op, value } = filter.fieldFilter;
    return FieldFilter.create(fromFieldPathReference(field), fromOperatorName(op), value);
}
/** Decodes a compositeFilter proto, recursively decoding its members. */
function fromCompositeFilter(filter) {
    const { filters, op } = filter.compositeFilter;
    return CompositeFilter.create(filters.map(f => fromFilter(f)), fromCompositeOperatorName(op));
}
/** Encodes a FieldMask into a DocumentMask proto of canonical field paths. */
function toDocumentMask(fieldMask) {
    const fieldPaths = [];
    // fieldMask.fields exposes forEach (it may be a sorted set, not an array).
    fieldMask.fields.forEach(field => {
        fieldPaths.push(field.canonicalString());
    });
    return { fieldPaths };
}
/** Decodes a DocumentMask proto (fieldPaths may be absent) into a FieldMask. */
function fromDocumentMask(proto) {
    const serverPaths = proto.fieldPaths || [];
    const fieldPaths = serverPaths.map(path => FieldPath$1.fromServerFormat(path));
    return new FieldMask(fieldPaths);
}
/**
 * Returns whether a resource name path is structurally valid: at least
 * four segments beginning 'projects/<id>/databases/<id>'.
 */
function isValidResourceName(path) {
    // Resource names have at least 4 components (project ID, database ID).
    if (path.length < 4) {
        return false;
    }
    return path.get(0) === 'projects' && path.get(2) === 'databases';
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * An immutable set of metadata that the local store tracks for each target.
 *
 * Constructor parameters:
 * - target: the target being listened to.
 * - targetId: assigned by the LocalStore for user listens and by the
 *   SyncEngine for limbo watches.
 * - purpose: why the target is being listened to.
 * - sequenceNumber: sequence number of the last transaction during which
 *   this target data was modified.
 * - snapshotVersion: latest snapshot version seen for this target.
 * - lastLimboFreeSnapshotVersion: maximum snapshot version at which the
 *   associated view contained no limbo documents.
 * - resumeToken: opaque, server-assigned token that allows resuming a
 *   watch without retransmitting all matching data.
 * - expectedCount: number of documents that last matched the query at the
 *   resume token or read time; null unless resuming.
 */
class TargetData {
    constructor(target, targetId, purpose, sequenceNumber, snapshotVersion = SnapshotVersion.min(), lastLimboFreeSnapshotVersion = SnapshotVersion.min(), resumeToken = ByteString.EMPTY_BYTE_STRING, expectedCount = null) {
        this.target = target;
        this.targetId = targetId;
        this.purpose = purpose;
        this.sequenceNumber = sequenceNumber;
        this.snapshotVersion = snapshotVersion;
        this.lastLimboFreeSnapshotVersion = lastLimboFreeSnapshotVersion;
        this.resumeToken = resumeToken;
        this.expectedCount = expectedCount;
    }
    /** Internal: clones this instance with the given fields overridden. */
    copyWith(overrides) {
        const merged = { ...this, ...overrides };
        return new TargetData(merged.target, merged.targetId, merged.purpose, merged.sequenceNumber, merged.snapshotVersion, merged.lastLimboFreeSnapshotVersion, merged.resumeToken, merged.expectedCount);
    }
    /** Creates a new target data instance with an updated sequence number. */
    withSequenceNumber(sequenceNumber) {
        return this.copyWith({ sequenceNumber });
    }
    /**
     * Creates a new target data instance with an updated resume token and
     * snapshot version. The expected count is reset since it only applies
     * to the previous token.
     */
    withResumeToken(resumeToken, snapshotVersion) {
        return this.copyWith({ resumeToken, snapshotVersion, expectedCount: null });
    }
    /** Creates a new target data instance with an updated expected count. */
    withExpectedCount(expectedCount) {
        return this.copyWith({ expectedCount });
    }
    /**
     * Creates a new target data instance with an updated last limbo-free
     * snapshot version number.
     */
    withLastLimboFreeSnapshotVersion(lastLimboFreeSnapshotVersion) {
        return this.copyWith({ lastLimboFreeSnapshotVersion });
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Serializer for values stored in the LocalStore. Wraps the remote-wire
 * serializer, which performs the actual proto encoding/decoding.
 */
class LocalSerializer {
    constructor(remoteSerializer) {
        this.remoteSerializer = remoteSerializer;
    }
}
/**
 * Decodes a remote document from local storage into a MutableDocument.
 * A stored row holds exactly one of document / noDocument / unknownDocument;
 * hasCommittedMutations and the row's readTime are carried over when present.
 */
function fromDbRemoteDocument(localSerializer, remoteDoc) {
    let doc;
    if (remoteDoc.document) {
        doc = fromDocument(localSerializer.remoteSerializer, remoteDoc.document, !!remoteDoc.hasCommittedMutations);
    }
    else if (remoteDoc.noDocument) {
        const key = DocumentKey.fromSegments(remoteDoc.noDocument.path);
        const version = fromDbTimestamp(remoteDoc.noDocument.readTime);
        doc = MutableDocument.newNoDocument(key, version);
        if (remoteDoc.hasCommittedMutations) {
            doc.setHasCommittedMutations();
        }
    }
    else if (remoteDoc.unknownDocument) {
        const key = DocumentKey.fromSegments(remoteDoc.unknownDocument.path);
        const version = fromDbTimestamp(remoteDoc.unknownDocument.version);
        doc = MutableDocument.newUnknownDocument(key, version);
    }
    else {
        return fail();
    }
    // Row-level read time (a [seconds, nanos] key tuple) may be absent for
    // data written by older SDK versions.
    if (remoteDoc.readTime) {
        doc.setReadTime(fromDbTimestampKey(remoteDoc.readTime));
    }
    return doc;
}
/**
 * Encodes a document for storage locally. The key is split into
 * prefixPath / collectionGroup / documentId so the store can serve both
 * collection and collection-group scans; exactly one of document /
 * noDocument / unknownDocument is populated depending on document state.
 */
function toDbRemoteDocument(localSerializer, document) {
    const key = document.key;
    const remoteDoc = {
        prefixPath: key.getCollectionPath().popLast().toArray(),
        collectionGroup: key.collectionGroup,
        documentId: key.path.lastSegment(),
        readTime: toDbTimestampKey(document.readTime),
        hasCommittedMutations: document.hasCommittedMutations
    };
    if (document.isFoundDocument()) {
        remoteDoc.document = toDocument(localSerializer.remoteSerializer, document);
    }
    else if (document.isNoDocument()) {
        remoteDoc.noDocument = {
            path: key.path.toArray(),
            readTime: toDbTimestamp(document.version)
        };
    }
    else if (document.isUnknownDocument()) {
        remoteDoc.unknownDocument = {
            path: key.path.toArray(),
            version: toDbTimestamp(document.version)
        };
    }
    else {
        return fail();
    }
    return remoteDoc;
}
/** Encodes a SnapshotVersion as the [seconds, nanoseconds] storage key tuple. */
function toDbTimestampKey(snapshotVersion) {
    const { seconds, nanoseconds } = snapshotVersion.toTimestamp();
    return [seconds, nanoseconds];
}
/** Decodes a [seconds, nanoseconds] key tuple back into a SnapshotVersion. */
function fromDbTimestampKey(dbTimestampKey) {
    const [seconds, nanos] = dbTimestampKey;
    return SnapshotVersion.fromTimestamp(new Timestamp(seconds, nanos));
}
/** Encodes a SnapshotVersion as a plain {seconds, nanoseconds} record. */
function toDbTimestamp(snapshotVersion) {
    const { seconds, nanoseconds } = snapshotVersion.toTimestamp();
    return { seconds, nanoseconds };
}
/** Decodes a stored {seconds, nanoseconds} record into a SnapshotVersion. */
function fromDbTimestamp(dbTimestamp) {
    const { seconds, nanoseconds } = dbTimestamp;
    return SnapshotVersion.fromTimestamp(new Timestamp(seconds, nanoseconds));
}
/** Encodes a batch of mutations into a DbMutationBatch for local storage. */
function toDbMutationBatch(localSerializer, userId, batch) {
    const encode = m => toMutation(localSerializer.remoteSerializer, m);
    return {
        userId,
        batchId: batch.batchId,
        localWriteTimeMs: batch.localWriteTime.toMillis(),
        baseMutations: batch.baseMutations.map(encode),
        mutations: batch.mutations.map(encode)
    };
}
/**
 * Decodes a DbMutationBatch into a MutationBatch.
 *
 * Legacy schema note: older SDKs persisted transforms as a standalone
 * `transform` mutation immediately following its patch/set mutation; the
 * loop below squashes such pairs into the modern `updateTransforms`
 * representation before decoding. NOTE: this mutates dbBatch.mutations in
 * place.
 */
function fromDbMutationBatch(localSerializer, dbBatch) {
    const baseMutations = (dbBatch.baseMutations || []).map(m => fromMutation(localSerializer.remoteSerializer, m));
    // Squash old transform mutations into existing patch or set mutations.
    // The replacement of representing `transforms` with `update_transforms`
    // on the SDK means that old `transform` mutations stored in IndexedDB need
    // to be updated to `update_transforms`.
    // TODO(b/174608374): Remove this code once we perform a schema migration.
    for (let i = 0; i < dbBatch.mutations.length - 1; ++i) {
        const currentMutation = dbBatch.mutations[i];
        const hasTransform = i + 1 < dbBatch.mutations.length &&
            dbBatch.mutations[i + 1].transform !== undefined;
        if (hasTransform) {
            // Fold the trailing transform into the current mutation, remove it
            // from the list, and step past the slot that was just removed.
            const transformMutation = dbBatch.mutations[i + 1];
            currentMutation.updateTransforms =
                transformMutation.transform.fieldTransforms;
            dbBatch.mutations.splice(i + 1, 1);
            ++i;
        }
    }
    const mutations = dbBatch.mutations.map(m => fromMutation(localSerializer.remoteSerializer, m));
    const timestamp = Timestamp.fromMillis(dbBatch.localWriteTimeMs);
    return new MutationBatch(dbBatch.batchId, timestamp, baseMutations, mutations);
}
/**
 * Decodes a DbTarget row into TargetData. Stored targets are always
 * plain listens; the resume token is persisted as base64.
 */
function fromDbTarget(dbTarget) {
    const version = fromDbTimestamp(dbTarget.readTime);
    // Older rows may predate the limbo-free version field.
    const lastLimboFreeSnapshotVersion = dbTarget.lastLimboFreeSnapshotVersion !== undefined
        ? fromDbTimestamp(dbTarget.lastLimboFreeSnapshotVersion)
        : SnapshotVersion.min();
    const target = isDocumentQuery(dbTarget.query)
        ? fromDocumentsTarget(dbTarget.query)
        : fromQueryTarget(dbTarget.query);
    return new TargetData(target, dbTarget.targetId, "TargetPurposeListen" /* TargetPurpose.Listen */, dbTarget.lastListenSequenceNumber, version, lastLimboFreeSnapshotVersion, ByteString.fromBase64String(dbTarget.resumeToken));
}
/** Encodes TargetData into a DbTarget row for storage locally. */
function toDbTarget(localSerializer, targetData) {
    const queryProto = targetIsDocumentTarget(targetData.target)
        ? toDocumentsTarget(localSerializer.remoteSerializer, targetData.target)
        : toQueryTarget(localSerializer.remoteSerializer, targetData.target).queryTarget;
    // We can't store the resumeToken as a ByteString in IndexedDb, so we
    // convert it to a base64 string for storage.
    return {
        targetId: targetData.targetId,
        canonicalId: canonifyTarget(targetData.target),
        readTime: toDbTimestamp(targetData.snapshotVersion),
        resumeToken: targetData.resumeToken.toBase64(),
        // lastListenSequenceNumber is always 0 until we do real GC.
        lastListenSequenceNumber: targetData.sequenceNumber,
        lastLimboFreeSnapshotVersion: toDbTimestamp(targetData.lastLimboFreeSnapshotVersion),
        query: queryProto
    };
}
/**
 * Returns whether a stored query is a DocumentsTarget (vs. a QueryTarget);
 * used to pick the right decoder for persisted targets.
 */
function isDocumentQuery(dbQuery) {
    const { documents } = dbQuery;
    return documents !== undefined;
}
/** Decodes a DbBundle row into a BundleMetadata object. */
function fromDbBundle(dbBundle) {
    const { bundleId, createTime, version } = dbBundle;
    return {
        id: bundleId,
        createTime: fromDbTimestamp(createTime),
        version
    };
}
/** Encodes a BundleMetadata object into a DbBundle row. */
function toDbBundle(metadata) {
    const { id, createTime, version } = metadata;
    return {
        bundleId: id,
        createTime: toDbTimestamp(fromVersion(createTime)),
        version
    };
}
/** Decodes a DbNamedQuery row into a NamedQuery model object. */
function fromDbNamedQuery(dbNamedQuery) {
    const { name, bundledQuery, readTime } = dbNamedQuery;
    return {
        name,
        query: fromBundledQuery(bundledQuery),
        readTime: fromDbTimestamp(readTime)
    };
}
/** Encodes a bundle-proto NamedQuery into a DbNamedQuery row. */
function toDbNamedQuery(query) {
    const { name, readTime, bundledQuery } = query;
    return {
        name,
        readTime: toDbTimestamp(fromVersion(readTime)),
        bundledQuery
    };
}
/**
 * Decodes a bundle-proto BundledQuery into a Query object, reconstructing
 * the original query used to build the bundle — including SDK-only
 * features such as limit-to-last.
 */
function fromBundledQuery(bundledQuery) {
    const { parent, structuredQuery, limitType } = bundledQuery;
    const query = convertQueryTargetToQuery({ parent, structuredQuery });
    return limitType === 'LAST'
        ? queryWithLimit(query, query.limit, "L" /* LimitType.Last */)
        : query;
}
/** Decodes a NamedQuery proto object into a NamedQuery model object. */
function fromProtoNamedQuery(namedQuery) {
    const { name, bundledQuery, readTime } = namedQuery;
    return {
        name,
        query: fromBundledQuery(bundledQuery),
        readTime: fromVersion(readTime)
    };
}
/** Decodes a BundleMetadata proto into a BundleMetadata model object. */
function fromBundleMetadata(metadata) {
    const { id, version, createTime } = metadata;
    return {
        id,
        version,
        createTime: fromVersion(createTime)
    };
}
/** Decodes a DbDocumentOverlay row into an Overlay model object. */
function fromDbDocumentOverlay(localSerializer, dbDocumentOverlay) {
    const mutation = fromMutation(localSerializer.remoteSerializer, dbDocumentOverlay.overlayMutation);
    return new Overlay(dbDocumentOverlay.largestBatchId, mutation);
}
/** Encodes an Overlay model object into a DbDocumentOverlay row. */
function toDbDocumentOverlay(localSerializer, userId, overlay) {
    const mutationKey = overlay.mutation.key;
    const [, collectionPath, documentId] = toDbDocumentOverlayKey(userId, mutationKey);
    return {
        userId,
        collectionPath,
        documentId,
        collectionGroup: mutationKey.getCollectionGroup(),
        largestBatchId: overlay.largestBatchId,
        overlayMutation: toMutation(localSerializer.remoteSerializer, overlay.mutation)
    };
}
/**
 * Returns the DbDocumentOverlayKey tuple
 * `[userId, collectionPath, documentId]` corresponding to the given user and
 * document key.
 */
function toDbDocumentOverlayKey(userId, docKey) {
    const path = docKey.path;
    return [userId, encodeResourcePath(path.popLast()), path.lastSegment()];
}
/** Encodes a FieldIndex into its DbIndexConfiguration representation. */
function toDbIndexConfiguration(index) {
    const fields = index.fields.map(segment => [
        segment.fieldPath.canonicalString(),
        segment.kind
    ]);
    return {
        indexId: index.indexId,
        collectionGroup: index.collectionGroup,
        fields
    };
}
/**
 * Decodes a DbIndexConfiguration (and an optional DbIndexState row) into a
 * FieldIndex model object.
 */
function fromDbIndexConfiguration(index, state) {
    let decodedState;
    if (state) {
        const offset = new IndexOffset(fromDbTimestamp(state.readTime), new DocumentKey(decodeResourcePath(state.documentKey)), state.largestBatchId);
        decodedState = new IndexState(state.sequenceNumber, offset);
    }
    else {
        decodedState = IndexState.empty();
    }
    const decodedSegments = index.fields.map(([fieldPath, kind]) => new IndexSegment(FieldPath$1.fromServerFormat(fieldPath), kind));
    return new FieldIndex(index.indexId, index.collectionGroup, decodedSegments, decodedState);
}
/** Encodes per-user index backfill progress into a DbIndexState row. */
function toDbIndexState(indexId, uid, sequenceNumber, offset) {
    const { readTime, documentKey, largestBatchId } = offset;
    return {
        indexId,
        uid,
        sequenceNumber,
        readTime: toDbTimestamp(readTime),
        documentKey: encodeResourcePath(documentKey.path),
        largestBatchId
    };
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Bundle cache backed by IndexedDb: stores bundle metadata and named queries
 * in their respective object stores.
 */
class IndexedDbBundleCache {
    /** Returns the stored BundleMetadata for `bundleId`, or undefined. */
    getBundleMetadata(transaction, bundleId) {
        return bundlesStore(transaction)
            .get(bundleId)
            .next(bundle => (bundle ? fromDbBundle(bundle) : undefined));
    }
    /** Persists the given BundleMetadata, replacing any previous row. */
    saveBundleMetadata(transaction, bundleMetadata) {
        return bundlesStore(transaction).put(toDbBundle(bundleMetadata));
    }
    /** Returns the stored NamedQuery for `queryName`, or undefined. */
    getNamedQuery(transaction, queryName) {
        return namedQueriesStore(transaction)
            .get(queryName)
            .next(query => (query ? fromDbNamedQuery(query) : undefined));
    }
    /** Persists the given NamedQuery, replacing any previous row. */
    saveNamedQuery(transaction, query) {
        return namedQueriesStore(transaction).put(toDbNamedQuery(query));
    }
}
/**
 * Helper to get a typed SimpleDbStore for the bundles object store.
 */
function bundlesStore(txn) {
    const store = getStore(txn, DbBundleStore);
    return store;
}
/**
 * Helper to get a typed SimpleDbStore for the namedQueries object store.
 */
function namedQueriesStore(txn) {
    const store = getStore(txn, DbNamedQueryStore);
    return store;
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Implementation of DocumentOverlayCache using IndexedDb.
*/
class IndexedDbDocumentOverlayCache {
/**
* @param serializer - The document serializer.
* @param userId - The userId for which we are accessing overlays.
*/
constructor(serializer, userId) {
this.serializer = serializer;
this.userId = userId;
}
static forUser(serializer, user) {
const userId = user.uid || '';
return new IndexedDbDocumentOverlayCache(serializer, userId);
}
getOverlay(transaction, key) {
return documentOverlayStore(transaction)
.get(toDbDocumentOverlayKey(this.userId, key))
.next(dbOverlay => {
if (dbOverlay) {
return fromDbDocumentOverlay(this.serializer, dbOverlay);
}
return null;
});
}
getOverlays(transaction, keys) {
const result = newOverlayMap();
return PersistencePromise.forEach(keys, (key) => {
return this.getOverlay(transaction, key).next(overlay => {
if (overlay !== null) {
result.set(key, overlay);
}
});
}).next(() => result);
}
saveOverlays(transaction, largestBatchId, overlays) {
const promises = [];
overlays.forEach((_, mutation) => {
const overlay = new Overlay(largestBatchId, mutation);
promises.push(this.saveOverlay(transaction, overlay));
});
return PersistencePromise.waitFor(promises);
}
removeOverlaysForBatchId(transaction, documentKeys, batchId) {
const collectionPaths = new Set();
// Get the set of unique collection paths.
documentKeys.forEach(key => collectionPaths.add(encodeResourcePath(key.getCollectionPath())));
const promises = [];
collectionPaths.forEach(collectionPath => {
const range = IDBKeyRange.bound([this.userId, collectionPath, batchId], [this.userId, collectionPath, batchId + 1],
/*lowerOpen=*/ false,
/*upperOpen=*/ true);
promises.push(documentOverlayStore(transaction).deleteAll(DbDocumentOverlayCollectionPathOverlayIndex, range));
});
return PersistencePromise.waitFor(promises);
}
getOverlaysForCollection(transaction, collection, sinceBatchId) {
const result = newOverlayMap();
const collectionPath = encodeResourcePath(collection);
// We want batch IDs larger than `sinceBatchId`, and so the lower bound
// is not inclusive.
const range = IDBKeyRange.bound([this.userId, collectionPath, sinceBatchId], [this.userId, collectionPath, Number.POSITIVE_INFINITY],
/*lowerOpen=*/ true);
return documentOverlayStore(transaction)
.loadAll(DbDocumentOverlayCollectionPathOverlayIndex, range)
.next(dbOverlays => {
for (const dbOverlay of dbOverlays) {
const overlay = fromDbDocumentOverlay(this.serializer, dbOverlay);
result.set(overlay.getKey(), overlay);
}
return result;
});
}
getOverlaysForCollectionGroup(transaction, collectionGroup, sinceBatchId, count) {
const result = newOverlayMap();
let currentBatchId = undefined;
// We want batch IDs larger than `sinceBatchId`, and so the lower bound
// is not inclusive.
const range = IDBKeyRange.bound([this.userId, collectionGroup, sinceBatchId], [this.userId, collectionGroup, Number.POSITIVE_INFINITY],
/*lowerOpen=*/ true);
return documentOverlayStore(transaction)
.iterate({
index: DbDocumentOverlayCollectionGroupOverlayIndex,
range
}, (_, dbOverlay, control) => {
// We do not want to return partial batch overlays, even if the size
// of the result set exceeds the given `count` argument. Therefore, we
// continue to aggregate results even after the result size exceeds
// `count` if there are more overlays from the `currentBatchId`.
const overlay = fromDbDocumentOverlay(this.serializer, dbOverlay);
if (result.size() < count ||
overlay.largestBatchId === currentBatchId) {
result.set(overlay.getKey(), overlay);
currentBatchId = overlay.largestBatchId;
}
else {
control.done();
}
})
.next(() => result);
}
saveOverlay(transaction, overlay) {
return documentOverlayStore(transaction).put(toDbDocumentOverlay(this.serializer, this.userId, overlay));
}
}
/**
 * Helper to get a typed SimpleDbStore for the document overlay object store.
 */
function documentOverlayStore(txn) {
    const store = getStore(txn, DbDocumentOverlayStore);
    return store;
}
/**
* @license
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Note: This code is copied from the backend. Code that is not used by
// Firestore was removed.
// Type labels written before each index value's payload. The values are
// spaced out to leave room for additional types; they are taken from the
// backend and must not be changed.
const INDEX_TYPE_NULL = 5;
const INDEX_TYPE_BOOLEAN = 10;
const INDEX_TYPE_NAN = 13;
const INDEX_TYPE_NUMBER = 15;
const INDEX_TYPE_TIMESTAMP = 20;
const INDEX_TYPE_STRING = 25;
const INDEX_TYPE_BLOB = 30;
const INDEX_TYPE_REFERENCE = 37;
const INDEX_TYPE_GEOPOINT = 45;
const INDEX_TYPE_ARRAY = 50;
const INDEX_TYPE_MAP = 55;
const INDEX_TYPE_REFERENCE_SEGMENT = 60;
// A terminator that indicates that a truncatable value was not truncated.
// This must be smaller than all other type labels.
const NOT_TRUNCATED = 2;
/** Firestore index value writer. */
class FirestoreIndexValueWriter {
constructor() { }
// The write methods below short-circuit writing terminators for values
// containing a (terminating) truncated value.
//
// As an example, consider the resulting encoding for:
//
// ["bar", [2, "foo"]] -> (STRING, "bar", TERM, ARRAY, NUMBER, 2, STRING, "foo", TERM, TERM, TERM)
// ["bar", [2, truncated("foo")]] -> (STRING, "bar", TERM, ARRAY, NUMBER, 2, STRING, "foo", TRUNC)
// ["bar", truncated(["foo"])] -> (STRING, "bar", TERM, ARRAY. STRING, "foo", TERM, TRUNC)
/** Writes an index value. */
writeIndexValue(value, encoder) {
this.writeIndexValueAux(value, encoder);
// Write separator to split index values
// (see go/firestore-storage-format#encodings).
encoder.writeInfinity();
}
writeIndexValueAux(indexValue, encoder) {
if ('nullValue' in indexValue) {
this.writeValueTypeLabel(encoder, INDEX_TYPE_NULL);
}
else if ('booleanValue' in indexValue) {
this.writeValueTypeLabel(encoder, INDEX_TYPE_BOOLEAN);
encoder.writeNumber(indexValue.booleanValue ? 1 : 0);
}
else if ('integerValue' in indexValue) {
this.writeValueTypeLabel(encoder, INDEX_TYPE_NUMBER);
encoder.writeNumber(normalizeNumber(indexValue.integerValue));
}
else if ('doubleValue' in indexValue) {
const n = normalizeNumber(indexValue.doubleValue);
if (isNaN(n)) {
this.writeValueTypeLabel(encoder, INDEX_TYPE_NAN);
}
else {
this.writeValueTypeLabel(encoder, INDEX_TYPE_NUMBER);
if (isNegativeZero(n)) {
// -0.0, 0 and 0.0 are all considered the same
encoder.writeNumber(0.0);
}
else {
encoder.writeNumber(n);
}
}
}
else if ('timestampValue' in indexValue) {
const timestamp = indexValue.timestampValue;
this.writeValueTypeLabel(encoder, INDEX_TYPE_TIMESTAMP);
if (typeof timestamp === 'string') {
encoder.writeString(timestamp);
}
else {
encoder.writeString(`${timestamp.seconds || ''}`);
encoder.writeNumber(timestamp.nanos || 0);
}
}
else if ('stringValue' in indexValue) {
this.writeIndexString(indexValue.stringValue, encoder);
this.writeTruncationMarker(encoder);
}
else if ('bytesValue' in indexValue) {
this.writeValueTypeLabel(encoder, INDEX_TYPE_BLOB);
encoder.writeBytes(normalizeByteString(indexValue.bytesValue));
this.writeTruncationMarker(encoder);
}
else if ('referenceValue' in indexValue) {
this.writeIndexEntityRef(indexValue.referenceValue, encoder);
}
else if ('geoPointValue' in indexValue) {
const geoPoint = indexValue.geoPointValue;
this.writeValueTypeLabel(encoder, INDEX_TYPE_GEOPOINT);
encoder.writeNumber(geoPoint.latitude || 0);
encoder.writeNumber(geoPoint.longitude || 0);
}
else if ('mapValue' in indexValue) {
if (isMaxValue(indexValue)) {
this.writeValueTypeLabel(encoder, Number.MAX_SAFE_INTEGER);
}
else {
this.writeIndexMap(indexValue.mapValue, encoder);
this.writeTruncationMarker(encoder);
}
}
else if ('arrayValue' in indexValue) {
this.writeIndexArray(indexValue.arrayValue, encoder);
this.writeTruncationMarker(encoder);
}
else {
fail();
}
}
writeIndexString(stringIndexValue, encoder) {
this.writeValueTypeLabel(encoder, INDEX_TYPE_STRING);
this.writeUnlabeledIndexString(stringIndexValue, encoder);
}
writeUnlabeledIndexString(stringIndexValue, encoder) {
encoder.writeString(stringIndexValue);
}
writeIndexMap(mapIndexValue, encoder) {
const map = mapIndexValue.fields || {};
this.writeValueTypeLabel(encoder, INDEX_TYPE_MAP);
for (const key of Object.keys(map)) {
this.writeIndexString(key, encoder);
this.writeIndexValueAux(map[key], encoder);
}
}
writeIndexArray(arrayIndexValue, encoder) {
const values = arrayIndexValue.values || [];
this.writeValueTypeLabel(encoder, INDEX_TYPE_ARRAY);
for (const element of values) {
this.writeIndexValueAux(element, encoder);
}
}
writeIndexEntityRef(referenceValue, encoder) {
this.writeValueTypeLabel(encoder, INDEX_TYPE_REFERENCE);
const path = DocumentKey.fromName(referenceValue).path;
path.forEach(segment => {
this.writeValueTypeLabel(encoder, INDEX_TYPE_REFERENCE_SEGMENT);
this.writeUnlabeledIndexString(segment, encoder);
});
}
writeValueTypeLabel(encoder, typeOrder) {
encoder.writeNumber(typeOrder);
}
writeTruncationMarker(encoder) {
// While the SDK does not implement truncation, the truncation marker is
// used to terminate all variable length values (which are strings, bytes,
// references, arrays and maps).
encoder.writeNumber(NOT_TRUNCATED);
}
}
FirestoreIndexValueWriter.INSTANCE = new FirestoreIndexValueWriter();
/**
* @license
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** These constants are taken from the backend. */
const MIN_SURROGATE = '\uD800';
const MAX_SURROGATE = '\uDBFF';
// 0x00 and 0xff cannot appear literally in the encoded output; each is
// written as a two-byte escape sequence built from the pairs below.
const ESCAPE1 = 0x00;
const NULL_BYTE = 0xff; // Combined with ESCAPE1
const SEPARATOR = 0x01; // Combined with ESCAPE1
const ESCAPE2 = 0xff;
const INFINITY = 0xff; // Combined with ESCAPE2
const FF_BYTE = 0x00; // Combined with ESCAPE2
// Bit widths used when encoding numbers as 64-bit big-endian values.
const LONG_SIZE = 64;
const BYTE_SIZE = 8;
/**
 * The default size of the buffer. This is arbitrary, but likely larger than
 * most index values so that less copies of the underlying buffer will be made.
 * For large values, a single copy will be made to double the buffer length.
 */
const DEFAULT_BUFFER_SIZE = 1024;
/** Converts a JavaScript number to an 8-byte array (big endian IEEE 754). */
function doubleToLongBits(value) {
    const raw = new ArrayBuffer(8);
    new DataView(raw).setFloat64(0, value, /* littleEndian= */ false);
    return new Uint8Array(raw);
}
/**
 * Counts the number of leading zero bits in a byte (8 for zero).
 *
 * Visible for testing.
 */
function numberOfLeadingZerosInByte(x) {
    if (x === 0) {
        return 8;
    }
    let bits = x;
    let zeros = 0;
    // Binary-search for the highest set bit: test the top nibble, then the
    // next two bits, then the final bit, shifting left as high positions are
    // ruled out.
    if (bits >> 4 === 0) {
        zeros += 4;
        bits <<= 4;
    }
    if (bits >> 6 === 0) {
        zeros += 2;
        bits <<= 2;
    }
    if (bits >> 7 === 0) {
        zeros += 1;
    }
    return zeros;
}
/**
 * Counts the number of leading zero bits across the 8 bytes of `bytes`,
 * stopping at the first byte containing a set bit.
 */
function numberOfLeadingZeros(bytes) {
    let total = 0;
    for (let i = 0; i < 8; ++i) {
        const zerosInByte = numberOfLeadingZerosInByte(bytes[i] & 0xff);
        total += zerosInByte;
        if (zerosInByte < 8) {
            break;
        }
    }
    return total;
}
/**
 * Returns the number of bytes required to store "value". Leading zero bytes
 * are skipped.
 */
function unsignedNumLength(value) {
    // Number of significant bits in the unsigned representation.
    const significantBits = LONG_SIZE - numberOfLeadingZeros(value);
    return Math.ceil(significantBits / BYTE_SIZE);
}
/**
* OrderedCodeWriter is a minimal-allocation implementation of the writing
* behavior defined by the backend.
*
* The code is ported from its Java counterpart.
*/
class OrderedCodeWriter {
constructor() {
this.buffer = new Uint8Array(DEFAULT_BUFFER_SIZE);
this.position = 0;
}
writeBytesAscending(value) {
const it = value[Symbol.iterator]();
let byte = it.next();
while (!byte.done) {
this.writeByteAscending(byte.value);
byte = it.next();
}
this.writeSeparatorAscending();
}
writeBytesDescending(value) {
const it = value[Symbol.iterator]();
let byte = it.next();
while (!byte.done) {
this.writeByteDescending(byte.value);
byte = it.next();
}
this.writeSeparatorDescending();
}
/** Writes utf8 bytes into this byte sequence, ascending. */
writeUtf8Ascending(sequence) {
for (const c of sequence) {
const charCode = c.charCodeAt(0);
if (charCode < 0x80) {
this.writeByteAscending(charCode);
}
else if (charCode < 0x800) {
this.writeByteAscending((0x0f << 6) | (charCode >>> 6));
this.writeByteAscending(0x80 | (0x3f & charCode));
}
else if (c < MIN_SURROGATE || MAX_SURROGATE < c) {
this.writeByteAscending((0x0f << 5) | (charCode >>> 12));
this.writeByteAscending(0x80 | (0x3f & (charCode >>> 6)));
this.writeByteAscending(0x80 | (0x3f & charCode));
}
else {
const codePoint = c.codePointAt(0);
this.writeByteAscending((0x0f << 4) | (codePoint >>> 18));
this.writeByteAscending(0x80 | (0x3f & (codePoint >>> 12)));
this.writeByteAscending(0x80 | (0x3f & (codePoint >>> 6)));
this.writeByteAscending(0x80 | (0x3f & codePoint));
}
}
this.writeSeparatorAscending();
}
/** Writes utf8 bytes into this byte sequence, descending */
writeUtf8Descending(sequence) {
for (const c of sequence) {
const charCode = c.charCodeAt(0);
if (charCode < 0x80) {
this.writeByteDescending(charCode);
}
else if (charCode < 0x800) {
this.writeByteDescending((0x0f << 6) | (charCode >>> 6));
this.writeByteDescending(0x80 | (0x3f & charCode));
}
else if (c < MIN_SURROGATE || MAX_SURROGATE < c) {
this.writeByteDescending((0x0f << 5) | (charCode >>> 12));
this.writeByteDescending(0x80 | (0x3f & (charCode >>> 6)));
this.writeByteDescending(0x80 | (0x3f & charCode));
}
else {
const codePoint = c.codePointAt(0);
this.writeByteDescending((0x0f << 4) | (codePoint >>> 18));
this.writeByteDescending(0x80 | (0x3f & (codePoint >>> 12)));
this.writeByteDescending(0x80 | (0x3f & (codePoint >>> 6)));
this.writeByteDescending(0x80 | (0x3f & codePoint));
}
}
this.writeSeparatorDescending();
}
writeNumberAscending(val) {
// Values are encoded with a single byte length prefix, followed by the
// actual value in big-endian format with leading 0 bytes dropped.
const value = this.toOrderedBits(val);
const len = unsignedNumLength(value);
this.ensureAvailable(1 + len);
this.buffer[this.position++] = len & 0xff; // Write the length
for (let i = value.length - len; i < value.length; ++i) {
this.buffer[this.position++] = value[i] & 0xff;
}
}
writeNumberDescending(val) {
// Values are encoded with a single byte length prefix, followed by the
// inverted value in big-endian format with leading 0 bytes dropped.
const value = this.toOrderedBits(val);
const len = unsignedNumLength(value);
this.ensureAvailable(1 + len);
this.buffer[this.position++] = ~(len & 0xff); // Write the length
for (let i = value.length - len; i < value.length; ++i) {
this.buffer[this.position++] = ~(value[i] & 0xff);
}
}
/**
* Writes the "infinity" byte sequence that sorts after all other byte
* sequences written in ascending order.
*/
writeInfinityAscending() {
this.writeEscapedByteAscending(ESCAPE2);
this.writeEscapedByteAscending(INFINITY);
}
/**
* Writes the "infinity" byte sequence that sorts before all other byte
* sequences written in descending order.
*/
writeInfinityDescending() {
this.writeEscapedByteDescending(ESCAPE2);
this.writeEscapedByteDescending(INFINITY);
}
/**
* Resets the buffer such that it is the same as when it was newly
* constructed.
*/
reset() {
this.position = 0;
}
seed(encodedBytes) {
this.ensureAvailable(encodedBytes.length);
this.buffer.set(encodedBytes, this.position);
this.position += encodedBytes.length;
}
/** Makes a copy of the encoded bytes in this buffer. */
encodedBytes() {
return this.buffer.slice(0, this.position);
}
/**
* Encodes `val` into an encoding so that the order matches the IEEE 754
* floating-point comparison results with the following exceptions:
* -0.0 < 0.0
* all non-NaN < NaN
* NaN = NaN
*/
toOrderedBits(val) {
const value = doubleToLongBits(val);
// Check if the first bit is set. We use a bit mask since value[0] is
// encoded as a number from 0 to 255.
const isNegative = (value[0] & 0x80) !== 0;
// Revert the two complement to get natural ordering
value[0] ^= isNegative ? 0xff : 0x80;
for (let i = 1; i < value.length; ++i) {
value[i] ^= isNegative ? 0xff : 0x00;
}
return value;
}
/** Writes a single byte ascending to the buffer. */
writeByteAscending(b) {
const masked = b & 0xff;
if (masked === ESCAPE1) {
this.writeEscapedByteAscending(ESCAPE1);
this.writeEscapedByteAscending(NULL_BYTE);
}
else if (masked === ESCAPE2) {
this.writeEscapedByteAscending(ESCAPE2);
this.writeEscapedByteAscending(FF_BYTE);
}
else {
this.writeEscapedByteAscending(masked);
}
}
/** Writes a single byte descending to the buffer. */
writeByteDescending(b) {
const masked = b & 0xff;
if (masked === ESCAPE1) {
this.writeEscapedByteDescending(ESCAPE1);
this.writeEscapedByteDescending(NULL_BYTE);
}
else if (masked === ESCAPE2) {
this.writeEscapedByteDescending(ESCAPE2);
this.writeEscapedByteDescending(FF_BYTE);
}
else {
this.writeEscapedByteDescending(b);
}
}
writeSeparatorAscending() {
this.writeEscapedByteAscending(ESCAPE1);
this.writeEscapedByteAscending(SEPARATOR);
}
writeSeparatorDescending() {
this.writeEscapedByteDescending(ESCAPE1);
this.writeEscapedByteDescending(SEPARATOR);
}
writeEscapedByteAscending(b) {
this.ensureAvailable(1);
this.buffer[this.position++] = b;
}
writeEscapedByteDescending(b) {
this.ensureAvailable(1);
this.buffer[this.position++] = ~b;
}
ensureAvailable(bytes) {
const minCapacity = bytes + this.position;
if (minCapacity <= this.buffer.length) {
return;
}
// Try doubling.
let newLength = this.buffer.length * 2;
// Still not big enough? Just allocate the right size.
if (newLength < minCapacity) {
newLength = minCapacity;
}
// Create the new buffer.
const newBuffer = new Uint8Array(newLength);
newBuffer.set(this.buffer); // copy old data
this.buffer = newBuffer;
}
}
/**
 * DirectionalIndexByteEncoder that forwards every write to the wrapped
 * OrderedCodeWriter using its ascending encodings.
 */
class AscendingIndexByteEncoder {
    constructor(orderedCode) {
        this.orderedCode = orderedCode;
    }
    writeInfinity() {
        this.orderedCode.writeInfinityAscending();
    }
    writeNumber(value) {
        this.orderedCode.writeNumberAscending(value);
    }
    writeString(value) {
        this.orderedCode.writeUtf8Ascending(value);
    }
    writeBytes(value) {
        this.orderedCode.writeBytesAscending(value);
    }
}
/**
 * DirectionalIndexByteEncoder that forwards every write to the wrapped
 * OrderedCodeWriter using its descending encodings.
 */
class DescendingIndexByteEncoder {
    constructor(orderedCode) {
        this.orderedCode = orderedCode;
    }
    writeInfinity() {
        this.orderedCode.writeInfinityDescending();
    }
    writeNumber(value) {
        this.orderedCode.writeNumberDescending(value);
    }
    writeString(value) {
        this.orderedCode.writeUtf8Descending(value);
    }
    writeBytes(value) {
        this.orderedCode.writeBytesDescending(value);
    }
}
/**
 * Implements `DirectionalIndexByteEncoder` using `OrderedCodeWriter` for the
 * actual encoding.
 */
class IndexByteEncoder {
    constructor() {
        this.orderedCode = new OrderedCodeWriter();
        this.ascending = new AscendingIndexByteEncoder(this.orderedCode);
        this.descending = new DescendingIndexByteEncoder(this.orderedCode);
    }
    /** Pre-populates the underlying writer with previously encoded bytes. */
    seed(encodedBytes) {
        this.orderedCode.seed(encodedBytes);
    }
    /** Returns the directional encoder facade for the given segment kind. */
    forKind(kind) {
        if (kind === 0 /* IndexKind.ASCENDING */) {
            return this.ascending;
        }
        return this.descending;
    }
    /** Returns a copy of everything written so far. */
    encodedBytes() {
        return this.orderedCode.encodedBytes();
    }
    /** Clears the underlying writer so this encoder can be reused. */
    reset() {
        this.orderedCode.reset();
    }
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Represents an index entry saved by the SDK in persisted storage. */
class IndexEntry {
    constructor(indexId, documentKey, arrayValue, directionalValue) {
        this.indexId = indexId;
        this.documentKey = documentKey;
        this.arrayValue = arrayValue;
        this.directionalValue = directionalValue;
    }
    /**
     * Returns an IndexEntry entry that sorts immediately after the current
     * directional value.
     */
    successor() {
        const bytes = this.directionalValue;
        const lastByteIsMax = bytes.length === 0 || bytes[bytes.length - 1] === 255;
        let successorBytes;
        if (lastByteIsMax) {
            // The last byte cannot be incremented; append a 0x00 instead.
            successorBytes = new Uint8Array(bytes.length + 1);
            successorBytes.set(bytes, 0);
        }
        else {
            // Copy and bump the final byte.
            successorBytes = new Uint8Array(bytes);
            successorBytes[successorBytes.length - 1] += 1;
        }
        return new IndexEntry(this.indexId, this.documentKey, this.arrayValue, successorBytes);
    }
}
/**
 * Orders index entries by indexId, then array value, then directional value,
 * and finally by document key.
 */
function indexEntryComparator(left, right) {
    const byIndexId = left.indexId - right.indexId;
    if (byIndexId !== 0) {
        return byIndexId;
    }
    const byArrayValue = compareByteArrays(left.arrayValue, right.arrayValue);
    if (byArrayValue !== 0) {
        return byArrayValue;
    }
    const byDirectionalValue = compareByteArrays(left.directionalValue, right.directionalValue);
    if (byDirectionalValue !== 0) {
        return byDirectionalValue;
    }
    return DocumentKey.comparator(left.documentKey, right.documentKey);
}
/**
 * Lexicographically compares two byte arrays element-wise; on a shared
 * prefix, the shorter array orders first.
 */
function compareByteArrays(left, right) {
    const sharedLength = Math.min(left.length, right.length);
    for (let i = 0; i < sharedLength; ++i) {
        const delta = left[i] - right[i];
        if (delta !== 0) {
            return delta;
        }
    }
    return left.length - right.length;
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A light query planner for Firestore.
*
* This class matches a `FieldIndex` against a Firestore Query `Target`. It
* determines whether a given index can be used to serve the specified target.
*
* The following table showcases some possible index configurations:
*
* Query | Index
* -----------------------------------------------------------------------------
* where('a', '==', 'a').where('b', '==', 'b') | a ASC, b DESC
* where('a', '==', 'a').where('b', '==', 'b') | a ASC
* where('a', '==', 'a').where('b', '==', 'b') | b DESC
* where('a', '>=', 'a').orderBy('a') | a ASC
* where('a', '>=', 'a').orderBy('a', 'desc') | a DESC
* where('a', '>=', 'a').orderBy('a').orderBy('b') | a ASC, b ASC
* where('a', '>=', 'a').orderBy('a').orderBy('b') | a ASC
* where('a', 'array-contains', 'a').orderBy('b') | a CONTAINS, b ASCENDING
* where('a', 'array-contains', 'a').orderBy('b') | a CONTAINS
*/
class TargetIndexMatcher {
constructor(target) {
// The inequality filters of the target (if it exists).
// Note: The sort on FieldFilters is not required. Using SortedSet here just to utilize the custom
// comparator.
this.inequalityFilters = new SortedSet((lhs, rhs) => FieldPath$1.comparator(lhs.field, rhs.field));
this.collectionId =
target.collectionGroup != null
? target.collectionGroup
: target.path.lastSegment();
this.orderBys = target.orderBy;
this.equalityFilters = [];
for (const filter of target.filters) {
const fieldFilter = filter;
if (fieldFilter.isInequality()) {
this.inequalityFilters = this.inequalityFilters.add(fieldFilter);
}
else {
this.equalityFilters.push(fieldFilter);
}
}
}
get hasMultipleInequality() {
return this.inequalityFilters.size > 1;
}
    /**
     * Returns whether the index can be used to serve the TargetIndexMatcher's
     * target.
     *
     * An index is considered capable of serving the target when:
     * - The target uses all index segments for its filters and orderBy
     *   clauses. The target can have additional filter and orderBy clauses,
     *   but not fewer.
     * - If an ArrayContains/ArrayContainsAny filter is used, the index must
     *   also have a corresponding `CONTAINS` segment.
     * - All directional index segments can be mapped to the target as a
     *   series of equality filters, a single inequality filter and a series
     *   of orderBy clauses.
     * - The segments that represent the equality filters may appear out of
     *   order.
     * - The optional segment for the inequality filter must appear after all
     *   equality segments.
     * - The segments that represent that orderBy clause of the target must
     *   appear in order after all equality and inequality segments. Single
     *   orderBy clauses cannot be skipped, but a continuous orderBy suffix
     *   may be omitted.
     */
    servedByIndex(index) {
        // The index must be defined on the same collection (group) as the target.
        hardAssert(index.collectionGroup === this.collectionId);
        if (this.hasMultipleInequality) {
            // Only single inequality is supported for now.
            // TODO(Add support for multiple inequality query): b/298441043
            return false;
        }
        // If there is an array element, find a matching filter.
        const arraySegment = fieldIndexGetArraySegment(index);
        if (arraySegment !== undefined &&
            !this.hasMatchingEqualityFilter(arraySegment)) {
            return false;
        }
        const segments = fieldIndexGetDirectionalSegments(index);
        let equalitySegments = new Set();
        let segmentIndex = 0;
        let orderBysIndex = 0;
        // Process all equalities first. Equalities can appear out of order.
        for (; segmentIndex < segments.length; ++segmentIndex) {
            // We attempt to greedily match all segments to equality filters. If a
            // filter matches an index segment, we can mark the segment as used.
            if (this.hasMatchingEqualityFilter(segments[segmentIndex])) {
                equalitySegments = equalitySegments.add(segments[segmentIndex].fieldPath.canonicalString());
            }
            else {
                // If we cannot find a matching filter, we need to verify whether the
                // remaining segments map to the target's inequality and its orderBy
                // clauses.
                break;
            }
        }
        // If we already have processed all segments, all segments are used to serve
        // the equality filters and we do not need to map any segments to the
        // target's inequality and orderBy clauses.
        if (segmentIndex === segments.length) {
            return true;
        }
        if (this.inequalityFilters.size > 0) {
            // Only a single inequality is currently supported. Get the only entry in the set.
            const inequalityFilter = this.inequalityFilters.getIterator().getNext();
            // If there is an inequality filter and the field was not in one of the
            // equality filters above, the next segment must match both the filter
            // and the first orderBy clause.
            if (!equalitySegments.has(inequalityFilter.field.canonicalString())) {
                const segment = segments[segmentIndex];
                if (!this.matchesFilter(inequalityFilter, segment) ||
                    !this.matchesOrderBy(this.orderBys[orderBysIndex++], segment)) {
                    return false;
                }
            }
            ++segmentIndex;
        }
        // All remaining segments need to represent the prefix of the target's
        // orderBy.
        for (; segmentIndex < segments.length; ++segmentIndex) {
            const segment = segments[segmentIndex];
            if (orderBysIndex >= this.orderBys.length ||
                !this.matchesOrderBy(this.orderBys[orderBysIndex++], segment)) {
                return false;
            }
        }
        return true;
    }
    /**
     * Returns a full matched field index for this target. Currently multiple
     * inequality query is not supported so function returns null.
     */
    buildTargetIndex() {
        if (this.hasMultipleInequality) {
            return null;
        }
        // We want to make sure only one segment created for one field. For example,
        // in case like a == 3 and a > 2, Index {a ASCENDING} will only be created
        // once.
        let uniqueFields = new SortedSet(FieldPath$1.comparator);
        const segments = [];
        for (const filter of this.equalityFilters) {
            // Key-field filters are skipped, mirroring the key order-by
            // handling below.
            if (filter.field.isKeyField()) {
                continue;
            }
            const isArrayOperator = filter.op === "array-contains" /* Operator.ARRAY_CONTAINS */ ||
                filter.op === "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */;
            if (isArrayOperator) {
                segments.push(new IndexSegment(filter.field, 2 /* IndexKind.CONTAINS */));
            }
            else {
                if (uniqueFields.has(filter.field)) {
                    continue;
                }
                uniqueFields = uniqueFields.add(filter.field);
                segments.push(new IndexSegment(filter.field, 0 /* IndexKind.ASCENDING */));
            }
        }
        // Note: We do not explicitly check `this.inequalityFilter` but rather rely
        // on the target defining an appropriate "order by" to ensure that the
        // required index segment is added. The query engine would reject a query
        // with an inequality filter that lacks the required order-by clause.
        for (const orderBy of this.orderBys) {
            // Stop adding more segments if we see a order-by on key. Typically this
            // is the default implicit order-by which is covered in the index_entry
            // table as a separate column. If it is not the default order-by, the
            // generated index will be missing some segments optimized for order-bys,
            // which is probably fine.
            if (orderBy.field.isKeyField()) {
                continue;
            }
            if (uniqueFields.has(orderBy.field)) {
                continue;
            }
            uniqueFields = uniqueFields.add(orderBy.field);
            segments.push(new IndexSegment(orderBy.field, orderBy.dir === "asc" /* Direction.ASCENDING */
                ? 0 /* IndexKind.ASCENDING */
                : 1 /* IndexKind.DESCENDING */));
        }
        return new FieldIndex(FieldIndex.UNKNOWN_ID, this.collectionId, segments, IndexState.empty());
    }
hasMatchingEqualityFilter(segment) {
for (const filter of this.equalityFilters) {
if (this.matchesFilter(filter, segment)) {
return true;
}
}
return false;
}
matchesFilter(filter, segment) {
if (filter === undefined || !filter.field.isEqual(segment.fieldPath)) {
return false;
}
const isArrayOperator = filter.op === "array-contains" /* Operator.ARRAY_CONTAINS */ ||
filter.op === "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */;
return (segment.kind === 2 /* IndexKind.CONTAINS */) === isArrayOperator;
}
matchesOrderBy(orderBy, segment) {
if (!orderBy.field.isEqual(segment.fieldPath)) {
return false;
}
return ((segment.kind === 0 /* IndexKind.ASCENDING */ &&
orderBy.dir === "asc" /* Direction.ASCENDING */) ||
(segment.kind === 1 /* IndexKind.DESCENDING */ &&
orderBy.dir === "desc" /* Direction.DESCENDING */));
}
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides utility functions that help with boolean logic transformations needed for handling
* complex filters used in queries.
*/
/**
* The `in` filter is only a syntactic sugar over a disjunction of equalities. For instance: `a in
* [1,2,3]` is in fact `a==1 || a==2 || a==3`. This method expands any `in` filter in the given
* input into a disjunction of equality filters and returns the expanded filter.
*/
function computeInExpansion(filter) {
    hardAssert(filter instanceof FieldFilter || filter instanceof CompositeFilter);
    if (filter instanceof FieldFilter) {
        if (!(filter instanceof InFilter)) {
            // Any other kind of field filter is left untouched.
            return filter;
        }
        // Expand `field in [v1, v2]` into `field == v1 || field == v2`.
        const arrayValue = filter.value.arrayValue;
        const values = (arrayValue && arrayValue.values) || [];
        const equalities = values.map(value => FieldFilter.create(filter.field, "==" /* Operator.EQUAL */, value));
        return CompositeFilter.create(equalities, "or" /* CompositeOperator.OR */);
    }
    // Composite filter: expand each subfilter recursively, keeping the operator.
    const expanded = filter.filters.map(subfilter => computeInExpansion(subfilter));
    return CompositeFilter.create(expanded, filter.op);
}
/**
* Given a composite filter, returns the list of terms in its disjunctive normal form.
*
* <p>Each element in the return value is one term of the resulting DNF. For instance: For the
* input: (A || B) && C, the DNF form is: (A && C) || (B && C), and the return value is a list
* with two elements: a composite filter that performs (A && C), and a composite filter that
* performs (B && C).
*
* @param filter the composite filter to calculate DNF transform for.
* @return the terms in the DNF transform.
*/
function getDnfTerms(filter) {
    if (filter.getFilters().length === 0) {
        return [];
    }
    // Expand `in` filters first, then distribute conjunctions over disjunctions.
    const dnf = computeDistributedNormalForm(computeInExpansion(filter));
    hardAssert(isDisjunctiveNormalForm(dnf));
    // A single field filter or one flat conjunction is itself the only term.
    const isSingleTerm = isSingleFieldFilter(dnf) || isFlatConjunction(dnf);
    return isSingleTerm ? [dnf] : dnf.getFilters();
}
/** Returns true if the given filter is a single field filter, e.g. (a == 10). */
function isSingleFieldFilter(filter) {
    return filter instanceof FieldFilter;
}
/**
* Returns true if the given filter is the conjunction of one or more field filters. e.g. (a == 10
* && b == 20)
*/
function isFlatConjunction(filter) {
    // Only composite filters can be conjunctions at all.
    if (!(filter instanceof CompositeFilter)) {
        return false;
    }
    return compositeFilterIsFlatConjunction(filter);
}
/**
* Returns whether or not the given filter is in disjunctive normal form (DNF).
*
* <p>In boolean logic, a disjunctive normal form (DNF) is a canonical normal form of a logical
* formula consisting of a disjunction of conjunctions; it can also be described as an OR of ANDs.
*
* <p>For more info, visit: https://en.wikipedia.org/wiki/Disjunctive_normal_form
*/
function isDisjunctiveNormalForm(filter) {
    // DNF is satisfied by a lone field filter, a flat AND of field filters,
    // or an OR whose terms are each one of the former two shapes.
    if (isSingleFieldFilter(filter)) {
        return true;
    }
    if (isFlatConjunction(filter)) {
        return true;
    }
    return isDisjunctionOfFieldFiltersAndFlatConjunctions(filter);
}
/**
* Returns true if the given filter is the disjunction of one or more "flat conjunctions" and
* field filters. e.g. (a == 10) || (b==20 && c==30)
*/
function isDisjunctionOfFieldFiltersAndFlatConjunctions(filter) {
    if (!(filter instanceof CompositeFilter) ||
        !compositeFilterIsDisjunction(filter)) {
        return false;
    }
    // Every term of the disjunction must itself be DNF-shaped: either a single
    // field filter or a flat conjunction of field filters.
    return filter
        .getFilters()
        .every(subFilter => isSingleFieldFilter(subFilter) || isFlatConjunction(subFilter));
}
/**
 * Recursively rewrites `filter` into disjunctive normal form by distributing
 * conjunctions over disjunctions.
 */
function computeDistributedNormalForm(filter) {
    hardAssert(filter instanceof FieldFilter || filter instanceof CompositeFilter);
    // Field filters are already in DNF.
    if (filter instanceof FieldFilter) {
        return filter;
    }
    // A composite with a single member is equivalent to that member.
    if (filter.filters.length === 1) {
        return computeDistributedNormalForm(filter.filters[0]);
    }
    // Normalize every subfilter first, then re-associate the combined result.
    const normalizedSubfilters = filter.filters.map(subfilter => computeDistributedNormalForm(subfilter));
    const associated = applyAssociation(CompositeFilter.create(normalizedSubfilters, filter.op));
    if (isDisjunctiveNormalForm(associated)) {
        return associated;
    }
    hardAssert(associated instanceof CompositeFilter);
    hardAssert(compositeFilterIsConjunction(associated));
    hardAssert(associated.filters.length > 1);
    // Distribute the conjunction pairwise until the whole filter is in DNF.
    return associated.filters.reduce((runningResult, subfilter) => applyDistribution(runningResult, subfilter));
}
/**
 * Distributes the conjunction of `lhs` and `rhs`, dispatching on which sides
 * are field filters vs. composite filters, then re-associates the result.
 */
function applyDistribution(lhs, rhs) {
    hardAssert(lhs instanceof FieldFilter || lhs instanceof CompositeFilter);
    hardAssert(rhs instanceof FieldFilter || rhs instanceof CompositeFilter);
    const lhsIsField = lhs instanceof FieldFilter;
    const rhsIsField = rhs instanceof FieldFilter;
    let distributed;
    if (lhsIsField && rhsIsField) {
        // FieldFilter FieldFilter
        distributed = applyDistributionFieldFilters(lhs, rhs);
    }
    else if (lhsIsField) {
        // FieldFilter CompositeFilter
        distributed = applyDistributionFieldAndCompositeFilters(lhs, rhs);
    }
    else if (rhsIsField) {
        // CompositeFilter FieldFilter
        distributed = applyDistributionFieldAndCompositeFilters(rhs, lhs);
    }
    else {
        // CompositeFilter CompositeFilter
        distributed = applyDistributionCompositeFilters(lhs, rhs);
    }
    return applyAssociation(distributed);
}
/** Distributes a conjunction over two field filters: A & B --> (A & B). */
function applyDistributionFieldFilters(lhs, rhs) {
    // Conjunction distribution for two field filters is the conjunction of them.
    return CompositeFilter.create([lhs, rhs], "and" /* CompositeOperator.AND */);
}
function applyDistributionCompositeFilters(lhs, rhs) {
    hardAssert(lhs.filters.length > 0 && rhs.filters.length > 0);
    // Four possible shapes:
    //   (A & B) & (C & D) --> (A & B & C & D)
    //   (A & B) & (C | D) --> (A & B & C) | (A & B & D)
    //   (A | B) & (C & D) --> (C & D & A) | (C & D & B)
    //   (A | B) & (C | D) --> (A & C) | (A & D) | (B & C) | (B & D)
    if (compositeFilterIsConjunction(lhs) && compositeFilterIsConjunction(rhs)) {
        // Two conjunctions simply merge into one.
        return compositeFilterWithAddedFilters(lhs, rhs.getFilters());
    }
    // At least one side is a disjunction: distribute each of its terms over the
    // other side and return the disjunction of the distribution results.
    const lhsIsDisjunction = compositeFilterIsDisjunction(lhs);
    const disjunctionSide = lhsIsDisjunction ? lhs : rhs;
    const otherSide = lhsIsDisjunction ? rhs : lhs;
    const distributedTerms = disjunctionSide.filters.map(subfilter => applyDistribution(subfilter, otherSide));
    return CompositeFilter.create(distributedTerms, "or" /* CompositeOperator.OR */);
}
function applyDistributionFieldAndCompositeFilters(fieldFilter, compositeFilter) {
    // Two possible shapes:
    //   A & (B & C) --> (A & B & C)
    //   A & (B | C) --> (A & B) | (A & C)
    if (compositeFilterIsConjunction(compositeFilter)) {
        // Conjunction: simply append the field filter's terms to it.
        return compositeFilterWithAddedFilters(compositeFilter, fieldFilter.getFilters());
    }
    // Disjunction: distribute the field filter over each term.
    const distributedTerms = compositeFilter.filters.map(subfilter => applyDistribution(fieldFilter, subfilter));
    return CompositeFilter.create(distributedTerms, "or" /* CompositeOperator.OR */);
}
/**
* Applies the associativity property to the given filter and returns the resulting filter.
*
* <ul>
* <li>A | (B | C) == (A | B) | C == (A | B | C)
* <li>A & (B & C) == (A & B) & C == (A & B & C)
* </ul>
*
* <p>For more info, visit: https://en.wikipedia.org/wiki/Associative_property#Propositional_logic
*/
function applyAssociation(filter) {
    hardAssert(filter instanceof FieldFilter || filter instanceof CompositeFilter);
    // Field filters are untouched by associativity.
    if (filter instanceof FieldFilter) {
        return filter;
    }
    const filters = filter.getFilters();
    // A composite with a single member is equivalent to that member.
    if (filters.length === 1) {
        return applyAssociation(filters[0]);
    }
    // A flat composite filter (only field-filter members) is already associated.
    if (compositeFilterIsFlat(filter)) {
        return filter;
    }
    // Recursively apply associativity to all subfilters first; this flattens
    // all nested composite filters bottom-up.
    const updatedFilters = filters.map(subfilter => applyAssociation(subfilter));
    // Hoist the members of any composite subfilter that performs the same kind
    // of logical operation as `filter` into `filter` itself. For example:
    //   filter             = (A | (B | C | D))
    //   compositeSubfilter = (B | C | D)
    //   result             = (A | B | C | D)
    // The `compositeSubfilter` is eliminated, and its members (B, C, D) are
    // promoted into the top-level filter.
    const newSubfilters = [];
    for (const subfilter of updatedFilters) {
        if (subfilter instanceof FieldFilter) {
            newSubfilters.push(subfilter);
        }
        else if (subfilter instanceof CompositeFilter) {
            if (subfilter.op === filter.op) {
                // Same operator, e.g. (A | (B | C)) --> (A | B | C):
                // splice the subfilter's members in directly.
                newSubfilters.push(...subfilter.filters);
            }
            else {
                // Different operator, e.g. (A | (B & C)) stays (A | (B & C)):
                // keep the nested composite as a single member.
                newSubfilters.push(subfilter);
            }
        }
    }
    return newSubfilters.length === 1
        ? newSubfilters[0]
        : CompositeFilter.create(newSubfilters, filter.op);
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An in-memory implementation of IndexManager.
*/
class MemoryIndexManager {
    constructor() {
        // In-memory map of collectionId -> set of parent paths; the only state
        // this implementation actually maintains.
        this.collectionParentIndex = new MemoryCollectionParentIndex();
    }
    // Records the parent of `collectionPath` in the in-memory index.
    addToCollectionParentIndex(transaction, collectionPath) {
        this.collectionParentIndex.add(collectionPath);
        return PersistencePromise.resolve();
    }
    // Returns all known parent paths for the given collection id.
    getCollectionParents(transaction, collectionId) {
        return PersistencePromise.resolve(this.collectionParentIndex.getEntries(collectionId));
    }
    addFieldIndex(transaction, index) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve();
    }
    deleteFieldIndex(transaction, index) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve();
    }
    deleteAllFieldIndexes(transaction) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve();
    }
    createTargetIndexes(transaction, target) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve();
    }
    getDocumentsMatchingTarget(transaction, target) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve(null);
    }
    getIndexType(transaction, target) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve(0 /* IndexType.NONE */);
    }
    getFieldIndexes(transaction, collectionGroup) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve([]);
    }
    getNextCollectionGroupToUpdate(transaction) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve(null);
    }
    // Without field indexes, the minimum offset is always the start of time.
    getMinOffset(transaction, target) {
        return PersistencePromise.resolve(IndexOffset.min());
    }
    // Without field indexes, the minimum offset is always the start of time.
    getMinOffsetFromCollectionGroup(transaction, collectionGroup) {
        return PersistencePromise.resolve(IndexOffset.min());
    }
    updateCollectionGroup(transaction, collectionGroup, offset) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve();
    }
    updateIndexEntries(transaction, documents) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve();
    }
}
/**
* Internal implementation of the collection-parent index exposed by MemoryIndexManager.
* Also used for in-memory caching by IndexedDbIndexManager and initial index population
* in indexeddb_schema.ts
*/
class MemoryCollectionParentIndex {
    constructor() {
        // Maps collectionId -> SortedSet of parent ResourcePaths.
        this.index = {};
    }
    /**
     * Records the parent of `collectionPath`.
     * Returns false if the entry already existed.
     */
    add(collectionPath) {
        const collectionId = collectionPath.lastSegment();
        const parentPath = collectionPath.popLast();
        const existingParents = this.index[collectionId] || new SortedSet(ResourcePath.comparator);
        const isNewEntry = !existingParents.has(parentPath);
        this.index[collectionId] = existingParents.add(parentPath);
        return isNewEntry;
    }
    /** Whether the parent of `collectionPath` has already been recorded. */
    has(collectionPath) {
        const parents = this.index[collectionPath.lastSegment()];
        return parents && parents.has(collectionPath.popLast());
    }
    /** Returns all recorded parent paths for the given collection id. */
    getEntries(collectionId) {
        const parents = this.index[collectionId] || new SortedSet(ResourcePath.comparator);
        return parents.toArray();
    }
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$f = 'IndexedDbIndexManager';
const EMPTY_VALUE = new Uint8Array(0);
/**
* A persisted implementation of IndexManager.
*
* PORTING NOTE: Unlike iOS and Android, the Web SDK does not memoize index
* data as it supports multi-tab access.
*/
class IndexedDbIndexManager {
    constructor(user, databaseId) {
        this.databaseId = databaseId;
        /**
         * An in-memory copy of the index entries we've already written since the SDK
         * launched. Used to avoid re-writing the same entry repeatedly.
         *
         * This is *NOT* a complete cache of what's in persistence and so can never be
         * used to satisfy reads.
         */
        this.collectionParentsCache = new MemoryCollectionParentIndex();
        /**
         * Maps from a target to its equivalent list of sub-targets. Each sub-target
         * contains only one term from the target's disjunctive normal form (DNF).
         */
        this.targetToDnfSubTargets = new ObjectMap(t => canonifyTarget(t), (l, r) => targetEquals(l, r));
        // Index state rows are keyed per user; anonymous users use the empty uid.
        this.uid = user.uid || '';
    }
/**
* Adds a new entry to the collection parent index.
*
* Repeated calls for the same collectionPath should be avoided within a
* transaction as IndexedDbIndexManager only caches writes once a transaction
* has been committed.
*/
addToCollectionParentIndex(transaction, collectionPath) {
if (!this.collectionParentsCache.has(collectionPath)) {
const collectionId = collectionPath.lastSegment();
const parentPath = collectionPath.popLast();
transaction.addOnCommittedListener(() => {
// Add the collection to the in memory cache only if the transaction was
// successfully committed.
this.collectionParentsCache.add(collectionPath);
});
const collectionParent = {
collectionId,
parent: encodeResourcePath(parentPath)
};
return collectionParentsStore(transaction).put(collectionParent);
}
return PersistencePromise.resolve();
}
    getCollectionParents(transaction, collectionId) {
        // Loads every persisted parent path for `collectionId` by scanning the
        // key range [collectionId, immediateSuccessor(collectionId)).
        const parentPaths = [];
        const range = IDBKeyRange.bound([collectionId, ''], [immediateSuccessor(collectionId), ''],
        /*lowerOpen=*/ false,
        /*upperOpen=*/ true);
        return collectionParentsStore(transaction)
            .loadAll(range)
            .next(entries => {
            for (const entry of entries) {
                // This collectionId guard shouldn't be necessary (and isn't as long
                // as we're running in a real browser), but there's a bug in
                // indexeddbshim that breaks our range in our tests running in node:
                // https://github.com/axemclion/IndexedDBShim/issues/334
                if (entry.collectionId !== collectionId) {
                    break;
                }
                parentPaths.push(decodeResourcePath(entry.parent));
            }
            return parentPaths;
        });
    }
addFieldIndex(transaction, index) {
// TODO(indexing): Verify that the auto-incrementing index ID works in
// Safari & Firefox.
const indexes = indexConfigurationStore(transaction);
const dbIndex = toDbIndexConfiguration(index);
delete dbIndex.indexId; // `indexId` is auto-populated by IndexedDb
const result = indexes.add(dbIndex);
if (index.indexState) {
const states = indexStateStore(transaction);
return result.next(indexId => {
states.put(toDbIndexState(indexId, this.uid, index.indexState.sequenceNumber, index.indexState.offset));
});
}
else {
return result.next();
}
}
deleteFieldIndex(transaction, index) {
const indexes = indexConfigurationStore(transaction);
const states = indexStateStore(transaction);
const entries = indexEntriesStore(transaction);
return indexes
.delete(index.indexId)
.next(() => states.delete(IDBKeyRange.bound([index.indexId], [index.indexId + 1],
/*lowerOpen=*/ false,
/*upperOpen=*/ true)))
.next(() => entries.delete(IDBKeyRange.bound([index.indexId], [index.indexId + 1],
/*lowerOpen=*/ false,
/*upperOpen=*/ true)));
}
    // Clears all index configurations, entries, and per-user state rows.
    deleteAllFieldIndexes(transaction) {
        const indexes = indexConfigurationStore(transaction);
        const entries = indexEntriesStore(transaction);
        const states = indexStateStore(transaction);
        return indexes
            .deleteAll()
            .next(() => entries.deleteAll())
            .next(() => states.deleteAll());
    }
createTargetIndexes(transaction, target) {
return PersistencePromise.forEach(this.getSubTargets(target), (subTarget) => {
return this.getIndexType(transaction, subTarget).next(type => {
if (type === 0 /* IndexType.NONE */ || type === 1 /* IndexType.PARTIAL */) {
const targetIndexMatcher = new TargetIndexMatcher(subTarget);
const fieldIndex = targetIndexMatcher.buildTargetIndex();
if (fieldIndex != null) {
return this.addFieldIndex(transaction, fieldIndex);
}
}
});
});
}
    // Returns the document keys served by persisted indexes for `target`, or
    // null if any DNF sub-target lacks a usable index.
    getDocumentsMatchingTarget(transaction, target) {
        const indexEntries = indexEntriesStore(transaction);
        let canServeTarget = true;
        const indexes = new Map();
        // First resolve an index for every sub-target; all must be present.
        return PersistencePromise.forEach(this.getSubTargets(target), (subTarget) => {
            return this.getFieldIndex(transaction, subTarget).next(index => {
                canServeTarget && (canServeTarget = !!index);
                indexes.set(subTarget, index);
            });
        }).next(() => {
            if (!canServeTarget) {
                return PersistencePromise.resolve(null);
            }
            else {
                // Union the results of all sub-target scans, de-duplicating keys.
                let existingKeys = documentKeySet();
                const result = [];
                return PersistencePromise.forEach(indexes, (index, subTarget) => {
                    logDebug(LOG_TAG$f, `Using index ${fieldIndexToString(index)} to execute ${canonifyTarget(target)}`);
                    const arrayValues = targetGetArrayValues(subTarget, index);
                    const notInValues = targetGetNotInValues(subTarget, index);
                    const lowerBound = targetGetLowerBound(subTarget, index);
                    const upperBound = targetGetUpperBound(subTarget, index);
                    const lowerBoundEncoded = this.encodeBound(index, subTarget, lowerBound);
                    const upperBoundEncoded = this.encodeBound(index, subTarget, upperBound);
                    const notInEncoded = this.encodeValues(index, subTarget, notInValues);
                    const indexRanges = this.generateIndexRanges(index.indexId, arrayValues, lowerBoundEncoded, lowerBound.inclusive, upperBoundEncoded, upperBound.inclusive, notInEncoded);
                    return PersistencePromise.forEach(indexRanges, (indexRange) => {
                        return indexEntries
                            .loadFirst(indexRange, target.limit)
                            .next(entries => {
                            entries.forEach(entry => {
                                const documentKey = DocumentKey.fromSegments(entry.documentKey);
                                if (!existingKeys.has(documentKey)) {
                                    existingKeys = existingKeys.add(documentKey);
                                    result.push(documentKey);
                                }
                            });
                        });
                    });
                }).next(() => result);
            }
        });
    }
getSubTargets(target) {
let subTargets = this.targetToDnfSubTargets.get(target);
if (subTargets) {
return subTargets;
}
if (target.filters.length === 0) {
subTargets = [target];
}
else {
// There is an implicit AND operation between all the filters stored in the target
const dnf = getDnfTerms(CompositeFilter.create(target.filters, "and" /* CompositeOperator.AND */));
subTargets = dnf.map(term => newTarget(target.path, target.collectionGroup, target.orderBy, term.getFilters(), target.limit, target.startAt, target.endAt));
}
this.targetToDnfSubTargets.set(target, subTargets);
return subTargets;
}
/**
* Constructs a key range query on `DbIndexEntryStore` that unions all
* bounds.
*/
    generateIndexRanges(indexId, arrayValues, lowerBounds, lowerBoundInclusive, upperBounds, upperBoundInclusive, notInValues) {
        // The number of total index scans we union together. This is similar to a
        // distributed normal form, but adapted for array values. We create a single
        // index range per value in an ARRAY_CONTAINS or ARRAY_CONTAINS_ANY filter
        // combined with the values from the query bounds.
        const totalScans = (arrayValues != null ? arrayValues.length : 1) *
            Math.max(lowerBounds.length, upperBounds.length);
        const scansPerArrayElement = totalScans / (arrayValues != null ? arrayValues.length : 1);
        const indexRanges = [];
        for (let i = 0; i < totalScans; ++i) {
            // `i / scansPerArrayElement` selects the array element; `i %
            // scansPerArrayElement` selects the bound combination within it.
            const arrayValue = arrayValues
                ? this.encodeSingleElement(arrayValues[i / scansPerArrayElement])
                : EMPTY_VALUE;
            const lowerBound = this.generateLowerBound(indexId, arrayValue, lowerBounds[i % scansPerArrayElement], lowerBoundInclusive);
            const upperBound = this.generateUpperBound(indexId, arrayValue, upperBounds[i % scansPerArrayElement], upperBoundInclusive);
            const notInBound = notInValues.map(notIn => this.generateLowerBound(indexId, arrayValue, notIn,
            /* inclusive= */ true));
            indexRanges.push(...this.createRange(lowerBound, upperBound, notInBound));
        }
        return indexRanges;
    }
/** Generates the lower bound for `arrayValue` and `directionalValue`. */
generateLowerBound(indexId, arrayValue, directionalValue, inclusive) {
const entry = new IndexEntry(indexId, DocumentKey.empty(), arrayValue, directionalValue);
return inclusive ? entry : entry.successor();
}
/** Generates the upper bound for `arrayValue` and `directionalValue`. */
generateUpperBound(indexId, arrayValue, directionalValue, inclusive) {
const entry = new IndexEntry(indexId, DocumentKey.empty(), arrayValue, directionalValue);
return inclusive ? entry.successor() : entry;
}
getFieldIndex(transaction, target) {
const targetIndexMatcher = new TargetIndexMatcher(target);
const collectionGroup = target.collectionGroup != null
? target.collectionGroup
: target.path.lastSegment();
return this.getFieldIndexes(transaction, collectionGroup).next(indexes => {
// Return the index with the most number of segments.
let index = null;
for (const candidate of indexes) {
const matches = targetIndexMatcher.servedByIndex(candidate);
if (matches &&
(!index || candidate.fields.length > index.fields.length)) {
index = candidate;
}
}
return index;
});
}
    // Computes how completely persisted indexes can serve `target`:
    // FULL if every DNF sub-target has a full index, PARTIAL if any sub-target
    // has only a prefix index, NONE if any sub-target has no index at all.
    getIndexType(transaction, target) {
        let indexType = 2 /* IndexType.FULL */;
        const subTargets = this.getSubTargets(target);
        return PersistencePromise.forEach(subTargets, (target) => {
            return this.getFieldIndex(transaction, target).next(index => {
                if (!index) {
                    indexType = 0 /* IndexType.NONE */;
                }
                else if (indexType !== 0 /* IndexType.NONE */ &&
                    index.fields.length < targetGetSegmentCount(target)) {
                    indexType = 1 /* IndexType.PARTIAL */;
                }
            });
        }).next(() => {
            // OR queries have more than one sub-target (one sub-target per DNF term). We currently consider
            // OR queries that have a `limit` to have a partial index. For such queries we perform sorting
            // and apply the limit in memory as a post-processing step.
            if (targetHasLimit(target) &&
                subTargets.length > 1 &&
                indexType === 2 /* IndexType.FULL */) {
                return 1 /* IndexType.PARTIAL */;
            }
            return indexType;
        });
    }
/**
* Returns the byte encoded form of the directional values in the field index.
* Returns `null` if the document does not have all fields specified in the
* index.
*/
encodeDirectionalElements(fieldIndex, document) {
const encoder = new IndexByteEncoder();
for (const segment of fieldIndexGetDirectionalSegments(fieldIndex)) {
const field = document.data.field(segment.fieldPath);
if (field == null) {
return null;
}
const directionalEncoder = encoder.forKind(segment.kind);
FirestoreIndexValueWriter.INSTANCE.writeIndexValue(field, directionalEncoder);
}
return encoder.encodedBytes();
}
/** Encodes a single value to the ascending index format. */
    encodeSingleElement(value) {
        const encoder = new IndexByteEncoder();
        // Single values are always written with the ascending encoder.
        FirestoreIndexValueWriter.INSTANCE.writeIndexValue(value, encoder.forKind(0 /* IndexKind.ASCENDING */));
        return encoder.encodedBytes();
    }
/**
* Returns an encoded form of the document key that sorts based on the key
* ordering of the field index.
*/
    encodeDirectionalKey(fieldIndex, documentKey) {
        const encoder = new IndexByteEncoder();
        // Encode the key as a reference value using the index's key ordering.
        FirestoreIndexValueWriter.INSTANCE.writeIndexValue(refValue(this.databaseId, documentKey), encoder.forKind(fieldIndexGetKeyOrder(fieldIndex)));
        return encoder.encodedBytes();
    }
/**
* Encodes the given field values according to the specification in `target`.
* For IN queries, a list of possible values is returned.
*/
    encodeValues(fieldIndex, target, values) {
        if (values === null) {
            return [];
        }
        let encoders = [];
        encoders.push(new IndexByteEncoder());
        let valueIdx = 0;
        // Each directional segment consumes the next value; IN filters with
        // array values fork the encoder list (one encoder per combination).
        for (const segment of fieldIndexGetDirectionalSegments(fieldIndex)) {
            const value = values[valueIdx++];
            for (const encoder of encoders) {
                if (this.isInFilter(target, segment.fieldPath) && isArray(value)) {
                    encoders = this.expandIndexValues(encoders, segment, value);
                }
                else {
                    const directionalEncoder = encoder.forKind(segment.kind);
                    FirestoreIndexValueWriter.INSTANCE.writeIndexValue(value, directionalEncoder);
                }
            }
        }
        return this.getEncodedBytes(encoders);
    }
/**
* Encodes the given bounds according to the specification in `target`. For IN
* queries, a list of possible values is returned.
*/
    encodeBound(fieldIndex, target, bound) {
        // A bound's position is just a list of values; reuse the value encoder.
        return this.encodeValues(fieldIndex, target, bound.position);
    }
/** Returns the byte representation for the provided encoders. */
getEncodedBytes(encoders) {
const result = [];
for (let i = 0; i < encoders.length; ++i) {
result[i] = encoders[i].encodedBytes();
}
return result;
}
/**
* Creates a separate encoder for each element of an array.
*
* The method appends each value to all existing encoders (e.g. filter("a",
* "==", "a1").filter("b", "in", ["b1", "b2"]) becomes ["a1,b1", "a1,b2"]). A
* list of new encoders is returned.
*/
expandIndexValues(encoders, segment, value) {
const prefixes = [...encoders];
const results = [];
for (const arrayElement of value.arrayValue.values || []) {
for (const prefix of prefixes) {
const clonedEncoder = new IndexByteEncoder();
clonedEncoder.seed(prefix.encodedBytes());
FirestoreIndexValueWriter.INSTANCE.writeIndexValue(arrayElement, clonedEncoder.forKind(segment.kind));
results.push(clonedEncoder);
}
}
return results;
}
isInFilter(target, fieldPath) {
return !!target.filters.find(f => f instanceof FieldFilter &&
f.field.isEqual(fieldPath) &&
(f.op === "in" /* Operator.IN */ || f.op === "not-in" /* Operator.NOT_IN */));
}
    // Loads all persisted index configurations (optionally restricted to one
    // collection group), joining each with its per-user backfill state.
    getFieldIndexes(transaction, collectionGroup) {
        const indexes = indexConfigurationStore(transaction);
        const states = indexStateStore(transaction);
        return (collectionGroup
            ? indexes.loadAll(DbIndexConfigurationCollectionGroupIndex, IDBKeyRange.bound(collectionGroup, collectionGroup))
            : indexes.loadAll()).next(indexConfigs => {
            const result = [];
            return PersistencePromise.forEach(indexConfigs, (indexConfig) => {
                return states
                    .get([indexConfig.indexId, this.uid])
                    .next(indexState => {
                    result.push(fromDbIndexConfiguration(indexConfig, indexState));
                });
            }).next(() => result);
        });
    }
getNextCollectionGroupToUpdate(transaction) {
return this.getFieldIndexes(transaction).next(indexes => {
if (indexes.length === 0) {
return null;
}
indexes.sort((l, r) => {
const cmp = l.indexState.sequenceNumber - r.indexState.sequenceNumber;
return cmp !== 0
? cmp
: primitiveComparator(l.collectionGroup, r.collectionGroup);
});
return indexes[0].collectionGroup;
});
}
    // Advances the backfill state of every index in `collectionGroup` to the
    // given offset, stamping each with the next sequence number.
    updateCollectionGroup(transaction, collectionGroup, offset) {
        const indexes = indexConfigurationStore(transaction);
        const states = indexStateStore(transaction);
        return this.getNextSequenceNumber(transaction).next(nextSequenceNumber => indexes
            .loadAll(DbIndexConfigurationCollectionGroupIndex, IDBKeyRange.bound(collectionGroup, collectionGroup))
            .next(configs => PersistencePromise.forEach(configs, (config) => states.put(toDbIndexState(config.indexId, this.uid, nextSequenceNumber, offset)))));
    }
    // Recomputes and persists the index entries for each document, writing only
    // when the computed entries differ from what is already stored.
    updateIndexEntries(transaction, documents) {
        // Porting Note: `getFieldIndexes()` on Web does not cache index lookups as
        // it could be used across different IndexedDB transactions. As any cached
        // data might be invalidated by other multi-tab clients, we can only trust
        // data within a single IndexedDB transaction. We therefore add a cache
        // here.
        const memoizedIndexes = new Map();
        return PersistencePromise.forEach(documents, (key, doc) => {
            const memoizedCollectionIndexes = memoizedIndexes.get(key.collectionGroup);
            const fieldIndexes = memoizedCollectionIndexes
                ? PersistencePromise.resolve(memoizedCollectionIndexes)
                : this.getFieldIndexes(transaction, key.collectionGroup);
            return fieldIndexes.next(fieldIndexes => {
                memoizedIndexes.set(key.collectionGroup, fieldIndexes);
                return PersistencePromise.forEach(fieldIndexes, (fieldIndex) => {
                    return this.getExistingIndexEntries(transaction, key, fieldIndex).next(existingEntries => {
                        const newEntries = this.computeIndexEntries(doc, fieldIndex);
                        // Skip the write entirely when nothing changed.
                        if (!existingEntries.isEqual(newEntries)) {
                            return this.updateEntries(transaction, doc, fieldIndex, existingEntries, newEntries);
                        }
                        return PersistencePromise.resolve();
                    });
                });
            });
        });
    }
    // Persists one index entry row for `document` under the current user.
    addIndexEntry(transaction, document, fieldIndex, indexEntry) {
        const indexEntries = indexEntriesStore(transaction);
        return indexEntries.put({
            indexId: indexEntry.indexId,
            uid: this.uid,
            arrayValue: indexEntry.arrayValue,
            directionalValue: indexEntry.directionalValue,
            orderedDocumentKey: this.encodeDirectionalKey(fieldIndex, document.key),
            documentKey: document.key.path.toArray()
        });
    }
deleteIndexEntry(transaction, document, fieldIndex, indexEntry) {
const indexEntries = indexEntriesStore(transaction);
return indexEntries.delete([
indexEntry.indexId,
this.uid,
indexEntry.arrayValue,
indexEntry.directionalValue,
this.encodeDirectionalKey(fieldIndex, document.key),
document.key.path.toArray()
]);
}
getExistingIndexEntries(transaction, documentKey, fieldIndex) {
const indexEntries = indexEntriesStore(transaction);
let results = new SortedSet(indexEntryComparator);
return indexEntries
.iterate({
index: DbIndexEntryDocumentKeyIndex,
range: IDBKeyRange.only([
fieldIndex.indexId,
this.uid,
this.encodeDirectionalKey(fieldIndex, documentKey)
])
}, (_, entry) => {
results = results.add(new IndexEntry(fieldIndex.indexId, documentKey, entry.arrayValue, entry.directionalValue));
})
.next(() => results);
}
/** Creates the index entries for the given document. */
computeIndexEntries(document, fieldIndex) {
let results = new SortedSet(indexEntryComparator);
const directionalValue = this.encodeDirectionalElements(fieldIndex, document);
if (directionalValue == null) {
return results;
}
const arraySegment = fieldIndexGetArraySegment(fieldIndex);
if (arraySegment != null) {
const value = document.data.field(arraySegment.fieldPath);
if (isArray(value)) {
for (const arrayValue of value.arrayValue.values || []) {
results = results.add(new IndexEntry(fieldIndex.indexId, document.key, this.encodeSingleElement(arrayValue), directionalValue));
}
}
}
else {
results = results.add(new IndexEntry(fieldIndex.indexId, document.key, EMPTY_VALUE, directionalValue));
}
return results;
}
/**
* Updates the index entries for the provided document by deleting entries
* that are no longer referenced in `newEntries` and adding all newly added
* entries.
*/
updateEntries(transaction, document, fieldIndex, existingEntries, newEntries) {
logDebug(LOG_TAG$f, "Updating index entries for document '%s'", document.key);
const promises = [];
diffSortedSets(existingEntries, newEntries, indexEntryComparator,
/* onAdd= */ entry => {
promises.push(this.addIndexEntry(transaction, document, fieldIndex, entry));
},
/* onRemove= */ entry => {
promises.push(this.deleteIndexEntry(transaction, document, fieldIndex, entry));
});
return PersistencePromise.waitFor(promises);
}
getNextSequenceNumber(transaction) {
let nextSequenceNumber = 1;
const states = indexStateStore(transaction);
return states
.iterate({
index: DbIndexStateSequenceNumberIndex,
reverse: true,
range: IDBKeyRange.upperBound([this.uid, Number.MAX_SAFE_INTEGER])
}, (_, state, controller) => {
controller.done();
nextSequenceNumber = state.sequenceNumber + 1;
})
.next(() => nextSequenceNumber);
}
    /**
     * Returns a new set of IDB ranges that splits the existing range and excludes
     * any values that match the `notInValue` from these ranges. As an example,
     * '[foo > 2 && foo != 3]` becomes `[foo > 2 && < 3, foo > 3]`.
     */
    createRange(lower, upper, notInValues) {
        // The notIn values need to be sorted and unique so that we can return a
        // sorted set of non-overlapping ranges.
        notInValues = notInValues
            .sort((l, r) => indexEntryComparator(l, r))
            .filter((el, i, values) => !i || indexEntryComparator(el, values[i - 1]) !== 0);
        // `bounds` is built as alternating [lower0, upper0, lower1, upper1, ...]
        // pairs; each notIn value that falls inside the range closes the current
        // pair and opens the next one at its successor.
        const bounds = [];
        bounds.push(lower);
        for (const notInValue of notInValues) {
            const cmpToLower = indexEntryComparator(notInValue, lower);
            const cmpToUpper = indexEntryComparator(notInValue, upper);
            if (cmpToLower === 0) {
                // `notInValue` is the lower bound. We therefore need to raise the bound
                // to the next value.
                bounds[0] = lower.successor();
            }
            else if (cmpToLower > 0 && cmpToUpper < 0) {
                // `notInValue` is in the middle of the range
                bounds.push(notInValue);
                bounds.push(notInValue.successor());
            }
            else if (cmpToUpper > 0) {
                // `notInValue` (and all following values) are out of the range
                break;
            }
            // Values with cmpToLower < 0 or cmpToUpper === 0 are intentionally
            // ignored: they either precede the range or coincide with the
            // (exclusive-by-construction) upper bound.
        }
        bounds.push(upper);
        const ranges = [];
        for (let i = 0; i < bounds.length; i += 2) {
            // If we encounter two bounds that will create an unmatchable key range,
            // then we return an empty set of key ranges.
            // NOTE(review): despite its name, `isRangeMatchable` returns true
            // when the pair is UNmatchable (lower sorts after upper).
            if (this.isRangeMatchable(bounds[i], bounds[i + 1])) {
                return [];
            }
            const lowerBound = [
                bounds[i].indexId,
                this.uid,
                bounds[i].arrayValue,
                bounds[i].directionalValue,
                EMPTY_VALUE,
                []
            ];
            const upperBound = [
                bounds[i + 1].indexId,
                this.uid,
                bounds[i + 1].arrayValue,
                bounds[i + 1].directionalValue,
                EMPTY_VALUE,
                []
            ];
            ranges.push(IDBKeyRange.bound(lowerBound, upperBound));
        }
        return ranges;
    }
isRangeMatchable(lowerBound, upperBound) {
// If lower bound is greater than the upper bound, then the key
// range can never be matched.
return indexEntryComparator(lowerBound, upperBound) > 0;
}
getMinOffsetFromCollectionGroup(transaction, collectionGroup) {
return this.getFieldIndexes(transaction, collectionGroup).next(getMinOffsetFromFieldIndexes);
}
getMinOffset(transaction, target) {
return PersistencePromise.mapArray(this.getSubTargets(target), (subTarget) => this.getFieldIndex(transaction, subTarget).next(index => index ? index : fail())).next(getMinOffsetFromFieldIndexes);
}
}
/**
 * Helper to get a typed SimpleDbStore for the collectionParents
 * document store.
 * @param txn - The IndexedDB transaction the store handle is scoped to.
 */
function collectionParentsStore(txn) {
    return getStore(txn, DbCollectionParentStore);
}
/**
 * Helper to get a typed SimpleDbStore for the index entry object store.
 * @param txn - The IndexedDB transaction the store handle is scoped to.
 */
function indexEntriesStore(txn) {
    return getStore(txn, DbIndexEntryStore);
}
/**
 * Helper to get a typed SimpleDbStore for the index configuration object store.
 * @param txn - The IndexedDB transaction the store handle is scoped to.
 */
function indexConfigurationStore(txn) {
    return getStore(txn, DbIndexConfigurationStore);
}
/**
 * Helper to get a typed SimpleDbStore for the index state object store.
 * @param txn - The IndexedDB transaction the store handle is scoped to.
 */
function indexStateStore(txn) {
    return getStore(txn, DbIndexStateStore);
}
/**
 * Collapses a non-empty list of field indexes into a single IndexOffset: the
 * smallest offset (per `indexOffsetComparator`) combined with the largest
 * batch ID observed across all of the indexes.
 */
function getMinOffsetFromFieldIndexes(fieldIndexes) {
    hardAssert(fieldIndexes.length !== 0);
    let minOffset = fieldIndexes[0].indexState.offset;
    let maxBatchId = minOffset.largestBatchId;
    for (const fieldIndex of fieldIndexes.slice(1)) {
        const offset = fieldIndex.indexState.offset;
        if (indexOffsetComparator(offset, minOffset) < 0) {
            minOffset = offset;
        }
        maxBatchId = Math.max(maxBatchId, offset.largestBatchId);
    }
    return new IndexOffset(minOffset.readTime, minOffset.documentKey, maxBatchId);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Deletes a mutation batch together with its document-mutation index rows.
 * @returns A PersistencePromise of the document keys whose index entries were
 * removed.
 */
function removeMutationBatch(txn, userId, batch) {
    const mutationStore = txn.store(DbMutationBatchStore);
    const indexStore = txn.store(DbDocumentMutationStore);
    const pending = [];
    let deletedCount = 0;
    const removePromise = mutationStore.iterate({ range: IDBKeyRange.only(batch.batchId) }, (key, value, control) => {
        deletedCount++;
        return control.delete();
    });
    pending.push(removePromise.next(() => {
        // Exactly one row must have matched the batch ID.
        hardAssert(deletedCount === 1);
    }));
    const removedDocuments = [];
    for (const mutation of batch.mutations) {
        const indexKey = newDbDocumentMutationKey(userId, mutation.key.path, batch.batchId);
        pending.push(indexStore.delete(indexKey));
        removedDocuments.push(mutation.key);
    }
    return PersistencePromise.waitFor(pending).next(() => removedDocuments);
}
/**
 * Returns an approximate size (in characters of its JSON serialization) for
 * the given document row, or 0 when the row is missing. Throws when the row
 * carries none of the three recognized document payloads.
 */
function dbDocumentSize(doc) {
    if (!doc) {
        return 0;
    }
    // A row stores exactly one of these payload variants.
    const value = doc.document || doc.unknownDocument || doc.noDocument;
    if (!value) {
        throw fail();
    }
    return JSON.stringify(value).length;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** A mutation queue for a specific user, backed by IndexedDB. */
class IndexedDbMutationQueue {
constructor(
/**
* The normalized userId (e.g. null UID => "" userId) used to store /
* retrieve mutations.
*/
userId, serializer, indexManager, referenceDelegate) {
this.userId = userId;
this.serializer = serializer;
this.indexManager = indexManager;
this.referenceDelegate = referenceDelegate;
/**
* Caches the document keys for pending mutation batches. If the mutation
* has been removed from IndexedDb, the cached value may continue to
* be used to retrieve the batch's document keys. To remove a cached value
* locally, `removeCachedMutationKeys()` should be invoked either directly
* or through `removeMutationBatches()`.
*
* With multi-tab, when the primary client acknowledges or rejects a mutation,
* this cache is used by secondary clients to invalidate the local
* view of the documents that were previously affected by the mutation.
*/
// PORTING NOTE: Multi-tab only.
this.documentKeysByBatchId = {};
}
/**
* Creates a new mutation queue for the given user.
* @param user - The user for which to create a mutation queue.
* @param serializer - The serializer to use when persisting to IndexedDb.
*/
static forUser(user, serializer, indexManager, referenceDelegate) {
// TODO(mcg): Figure out what constraints there are on userIDs
// In particular, are there any reserved characters? are empty ids allowed?
// For the moment store these together in the same mutations table assuming
// that empty userIDs aren't allowed.
hardAssert(user.uid !== '');
const userId = user.isAuthenticated() ? user.uid : '';
return new IndexedDbMutationQueue(userId, serializer, indexManager, referenceDelegate);
}
checkEmpty(transaction) {
let empty = true;
const range = IDBKeyRange.bound([this.userId, Number.NEGATIVE_INFINITY], [this.userId, Number.POSITIVE_INFINITY]);
return mutationsStore(transaction)
.iterate({ index: DbMutationBatchUserMutationsIndex, range }, (key, value, control) => {
empty = false;
control.done();
})
.next(() => empty);
}
addMutationBatch(transaction, localWriteTime, baseMutations, mutations) {
const documentStore = documentMutationsStore(transaction);
const mutationStore = mutationsStore(transaction);
// The IndexedDb implementation in Chrome (and Firefox) does not handle
// compound indices that include auto-generated keys correctly. To ensure
// that the index entry is added correctly in all browsers, we perform two
// writes: The first write is used to retrieve the next auto-generated Batch
// ID, and the second write populates the index and stores the actual
// mutation batch.
// See: https://bugs.chromium.org/p/chromium/issues/detail?id=701972
// We write an empty object to obtain key
// eslint-disable-next-line @typescript-eslint/no-explicit-any
return mutationStore.add({}).next(batchId => {
hardAssert(typeof batchId === 'number');
const batch = new MutationBatch(batchId, localWriteTime, baseMutations, mutations);
const dbBatch = toDbMutationBatch(this.serializer, this.userId, batch);
const promises = [];
let collectionParents = new SortedSet((l, r) => primitiveComparator(l.canonicalString(), r.canonicalString()));
for (const mutation of mutations) {
const indexKey = newDbDocumentMutationKey(this.userId, mutation.key.path, batchId);
collectionParents = collectionParents.add(mutation.key.path.popLast());
promises.push(mutationStore.put(dbBatch));
promises.push(documentStore.put(indexKey, DbDocumentMutationPlaceholder));
}
collectionParents.forEach(parent => {
promises.push(this.indexManager.addToCollectionParentIndex(transaction, parent));
});
transaction.addOnCommittedListener(() => {
this.documentKeysByBatchId[batchId] = batch.keys();
});
return PersistencePromise.waitFor(promises).next(() => batch);
});
}
lookupMutationBatch(transaction, batchId) {
return mutationsStore(transaction)
.get(batchId)
.next(dbBatch => {
if (dbBatch) {
hardAssert(dbBatch.userId === this.userId);
return fromDbMutationBatch(this.serializer, dbBatch);
}
return null;
});
}
/**
* Returns the document keys for the mutation batch with the given batchId.
* For primary clients, this method returns `null` after
* `removeMutationBatches()` has been called. Secondary clients return a
* cached result until `removeCachedMutationKeys()` is invoked.
*/
// PORTING NOTE: Multi-tab only.
lookupMutationKeys(transaction, batchId) {
if (this.documentKeysByBatchId[batchId]) {
return PersistencePromise.resolve(this.documentKeysByBatchId[batchId]);
}
else {
return this.lookupMutationBatch(transaction, batchId).next(batch => {
if (batch) {
const keys = batch.keys();
this.documentKeysByBatchId[batchId] = keys;
return keys;
}
else {
return null;
}
});
}
}
getNextMutationBatchAfterBatchId(transaction, batchId) {
const nextBatchId = batchId + 1;
const range = IDBKeyRange.lowerBound([this.userId, nextBatchId]);
let foundBatch = null;
return mutationsStore(transaction)
.iterate({ index: DbMutationBatchUserMutationsIndex, range }, (key, dbBatch, control) => {
if (dbBatch.userId === this.userId) {
hardAssert(dbBatch.batchId >= nextBatchId);
foundBatch = fromDbMutationBatch(this.serializer, dbBatch);
}
control.done();
})
.next(() => foundBatch);
}
getHighestUnacknowledgedBatchId(transaction) {
const range = IDBKeyRange.upperBound([
this.userId,
Number.POSITIVE_INFINITY
]);
let batchId = BATCHID_UNKNOWN;
return mutationsStore(transaction)
.iterate({ index: DbMutationBatchUserMutationsIndex, range, reverse: true }, (key, dbBatch, control) => {
batchId = dbBatch.batchId;
control.done();
})
.next(() => batchId);
}
getAllMutationBatches(transaction) {
const range = IDBKeyRange.bound([this.userId, BATCHID_UNKNOWN], [this.userId, Number.POSITIVE_INFINITY]);
return mutationsStore(transaction)
.loadAll(DbMutationBatchUserMutationsIndex, range)
.next(dbBatches => dbBatches.map(dbBatch => fromDbMutationBatch(this.serializer, dbBatch)));
}
getAllMutationBatchesAffectingDocumentKey(transaction, documentKey) {
// Scan the document-mutation index starting with a prefix starting with
// the given documentKey.
const indexPrefix = newDbDocumentMutationPrefixForPath(this.userId, documentKey.path);
const indexStart = IDBKeyRange.lowerBound(indexPrefix);
const results = [];
return documentMutationsStore(transaction)
.iterate({ range: indexStart }, (indexKey, _, control) => {
const [userID, encodedPath, batchId] = indexKey;
// Only consider rows matching exactly the specific key of
// interest. Note that because we order by path first, and we
// order terminators before path separators, we'll encounter all
// the index rows for documentKey contiguously. In particular, all
// the rows for documentKey will occur before any rows for
// documents nested in a subcollection beneath documentKey so we
// can stop as soon as we hit any such row.
const path = decodeResourcePath(encodedPath);
if (userID !== this.userId || !documentKey.path.isEqual(path)) {
control.done();
return;
}
// Look up the mutation batch in the store.
return mutationsStore(transaction)
.get(batchId)
.next(mutation => {
if (!mutation) {
throw fail();
}
hardAssert(mutation.userId === this.userId);
results.push(fromDbMutationBatch(this.serializer, mutation));
});
})
.next(() => results);
}
getAllMutationBatchesAffectingDocumentKeys(transaction, documentKeys) {
let uniqueBatchIDs = new SortedSet(primitiveComparator);
const promises = [];
documentKeys.forEach(documentKey => {
const indexStart = newDbDocumentMutationPrefixForPath(this.userId, documentKey.path);
const range = IDBKeyRange.lowerBound(indexStart);
const promise = documentMutationsStore(transaction).iterate({ range }, (indexKey, _, control) => {
const [userID, encodedPath, batchID] = indexKey;
// Only consider rows matching exactly the specific key of
// interest. Note that because we order by path first, and we
// order terminators before path separators, we'll encounter all
// the index rows for documentKey contiguously. In particular, all
// the rows for documentKey will occur before any rows for
// documents nested in a subcollection beneath documentKey so we
// can stop as soon as we hit any such row.
const path = decodeResourcePath(encodedPath);
if (userID !== this.userId || !documentKey.path.isEqual(path)) {
control.done();
return;
}
uniqueBatchIDs = uniqueBatchIDs.add(batchID);
});
promises.push(promise);
});
return PersistencePromise.waitFor(promises).next(() => this.lookupMutationBatches(transaction, uniqueBatchIDs));
}
getAllMutationBatchesAffectingQuery(transaction, query) {
const queryPath = query.path;
const immediateChildrenLength = queryPath.length + 1;
// TODO(mcg): Actually implement a single-collection query
//
// This is actually executing an ancestor query, traversing the whole
// subtree below the collection which can be horrifically inefficient for
// some structures. The right way to solve this is to implement the full
// value index, but that's not in the cards in the near future so this is
// the best we can do for the moment.
//
// Since we don't yet index the actual properties in the mutations, our
// current approach is to just return all mutation batches that affect
// documents in the collection being queried.
const indexPrefix = newDbDocumentMutationPrefixForPath(this.userId, queryPath);
const indexStart = IDBKeyRange.lowerBound(indexPrefix);
// Collect up unique batchIDs encountered during a scan of the index. Use a
// SortedSet to accumulate batch IDs so they can be traversed in order in a
// scan of the main table.
let uniqueBatchIDs = new SortedSet(primitiveComparator);
return documentMutationsStore(transaction)
.iterate({ range: indexStart }, (indexKey, _, control) => {
const [userID, encodedPath, batchID] = indexKey;
const path = decodeResourcePath(encodedPath);
if (userID !== this.userId || !queryPath.isPrefixOf(path)) {
control.done();
return;
}
// Rows with document keys more than one segment longer than the
// query path can't be matches. For example, a query on 'rooms'
// can't match the document /rooms/abc/messages/xyx.
// TODO(mcg): we'll need a different scanner when we implement
// ancestor queries.
if (path.length !== immediateChildrenLength) {
return;
}
uniqueBatchIDs = uniqueBatchIDs.add(batchID);
})
.next(() => this.lookupMutationBatches(transaction, uniqueBatchIDs));
}
lookupMutationBatches(transaction, batchIDs) {
const results = [];
const promises = [];
// TODO(rockwood): Implement this using iterate.
batchIDs.forEach(batchId => {
promises.push(mutationsStore(transaction)
.get(batchId)
.next(mutation => {
if (mutation === null) {
throw fail();
}
hardAssert(mutation.userId === this.userId);
results.push(fromDbMutationBatch(this.serializer, mutation));
}));
});
return PersistencePromise.waitFor(promises).next(() => results);
}
removeMutationBatch(transaction, batch) {
return removeMutationBatch(transaction.simpleDbTransaction, this.userId, batch).next(removedDocuments => {
transaction.addOnCommittedListener(() => {
this.removeCachedMutationKeys(batch.batchId);
});
return PersistencePromise.forEach(removedDocuments, (key) => {
return this.referenceDelegate.markPotentiallyOrphaned(transaction, key);
});
});
}
/**
* Clears the cached keys for a mutation batch. This method should be
* called by secondary clients after they process mutation updates.
*
* Note that this method does not have to be called from primary clients as
* the corresponding cache entries are cleared when an acknowledged or
* rejected batch is removed from the mutation queue.
*/
// PORTING NOTE: Multi-tab only
removeCachedMutationKeys(batchId) {
delete this.documentKeysByBatchId[batchId];
}
performConsistencyCheck(txn) {
return this.checkEmpty(txn).next(empty => {
if (!empty) {
return PersistencePromise.resolve();
}
// Verify that there are no entries in the documentMutations index if
// the queue is empty.
const startRange = IDBKeyRange.lowerBound(newDbDocumentMutationPrefixForUser(this.userId));
const danglingMutationReferences = [];
return documentMutationsStore(txn)
.iterate({ range: startRange }, (key, _, control) => {
const userID = key[0];
if (userID !== this.userId) {
control.done();
return;
}
else {
const path = decodeResourcePath(key[1]);
danglingMutationReferences.push(path);
}
})
.next(() => {
hardAssert(danglingMutationReferences.length === 0);
});
});
}
containsKey(txn, key) {
return mutationQueueContainsKey(txn, this.userId, key);
}
// PORTING NOTE: Multi-tab only (state is held in memory in other clients).
/** Returns the mutation queue's metadata from IndexedDb. */
getMutationQueueMetadata(transaction) {
return mutationQueuesStore(transaction)
.get(this.userId)
.next((metadata) => {
return (metadata || {
userId: this.userId,
lastAcknowledgedBatchId: BATCHID_UNKNOWN,
lastStreamToken: ''
});
});
}
}
/**
 * @returns true if the mutation queue for the given user contains a pending
 * mutation for the given key.
 */
function mutationQueueContainsKey(txn, userId, key) {
    const indexKey = newDbDocumentMutationPrefixForPath(userId, key.path);
    const encodedPath = indexKey[1];
    let found = false;
    return documentMutationsStore(txn)
        .iterate({ range: IDBKeyRange.lowerBound(indexKey), keysOnly: true }, (rowKey, value, control) => {
        const [rowUserId, rowPath] = rowKey;
        if (rowUserId === userId && rowPath === encodedPath) {
            found = true;
        }
        // Only the first row at/after the prefix needs to be inspected.
        control.done();
    })
        .next(() => found);
}
/** Returns true if any mutation queue contains the given document. */
function mutationQueuesContainKey(txn, docKey) {
    let anyQueueContainsKey = false;
    return mutationQueuesStore(txn)
        .iterateSerial(userId => mutationQueueContainsKey(txn, userId, docKey).next(containsKey => {
        if (containsKey) {
            anyQueueContainsKey = true;
        }
        // Resolving to false stops the serial iteration once a match is found.
        return PersistencePromise.resolve(!containsKey);
    }))
        .next(() => anyQueueContainsKey);
}
/**
 * Helper to get a typed SimpleDbStore for the mutations object store.
 * @param txn - The IndexedDB transaction the store handle is scoped to.
 */
function mutationsStore(txn) {
    return getStore(txn, DbMutationBatchStore);
}
/**
 * Helper to get a typed SimpleDbStore for the document mutations object store.
 * @param txn - The IndexedDB transaction the store handle is scoped to.
 */
function documentMutationsStore(txn) {
    return getStore(txn, DbDocumentMutationStore);
}
/**
 * Helper to get a typed SimpleDbStore for the mutationQueues object store.
 * @param txn - The IndexedDB transaction the store handle is scoped to.
 */
function mutationQueuesStore(txn) {
    return getStore(txn, DbMutationQueueStore);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Offset to ensure non-overlapping target ids. */
const OFFSET = 2;
/**
 * Generates monotonically increasing target IDs for sending targets to the
 * watch stream.
 *
 * Two generators exist side by side: the target cache's generator hands out
 * even IDs (2, 4, 6, ...) that persist across client restarts, while the sync
 * engine's generator hands out odd IDs (1, 3, 5, ...) for transient limbo
 * document targets. Stepping both by `OFFSET` keeps the two ID spaces
 * disjoint.
 */
class TargetIdGenerator {
    constructor(lastId) {
        this.lastId = lastId;
    }
    /** Returns the next ID in this generator's sequence. */
    next() {
        const id = this.lastId + OFFSET;
        this.lastId = id;
        return id;
    }
    /**
     * Generator for the target cache. Its first `next()` must yield 2 — the
     * protocol layer cannot distinguish an unset target number from 0, so a
     * target sent with ID 0 would be treated as unset by the backend and
     * replaced with a backend-chosen ID.
     */
    static forTargetCache() {
        return new TargetIdGenerator(2 - OFFSET);
    }
    /** Generator for the sync engine's limbo-document targets (odd IDs). */
    static forSyncEngine() {
        return new TargetIdGenerator(1 - OFFSET);
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class IndexedDbTargetCache {
constructor(referenceDelegate, serializer) {
this.referenceDelegate = referenceDelegate;
this.serializer = serializer;
}
// PORTING NOTE: We don't cache global metadata for the target cache, since
// some of it (in particular `highestTargetId`) can be modified by secondary
// tabs. We could perhaps be more granular (and e.g. still cache
// `lastRemoteSnapshotVersion` in memory) but for simplicity we currently go
// to IndexedDb whenever we need to read metadata. We can revisit if it turns
// out to have a meaningful performance impact.
allocateTargetId(transaction) {
return this.retrieveMetadata(transaction).next(metadata => {
const targetIdGenerator = new TargetIdGenerator(metadata.highestTargetId);
metadata.highestTargetId = targetIdGenerator.next();
return this.saveMetadata(transaction, metadata).next(() => metadata.highestTargetId);
});
}
getLastRemoteSnapshotVersion(transaction) {
return this.retrieveMetadata(transaction).next(metadata => {
return SnapshotVersion.fromTimestamp(new Timestamp(metadata.lastRemoteSnapshotVersion.seconds, metadata.lastRemoteSnapshotVersion.nanoseconds));
});
}
getHighestSequenceNumber(transaction) {
return this.retrieveMetadata(transaction).next(targetGlobal => targetGlobal.highestListenSequenceNumber);
}
setTargetsMetadata(transaction, highestListenSequenceNumber, lastRemoteSnapshotVersion) {
return this.retrieveMetadata(transaction).next(metadata => {
metadata.highestListenSequenceNumber = highestListenSequenceNumber;
if (lastRemoteSnapshotVersion) {
metadata.lastRemoteSnapshotVersion =
lastRemoteSnapshotVersion.toTimestamp();
}
if (highestListenSequenceNumber > metadata.highestListenSequenceNumber) {
metadata.highestListenSequenceNumber = highestListenSequenceNumber;
}
return this.saveMetadata(transaction, metadata);
});
}
addTargetData(transaction, targetData) {
return this.saveTargetData(transaction, targetData).next(() => {
return this.retrieveMetadata(transaction).next(metadata => {
metadata.targetCount += 1;
this.updateMetadataFromTargetData(targetData, metadata);
return this.saveMetadata(transaction, metadata);
});
});
}
updateTargetData(transaction, targetData) {
return this.saveTargetData(transaction, targetData);
}
removeTargetData(transaction, targetData) {
return this.removeMatchingKeysForTargetId(transaction, targetData.targetId)
.next(() => targetsStore(transaction).delete(targetData.targetId))
.next(() => this.retrieveMetadata(transaction))
.next(metadata => {
hardAssert(metadata.targetCount > 0);
metadata.targetCount -= 1;
return this.saveMetadata(transaction, metadata);
});
}
/**
* Drops any targets with sequence number less than or equal to the upper bound, excepting those
* present in `activeTargetIds`. Document associations for the removed targets are also removed.
* Returns the number of targets removed.
*/
removeTargets(txn, upperBound, activeTargetIds) {
let count = 0;
const promises = [];
return targetsStore(txn)
.iterate((key, value) => {
const targetData = fromDbTarget(value);
if (targetData.sequenceNumber <= upperBound &&
activeTargetIds.get(targetData.targetId) === null) {
count++;
promises.push(this.removeTargetData(txn, targetData));
}
})
.next(() => PersistencePromise.waitFor(promises))
.next(() => count);
}
/**
* Call provided function with each `TargetData` that we have cached.
*/
forEachTarget(txn, f) {
return targetsStore(txn).iterate((key, value) => {
const targetData = fromDbTarget(value);
f(targetData);
});
}
retrieveMetadata(transaction) {
return globalTargetStore(transaction)
.get(DbTargetGlobalKey)
.next(metadata => {
hardAssert(metadata !== null);
return metadata;
});
}
saveMetadata(transaction, metadata) {
return globalTargetStore(transaction).put(DbTargetGlobalKey, metadata);
}
saveTargetData(transaction, targetData) {
return targetsStore(transaction).put(toDbTarget(this.serializer, targetData));
}
/**
* In-place updates the provided metadata to account for values in the given
* TargetData. Saving is done separately. Returns true if there were any
* changes to the metadata.
*/
updateMetadataFromTargetData(targetData, metadata) {
let updated = false;
if (targetData.targetId > metadata.highestTargetId) {
metadata.highestTargetId = targetData.targetId;
updated = true;
}
if (targetData.sequenceNumber > metadata.highestListenSequenceNumber) {
metadata.highestListenSequenceNumber = targetData.sequenceNumber;
updated = true;
}
return updated;
}
getTargetCount(transaction) {
return this.retrieveMetadata(transaction).next(metadata => metadata.targetCount);
}
getTargetData(transaction, target) {
// Iterating by the canonicalId may yield more than one result because
// canonicalId values are not required to be unique per target. This query
// depends on the queryTargets index to be efficient.
const canonicalId = canonifyTarget(target);
const range = IDBKeyRange.bound([canonicalId, Number.NEGATIVE_INFINITY], [canonicalId, Number.POSITIVE_INFINITY]);
let result = null;
return targetsStore(transaction)
.iterate({ range, index: DbTargetQueryTargetsIndexName }, (key, value, control) => {
const found = fromDbTarget(value);
// After finding a potential match, check that the target is
// actually equal to the requested target.
if (targetEquals(target, found.target)) {
result = found;
control.done();
}
})
.next(() => result);
}
addMatchingKeys(txn, keys, targetId) {
// PORTING NOTE: The reverse index (documentsTargets) is maintained by
// IndexedDb.
const promises = [];
const store = documentTargetStore(txn);
keys.forEach(key => {
const path = encodeResourcePath(key.path);
promises.push(store.put({ targetId, path }));
promises.push(this.referenceDelegate.addReference(txn, targetId, key));
});
return PersistencePromise.waitFor(promises);
}
removeMatchingKeys(txn, keys, targetId) {
// PORTING NOTE: The reverse index (documentsTargets) is maintained by
// IndexedDb.
const store = documentTargetStore(txn);
return PersistencePromise.forEach(keys, (key) => {
const path = encodeResourcePath(key.path);
return PersistencePromise.waitFor([
store.delete([targetId, path]),
this.referenceDelegate.removeReference(txn, targetId, key)
]);
});
}
removeMatchingKeysForTargetId(txn, targetId) {
const store = documentTargetStore(txn);
const range = IDBKeyRange.bound([targetId], [targetId + 1],
/*lowerOpen=*/ false,
/*upperOpen=*/ true);
return store.delete(range);
}
    /** Returns the set of document keys currently associated with `targetId`. */
    getMatchingKeysForTargetId(txn, targetId) {
        // All rows for this target fall in [[targetId], [targetId + 1]).
        const range = IDBKeyRange.bound([targetId], [targetId + 1],
        /*lowerOpen=*/ false,
        /*upperOpen=*/ true);
        const store = documentTargetStore(txn);
        let result = documentKeySet();
        return store
            .iterate({ range, keysOnly: true }, (key, _, control) => {
            // The primary key is [targetId, encodedPath]; decode the path part.
            const path = decodeResourcePath(key[1]);
            const docKey = new DocumentKey(path);
            result = result.add(docKey);
        })
            .next(() => result);
    }
    /**
     * Returns whether the document identified by `key` is a member of any
     * target. A sentinel row alone (targetId 0) does not count as membership.
     */
    containsKey(txn, key) {
        const path = encodeResourcePath(key.path);
        // Bound the index scan to rows for this document's encoded path.
        const range = IDBKeyRange.bound([path], [immediateSuccessor(path)],
        /*lowerOpen=*/ false,
        /*upperOpen=*/ true);
        let count = 0;
        return documentTargetStore(txn)
            .iterate({
            index: DbTargetDocumentDocumentTargetsIndex,
            keysOnly: true,
            range
        }, ([targetId, path], _, control) => {
            // Having a sentinel row for a document does not count as containing that document;
            // For the target cache, containing the document means the document is part of some
            // target. (NOTE: the destructured `path` here shadows the outer one.)
            if (targetId !== 0) {
                count++;
                // One real target is enough; stop scanning.
                control.done();
            }
        })
            .next(() => count > 0);
    }
/**
* Looks up a TargetData entry by target ID.
*
* @param targetId - The target ID of the TargetData entry to look up.
* @returns The cached TargetData entry, or null if the cache has no entry for
* the target.
*/
// PORTING NOTE: Multi-tab only.
getTargetDataForTarget(transaction, targetId) {
return targetsStore(transaction)
.get(targetId)
.next(found => {
if (found) {
return fromDbTarget(found);
}
else {
return null;
}
});
}
}
/**
* Helper to get a typed SimpleDbStore for the queries object store.
*/
function targetsStore(txn) {
    // Store handles are always resolved through the shared getStore() helper,
    // which scopes them to the given transaction.
    return getStore(txn, DbTargetStore);
}
/**
* Helper to get a typed SimpleDbStore for the target globals object store.
*/
function globalTargetStore(txn) {
    // Resolves the target-global metadata store for this transaction.
    return getStore(txn, DbTargetGlobalStore);
}
/**
* Helper to get a typed SimpleDbStore for the document target object store.
*/
function documentTargetStore(txn) {
    // Rows are keyed by [targetId, encodedPath]; rows with targetId 0 are
    // sentinels carrying a document's last-accessed sequence number.
    return getStore(txn, DbTargetDocumentStore);
}
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Result returned by `collect()` when garbage collection was skipped, either
 * because collection is disabled or the cache is below the size threshold.
 */
const GC_DID_NOT_RUN = {
    didRun: false,
    sequenceNumbersCollected: 0,
    targetsRemoved: 0,
    documentsRemoved: 0
};
/** Sentinel threshold value that disables LRU collection entirely. */
const LRU_COLLECTION_DISABLED = -1;
/** Default cache-size threshold above which collection may run (40 MiB). */
const LRU_DEFAULT_CACHE_SIZE_BYTES = 40 * 1024 * 1024;
/**
 * Tuning parameters for LRU garbage collection. Use the static presets
 * (`DEFAULT`, `DISABLED`) or `withCacheSize()` to construct instances.
 */
class LruParams {
    /**
     * @param cacheSizeCollectionThreshold - Collection only runs when the
     * cache size exceeds this many bytes; `LRU_COLLECTION_DISABLED` (-1)
     * causes collection to always be skipped.
     * @param percentileToCollect - The percentage of sequence numbers to
     * attempt to collect on each run.
     * @param maximumSequenceNumbersToCollect - Hard cap on the number of
     * sequence numbers collected in one run, bounding work when the cache has
     * grown very large.
     */
    constructor(cacheSizeCollectionThreshold, percentileToCollect, maximumSequenceNumbersToCollect) {
        this.cacheSizeCollectionThreshold = cacheSizeCollectionThreshold;
        this.percentileToCollect = percentileToCollect;
        this.maximumSequenceNumbersToCollect = maximumSequenceNumbersToCollect;
    }
    /**
     * Returns params using the given byte threshold together with the default
     * percentile and sequence-number cap.
     */
    static withCacheSize(cacheSize) {
        return new LruParams(cacheSize, LruParams.DEFAULT_COLLECTION_PERCENTILE, LruParams.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT);
    }
}
LruParams.DEFAULT_COLLECTION_PERCENTILE = 10;
LruParams.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT = 1000;
LruParams.DEFAULT = new LruParams(LRU_DEFAULT_CACHE_SIZE_BYTES, LruParams.DEFAULT_COLLECTION_PERCENTILE, LruParams.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT);
LruParams.DISABLED = new LruParams(LRU_COLLECTION_DISABLED, 0, 0);
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Log tag used for LruGarbageCollector/LruScheduler debug output. */
const LOG_TAG$e = 'LruGarbageCollector';
/** Minimum LRU cache size (1 MiB); not referenced within this section. */
const LRU_MINIMUM_CACHE_SIZE_BYTES = 1 * 1024 * 1024;
/** How long we wait to try running LRU GC after SDK initialization. */
const INITIAL_GC_DELAY_MS = 1 * 60 * 1000;
/** Minimum amount of time between GC checks, after the first one. */
const REGULAR_GC_DELAY_MS = 5 * 60 * 1000;
/**
 * Orders [sequenceNumber, insertionIndex] buffer entries: primarily by
 * sequence number, with the insertion index as a tie breaker.
 */
function bufferEntryComparator([aSequence, aIndex], [bSequence, bIndex]) {
    const bySequence = primitiveComparator(aSequence, bSequence);
    if (bySequence !== 0) {
        return bySequence;
    }
    // The tie-break order doesn't matter for correctness, but sorting entries
    // created earlier as smaller biases the buffer against churn.
    return primitiveComparator(aIndex, bIndex);
}
/**
 * Used to calculate the nth sequence number. Keeps a rolling buffer of the
 * lowest n values passed to `addElement`, and finally reports the largest of
 * them in `maxValue`.
 */
class RollingSequenceNumberBuffer {
    constructor(maxElements) {
        // The "n": maximum number of entries retained in the buffer.
        this.maxElements = maxElements;
        // Sorted by sequence number, then insertion index (bufferEntryComparator),
        // so `last()` is always the largest retained entry.
        this.buffer = new SortedSet(bufferEntryComparator);
        // Monotonically increasing counter used to break ties between equal
        // sequence numbers.
        this.previousIndex = 0;
    }
    /** Returns the next insertion index (pre-incremented). */
    nextIndex() {
        return ++this.previousIndex;
    }
    /**
     * Offers a sequence number to the buffer. It is retained only while the
     * buffer is not yet full, or when it is smaller than the current largest
     * retained entry (which it then replaces).
     */
    addElement(sequenceNumber) {
        const entry = [sequenceNumber, this.nextIndex()];
        if (this.buffer.size < this.maxElements) {
            this.buffer = this.buffer.add(entry);
        }
        else {
            const highestValue = this.buffer.last();
            if (bufferEntryComparator(entry, highestValue) < 0) {
                this.buffer = this.buffer.delete(highestValue).add(entry);
            }
        }
    }
    get maxValue() {
        // Guaranteed to be non-empty. If we decide we are not collecting any
        // sequence numbers, nthSequenceNumber below short-circuits. If we have
        // decided that we are collecting n sequence numbers, it's because n is some
        // percentage of the existing sequence numbers. That means we should never
        // be in a situation where we are collecting sequence numbers but don't
        // actually have any.
        return this.buffer.last()[0];
    }
}
/**
 * This class is responsible for the scheduling of LRU garbage collection. It handles checking
 * whether or not GC is enabled, as well as which delay to use before the next run.
 */
class LruScheduler {
    constructor(garbageCollector, asyncQueue, localStore) {
        this.garbageCollector = garbageCollector;
        this.asyncQueue = asyncQueue;
        this.localStore = localStore;
        // Handle of the currently scheduled delayed GC run; null when idle.
        this.gcTask = null;
    }
    /** Schedules the first GC run, unless collection is disabled. */
    start() {
        if (this.garbageCollector.params.cacheSizeCollectionThreshold !==
            LRU_COLLECTION_DISABLED) {
            this.scheduleGC(INITIAL_GC_DELAY_MS);
        }
    }
    /** Cancels any pending GC run. */
    stop() {
        if (this.gcTask) {
            this.gcTask.cancel();
            this.gcTask = null;
        }
    }
    get started() {
        return this.gcTask !== null;
    }
    /**
     * Enqueues a GC run after `delay` ms. Each run re-schedules the next one,
     * so collection repeats every REGULAR_GC_DELAY_MS after the initial delay.
     */
    scheduleGC(delay) {
        logDebug(LOG_TAG$e, `Garbage collection scheduled in ${delay}ms`);
        this.gcTask = this.asyncQueue.enqueueAfterDelay("lru_garbage_collection" /* TimerId.LruGarbageCollection */, delay, async () => {
            this.gcTask = null;
            try {
                await this.localStore.collectGarbage(this.garbageCollector);
            }
            catch (e) {
                // IndexedDB transaction failures are treated as transient: skip
                // this cycle and try again at the next scheduled run.
                if (isIndexedDbTransactionError(e)) {
                    logDebug(LOG_TAG$e, 'Ignoring IndexedDB error during garbage collection: ', e);
                }
                else {
                    await ignoreIfPrimaryLeaseLoss(e);
                }
            }
            await this.scheduleGC(REGULAR_GC_DELAY_MS);
        });
    }
}
/**
 * Implements the steps for LRU garbage collection.
 */
class LruGarbageCollectorImpl {
    constructor(delegate, params) {
        this.delegate = delegate;
        this.params = params;
    }
    /**
     * Calculates the number of entries to collect as `percentile` percent of
     * the total sequence-number count (targets plus orphaned documents).
     */
    calculateTargetCount(txn, percentile) {
        return this.delegate.getSequenceNumberCount(txn).next(targetCount => {
            return Math.floor((percentile / 100.0) * targetCount);
        });
    }
    /**
     * Returns the nth-lowest sequence number across all targets and orphaned
     * documents — the upper bound at or below which entries are collected.
     * Short-circuits to ListenSequence.INVALID when n is 0.
     */
    nthSequenceNumber(txn, n) {
        if (n === 0) {
            return PersistencePromise.resolve(ListenSequence.INVALID);
        }
        const buffer = new RollingSequenceNumberBuffer(n);
        return this.delegate
            .forEachTarget(txn, target => buffer.addElement(target.sequenceNumber))
            .next(() => {
            return this.delegate.forEachOrphanedDocumentSequenceNumber(txn, sequenceNumber => buffer.addElement(sequenceNumber));
        })
            .next(() => buffer.maxValue);
    }
    removeTargets(txn, upperBound, activeTargetIds) {
        return this.delegate.removeTargets(txn, upperBound, activeTargetIds);
    }
    removeOrphanedDocuments(txn, upperBound) {
        return this.delegate.removeOrphanedDocuments(txn, upperBound);
    }
    /**
     * Runs a collection pass unless collection is disabled or the cache is
     * still below the size threshold; in those cases resolves to
     * GC_DID_NOT_RUN.
     */
    collect(txn, activeTargetIds) {
        if (this.params.cacheSizeCollectionThreshold === LRU_COLLECTION_DISABLED) {
            logDebug('LruGarbageCollector', 'Garbage collection skipped; disabled');
            return PersistencePromise.resolve(GC_DID_NOT_RUN);
        }
        return this.getCacheSize(txn).next(cacheSize => {
            if (cacheSize < this.params.cacheSizeCollectionThreshold) {
                logDebug('LruGarbageCollector', `Garbage collection skipped; Cache size ${cacheSize} ` +
                    `is lower than threshold ${this.params.cacheSizeCollectionThreshold}`);
                return GC_DID_NOT_RUN;
            }
            else {
                return this.runGarbageCollection(txn, activeTargetIds);
            }
        });
    }
    getCacheSize(txn) {
        return this.delegate.getCacheSize(txn);
    }
    /**
     * The collection pass itself: count how many sequence numbers to collect
     * (capped at the configured maximum), find the corresponding upper-bound
     * sequence number, then remove inactive targets and orphaned documents at
     * or below that bound, timing each phase for debug logging.
     */
    runGarbageCollection(txn, activeTargetIds) {
        let upperBoundSequenceNumber;
        let sequenceNumbersToCollect, targetsRemoved;
        // Timestamps for various pieces of the process
        let countedTargetsTs, foundUpperBoundTs, removedTargetsTs, removedDocumentsTs;
        const startTs = Date.now();
        return this.calculateTargetCount(txn, this.params.percentileToCollect)
            .next(sequenceNumbers => {
            // Cap at the configured max
            if (sequenceNumbers > this.params.maximumSequenceNumbersToCollect) {
                logDebug('LruGarbageCollector', 'Capping sequence numbers to collect down ' +
                    `to the maximum of ${this.params.maximumSequenceNumbersToCollect} ` +
                    `from ${sequenceNumbers}`);
                sequenceNumbersToCollect =
                    this.params.maximumSequenceNumbersToCollect;
            }
            else {
                sequenceNumbersToCollect = sequenceNumbers;
            }
            countedTargetsTs = Date.now();
            return this.nthSequenceNumber(txn, sequenceNumbersToCollect);
        })
            .next(upperBound => {
            upperBoundSequenceNumber = upperBound;
            foundUpperBoundTs = Date.now();
            return this.removeTargets(txn, upperBoundSequenceNumber, activeTargetIds);
        })
            .next(numTargetsRemoved => {
            targetsRemoved = numTargetsRemoved;
            removedTargetsTs = Date.now();
            return this.removeOrphanedDocuments(txn, upperBoundSequenceNumber);
        })
            .next(documentsRemoved => {
            removedDocumentsTs = Date.now();
            if (getLogLevel() <= LogLevel.DEBUG) {
                const desc = 'LRU Garbage Collection\n' +
                    `\tCounted targets in ${countedTargetsTs - startTs}ms\n` +
                    `\tDetermined least recently used ${sequenceNumbersToCollect} in ` +
                    `${foundUpperBoundTs - countedTargetsTs}ms\n` +
                    `\tRemoved ${targetsRemoved} targets in ` +
                    `${removedTargetsTs - foundUpperBoundTs}ms\n` +
                    `\tRemoved ${documentsRemoved} documents in ` +
                    `${removedDocumentsTs - removedTargetsTs}ms\n` +
                    `Total Duration: ${removedDocumentsTs - startTs}ms`;
                logDebug('LruGarbageCollector', desc);
            }
            return PersistencePromise.resolve({
                didRun: true,
                sequenceNumbersCollected: sequenceNumbersToCollect,
                targetsRemoved,
                documentsRemoved
            });
        });
    }
}
/** Factory for the LRU garbage collector implementation. */
function newLruGarbageCollector(delegate, params) {
    return new LruGarbageCollectorImpl(delegate, params);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Provides LRU functionality for IndexedDB persistence. */
class IndexedDbLruDelegateImpl {
    constructor(db, params) {
        this.db = db;
        this.garbageCollector = newLruGarbageCollector(this, params);
    }
    /**
     * Total number of sequence numbers: one per target plus one per orphaned
     * (sentinel-only) document.
     */
    getSequenceNumberCount(txn) {
        const docCountPromise = this.orphanedDocumentCount(txn);
        const targetCountPromise = this.db.getTargetCache().getTargetCount(txn);
        return targetCountPromise.next(targetCount => docCountPromise.next(docCount => targetCount + docCount));
    }
    /** Counts the documents that are not a member of any target. */
    orphanedDocumentCount(txn) {
        let orphanedCount = 0;
        return this.forEachOrphanedDocumentSequenceNumber(txn, _ => {
            orphanedCount++;
        }).next(() => orphanedCount);
    }
    forEachTarget(txn, f) {
        return this.db.getTargetCache().forEachTarget(txn, f);
    }
    forEachOrphanedDocumentSequenceNumber(txn, f) {
        return this.forEachOrphanedDocument(txn, (docKey, sequenceNumber) => f(sequenceNumber));
    }
    // Adding or removing a target-document reference refreshes the document's
    // sentinel row with the transaction's current sequence number.
    addReference(txn, targetId, key) {
        return writeSentinelKey(txn, key);
    }
    removeReference(txn, targetId, key) {
        return writeSentinelKey(txn, key);
    }
    removeTargets(txn, upperBound, activeTargetIds) {
        return this.db.getTargetCache().removeTargets(txn, upperBound, activeTargetIds);
    }
    markPotentiallyOrphaned(txn, key) {
        return writeSentinelKey(txn, key);
    }
    /**
     * Returns true if anything would prevent this document from being garbage
     * collected, given that the document in question is not present in any
     * targets and has a sequence number less than or equal to the upper bound for
     * the collection run.
     */
    isPinned(txn, docKey) {
        return mutationQueuesContainKey(txn, docKey);
    }
    /**
     * Removes every orphaned document whose sequence number is at or below
     * `upperBound` and that is not pinned by a pending mutation. Resolves to
     * the number of documents removed.
     */
    removeOrphanedDocuments(txn, upperBound) {
        const documentCache = this.db.getRemoteDocumentCache();
        const changeBuffer = documentCache.newChangeBuffer();
        const promises = [];
        let documentCount = 0;
        const iteration = this.forEachOrphanedDocument(txn, (docKey, sequenceNumber) => {
            if (sequenceNumber <= upperBound) {
                const p = this.isPinned(txn, docKey).next(isPinned => {
                    if (!isPinned) {
                        documentCount++;
                        // Our size accounting requires us to read all documents before
                        // removing them.
                        return changeBuffer.getEntry(txn, docKey).next(() => {
                            changeBuffer.removeEntry(docKey, SnapshotVersion.min());
                            // Also drop the document's sentinel row.
                            return documentTargetStore(txn).delete(sentinelKey$1(docKey));
                        });
                    }
                });
                promises.push(p);
            }
        });
        return iteration
            .next(() => PersistencePromise.waitFor(promises))
            .next(() => changeBuffer.apply(txn))
            .next(() => documentCount);
    }
    // Rather than deleting the target, stamps it with the transaction's
    // current sequence number and persists the update.
    removeTarget(txn, targetData) {
        const updated = targetData.withSequenceNumber(txn.currentSequenceNumber);
        return this.db.getTargetCache().updateTargetData(txn, updated);
    }
    updateLimboDocument(txn, key) {
        return writeSentinelKey(txn, key);
    }
    /**
     * Call provided function for each document in the cache that is 'orphaned'. Orphaned
     * means not a part of any target, so the only entry in the target-document index for
     * that document will be the sentinel row (targetId 0), which will also have the sequence
     * number for the last time the document was accessed.
     */
    forEachOrphanedDocument(txn, f) {
        const store = documentTargetStore(txn);
        let nextToReport = ListenSequence.INVALID;
        let nextPath;
        return store
            .iterate({
            index: DbTargetDocumentDocumentTargetsIndex
        }, ([targetId, docKey], { path, sequenceNumber }) => {
            if (targetId === 0) {
                // if nextToReport is valid, report it, this is a new key so the
                // last one must not be a member of any targets.
                if (nextToReport !== ListenSequence.INVALID) {
                    f(new DocumentKey(decodeResourcePath(nextPath)), nextToReport);
                }
                // set nextToReport to be this sequence number. It's the next one we
                // might report, if we don't find any targets for this document.
                // Note that the sequence number must be defined when the targetId
                // is 0.
                nextToReport = sequenceNumber;
                nextPath = path;
            }
            else {
                // set nextToReport to be invalid, we know we don't need to report
                // this one since we found a target for it.
                nextToReport = ListenSequence.INVALID;
            }
        })
            .next(() => {
            // Since we report sequence numbers after getting to the next key, we
            // need to check if the last key we iterated over was an orphaned
            // document and report it.
            if (nextToReport !== ListenSequence.INVALID) {
                f(new DocumentKey(decodeResourcePath(nextPath)), nextToReport);
            }
        });
    }
    getCacheSize(txn) {
        return this.db.getRemoteDocumentCache().getSize(txn);
    }
}
/**
 * Returns the primary key of the sentinel row (targetId 0) for the given
 * document in the target-document store.
 */
function sentinelKey$1(key) {
    return [0, encodeResourcePath(key.path)];
}
/**
* @returns A value suitable for writing a sentinel row in the target-document
* store.
*/
function sentinelRow(key, sequenceNumber) {
    // targetId 0 marks this row as a sentinel rather than a real target
    // membership; it carries the document's last-accessed sequence number.
    return { targetId: 0, path: encodeResourcePath(key.path), sequenceNumber };
}
/**
 * Upserts the sentinel row for `key`, stamping it with the transaction's
 * current sequence number.
 */
function writeSentinelKey(txn, key) {
    return documentTargetStore(txn).put(sentinelRow(key, txn.currentSequenceNumber));
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * An in-memory buffer of entries to be written to a RemoteDocumentCache.
 * It can be used to batch up a set of changes to be written to the cache, but
 * additionally supports reading entries back with the `getEntry()` method,
 * falling back to the underlying RemoteDocumentCache if no entry is
 * buffered.
 *
 * Entries added to the cache *must* be read first. This is to facilitate
 * calculating the size delta of the pending changes.
 *
 * PORTING NOTE: This class was implemented then removed from other platforms.
 * If byte-counting ends up being needed on the other platforms, consider
 * porting this class as part of that implementation work.
 */
class RemoteDocumentChangeBuffer {
    constructor() {
        // A mapping of document key to the new cache entry that should be written.
        this.changes = new ObjectMap(key => key.toString(), (l, r) => l.isEqual(r));
        // Set once apply() has been called; the buffer is single-use.
        this.changesApplied = false;
    }
    /**
     * Buffers a `RemoteDocumentCache.addEntry()` call.
     *
     * You can only modify documents that have already been retrieved via
     * `getEntry()/getEntries()` (enforced via IndexedDbs `apply()`).
     */
    addEntry(document) {
        this.assertNotApplied();
        this.changes.set(document.key, document);
    }
    /**
     * Buffers a `RemoteDocumentCache.removeEntry()` call.
     *
     * You can only remove documents that have already been retrieved via
     * `getEntry()/getEntries()` (enforced via IndexedDbs `apply()`).
     */
    removeEntry(key, readTime) {
        this.assertNotApplied();
        // Removals are represented as invalid documents stamped with the read
        // time of the entry being removed.
        this.changes.set(key, MutableDocument.newInvalidDocument(key).setReadTime(readTime));
    }
    /**
     * Looks up an entry in the cache. The buffered changes will first be checked,
     * and if no buffered change applies, this will forward to
     * `RemoteDocumentCache.getEntry()`.
     *
     * @param transaction - The transaction in which to perform any persistence
     * operations.
     * @param documentKey - The key of the entry to look up.
     * @returns The cached document or an invalid document if we have nothing
     * cached.
     */
    getEntry(transaction, documentKey) {
        this.assertNotApplied();
        const bufferedEntry = this.changes.get(documentKey);
        if (bufferedEntry !== undefined) {
            return PersistencePromise.resolve(bufferedEntry);
        }
        else {
            return this.getFromCache(transaction, documentKey);
        }
    }
    /**
     * Looks up several entries in the cache, forwarding to
     * `RemoteDocumentCache.getEntry()`.
     *
     * @param transaction - The transaction in which to perform any persistence
     * operations.
     * @param documentKeys - The keys of the entries to look up.
     * @returns A map of cached documents, indexed by key. If an entry cannot be
     * found, the corresponding key will be mapped to an invalid document.
     */
    getEntries(transaction, documentKeys) {
        return this.getAllFromCache(transaction, documentKeys);
    }
    /**
     * Applies buffered changes to the underlying RemoteDocumentCache, using
     * the provided transaction.
     */
    apply(transaction) {
        this.assertNotApplied();
        this.changesApplied = true;
        return this.applyChanges(transaction);
    }
    /** Helper to assert this.changes is not null */
    assertNotApplied() {
        // NOTE(review): empty in this bundle — the debug assertion has been
        // stripped, but call sites still invoke it before mutating `changes`.
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * The RemoteDocumentCache for IndexedDb. To construct, invoke
 * `newIndexedDbRemoteDocumentCache()`.
 */
class IndexedDbRemoteDocumentCacheImpl {
    constructor(serializer) {
        this.serializer = serializer;
    }
    /** Sets the index manager used by this cache. */
    setIndexManager(indexManager) {
        this.indexManager = indexManager;
    }
    /**
     * Adds the supplied entries to the cache.
     *
     * All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer
     * returned by `newChangeBuffer()` to ensure proper accounting of metadata.
     */
    addEntry(transaction, key, doc) {
        const documentStore = remoteDocumentsStore(transaction);
        return documentStore.put(doc);
    }
    /**
     * Removes a document from the cache.
     *
     * All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer
     * returned by `newChangeBuffer()` to ensure proper accounting of metadata.
     */
    removeEntry(transaction, documentKey, readTime) {
        const store = remoteDocumentsStore(transaction);
        // The store's primary key includes the read time, so this addresses the
        // exact stored revision.
        return store.delete(dbReadTimeKey(documentKey, readTime));
    }
    /**
     * Updates the current cache size.
     *
     * Callers to `addEntry()` and `removeEntry()` *must* call this afterwards to update the
     * cache's metadata.
     */
    updateMetadata(transaction, sizeDelta) {
        return this.getMetadata(transaction).next(metadata => {
            metadata.byteSize += sizeDelta;
            return this.setMetadata(transaction, metadata);
        });
    }
    /**
     * Looks up a single entry, resolving to an invalid document when the key
     * is not present in the cache.
     */
    getEntry(transaction, documentKey) {
        let doc = MutableDocument.newInvalidDocument(documentKey);
        return remoteDocumentsStore(transaction)
            .iterate({
            index: DbRemoteDocumentDocumentKeyIndex,
            range: IDBKeyRange.only(dbKey(documentKey))
        }, (_, dbRemoteDoc) => {
            doc = this.maybeDecodeDocument(documentKey, dbRemoteDoc);
        })
            .next(() => doc);
    }
    /**
     * Looks up an entry in the cache.
     *
     * @param documentKey - The key of the entry to look up.
     * @returns The cached document entry and its size.
     */
    getSizedEntry(transaction, documentKey) {
        let result = {
            size: 0,
            document: MutableDocument.newInvalidDocument(documentKey)
        };
        return remoteDocumentsStore(transaction)
            .iterate({
            index: DbRemoteDocumentDocumentKeyIndex,
            range: IDBKeyRange.only(dbKey(documentKey))
        }, (_, dbRemoteDoc) => {
            result = {
                document: this.maybeDecodeDocument(documentKey, dbRemoteDoc),
                size: dbDocumentSize(dbRemoteDoc)
            };
        })
            .next(() => result);
    }
    /**
     * Looks up several entries, mapping missing keys to invalid documents.
     */
    getEntries(transaction, documentKeys) {
        let results = mutableDocumentMap();
        return this.forEachDbEntry(transaction, documentKeys, (key, dbRemoteDoc) => {
            const doc = this.maybeDecodeDocument(key, dbRemoteDoc);
            results = results.insert(key, doc);
        }).next(() => results);
    }
    /**
     * Looks up several entries in the cache.
     *
     * @param documentKeys - The set of keys entries to look up.
     * @returns A map of documents indexed by key and a map of sizes indexed by
     * key (zero if the document does not exist).
     */
    getSizedEntries(transaction, documentKeys) {
        let results = mutableDocumentMap();
        let sizeMap = new SortedMap(DocumentKey.comparator);
        return this.forEachDbEntry(transaction, documentKeys, (key, dbRemoteDoc) => {
            const doc = this.maybeDecodeDocument(key, dbRemoteDoc);
            results = results.insert(key, doc);
            sizeMap = sizeMap.insert(key, dbDocumentSize(dbRemoteDoc));
        }).next(() => {
            return { documents: results, sizeMap };
        });
    }
    /**
     * Invokes `callback` once per requested key, with the raw DB document or
     * null when not cached. Implemented as a merge join between the sorted
     * requested keys and a single index scan, skipping between matches.
     */
    forEachDbEntry(transaction, documentKeys, callback) {
        if (documentKeys.isEmpty()) {
            return PersistencePromise.resolve();
        }
        let sortedKeys = new SortedSet(dbKeyComparator);
        documentKeys.forEach(e => (sortedKeys = sortedKeys.add(e)));
        // One scan bounded by the smallest and largest requested keys.
        const range = IDBKeyRange.bound(dbKey(sortedKeys.first()), dbKey(sortedKeys.last()));
        const keyIter = sortedKeys.getIterator();
        let nextKey = keyIter.getNext();
        return remoteDocumentsStore(transaction)
            .iterate({ index: DbRemoteDocumentDocumentKeyIndex, range }, (_, dbRemoteDoc, control) => {
            const potentialKey = DocumentKey.fromSegments([
                ...dbRemoteDoc.prefixPath,
                dbRemoteDoc.collectionGroup,
                dbRemoteDoc.documentId
            ]);
            // Go through keys not found in cache.
            while (nextKey && dbKeyComparator(nextKey, potentialKey) < 0) {
                callback(nextKey, null);
                nextKey = keyIter.getNext();
            }
            if (nextKey && nextKey.isEqual(potentialKey)) {
                // Key found in cache.
                callback(nextKey, dbRemoteDoc);
                nextKey = keyIter.hasNext() ? keyIter.getNext() : null;
            }
            // Skip to the next key (if there is one).
            if (nextKey) {
                control.skip(dbKey(nextKey));
            }
            else {
                control.done();
            }
        })
            .next(() => {
            // The rest of the keys are not in the cache. One case where `iterate`
            // above won't go through them is when the cache is empty.
            while (nextKey) {
                callback(nextKey, null);
                nextKey = keyIter.hasNext() ? keyIter.getNext() : null;
            }
        });
    }
    /**
     * Loads all documents in the query's collection with a read time/key after
     * `offset`, keeping those that match the query or appear in `mutatedDocs`.
     */
    getDocumentsMatchingQuery(transaction, query, offset, mutatedDocs, context) {
        const collection = query.path;
        // Primary key: [prefixPath, collectionId, readTimeKey, documentId].
        const startKey = [
            collection.popLast().toArray(),
            collection.lastSegment(),
            toDbTimestampKey(offset.readTime),
            offset.documentKey.path.isEmpty()
                ? ''
                : offset.documentKey.path.lastSegment()
        ];
        const endKey = [
            collection.popLast().toArray(),
            collection.lastSegment(),
            [Number.MAX_SAFE_INTEGER, Number.MAX_SAFE_INTEGER],
            ''
        ];
        // lowerOpen=true excludes the offset document itself.
        return remoteDocumentsStore(transaction)
            .loadAll(IDBKeyRange.bound(startKey, endKey, true))
            .next(dbRemoteDocs => {
            context === null || context === void 0 ? void 0 : context.incrementDocumentReadCount(dbRemoteDocs.length);
            let results = mutableDocumentMap();
            for (const dbRemoteDoc of dbRemoteDocs) {
                const document = this.maybeDecodeDocument(DocumentKey.fromSegments(dbRemoteDoc.prefixPath.concat(dbRemoteDoc.collectionGroup, dbRemoteDoc.documentId)), dbRemoteDoc);
                if (document.isFoundDocument() &&
                    (queryMatches(query, document) || mutatedDocs.has(document.key))) {
                    // Either the document matches the given query, or it is mutated.
                    results = results.insert(document.key, document);
                }
            }
            return results;
        });
    }
    /**
     * Loads up to `limit` documents of the given collection group, starting
     * after `offset`, in collection-group index order.
     */
    getAllFromCollectionGroup(transaction, collectionGroup, offset, limit) {
        let results = mutableDocumentMap();
        const startKey = dbCollectionGroupKey(collectionGroup, offset);
        const endKey = dbCollectionGroupKey(collectionGroup, IndexOffset.max());
        return remoteDocumentsStore(transaction)
            .iterate({
            index: DbRemoteDocumentCollectionGroupIndex,
            range: IDBKeyRange.bound(startKey, endKey, true)
        }, (_, dbRemoteDoc, control) => {
            const document = this.maybeDecodeDocument(DocumentKey.fromSegments(dbRemoteDoc.prefixPath.concat(dbRemoteDoc.collectionGroup, dbRemoteDoc.documentId)), dbRemoteDoc);
            results = results.insert(document.key, document);
            // Stop iterating once the limit has been reached.
            if (results.size === limit) {
                control.done();
            }
        })
            .next(() => results);
    }
    /** Returns a change buffer bound to this cache. */
    newChangeBuffer(options) {
        return new IndexedDbRemoteDocumentChangeBuffer(this, !!options && options.trackRemovals);
    }
    /** Returns the cache's total byte size, as tracked in its metadata row. */
    getSize(txn) {
        return this.getMetadata(txn).next(metadata => metadata.byteSize);
    }
    getMetadata(txn) {
        return documentGlobalStore(txn)
            .get(DbRemoteDocumentGlobalKey)
            .next(metadata => {
            // The metadata row is expected to always exist.
            hardAssert(!!metadata);
            return metadata;
        });
    }
    setMetadata(txn, metadata) {
        return documentGlobalStore(txn).put(DbRemoteDocumentGlobalKey, metadata);
    }
    /**
     * Decodes `dbRemoteDoc` and returns the document (or an invalid document if
     * the document corresponds to the format used for sentinel deletes).
     */
    maybeDecodeDocument(documentKey, dbRemoteDoc) {
        if (dbRemoteDoc) {
            const doc = fromDbRemoteDocument(this.serializer, dbRemoteDoc);
            // Whether the document is a sentinel removal and should only be used in the
            // `getNewDocumentChanges()`
            const isSentinelRemoval = doc.isNoDocument() && doc.version.isEqual(SnapshotVersion.min());
            if (!isSentinelRemoval) {
                return doc;
            }
        }
        return MutableDocument.newInvalidDocument(documentKey);
    }
}
/** Creates a new IndexedDbRemoteDocumentCache. */
function newIndexedDbRemoteDocumentCache(serializer) {
    // The serializer converts between in-memory documents and their stored
    // DbRemoteDocument representation (see to/fromDbRemoteDocument usages).
    return new IndexedDbRemoteDocumentCacheImpl(serializer);
}
/**
 * Handles the details of adding and updating documents in the IndexedDbRemoteDocumentCache.
 *
 * Unlike the MemoryRemoteDocumentChangeBuffer, the IndexedDb implementation computes the size
 * delta for all submitted changes. This avoids having to re-read all documents from IndexedDb
 * when we apply the changes.
 */
class IndexedDbRemoteDocumentChangeBuffer extends RemoteDocumentChangeBuffer {
    /**
     * @param documentCache - The IndexedDbRemoteDocumentCache to apply the changes to.
     * @param trackRemovals - Whether to create sentinel deletes that can be tracked by
     * `getNewDocumentChanges()`.
     */
    constructor(documentCache, trackRemovals) {
        super();
        this.documentCache = documentCache;
        this.trackRemovals = trackRemovals;
        // A map of document sizes and read times prior to applying the changes in
        // this buffer.
        this.documentStates = new ObjectMap(key => key.toString(), (l, r) => l.isEqual(r));
    }
    /**
     * Writes all buffered changes to the cache. Assumes every changed document
     * was previously read through this buffer, so `documentStates` holds its
     * prior size and read time (used for the size-delta accounting).
     */
    applyChanges(transaction) {
        const promises = [];
        let sizeDelta = 0;
        let collectionParents = new SortedSet((l, r) => primitiveComparator(l.canonicalString(), r.canonicalString()));
        this.changes.forEach((key, documentChange) => {
            const previousDoc = this.documentStates.get(key);
            // The old revision is always removed first; the primary key includes
            // the read time, so an update writes a new row.
            promises.push(this.documentCache.removeEntry(transaction, key, previousDoc.readTime));
            if (documentChange.isValidDocument()) {
                const doc = toDbRemoteDocument(this.documentCache.serializer, documentChange);
                collectionParents = collectionParents.add(key.path.popLast());
                const size = dbDocumentSize(doc);
                sizeDelta += size - previousDoc.size;
                promises.push(this.documentCache.addEntry(transaction, key, doc));
            }
            else {
                sizeDelta -= previousDoc.size;
                if (this.trackRemovals) {
                    // In order to track removals, we store a "sentinel delete" in the
                    // RemoteDocumentCache. This entry is represented by a NoDocument
                    // with a version of 0 and ignored by `maybeDecodeDocument()` but
                    // preserved in `getNewDocumentChanges()`.
                    const deletedDoc = toDbRemoteDocument(this.documentCache.serializer, documentChange.convertToNoDocument(SnapshotVersion.min()));
                    promises.push(this.documentCache.addEntry(transaction, key, deletedDoc));
                }
            }
        });
        collectionParents.forEach(parent => {
            promises.push(this.documentCache.indexManager.addToCollectionParentIndex(transaction, parent));
        });
        promises.push(this.documentCache.updateMetadata(transaction, sizeDelta));
        return PersistencePromise.waitFor(promises);
    }
    getFromCache(transaction, documentKey) {
        // Record the size of everything we load from the cache so we can compute a delta later.
        return this.documentCache
            .getSizedEntry(transaction, documentKey)
            .next(getResult => {
            this.documentStates.set(documentKey, {
                size: getResult.size,
                readTime: getResult.document.readTime
            });
            return getResult.document;
        });
    }
    getAllFromCache(transaction, documentKeys) {
        // Record the size of everything we load from the cache so we can compute
        // a delta later.
        return this.documentCache
            .getSizedEntries(transaction, documentKeys)
            .next(({ documents, sizeMap }) => {
            // Note: `getAllFromCache` returns two maps instead of a single map from
            // keys to `DocumentSizeEntry`s. This is to allow returning the
            // `MutableDocumentMap` directly, without a conversion.
            sizeMap.forEach((documentKey, size) => {
                this.documentStates.set(documentKey, {
                    size,
                    readTime: documents.get(documentKey).readTime
                });
            });
            return documents;
        });
    }
}
/** Helper to get a typed SimpleDbStore for the remote-document global store. */
function documentGlobalStore(txn) {
    // Holds the single metadata row (keyed by DbRemoteDocumentGlobalKey) that
    // tracks the cache's byteSize.
    return getStore(txn, DbRemoteDocumentGlobalStore);
}
/**
* Helper to get a typed SimpleDbStore for the remoteDocuments object store.
*/
function remoteDocumentsStore(txn) {
    // All remote-document reads and writes in this cache resolve their store
    // handle through this helper.
    return getStore(txn, DbRemoteDocumentStore);
}
/**
 * Returns a key that can be used for document lookups on the
 * `DbRemoteDocumentDocumentKeyIndex` index:
 * [prefixPath, collectionId, documentId].
 */
function dbKey(documentKey) {
    const segments = documentKey.path.toArray();
    const documentId = segments[segments.length - 1];
    const collectionId = segments[segments.length - 2];
    // Everything before the final collection/document pair is the prefix path.
    return [segments.slice(0, segments.length - 2), collectionId, documentId];
}
/**
 * Returns a key that can be used for document lookups via the primary key of
 * the DbRemoteDocument object store.
 *
 * The key has the shape [prefix path, collection id, read time, document id].
 */
function dbReadTimeKey(documentKey, readTime) {
    const segments = documentKey.path.toArray();
    const prefixPath = segments.slice(0, segments.length - 2);
    const collectionId = segments[segments.length - 2];
    const documentId = segments[segments.length - 1];
    return [prefixPath, collectionId, toDbTimestampKey(readTime), documentId];
}
/**
 * Returns a key that can be used for document lookups on the
 * `DbRemoteDocumentDocumentCollectionGroupIndex` index.
 *
 * The key has the shape [collection id, read time, prefix path, document id].
 */
function dbCollectionGroupKey(collectionGroup, offset) {
    const segments = offset.documentKey.path.toArray();
    const prefixPath = segments.slice(0, segments.length - 2);
    // An empty offset path (start of the collection group) has no document id.
    const documentId = segments.length > 0 ? segments[segments.length - 1] : '';
    return [collectionGroup, toDbTimestampKey(offset.readTime), prefixPath, documentId];
}
/**
 * Comparator that compares document keys according to the primary key sorting
 * used by the `DbRemoteDocumentDocument` store (by prefix path, collection id
 * and then document ID).
 *
 * Visible for testing.
 */
function dbKeyComparator(l, r) {
    const left = l.path.toArray();
    const right = r.path.toArray();
    // The ordering is based on https://chromium.googlesource.com/chromium/blink/+/fe5c21fef94dae71c1c3344775b8d8a7f7e6d9ec/Source/modules/indexeddb/IDBKey.cpp#74
    // Compare the shared portion of the two prefix paths segment by segment.
    const sharedPrefixLength = Math.min(left.length - 2, right.length - 2);
    for (let i = 0; i < sharedPrefixLength; ++i) {
        const segmentCmp = primitiveComparator(left[i], right[i]);
        if (segmentCmp) {
            return segmentCmp;
        }
    }
    // Shorter paths order first.
    const lengthCmp = primitiveComparator(left.length, right.length);
    if (lengthCmp) {
        return lengthCmp;
    }
    // Break remaining ties by collection id, then by document id.
    const collectionCmp = primitiveComparator(left[left.length - 2], right[right.length - 2]);
    if (collectionCmp) {
        return collectionCmp;
    }
    return primitiveComparator(left[left.length - 1], right[right.length - 1]);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Schema Version for the Web client:
* 1. Initial version including Mutation Queue, Query Cache, and Remote
* Document Cache
* 2. Used to ensure a targetGlobal object exists and add targetCount to it. No
* longer required because migration 3 unconditionally clears it.
* 3. Dropped and re-created Query Cache to deal with cache corruption related
* to limbo resolution. Addresses
* https://github.com/firebase/firebase-ios-sdk/issues/1548
* 4. Multi-Tab Support.
* 5. Removal of held write acks.
* 6. Create document global for tracking document cache size.
* 7. Ensure every cached document has a sentinel row with a sequence number.
* 8. Add collection-parent index for Collection Group queries.
* 9. Change RemoteDocumentChanges store to be keyed by readTime rather than
* an auto-incrementing ID. This is required for Index-Free queries.
* 10. Rewrite the canonical IDs to the explicit Protobuf-based format.
* 11. Add bundles and named_queries for bundle support.
* 12. Add document overlays.
* 13. Rewrite the keys of the remote document cache to allow for efficient
* document lookup via `getAll()`.
* 14. Add overlays.
* 15. Add indexing support.
*/
// The schema version currently targeted by this build (see history above).
const SCHEMA_VERSION = 15;
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Represents a local view (overlay) of a document, and the fields that are
 * locally mutated.
 */
class OverlayedDocument {
    /**
     * @param overlayedDocument - The document as seen locally, i.e. with the
     *     overlay (pending local mutation) applied.
     * @param mutatedFields - See the inline field comment below.
     */
    constructor(overlayedDocument,
    /**
     * The fields that are locally mutated by patch mutations.
     *
     * If the overlayed document is from set or delete mutations, this is `null`.
     * If there is no overlay (mutation) for the document, this is an empty `FieldMask`.
     */
    mutatedFields) {
        this.overlayedDocument = overlayedDocument;
        this.mutatedFields = mutatedFields;
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * A readonly view of the local state of all documents we're tracking (i.e. we
 * have a cached version in remoteDocumentCache or local mutations for the
 * document). The view is computed by applying the mutations in the
 * MutationQueue to the RemoteDocumentCache.
 */
class LocalDocumentsView {
    /**
     * @param remoteDocumentCache - Cache of documents as received from the
     *     backend.
     * @param mutationQueue - The queue of locally issued mutation batches.
     * @param documentOverlayCache - Cache of per-document overlay mutations
     *     (each overlay summarizes the queue's net effect on one document).
     * @param indexManager - Used to look up collection parents for collection
     *     group queries.
     */
    constructor(remoteDocumentCache, mutationQueue, documentOverlayCache, indexManager) {
        this.remoteDocumentCache = remoteDocumentCache;
        this.mutationQueue = mutationQueue;
        this.documentOverlayCache = documentOverlayCache;
        this.indexManager = indexManager;
    }
    /**
     * Get the local view of the document identified by `key`.
     *
     * @returns Local view of the document or null if we don't have any cached
     * state for it.
     */
    getDocument(transaction, key) {
        let overlay = null;
        return this.documentOverlayCache
            .getOverlay(transaction, key)
            .next(value => {
            overlay = value;
            return this.remoteDocumentCache.getEntry(transaction, key);
        })
            .next(document => {
            if (overlay !== null) {
                // Apply the pending overlay mutation on top of the cached base document.
                mutationApplyToLocalView(overlay.mutation, document, FieldMask.empty(), Timestamp.now());
            }
            return document;
        });
    }
    /**
     * Gets the local view of the documents identified by `keys`.
     *
     * If we don't have cached state for a document in `keys`, a NoDocument will
     * be stored for that key in the resulting set.
     */
    getDocuments(transaction, keys) {
        return this.remoteDocumentCache
            .getEntries(transaction, keys)
            .next(docs => this.getLocalViewOfDocuments(transaction, docs, documentKeySet()).next(() => docs));
    }
    /**
     * Similar to `getDocuments`, but creates the local view from the given
     * `baseDocs` without retrieving documents from the local store.
     *
     * @param transaction - The transaction this operation is scoped to.
     * @param docs - The documents to apply local mutations to get the local views.
     * @param existenceStateChanged - The set of document keys whose existence state
     *   is changed. This is useful to determine if some documents overlay needs
     *   to be recalculated.
     */
    getLocalViewOfDocuments(transaction, docs, existenceStateChanged = documentKeySet()) {
        const overlays = newOverlayMap();
        return this.populateOverlays(transaction, overlays, docs).next(() => {
            return this.computeViews(transaction, docs, overlays, existenceStateChanged).next(computeViewsResult => {
                let result = documentMap();
                computeViewsResult.forEach((documentKey, overlayedDocument) => {
                    result = result.insert(documentKey, overlayedDocument.overlayedDocument);
                });
                return result;
            });
        });
    }
    /**
     * Gets the overlayed documents for the given document map, which will include
     * the local view of those documents and a `FieldMask` indicating which fields
     * are mutated locally, `null` if overlay is a Set or Delete mutation.
     */
    getOverlayedDocuments(transaction, docs) {
        const overlays = newOverlayMap();
        return this.populateOverlays(transaction, overlays, docs).next(() => this.computeViews(transaction, docs, overlays, documentKeySet()));
    }
    /**
     * Fetches the overlays for {@code docs} and adds them to provided overlay map
     * if the map does not already contain an entry for the given document key.
     */
    populateOverlays(transaction, overlays, docs) {
        const missingOverlays = [];
        docs.forEach(key => {
            if (!overlays.has(key)) {
                missingOverlays.push(key);
            }
        });
        return this.documentOverlayCache
            .getOverlays(transaction, missingOverlays)
            .next(result => {
            result.forEach((key, val) => {
                overlays.set(key, val);
            });
        });
    }
    /**
     * Computes the local view for the given documents.
     *
     * @param docs - The documents to compute views for. It also has the base
     *   version of the documents.
     * @param overlays - The overlays that need to be applied to the given base
     *   version of the documents.
     * @param existenceStateChanged - A set of documents whose existence states
     *   might have changed. This is used to determine if we need to re-calculate
     *   overlays from mutation queues.
     * @return A map represents the local documents view.
     */
    computeViews(transaction, docs, overlays, existenceStateChanged) {
        let recalculateDocuments = mutableDocumentMap();
        const mutatedFields = newDocumentKeyMap();
        const results = newOverlayedDocumentMap();
        docs.forEach((_, doc) => {
            const overlay = overlays.get(doc.key);
            // Recalculate an overlay if the document's existence state changed due to
            // a remote event *and* the overlay is a PatchMutation. This is because
            // document existence state can change if some patch mutation's
            // preconditions are met.
            // NOTE: we recalculate when `overlay` is undefined as well, because there
            // might be a patch mutation whose precondition does not match before the
            // change (hence overlay is undefined), but would now match.
            if (existenceStateChanged.has(doc.key) &&
                (overlay === undefined || overlay.mutation instanceof PatchMutation)) {
                recalculateDocuments = recalculateDocuments.insert(doc.key, doc);
            }
            else if (overlay !== undefined) {
                mutatedFields.set(doc.key, overlay.mutation.getFieldMask());
                mutationApplyToLocalView(overlay.mutation, doc, overlay.mutation.getFieldMask(), Timestamp.now());
            }
            else {
                // no overlay exists
                // Using EMPTY to indicate there is no overlay for the document.
                mutatedFields.set(doc.key, FieldMask.empty());
            }
        });
        return this.recalculateAndSaveOverlays(transaction, recalculateDocuments).next(recalculatedFields => {
            recalculatedFields.forEach((documentKey, mask) => mutatedFields.set(documentKey, mask));
            docs.forEach((documentKey, document) => {
                var _a;
                return results.set(documentKey, new OverlayedDocument(document, (_a = mutatedFields.get(documentKey)) !== null && _a !== void 0 ? _a : null));
            });
            return results;
        });
    }
    /**
     * Re-applies all mutation batches affecting `docs` to compute fresh field
     * masks and overlay mutations, then persists the recalculated overlays
     * (batch by batch, in descending batch-id order so only the newest overlay
     * per document is saved).
     *
     * @returns A map from document key to the recalculated `FieldMask`.
     */
    recalculateAndSaveOverlays(transaction, docs) {
        const masks = newDocumentKeyMap();
        // A reverse lookup map from batch id to the documents within that batch.
        let documentsByBatchId = new SortedMap((key1, key2) => key1 - key2);
        let processed = documentKeySet();
        return this.mutationQueue
            .getAllMutationBatchesAffectingDocumentKeys(transaction, docs)
            .next(batches => {
            for (const batch of batches) {
                batch.keys().forEach(key => {
                    const baseDoc = docs.get(key);
                    if (baseDoc === null) {
                        return;
                    }
                    let mask = masks.get(key) || FieldMask.empty();
                    mask = batch.applyToLocalView(baseDoc, mask);
                    masks.set(key, mask);
                    const newSet = (documentsByBatchId.get(batch.batchId) || documentKeySet()).add(key);
                    documentsByBatchId = documentsByBatchId.insert(batch.batchId, newSet);
                });
            }
        })
            .next(() => {
            const promises = [];
            // Iterate in descending order of batch IDs, and skip documents that are
            // already saved.
            const iter = documentsByBatchId.getReverseIterator();
            while (iter.hasNext()) {
                const entry = iter.getNext();
                const batchId = entry.key;
                const keys = entry.value;
                const overlays = newMutationMap();
                keys.forEach(key => {
                    if (!processed.has(key)) {
                        const overlayMutation = calculateOverlayMutation(docs.get(key), masks.get(key));
                        if (overlayMutation !== null) {
                            overlays.set(key, overlayMutation);
                        }
                        processed = processed.add(key);
                    }
                });
                promises.push(this.documentOverlayCache.saveOverlays(transaction, batchId, overlays));
            }
            return PersistencePromise.waitFor(promises);
        })
            .next(() => masks);
    }
    /**
     * Recalculates overlays by reading the documents from remote document cache
     * first, and saves them after they are calculated.
     */
    recalculateAndSaveOverlaysForDocumentKeys(transaction, documentKeys) {
        return this.remoteDocumentCache
            .getEntries(transaction, documentKeys)
            .next(docs => this.recalculateAndSaveOverlays(transaction, docs));
    }
    /**
     * Performs a query against the local view of all documents.
     *
     * @param transaction - The persistence transaction.
     * @param query - The query to match documents against.
     * @param offset - Read time and key to start scanning by (exclusive).
     * @param context - A optional tracker to keep a record of important details
     *   during database local query execution.
     */
    getDocumentsMatchingQuery(transaction, query, offset, context) {
        if (isDocumentQuery$1(query)) {
            return this.getDocumentsMatchingDocumentQuery(transaction, query.path);
        }
        else if (isCollectionGroupQuery(query)) {
            return this.getDocumentsMatchingCollectionGroupQuery(transaction, query, offset, context);
        }
        else {
            return this.getDocumentsMatchingCollectionQuery(transaction, query, offset, context);
        }
    }
    /**
     * Given a collection group, returns the next documents that follow the provided offset, along
     * with an updated batch ID.
     *
     * <p>The documents returned by this method are ordered by remote version from the provided
     * offset. If there are no more remote documents after the provided offset, documents with
     * mutations in order of batch id from the offset are returned. Since all documents in a batch are
     * returned together, the total number of documents returned can exceed {@code count}.
     *
     * @param transaction
     * @param collectionGroup The collection group for the documents.
     * @param offset The offset to index into.
     * @param count The number of documents to return
     * @return A LocalWriteResult with the documents that follow the provided offset and the last processed batch id.
     */
    getNextDocuments(transaction, collectionGroup, offset, count) {
        return this.remoteDocumentCache
            .getAllFromCollectionGroup(transaction, collectionGroup, offset, count)
            .next((originalDocs) => {
            const overlaysPromise = count - originalDocs.size > 0
                ? this.documentOverlayCache.getOverlaysForCollectionGroup(transaction, collectionGroup, offset.largestBatchId, count - originalDocs.size)
                : PersistencePromise.resolve(newOverlayMap());
            // The callsite will use the largest batch ID together with the latest read time to create
            // a new index offset. Since we only process batch IDs if all remote documents have been read,
            // no overlay will increase the overall read time. This is why we only need to special case
            // the batch id.
            let largestBatchId = INITIAL_LARGEST_BATCH_ID;
            let modifiedDocs = originalDocs;
            return overlaysPromise.next(overlays => {
                return PersistencePromise.forEach(overlays, (key, overlay) => {
                    if (largestBatchId < overlay.largestBatchId) {
                        largestBatchId = overlay.largestBatchId;
                    }
                    if (originalDocs.get(key)) {
                        return PersistencePromise.resolve();
                    }
                    return this.remoteDocumentCache
                        .getEntry(transaction, key)
                        .next(doc => {
                        modifiedDocs = modifiedDocs.insert(key, doc);
                    });
                })
                    .next(() => this.populateOverlays(transaction, overlays, originalDocs))
                    .next(() => this.computeViews(transaction, modifiedDocs, overlays, documentKeySet()))
                    .next(localDocs => ({
                    batchId: largestBatchId,
                    changes: convertOverlayedDocumentMapToDocumentMap(localDocs)
                }));
            });
        });
    }
    /** Looks up a single document by key; resolves to a map with 0 or 1 entries. */
    getDocumentsMatchingDocumentQuery(transaction, docPath) {
        // Just do a simple document lookup.
        return this.getDocument(transaction, new DocumentKey(docPath)).next(document => {
            let result = documentMap();
            if (document.isFoundDocument()) {
                result = result.insert(document.key, document);
            }
            return result;
        });
    }
    /**
     * Runs the collection group query as one collection query per parent path
     * that contains the collection id, merging all results into a single map.
     */
    getDocumentsMatchingCollectionGroupQuery(transaction, query, offset, context) {
        const collectionId = query.collectionGroup;
        let results = documentMap();
        return this.indexManager
            .getCollectionParents(transaction, collectionId)
            .next(parents => {
            // Perform a collection query against each parent that contains the
            // collectionId and aggregate the results.
            return PersistencePromise.forEach(parents, (parent) => {
                const collectionQuery = asCollectionQueryAtPath(query, parent.child(collectionId));
                return this.getDocumentsMatchingCollectionQuery(transaction, collectionQuery, offset, context).next(r => {
                    r.forEach((key, doc) => {
                        results = results.insert(key, doc);
                    });
                });
            }).next(() => results);
        });
    }
    /**
     * Matches a single-collection query against the remote documents plus the
     * overlays for that collection, returning only documents that still match
     * after overlays are applied.
     */
    getDocumentsMatchingCollectionQuery(transaction, query, offset, context) {
        // Query the remote documents and overlay mutations.
        let overlays;
        return this.documentOverlayCache
            .getOverlaysForCollection(transaction, query.path, offset.largestBatchId)
            .next(result => {
            overlays = result;
            return this.remoteDocumentCache.getDocumentsMatchingQuery(transaction, query, offset, overlays, context);
        })
            .next(remoteDocuments => {
            // As documents might match the query because of their overlay we need to
            // include documents for all overlays in the initial document set.
            overlays.forEach((_, overlay) => {
                const key = overlay.getKey();
                if (remoteDocuments.get(key) === null) {
                    remoteDocuments = remoteDocuments.insert(key, MutableDocument.newInvalidDocument(key));
                }
            });
            // Apply the overlays and match against the query.
            let results = documentMap();
            remoteDocuments.forEach((key, document) => {
                const overlay = overlays.get(key);
                if (overlay !== undefined) {
                    mutationApplyToLocalView(overlay.mutation, document, FieldMask.empty(), Timestamp.now());
                }
                // Finally, insert the documents that still match the query
                if (queryMatches(query, document)) {
                    results = results.insert(key, document);
                }
            });
            return results;
        });
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * In-memory cache of bundle metadata and named queries, keyed by bundle id
 * and query name respectively.
 */
class MemoryBundleCache {
    constructor(serializer) {
        this.serializer = serializer;
        // Bundle metadata keyed by bundle id.
        this.bundles = new Map();
        // Named queries keyed by query name.
        this.namedQueries = new Map();
    }
    /** Looks up previously saved metadata for the given bundle id. */
    getBundleMetadata(transaction, bundleId) {
        const metadata = this.bundles.get(bundleId);
        return PersistencePromise.resolve(metadata);
    }
    /** Converts and stores the given bundle metadata. */
    saveBundleMetadata(transaction, bundleMetadata) {
        const converted = fromBundleMetadata(bundleMetadata);
        this.bundles.set(bundleMetadata.id, converted);
        return PersistencePromise.resolve();
    }
    /** Looks up a previously saved named query by name. */
    getNamedQuery(transaction, queryName) {
        const namedQuery = this.namedQueries.get(queryName);
        return PersistencePromise.resolve(namedQuery);
    }
    /** Converts and stores the given named query. */
    saveNamedQuery(transaction, query) {
        const converted = fromProtoNamedQuery(query);
        this.namedQueries.set(query.name, converted);
        return PersistencePromise.resolve();
    }
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An in-memory implementation of DocumentOverlayCache.
*/
class MemoryDocumentOverlayCache {
constructor() {
// A map sorted by DocumentKey, whose value is a pair of the largest batch id
// for the overlay and the overlay itself.
this.overlays = new SortedMap(DocumentKey.comparator);
this.overlayByBatchId = new Map();
}
getOverlay(transaction, key) {
return PersistencePromise.resolve(this.overlays.get(key));
}
getOverlays(transaction, keys) {
const result = newOverlayMap();
return PersistencePromise.forEach(keys, (key) => {
return this.getOverlay(transaction, key).next(overlay => {
if (overlay !== null) {
result.set(key, overlay);
}
});
}).next(() => result);
}
saveOverlays(transaction, largestBatchId, overlays) {
overlays.forEach((_, mutation) => {
this.saveOverlay(transaction, largestBatchId, mutation);
});
return PersistencePromise.resolve();
}
removeOverlaysForBatchId(transaction, documentKeys, batchId) {
const keys = this.overlayByBatchId.get(batchId);
if (keys !== undefined) {
keys.forEach(key => (this.overlays = this.overlays.remove(key)));
this.overlayByBatchId.delete(batchId);
}
return PersistencePromise.resolve();
}
getOverlaysForCollection(transaction, collection, sinceBatchId) {
const result = newOverlayMap();
const immediateChildrenPathLength = collection.length + 1;
const prefix = new DocumentKey(collection.child(''));
const iter = this.overlays.getIteratorFrom(prefix);
while (iter.hasNext()) {
const entry = iter.getNext();
const overlay = entry.value;
const key = overlay.getKey();
if (!collection.isPrefixOf(key.path)) {
break;
}
// Documents from sub-collections
if (key.path.length !== immediateChildrenPathLength) {
continue;
}
if (overlay.largestBatchId > sinceBatchId) {
result.set(overlay.getKey(), overlay);
}
}
return PersistencePromise.resolve(result);
}
getOverlaysForCollectionGroup(transaction, collectionGroup, sinceBatchId, count) {
let batchIdToOverlays = new SortedMap((key1, key2) => key1 - key2);
const iter = this.overlays.getIterator();
while (iter.hasNext()) {
const entry = iter.getNext();
const overlay = entry.value;
const key = overlay.getKey();
if (key.getCollectionGroup() !== collectionGroup) {
continue;
}
if (overlay.largestBatchId > sinceBatchId) {
let overlaysForBatchId = batchIdToOverlays.get(overlay.largestBatchId);
if (overlaysForBatchId === null) {
overlaysForBatchId = newOverlayMap();
batchIdToOverlays = batchIdToOverlays.insert(overlay.largestBatchId, overlaysForBatchId);
}
overlaysForBatchId.set(overlay.getKey(), overlay);
}
}
const result = newOverlayMap();
const batchIter = batchIdToOverlays.getIterator();
while (batchIter.hasNext()) {
const entry = batchIter.getNext();
const overlays = entry.value;
overlays.forEach((key, overlay) => result.set(key, overlay));
if (result.size() >= count) {
break;
}
}
return PersistencePromise.resolve(result);
}
saveOverlay(transaction, largestBatchId, mutation) {
// Remove the association of the overlay to its batch id.
const existing = this.overlays.get(mutation.key);
if (existing !== null) {
const newSet = this.overlayByBatchId
.get(existing.largestBatchId)
.delete(mutation.key);
this.overlayByBatchId.set(existing.largestBatchId, newSet);
}
this.overlays = this.overlays.insert(mutation.key, new Overlay(largestBatchId, mutation));
// Create the association of this overlay to the given largestBatchId.
let batch = this.overlayByBatchId.get(largestBatchId);
if (batch === undefined) {
batch = documentKeySet();
this.overlayByBatchId.set(largestBatchId, batch);
}
this.overlayByBatchId.set(largestBatchId, batch.add(mutation.key));
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * A collection of references to a document from some kind of numbered entity
 * (either a target ID or batch ID). As references are added to or removed from
 * the set corresponding events are emitted to a registered garbage collector.
 *
 * Each reference is represented by a DocReference that uniquely identifies it.
 * References are stored primarily in an immutable set sorted by document key,
 * so "is this document garbage?" (no remaining references) can be answered
 * efficiently. A secondary set, sorted by target/batch id, supports efficient
 * removal and enumeration of all references held by a single ID.
 */
class ReferenceSet {
    constructor() {
        // Outstanding references, primarily ordered by document key.
        this.refsByKey = new SortedSet(DocReference.compareByKey);
        // The same references, ordered by target/batch id for range scans.
        this.refsByTarget = new SortedSet(DocReference.compareByTargetId);
    }
    /** Returns true if the reference set contains no references. */
    isEmpty() {
        return this.refsByKey.isEmpty();
    }
    /** Adds a reference to the given document key for the given ID. */
    addReference(key, id) {
        const reference = new DocReference(key, id);
        this.refsByKey = this.refsByKey.add(reference);
        this.refsByTarget = this.refsByTarget.add(reference);
    }
    /** Add references to the given document keys for the given ID. */
    addReferences(keys, id) {
        keys.forEach(documentKey => this.addReference(documentKey, id));
    }
    /**
     * Removes a reference to the given document key for the given
     * ID.
     */
    removeReference(key, id) {
        this.removeRef(new DocReference(key, id));
    }
    /** Removes references to the given document keys for the given ID. */
    removeReferences(keys, id) {
        keys.forEach(documentKey => this.removeReference(documentKey, id));
    }
    /**
     * Clears all references with a given ID. Calls removeRef() for each key
     * removed and returns the removed keys.
     */
    removeReferencesForId(id) {
        const emptyKey = new DocumentKey(new ResourcePath([]));
        const range = [
            new DocReference(emptyKey, id),
            new DocReference(emptyKey, id + 1)
        ];
        const removedKeys = [];
        this.refsByTarget.forEachInRange(range, ref => {
            this.removeRef(ref);
            removedKeys.push(ref.key);
        });
        return removedKeys;
    }
    /** Drops every reference in the set. */
    removeAllReferences() {
        this.refsByKey.forEach(ref => this.removeRef(ref));
    }
    /** Removes a single reference from both orderings. */
    removeRef(ref) {
        this.refsByKey = this.refsByKey.delete(ref);
        this.refsByTarget = this.refsByTarget.delete(ref);
    }
    /** Returns the set of document keys referenced by the given ID. */
    referencesForId(id) {
        const emptyKey = new DocumentKey(new ResourcePath([]));
        const startRef = new DocReference(emptyKey, id);
        const endRef = new DocReference(emptyKey, id + 1);
        let referencedKeys = documentKeySet();
        this.refsByTarget.forEachInRange([startRef, endRef], ref => {
            referencedKeys = referencedKeys.add(ref.key);
        });
        return referencedKeys;
    }
    /** Returns true if any ID holds a reference to the given document key. */
    containsKey(key) {
        const probe = new DocReference(key, 0);
        const candidate = this.refsByKey.firstAfterOrEqual(probe);
        return candidate !== null && key.isEqual(candidate.key);
    }
}
/**
 * A single (document key, target-or-batch id) reference, with the two
 * comparators used by ReferenceSet's primary and secondary orderings.
 */
class DocReference {
    constructor(key, targetOrBatchId) {
        this.key = key;
        this.targetOrBatchId = targetOrBatchId;
    }
    /** Compare by key then by ID */
    static compareByKey(left, right) {
        const byKey = DocumentKey.comparator(left.key, right.key);
        return byKey || primitiveComparator(left.targetOrBatchId, right.targetOrBatchId);
    }
    /** Compare by ID then by key */
    static compareByTargetId(left, right) {
        const byId = primitiveComparator(left.targetOrBatchId, right.targetOrBatchId);
        return byId || DocumentKey.comparator(left.key, right.key);
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class MemoryMutationQueue {
    /**
     * @param indexManager - Used to index collection parents for new mutations.
     * @param referenceDelegate - Notified when a document may have become
     *     orphaned (see `removeMutationBatch`).
     */
    constructor(indexManager, referenceDelegate) {
        this.indexManager = indexManager;
        this.referenceDelegate = referenceDelegate;
        /**
         * The set of all mutations that have been sent but not yet been applied to
         * the backend.
         */
        this.mutationQueue = [];
        /** Next value to use when assigning sequential IDs to each mutation batch. */
        this.nextBatchId = 1;
        /** An ordered mapping between documents and the mutations batch IDs. */
        this.batchesByDocumentKey = new SortedSet(DocReference.compareByKey);
    }
    /** Resolves to true when no mutation batches are queued. */
    checkEmpty(transaction) {
        return PersistencePromise.resolve(this.mutationQueue.length === 0);
    }
addMutationBatch(transaction, localWriteTime, baseMutations, mutations) {
const batchId = this.nextBatchId;
this.nextBatchId++;
if (this.mutationQueue.length > 0) {
this.mutationQueue[this.mutationQueue.length - 1];
}
const batch = new MutationBatch(batchId, localWriteTime, baseMutations, mutations);
this.mutationQueue.push(batch);
// Track references by document key and index collection parents.
for (const mutation of mutations) {
this.batchesByDocumentKey = this.batchesByDocumentKey.add(new DocReference(mutation.key, batchId));
this.indexManager.addToCollectionParentIndex(transaction, mutation.key.path.popLast());
}
return PersistencePromise.resolve(batch);
}
    /**
     * Resolves to the mutation batch with the given batchId (via
     * `findMutationBatch`), or null when it is not in the queue.
     */
    lookupMutationBatch(transaction, batchId) {
        return PersistencePromise.resolve(this.findMutationBatch(batchId));
    }
    /**
     * Resolves to the next mutation batch after the given batchId, or null
     * when there is no later batch in the queue.
     */
    getNextMutationBatchAfterBatchId(transaction, batchId) {
        const nextBatchId = batchId + 1;
        // The requested batchId may still be out of range so normalize it to the
        // start of the queue.
        const rawIndex = this.indexOfBatchId(nextBatchId);
        const index = rawIndex < 0 ? 0 : rawIndex;
        return PersistencePromise.resolve(this.mutationQueue.length > index ? this.mutationQueue[index] : null);
    }
    /**
     * Resolves to the most recently assigned batch id (`nextBatchId - 1`), or
     * BATCHID_UNKNOWN when the queue is empty.
     */
    getHighestUnacknowledgedBatchId() {
        return PersistencePromise.resolve(this.mutationQueue.length === 0 ? BATCHID_UNKNOWN : this.nextBatchId - 1);
    }
    /** Resolves to a shallow copy of all queued mutation batches, in queue order. */
    getAllMutationBatches(transaction) {
        return PersistencePromise.resolve(this.mutationQueue.slice());
    }
    /**
     * Resolves to every queued batch containing a mutation for `documentKey`,
     * found by scanning `batchesByDocumentKey` over the [(key, 0), (key, +inf)]
     * range.
     */
    getAllMutationBatchesAffectingDocumentKey(transaction, documentKey) {
        const start = new DocReference(documentKey, 0);
        const end = new DocReference(documentKey, Number.POSITIVE_INFINITY);
        const result = [];
        this.batchesByDocumentKey.forEachInRange([start, end], ref => {
            const batch = this.findMutationBatch(ref.targetOrBatchId);
            result.push(batch);
        });
        return PersistencePromise.resolve(result);
    }
    /**
     * Resolves to every queued batch affecting any of `documentKeys`. Batch ids
     * are collected into a sorted set first, so each batch appears once and the
     * result is in ascending batch-id order.
     */
    getAllMutationBatchesAffectingDocumentKeys(transaction, documentKeys) {
        let uniqueBatchIDs = new SortedSet(primitiveComparator);
        documentKeys.forEach(documentKey => {
            const start = new DocReference(documentKey, 0);
            const end = new DocReference(documentKey, Number.POSITIVE_INFINITY);
            this.batchesByDocumentKey.forEachInRange([start, end], ref => {
                uniqueBatchIDs = uniqueBatchIDs.add(ref.targetOrBatchId);
            });
        });
        return PersistencePromise.resolve(this.findMutationBatches(uniqueBatchIDs));
    }
    /**
     * Resolves to every queued batch that mutates a document which is an
     * immediate child of the query's path (a potential query match). Only the
     * path is considered; the query's filters are not evaluated here.
     */
    getAllMutationBatchesAffectingQuery(transaction, query) {
        // Use the query path as a prefix for testing if a document matches the
        // query.
        const prefix = query.path;
        const immediateChildrenPathLength = prefix.length + 1;
        // Construct a document reference for actually scanning the index. Unlike
        // the prefix the document key in this reference must have an even number of
        // segments. The empty segment can be used a suffix of the query path
        // because it precedes all other segments in an ordered traversal.
        let startPath = prefix;
        if (!DocumentKey.isDocumentKey(startPath)) {
            startPath = startPath.child('');
        }
        const start = new DocReference(new DocumentKey(startPath), 0);
        // Find unique batchIDs referenced by all documents potentially matching the
        // query.
        let uniqueBatchIDs = new SortedSet(primitiveComparator);
        this.batchesByDocumentKey.forEachWhile(ref => {
            const rowKeyPath = ref.key.path;
            if (!prefix.isPrefixOf(rowKeyPath)) {
                return false;
            }
            else {
                // Rows with document keys more than one segment longer than the query
                // path can't be matches. For example, a query on 'rooms' can't match
                // the document /rooms/abc/messages/xyx.
                // TODO(mcg): we'll need a different scanner when we implement
                // ancestor queries.
                if (rowKeyPath.length === immediateChildrenPathLength) {
                    uniqueBatchIDs = uniqueBatchIDs.add(ref.targetOrBatchId);
                }
                return true;
            }
        }, start);
        return PersistencePromise.resolve(this.findMutationBatches(uniqueBatchIDs));
    }
findMutationBatches(batchIDs) {
// Construct an array of matching batches, sorted by batchID to ensure that
// multiple mutations affecting the same document key are applied in order.
const result = [];
batchIDs.forEach(batchId => {
const batch = this.findMutationBatch(batchId);
if (batch !== null) {
result.push(batch);
}
});
return result;
}
removeMutationBatch(transaction, batch) {
// Find the position of the first batch for removal.
const batchIndex = this.indexOfExistingBatchId(batch.batchId, 'removed');
hardAssert(batchIndex === 0);
this.mutationQueue.shift();
let references = this.batchesByDocumentKey;
return PersistencePromise.forEach(batch.mutations, (mutation) => {
const ref = new DocReference(mutation.key, batch.batchId);
references = references.delete(ref);
return this.referenceDelegate.markPotentiallyOrphaned(transaction, mutation.key);
}).next(() => {
this.batchesByDocumentKey = references;
});
}
removeCachedMutationKeys(batchId) {
// No-op since the memory mutation queue does not maintain a separate cache.
}
containsKey(txn, key) {
const ref = new DocReference(key, 0);
const firstRef = this.batchesByDocumentKey.firstAfterOrEqual(ref);
return PersistencePromise.resolve(key.isEqual(firstRef && firstRef.key));
}
    /**
     * Consistency check hook. The empty `if` body below appears to be the
     * residue of a debug assertion stripped at compile time (presumably: an
     * empty queue should hold no dangling document references — TODO confirm
     * against the SDK's unbundled sources); in this build it always resolves.
     */
    performConsistencyCheck(txn) {
        if (this.mutationQueue.length === 0) ;
        return PersistencePromise.resolve();
    }
/**
* Finds the index of the given batchId in the mutation queue and asserts that
* the resulting index is within the bounds of the queue.
*
* @param batchId - The batchId to search for
* @param action - A description of what the caller is doing, phrased in passive
* form (e.g. "acknowledged" in a routine that acknowledges batches).
*/
indexOfExistingBatchId(batchId, action) {
const index = this.indexOfBatchId(batchId);
return index;
}
/**
* Finds the index of the given batchId in the mutation queue. This operation
* is O(1).
*
* @returns The computed index of the batch with the given batchId, based on
* the state of the queue. Note this index can be negative if the requested
* batchId has already been remvoed from the queue or past the end of the
* queue if the batchId is larger than the last added batch.
*/
indexOfBatchId(batchId) {
if (this.mutationQueue.length === 0) {
// As an index this is past the end of the queue
return 0;
}
// Examine the front of the queue to figure out the difference between the
// batchId and indexes in the array. Note that since the queue is ordered
// by batchId, if the first batch has a larger batchId then the requested
// batchId doesn't exist in the queue.
const firstBatchId = this.mutationQueue[0].batchId;
return batchId - firstBatchId;
}
/**
* A version of lookupMutationBatch that doesn't return a promise, this makes
* other functions that uses this code easier to read and more efficent.
*/
findMutationBatch(batchId) {
const index = this.indexOfBatchId(batchId);
if (index < 0 || index >= this.mutationQueue.length) {
return null;
}
const batch = this.mutationQueue[index];
return batch;
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Creates the empty SortedMap (keyed by DocumentKey) that backs the memory
 * remote document cache.
 */
function documentEntryMap() {
    return new SortedMap(DocumentKey.comparator);
}
/**
 * The memory-only RemoteDocumentCache for IndexedDb. To construct, invoke
 * `newMemoryRemoteDocumentCache()`.
 */
class MemoryRemoteDocumentCacheImpl {
    /**
     * @param sizer - Used to assess the size of a document. For eager GC, this is
     * expected to just return 0 to avoid unnecessarily doing the work of
     * calculating the size.
     */
    constructor(sizer) {
        this.sizer = sizer;
        /** Underlying cache of documents and their read times. */
        this.docs = documentEntryMap();
        /** Size of all cached documents. */
        this.size = 0;
    }
    setIndexManager(indexManager) {
        this.indexManager = indexManager;
    }
    /**
     * Adds the supplied entry to the cache and updates the cache size as appropriate.
     *
     * All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer
     * returned by `newChangeBuffer()`.
     */
    addEntry(transaction, doc) {
        const key = doc.key;
        const entry = this.docs.get(key);
        const previousSize = entry ? entry.size : 0;
        const currentSize = this.sizer(doc);
        this.docs = this.docs.insert(key, {
            document: doc.mutableCopy(),
            size: currentSize
        });
        // Keeps the running total correct for both inserts (previousSize 0)
        // and overwrites of an existing entry.
        this.size += currentSize - previousSize;
        return this.indexManager.addToCollectionParentIndex(transaction, key.path.popLast());
    }
    /**
     * Removes the specified entry from the cache and updates the cache size as appropriate.
     *
     * All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer
     * returned by `newChangeBuffer()`.
     */
    removeEntry(documentKey) {
        const entry = this.docs.get(documentKey);
        if (entry) {
            this.docs = this.docs.remove(documentKey);
            this.size -= entry.size;
        }
    }
    /**
     * Returns a mutable copy of the cached document, or an invalid document
     * if the key is not cached.
     */
    getEntry(transaction, documentKey) {
        const entry = this.docs.get(documentKey);
        return PersistencePromise.resolve(entry
            ? entry.document.mutableCopy()
            : MutableDocument.newInvalidDocument(documentKey));
    }
    /** Looks up each key; missing entries map to invalid documents. */
    getEntries(transaction, documentKeys) {
        let results = mutableDocumentMap();
        documentKeys.forEach(documentKey => {
            const entry = this.docs.get(documentKey);
            results = results.insert(documentKey, entry
                ? entry.document.mutableCopy()
                : MutableDocument.newInvalidDocument(documentKey));
        });
        return PersistencePromise.resolve(results);
    }
    /**
     * Returns mutable copies of all cached documents in the query's
     * collection that sort after `offset` and either match the query or have
     * pending mutations (`mutatedDocs`).
     */
    getDocumentsMatchingQuery(transaction, query, offset, mutatedDocs) {
        let results = mutableDocumentMap();
        // Documents are ordered by key, so we can use a prefix scan to narrow down
        // the documents we need to match the query against.
        const collectionPath = query.path;
        const prefix = new DocumentKey(collectionPath.child(''));
        const iterator = this.docs.getIteratorFrom(prefix);
        while (iterator.hasNext()) {
            const { key, value: { document } } = iterator.getNext();
            if (!collectionPath.isPrefixOf(key.path)) {
                break;
            }
            if (key.path.length > collectionPath.length + 1) {
                // Exclude entries from subcollections.
                continue;
            }
            if (indexOffsetComparator(newIndexOffsetFromDocument(document), offset) <= 0) {
                // The document sorts before the offset.
                continue;
            }
            if (!mutatedDocs.has(document.key) && !queryMatches(query, document)) {
                // The document cannot possibly match the query.
                continue;
            }
            results = results.insert(document.key, document.mutableCopy());
        }
        return PersistencePromise.resolve(results);
    }
    getAllFromCollectionGroup(transaction, collectionGroup, offset, limit) {
        // This method should only be called from the IndexBackfiller if persistence
        // is enabled.
        fail();
    }
    /** Invokes `f` once for every document key currently in the cache. */
    forEachDocumentKey(transaction, f) {
        return PersistencePromise.forEach(this.docs, (key) => f(key));
    }
    newChangeBuffer(options) {
        // `trackRemovals` is ignored since the MemoryRemoteDocumentCache keeps
        // a separate changelog and does not need special handling for removals.
        return new MemoryRemoteDocumentChangeBuffer(this);
    }
    getSize(txn) {
        return PersistencePromise.resolve(this.size);
    }
}
/**
 * Creates a new memory-only RemoteDocumentCache.
 *
 * @param sizer - Used to assess the size of a document. For eager GC, this is
 * expected to just return 0 to avoid unnecessarily doing the work of
 * calculating the size.
 * @returns A fresh, empty MemoryRemoteDocumentCacheImpl.
 */
function newMemoryRemoteDocumentCache(sizer) {
    return new MemoryRemoteDocumentCacheImpl(sizer);
}
/**
 * Buffers document additions, updates and removals and flushes them into the
 * backing MemoryRemoteDocumentCache in a single `applyChanges()` call.
 */
class MemoryRemoteDocumentChangeBuffer extends RemoteDocumentChangeBuffer {
    constructor(documentCache) {
        super();
        this.documentCache = documentCache;
    }
    /** Writes every buffered change through to the underlying cache. */
    applyChanges(transaction) {
        const pendingWrites = [];
        this.changes.forEach((key, doc) => {
            if (!doc.isValidDocument()) {
                // An invalid document in the buffer represents a removal.
                this.documentCache.removeEntry(key);
                return;
            }
            pendingWrites.push(this.documentCache.addEntry(transaction, doc));
        });
        return PersistencePromise.waitFor(pendingWrites);
    }
    /** Reads a single document directly from the underlying cache. */
    getFromCache(transaction, documentKey) {
        return this.documentCache.getEntry(transaction, documentKey);
    }
    /** Reads a batch of documents directly from the underlying cache. */
    getAllFromCache(transaction, documentKeys) {
        return this.documentCache.getEntries(transaction, documentKeys);
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Memory-backed TargetCache: tracks target metadata, the highest target ID
 * and listen sequence number seen, and the document keys associated with
 * each active target.
 */
class MemoryTargetCache {
    constructor(persistence) {
        this.persistence = persistence;
        /**
         * Maps a target to the data about that target
         */
        this.targets = new ObjectMap(t => canonifyTarget(t), targetEquals);
        /** The last received snapshot version. */
        this.lastRemoteSnapshotVersion = SnapshotVersion.min();
        /** The highest numbered target ID encountered. */
        this.highestTargetId = 0;
        /** The highest sequence number encountered. */
        this.highestSequenceNumber = 0;
        /**
         * A ordered bidirectional mapping between documents and the remote target
         * IDs.
         */
        this.references = new ReferenceSet();
        this.targetCount = 0;
        this.targetIdGenerator = TargetIdGenerator.forTargetCache();
    }
    forEachTarget(txn, f) {
        this.targets.forEach((_, targetData) => f(targetData));
        return PersistencePromise.resolve();
    }
    getLastRemoteSnapshotVersion(transaction) {
        return PersistencePromise.resolve(this.lastRemoteSnapshotVersion);
    }
    getHighestSequenceNumber(transaction) {
        return PersistencePromise.resolve(this.highestSequenceNumber);
    }
    allocateTargetId(transaction) {
        this.highestTargetId = this.targetIdGenerator.next();
        return PersistencePromise.resolve(this.highestTargetId);
    }
    // Records the latest snapshot version and advances the highest listen
    // sequence number (the latter only monotonically).
    setTargetsMetadata(transaction, highestListenSequenceNumber, lastRemoteSnapshotVersion) {
        if (lastRemoteSnapshotVersion) {
            this.lastRemoteSnapshotVersion = lastRemoteSnapshotVersion;
        }
        if (highestListenSequenceNumber > this.highestSequenceNumber) {
            this.highestSequenceNumber = highestListenSequenceNumber;
        }
        return PersistencePromise.resolve();
    }
    // Shared by add/updateTargetData: stores the data and advances the
    // highest target ID and sequence number watermarks as needed.
    saveTargetData(targetData) {
        this.targets.set(targetData.target, targetData);
        const targetId = targetData.targetId;
        if (targetId > this.highestTargetId) {
            this.targetIdGenerator = new TargetIdGenerator(targetId);
            this.highestTargetId = targetId;
        }
        if (targetData.sequenceNumber > this.highestSequenceNumber) {
            this.highestSequenceNumber = targetData.sequenceNumber;
        }
    }
    addTargetData(transaction, targetData) {
        this.saveTargetData(targetData);
        this.targetCount += 1;
        return PersistencePromise.resolve();
    }
    updateTargetData(transaction, targetData) {
        this.saveTargetData(targetData);
        return PersistencePromise.resolve();
    }
    removeTargetData(transaction, targetData) {
        this.targets.delete(targetData.target);
        this.references.removeReferencesForId(targetData.targetId);
        this.targetCount -= 1;
        return PersistencePromise.resolve();
    }
    /**
     * Removes every target with a sequence number at or below `upperBound`
     * that is not listed in `activeTargetIds`; resolves with the number of
     * targets removed.
     */
    removeTargets(transaction, upperBound, activeTargetIds) {
        let count = 0;
        const removals = [];
        this.targets.forEach((key, targetData) => {
            if (targetData.sequenceNumber <= upperBound &&
                activeTargetIds.get(targetData.targetId) === null) {
                this.targets.delete(key);
                removals.push(this.removeMatchingKeysForTargetId(transaction, targetData.targetId));
                count++;
            }
        });
        return PersistencePromise.waitFor(removals).next(() => count);
    }
    getTargetCount(transaction) {
        return PersistencePromise.resolve(this.targetCount);
    }
    getTargetData(transaction, target) {
        const targetData = this.targets.get(target) || null;
        return PersistencePromise.resolve(targetData);
    }
    addMatchingKeys(txn, keys, targetId) {
        this.references.addReferences(keys, targetId);
        return PersistencePromise.resolve();
    }
    // Removes the keys from the target's reference set and lets the reference
    // delegate consider each for garbage collection.
    removeMatchingKeys(txn, keys, targetId) {
        this.references.removeReferences(keys, targetId);
        const referenceDelegate = this.persistence.referenceDelegate;
        const promises = [];
        if (referenceDelegate) {
            keys.forEach(key => {
                promises.push(referenceDelegate.markPotentiallyOrphaned(txn, key));
            });
        }
        return PersistencePromise.waitFor(promises);
    }
    removeMatchingKeysForTargetId(txn, targetId) {
        this.references.removeReferencesForId(targetId);
        return PersistencePromise.resolve();
    }
    getMatchingKeysForTargetId(txn, targetId) {
        const matchingKeys = this.references.referencesForId(targetId);
        return PersistencePromise.resolve(matchingKeys);
    }
    containsKey(txn, key) {
        return PersistencePromise.resolve(this.references.containsKey(key));
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$d = 'MemoryPersistence';
/**
 * A memory-backed instance of Persistence. Data is stored only in RAM and
 * not persisted across sessions.
 */
class MemoryPersistence {
    /**
     * The constructor accepts a factory for creating a reference delegate. This
     * allows both the delegate and this instance to have strong references to
     * each other without having nullable fields that would then need to be
     * checked or asserted on every access.
     */
    constructor(referenceDelegateFactory, serializer) {
        /** Per-user mutation queues, keyed by User.toKey(). */
        this.mutationQueues = {};
        /** Per-user document overlay caches, keyed by User.toKey(). */
        this.overlays = {};
        this.listenSequence = new ListenSequence(0);
        // Memory persistence needs no async startup, so it is started on
        // construction. (The original emitted a dead `this._started = false;`
        // store here that was immediately overwritten.)
        this._started = true;
        this.referenceDelegate = referenceDelegateFactory(this);
        this.targetCache = new MemoryTargetCache(this);
        const sizer = (doc) => this.referenceDelegate.documentSize(doc);
        this.indexManager = new MemoryIndexManager();
        this.remoteDocumentCache = newMemoryRemoteDocumentCache(sizer);
        this.serializer = new LocalSerializer(serializer);
        this.bundleCache = new MemoryBundleCache(this.serializer);
    }
    start() {
        return Promise.resolve();
    }
    shutdown() {
        // No durable state to ensure is closed on shutdown.
        this._started = false;
        return Promise.resolve();
    }
    get started() {
        return this._started;
    }
    setDatabaseDeletedListener() {
        // No op.
    }
    setNetworkEnabled() {
        // No op.
    }
    getIndexManager(user) {
        // We do not currently support indices for memory persistence, so we can
        // return the same shared instance of the memory index manager.
        return this.indexManager;
    }
    /** Lazily creates and returns the document overlay cache for `user`. */
    getDocumentOverlayCache(user) {
        let overlay = this.overlays[user.toKey()];
        if (!overlay) {
            overlay = new MemoryDocumentOverlayCache();
            this.overlays[user.toKey()] = overlay;
        }
        return overlay;
    }
    /** Lazily creates and returns the mutation queue for `user`. */
    getMutationQueue(user, indexManager) {
        let queue = this.mutationQueues[user.toKey()];
        if (!queue) {
            queue = new MemoryMutationQueue(indexManager, this.referenceDelegate);
            this.mutationQueues[user.toKey()] = queue;
        }
        return queue;
    }
    getTargetCache() {
        return this.targetCache;
    }
    getRemoteDocumentCache() {
        return this.remoteDocumentCache;
    }
    getBundleCache() {
        return this.bundleCache;
    }
    /**
     * Runs `transactionOperation` inside a new MemoryTransaction, notifying
     * the reference delegate on start/commit and raising onCommitted events
     * once the operation completes successfully.
     */
    runTransaction(action, mode, transactionOperation) {
        logDebug(LOG_TAG$d, 'Starting transaction:', action);
        const txn = new MemoryTransaction(this.listenSequence.next());
        this.referenceDelegate.onTransactionStarted();
        return transactionOperation(txn)
            .next(result => {
            return this.referenceDelegate
                .onTransactionCommitted(txn)
                .next(() => result);
        })
            .toPromise()
            .then(result => {
            txn.raiseOnCommittedEvent();
            return result;
        });
    }
    mutationQueuesContainKey(transaction, key) {
        return PersistencePromise.or(Object.values(this.mutationQueues).map(queue => () => queue.containsKey(transaction, key)));
    }
}
/**
 * Memory persistence is not actually transactional, but future implementations
 * may have transaction-scoped state.
 */
class MemoryTransaction extends PersistenceTransaction {
    constructor(currentSequenceNumber) {
        super();
        // Listen sequence number assigned to all work in this transaction.
        this.currentSequenceNumber = currentSequenceNumber;
    }
}
/**
 * Reference delegate for eager garbage collection: documents are removed from
 * the remote document cache as soon as a transaction ends with them no longer
 * referenced by any view, target, or pending mutation.
 */
class MemoryEagerDelegate {
    constructor(persistence) {
        this.persistence = persistence;
        /** Tracks all documents that are active in Query views. */
        this.localViewReferences = new ReferenceSet();
        /** The list of documents that are potentially GCed after each transaction. */
        this._orphanedDocuments = null;
    }
    static factory(persistence) {
        return new MemoryEagerDelegate(persistence);
    }
    // Accessor that fails hard when no transaction is in progress: the set
    // only exists between onTransactionStarted and onTransactionCommitted.
    get orphanedDocuments() {
        if (!this._orphanedDocuments) {
            throw fail();
        }
        else {
            return this._orphanedDocuments;
        }
    }
    addReference(txn, targetId, key) {
        this.localViewReferences.addReference(key, targetId);
        // A referenced document is no longer an orphan candidate.
        this.orphanedDocuments.delete(key.toString());
        return PersistencePromise.resolve();
    }
    removeReference(txn, targetId, key) {
        this.localViewReferences.removeReference(key, targetId);
        this.orphanedDocuments.add(key.toString());
        return PersistencePromise.resolve();
    }
    markPotentiallyOrphaned(txn, key) {
        this.orphanedDocuments.add(key.toString());
        return PersistencePromise.resolve();
    }
    // Queues every document referenced by the removed target for orphan
    // checking, then drops the target from the target cache.
    removeTarget(txn, targetData) {
        const orphaned = this.localViewReferences.removeReferencesForId(targetData.targetId);
        orphaned.forEach(key => this.orphanedDocuments.add(key.toString()));
        const cache = this.persistence.getTargetCache();
        return cache
            .getMatchingKeysForTargetId(txn, targetData.targetId)
            .next(keys => {
            keys.forEach(key => this.orphanedDocuments.add(key.toString()));
        })
            .next(() => cache.removeTargetData(txn, targetData));
    }
    onTransactionStarted() {
        this._orphanedDocuments = new Set();
    }
    onTransactionCommitted(txn) {
        // Remove newly orphaned documents.
        const cache = this.persistence.getRemoteDocumentCache();
        const changeBuffer = cache.newChangeBuffer();
        return PersistencePromise.forEach(this.orphanedDocuments, (path) => {
            const key = DocumentKey.fromPath(path);
            return this.isReferenced(txn, key).next(isReferenced => {
                if (!isReferenced) {
                    changeBuffer.removeEntry(key, SnapshotVersion.min());
                }
            });
        }).next(() => {
            this._orphanedDocuments = null;
            return changeBuffer.apply(txn);
        });
    }
    updateLimboDocument(txn, key) {
        // Limbo documents stay cached only while something references them.
        return this.isReferenced(txn, key).next(isReferenced => {
            if (isReferenced) {
                this.orphanedDocuments.delete(key.toString());
            }
            else {
                this.orphanedDocuments.add(key.toString());
            }
        });
    }
    documentSize(doc) {
        // For eager GC, we don't care about the document size, there are no size thresholds.
        return 0;
    }
    // A document is referenced if it appears in a query view, an active
    // target, or any user's pending mutation queue.
    isReferenced(txn, key) {
        return PersistencePromise.or([
            () => PersistencePromise.resolve(this.localViewReferences.containsKey(key)),
            () => this.persistence.getTargetCache().containsKey(txn, key),
            () => this.persistence.mutationQueuesContainKey(txn, key)
        ]);
    }
}
/**
 * Reference delegate for LRU garbage collection: records the sequence number
 * at which each document was last referenced so the LRU garbage collector
 * can later remove sufficiently old, unpinned documents and targets.
 */
class MemoryLruDelegate {
    constructor(persistence, lruParams) {
        this.persistence = persistence;
        // Maps each document key (identity via encoded resource path) to the
        // sequence number at which it was last marked potentially orphaned.
        this.orphanedSequenceNumbers = new ObjectMap(k => encodeResourcePath(k.path), (l, r) => l.isEqual(r));
        this.garbageCollector = newLruGarbageCollector(this, lruParams);
    }
    static factory(persistence, lruParams) {
        return new MemoryLruDelegate(persistence, lruParams);
    }
    // No-ops, present so memory persistence doesn't have to care which delegate
    // it has.
    onTransactionStarted() { }
    onTransactionCommitted(txn) {
        return PersistencePromise.resolve();
    }
    forEachTarget(txn, f) {
        return this.persistence.getTargetCache().forEachTarget(txn, f);
    }
    // Total number of sequence-numbered entries: targets plus orphaned docs.
    getSequenceNumberCount(txn) {
        const docCountPromise = this.orphanedDocumentCount(txn);
        const targetCountPromise = this.persistence
            .getTargetCache()
            .getTargetCount(txn);
        return targetCountPromise.next(targetCount => docCountPromise.next(docCount => targetCount + docCount));
    }
    orphanedDocumentCount(txn) {
        let orphanedCount = 0;
        return this.forEachOrphanedDocumentSequenceNumber(txn, _ => {
            orphanedCount++;
        }).next(() => orphanedCount);
    }
    forEachOrphanedDocumentSequenceNumber(txn, f) {
        return PersistencePromise.forEach(this.orphanedSequenceNumbers, (key, sequenceNumber) => {
            // Pass in the exact sequence number as the upper bound so we know it won't be pinned by
            // being too recent.
            return this.isPinned(txn, key, sequenceNumber).next(isPinned => {
                if (!isPinned) {
                    return f(sequenceNumber);
                }
                else {
                    return PersistencePromise.resolve();
                }
            });
        });
    }
    removeTargets(txn, upperBound, activeTargetIds) {
        return this.persistence
            .getTargetCache()
            .removeTargets(txn, upperBound, activeTargetIds);
    }
    // Removes every unpinned document last referenced at or below
    // `upperBound`; resolves with the number of documents removed.
    removeOrphanedDocuments(txn, upperBound) {
        let count = 0;
        const cache = this.persistence.getRemoteDocumentCache();
        const changeBuffer = cache.newChangeBuffer();
        const p = cache.forEachDocumentKey(txn, key => {
            return this.isPinned(txn, key, upperBound).next(isPinned => {
                if (!isPinned) {
                    count++;
                    changeBuffer.removeEntry(key, SnapshotVersion.min());
                }
            });
        });
        return p.next(() => changeBuffer.apply(txn)).next(() => count);
    }
    markPotentiallyOrphaned(txn, key) {
        this.orphanedSequenceNumbers.set(key, txn.currentSequenceNumber);
        return PersistencePromise.resolve();
    }
    // For LRU, "removing" a target only bumps its sequence number; the target
    // is actually dropped later by the garbage collector.
    removeTarget(txn, targetData) {
        const updated = targetData.withSequenceNumber(txn.currentSequenceNumber);
        return this.persistence.getTargetCache().updateTargetData(txn, updated);
    }
    addReference(txn, targetId, key) {
        this.orphanedSequenceNumbers.set(key, txn.currentSequenceNumber);
        return PersistencePromise.resolve();
    }
    removeReference(txn, targetId, key) {
        this.orphanedSequenceNumbers.set(key, txn.currentSequenceNumber);
        return PersistencePromise.resolve();
    }
    updateLimboDocument(txn, key) {
        this.orphanedSequenceNumbers.set(key, txn.currentSequenceNumber);
        return PersistencePromise.resolve();
    }
    // Approximates document size as key length plus estimated value size.
    documentSize(document) {
        let documentSize = document.key.toString().length;
        if (document.isFoundDocument()) {
            documentSize += estimateByteSize(document.data.value);
        }
        return documentSize;
    }
    // A document is pinned (not collectible) if a mutation or target still
    // references it, or it was orphaned more recently than `upperBound`.
    isPinned(txn, key, upperBound) {
        return PersistencePromise.or([
            () => this.persistence.mutationQueuesContainKey(txn, key),
            () => this.persistence.getTargetCache().containsKey(txn, key),
            () => {
                const orphanedAt = this.orphanedSequenceNumbers.get(key);
                return PersistencePromise.resolve(orphanedAt !== undefined && orphanedAt > upperBound);
            }
        ]);
    }
    getCacheSize(txn) {
        return this.persistence.getRemoteDocumentCache().getSize(txn);
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Performs database creation and schema upgrades. */
class SchemaConverter {
    constructor(serializer) {
        this.serializer = serializer;
    }
    /**
     * Performs database creation and schema upgrades.
     *
     * Note that in production, this method is only ever used to upgrade the schema
     * to SCHEMA_VERSION. Different values of toVersion are only used for testing
     * and local feature development.
     */
    createOrUpgrade(db, txn, fromVersion, toVersion) {
        const simpleDbTransaction = new SimpleDbTransaction('createOrUpgrade', txn);
        if (fromVersion < 1 && toVersion >= 1) {
            createPrimaryClientStore(db);
            createMutationQueue(db);
            createQueryCache(db);
            createLegacyRemoteDocumentCache(db);
        }
        // Migration 2 to populate the targetGlobal object no longer needed since
        // migration 3 unconditionally clears it.
        let p = PersistencePromise.resolve();
        if (fromVersion < 3 && toVersion >= 3) {
            // Brand new clients don't need to drop and recreate--only clients that
            // potentially have corrupt data.
            if (fromVersion !== 0) {
                dropQueryCache(db);
                createQueryCache(db);
            }
            p = p.next(() => writeEmptyTargetGlobalEntry(simpleDbTransaction));
        }
        if (fromVersion < 4 && toVersion >= 4) {
            if (fromVersion !== 0) {
                // Schema version 3 uses auto-generated keys to generate globally unique
                // mutation batch IDs (this was previously ensured internally by the
                // client). To migrate to the new schema, we have to read all mutations
                // and write them back out. We preserve the existing batch IDs to guarantee
                // consistency with other object stores. Any further mutation batch IDs will
                // be auto-generated.
                p = p.next(() => upgradeMutationBatchSchemaAndMigrateData(db, simpleDbTransaction));
            }
            p = p.next(() => {
                createClientMetadataStore(db);
            });
        }
        if (fromVersion < 5 && toVersion >= 5) {
            p = p.next(() => this.removeAcknowledgedMutations(simpleDbTransaction));
        }
        if (fromVersion < 6 && toVersion >= 6) {
            p = p.next(() => {
                createDocumentGlobalStore(db);
                return this.addDocumentGlobal(simpleDbTransaction);
            });
        }
        if (fromVersion < 7 && toVersion >= 7) {
            p = p.next(() => this.ensureSequenceNumbers(simpleDbTransaction));
        }
        if (fromVersion < 8 && toVersion >= 8) {
            p = p.next(() => this.createCollectionParentIndex(db, simpleDbTransaction));
        }
        if (fromVersion < 9 && toVersion >= 9) {
            p = p.next(() => {
                // Multi-Tab used to manage its own changelog, but this has been moved
                // to the DbRemoteDocument object store itself. Since the previous change
                // log only contained transient data, we can drop its object store.
                dropRemoteDocumentChangesStore(db);
                // Note: Schema version 9 used to create a read time index for the
                // RemoteDocumentCache. This is now done with schema version 13.
            });
        }
        if (fromVersion < 10 && toVersion >= 10) {
            p = p.next(() => this.rewriteCanonicalIds(simpleDbTransaction));
        }
        if (fromVersion < 11 && toVersion >= 11) {
            p = p.next(() => {
                createBundlesStore(db);
                createNamedQueriesStore(db);
            });
        }
        if (fromVersion < 12 && toVersion >= 12) {
            p = p.next(() => {
                createDocumentOverlayStore(db);
            });
        }
        if (fromVersion < 13 && toVersion >= 13) {
            p = p
                .next(() => createRemoteDocumentCache(db))
                .next(() => this.rewriteRemoteDocumentCache(db, simpleDbTransaction))
                .next(() => db.deleteObjectStore(DbRemoteDocumentStore$1));
        }
        if (fromVersion < 14 && toVersion >= 14) {
            p = p.next(() => this.runOverlayMigration(db, simpleDbTransaction));
        }
        if (fromVersion < 15 && toVersion >= 15) {
            p = p.next(() => createFieldIndex(db));
        }
        return p;
    }
    /**
     * Sums the size of every cached remote document and writes the total into
     * the document-global metadata row (schema version 6).
     */
    addDocumentGlobal(txn) {
        let byteSize = 0;
        return txn
            .store(DbRemoteDocumentStore$1)
            .iterate((_, doc) => {
            byteSize += dbDocumentSize(doc);
        })
            .next(() => {
            const metadata = { byteSize };
            return txn
                .store(DbRemoteDocumentGlobalStore)
                .put(DbRemoteDocumentGlobalKey, metadata);
        });
    }
    /**
     * Deletes, for every mutation queue, all batches that were already
     * acknowledged by the backend (schema version 5).
     */
    removeAcknowledgedMutations(txn) {
        const queuesStore = txn.store(DbMutationQueueStore);
        const mutationsStore = txn.store(DbMutationBatchStore);
        return queuesStore.loadAll().next(queues => {
            return PersistencePromise.forEach(queues, (queue) => {
                // All batches in [BATCHID_UNKNOWN, lastAcknowledgedBatchId] for
                // this user are acknowledged and can be removed.
                const range = IDBKeyRange.bound([queue.userId, BATCHID_UNKNOWN], [queue.userId, queue.lastAcknowledgedBatchId]);
                return mutationsStore
                    .loadAll(DbMutationBatchUserMutationsIndex, range)
                    .next(dbBatches => {
                    return PersistencePromise.forEach(dbBatches, (dbBatch) => {
                        hardAssert(dbBatch.userId === queue.userId);
                        const batch = fromDbMutationBatch(this.serializer, dbBatch);
                        return removeMutationBatch(txn, queue.userId, batch).next(() => { });
                    });
                });
            });
        });
    }
    /**
     * Ensures that every document in the remote document cache has a corresponding sentinel row
     * with a sequence number. Missing rows are given the most recently used sequence number.
     */
    ensureSequenceNumbers(txn) {
        const documentTargetStore = txn.store(DbTargetDocumentStore);
        const documentsStore = txn.store(DbRemoteDocumentStore$1);
        const globalTargetStore = txn.store(DbTargetGlobalStore);
        return globalTargetStore.get(DbTargetGlobalKey).next(metadata => {
            const writeSentinelKey = (path) => {
                return documentTargetStore.put({
                    targetId: 0,
                    path: encodeResourcePath(path),
                    sequenceNumber: metadata.highestListenSequenceNumber
                });
            };
            const promises = [];
            return documentsStore
                .iterate((key, doc) => {
                const path = new ResourcePath(key);
                const docSentinelKey = sentinelKey(path);
                promises.push(documentTargetStore.get(docSentinelKey).next(maybeSentinel => {
                    if (!maybeSentinel) {
                        return writeSentinelKey(path);
                    }
                    else {
                        return PersistencePromise.resolve();
                    }
                }));
            })
                .next(() => PersistencePromise.waitFor(promises));
        });
    }
    /**
     * Creates the collection-parent index and seeds it from both the existing
     * remote documents and the pending document mutations (schema version 8).
     */
    createCollectionParentIndex(db, txn) {
        // Create the index.
        db.createObjectStore(DbCollectionParentStore, {
            keyPath: DbCollectionParentKeyPath
        });
        const collectionParentsStore = txn.store(DbCollectionParentStore);
        // Helper to add an index entry iff we haven't already written it.
        const cache = new MemoryCollectionParentIndex();
        const addEntry = (collectionPath) => {
            if (cache.add(collectionPath)) {
                const collectionId = collectionPath.lastSegment();
                const parentPath = collectionPath.popLast();
                return collectionParentsStore.put({
                    collectionId,
                    parent: encodeResourcePath(parentPath)
                });
            }
        };
        // Index existing remote documents.
        return txn
            .store(DbRemoteDocumentStore$1)
            .iterate({ keysOnly: true }, (pathSegments, _) => {
            const path = new ResourcePath(pathSegments);
            return addEntry(path.popLast());
        })
            .next(() => {
            // Index existing mutations.
            return txn
                .store(DbDocumentMutationStore)
                .iterate({ keysOnly: true }, ([userID, encodedPath, batchId], _) => {
                const path = decodeResourcePath(encodedPath);
                return addEntry(path.popLast());
            });
        });
    }
    /**
     * Rewrites every stored target so its canonical ID reflects the current
     * canonicalization scheme (schema version 10).
     */
    rewriteCanonicalIds(txn) {
        const targetStore = txn.store(DbTargetStore);
        return targetStore.iterate((key, originalDbTarget) => {
            const originalTargetData = fromDbTarget(originalDbTarget);
            const updatedDbTarget = toDbTarget(this.serializer, originalTargetData);
            return targetStore.put(updatedDbTarget);
        });
    }
    /**
     * Copies every document from the legacy remote document store into the new
     * store keyed by (prefixPath, collectionGroup, documentId) (schema
     * version 13).
     */
    rewriteRemoteDocumentCache(db, transaction) {
        const legacyRemoteDocumentStore = transaction.store(DbRemoteDocumentStore$1);
        const writes = [];
        return legacyRemoteDocumentStore
            .iterate((_, legacyDocument) => {
            const remoteDocumentStore = transaction.store(DbRemoteDocumentStore);
            const path = extractKey(legacyDocument).path.toArray();
            const dbRemoteDocument = {
                prefixPath: path.slice(0, path.length - 2),
                collectionGroup: path[path.length - 2],
                documentId: path[path.length - 1],
                readTime: legacyDocument.readTime || [0, 0],
                unknownDocument: legacyDocument.unknownDocument,
                noDocument: legacyDocument.noDocument,
                document: legacyDocument.document,
                hasCommittedMutations: !!legacyDocument.hasCommittedMutations
            };
            writes.push(remoteDocumentStore.put(dbRemoteDocument));
        })
            .next(() => PersistencePromise.waitFor(writes));
    }
    /**
     * Recalculates and persists document overlays for every user that has
     * pending mutation batches (schema version 14).
     */
    runOverlayMigration(db, transaction) {
        const mutationsStore = transaction.store(DbMutationBatchStore);
        const remoteDocumentCache = newIndexedDbRemoteDocumentCache(this.serializer);
        const memoryPersistence = new MemoryPersistence(MemoryEagerDelegate.factory, this.serializer.remoteSerializer);
        return mutationsStore.loadAll().next(dbBatches => {
            // Group all mutated document keys by user.
            const userToDocumentSet = new Map();
            dbBatches.forEach(dbBatch => {
                var _a;
                let documentSet = (_a = userToDocumentSet.get(dbBatch.userId)) !== null && _a !== void 0 ? _a : documentKeySet();
                const batch = fromDbMutationBatch(this.serializer, dbBatch);
                batch.keys().forEach(key => (documentSet = documentSet.add(key)));
                userToDocumentSet.set(dbBatch.userId, documentSet);
            });
            return PersistencePromise.forEach(userToDocumentSet, (allDocumentKeysForUser, userId) => {
                const user = new User(userId);
                const documentOverlayCache = IndexedDbDocumentOverlayCache.forUser(this.serializer, user);
                // NOTE: The index manager and the reference delegate are
                // irrelevant for the purpose of recalculating and saving
                // overlays. We can therefore simply use the memory
                // implementation.
                const indexManager = memoryPersistence.getIndexManager(user);
                const mutationQueue = IndexedDbMutationQueue.forUser(user, this.serializer, indexManager, memoryPersistence.referenceDelegate);
                const localDocumentsView = new LocalDocumentsView(remoteDocumentCache, mutationQueue, documentOverlayCache, indexManager);
                return localDocumentsView
                    .recalculateAndSaveOverlaysForDocumentKeys(new IndexedDbTransaction(transaction, ListenSequence.INVALID), allDocumentKeysForUser)
                    .next();
            });
        });
    }
}
/**
 * Builds the sentinel row key `[0, encodedPath]` for the given resource
 * path (target ID 0 marks sentinel rows).
 */
function sentinelKey(path) {
    const encodedPath = encodeResourcePath(path);
    return [0, encodedPath];
}
/** Creates the object store holding the single primary-client (owner) row. */
function createPrimaryClientStore(db) {
    db.createObjectStore(DbPrimaryClientStore);
}
/**
 * Creates the three object stores backing the mutation queue: per-user queue
 * metadata, the auto-incrementing batch store (with its per-user index), and
 * the document-mutation index store.
 */
function createMutationQueue(db) {
    // Per-user queue metadata.
    db.createObjectStore(DbMutationQueueStore, {
        keyPath: DbMutationQueueKeyPath
    });
    // Mutation batches, keyed by an auto-generated batch ID.
    const batchStore = db.createObjectStore(DbMutationBatchStore, {
        keyPath: DbMutationBatchKeyPath,
        autoIncrement: true
    });
    batchStore.createIndex(DbMutationBatchUserMutationsIndex, DbMutationBatchUserMutationsKeyPath, { unique: true });
    // Index of document keys to the mutations affecting them.
    db.createObjectStore(DbDocumentMutationStore);
}
/**
 * Upgrade function to migrate the 'mutations' store from V1 to V3. Loads
 * and rewrites all data.
 */
function upgradeMutationBatchSchemaAndMigrateData(db, txn) {
    // Snapshot every existing batch before the store is dropped.
    const legacyStore = txn.store(DbMutationBatchStore);
    return legacyStore.loadAll().next(existingMutations => {
        // Recreate the store with the V3 key configuration and index.
        db.deleteObjectStore(DbMutationBatchStore);
        const recreatedStore = db.createObjectStore(DbMutationBatchStore, {
            keyPath: DbMutationBatchKeyPath,
            autoIncrement: true
        });
        recreatedStore.createIndex(DbMutationBatchUserMutationsIndex, DbMutationBatchUserMutationsKeyPath, { unique: true });
        // Re-insert the snapshotted batches through a fresh store handle.
        const v3Store = txn.store(DbMutationBatchStore);
        const pendingWrites = existingMutations.map(mutation => v3Store.put(mutation));
        return PersistencePromise.waitFor(pendingWrites);
    });
}
/** Creates the legacy (pre-rewrite) remote document object store. */
function createLegacyRemoteDocumentCache(db) {
    db.createObjectStore(DbRemoteDocumentStore$1);
}
/**
 * Creates the current remote document store along with its document-key and
 * collection-group indexes.
 */
function createRemoteDocumentCache(db) {
    const store = db.createObjectStore(DbRemoteDocumentStore, {
        keyPath: DbRemoteDocumentKeyPath
    });
    store.createIndex(DbRemoteDocumentDocumentKeyIndex, DbRemoteDocumentDocumentKeyIndexPath);
    store.createIndex(DbRemoteDocumentCollectionGroupIndex, DbRemoteDocumentCollectionGroupIndexPath);
}
/** Creates the store holding global remote-document metadata (e.g. counts). */
function createDocumentGlobalStore(db) {
    db.createObjectStore(DbRemoteDocumentGlobalStore);
}
/**
 * Creates the query cache: the target-document mapping store, the target
 * store (with its canonical-ID index), and the target-global singleton store.
 */
function createQueryCache(db) {
    const targetDocStore = db.createObjectStore(DbTargetDocumentStore, {
        keyPath: DbTargetDocumentKeyPath
    });
    targetDocStore.createIndex(DbTargetDocumentDocumentTargetsIndex, DbTargetDocumentDocumentTargetsKeyPath, { unique: true });
    const targets = db.createObjectStore(DbTargetStore, {
        keyPath: DbTargetKeyPath
    });
    // NOTE: This is unique only because the TargetId is the suffix.
    targets.createIndex(DbTargetQueryTargetsIndexName, DbTargetQueryTargetsKeyPath, { unique: true });
    db.createObjectStore(DbTargetGlobalStore);
}
/** Deletes every object store that belongs to the query cache. */
function dropQueryCache(db) {
    for (const storeName of [DbTargetDocumentStore, DbTargetStore, DbTargetGlobalStore]) {
        db.deleteObjectStore(storeName);
    }
}
/**
 * Deletes the obsolete 'remoteDocumentChanges' store if this database was
 * created by an SDK version that still had it.
 */
function dropRemoteDocumentChangesStore(db) {
    if (!db.objectStoreNames.contains('remoteDocumentChanges')) {
        return;
    }
    db.deleteObjectStore('remoteDocumentChanges');
}
/**
 * Creates the target global singleton row.
 *
 * @param txn - The version upgrade transaction for indexeddb
 */
function writeEmptyTargetGlobalEntry(txn) {
    // All counters start at zero and the remote snapshot version starts at
    // the minimum representable timestamp.
    const emptyMetadata = {
        highestTargetId: 0,
        highestListenSequenceNumber: 0,
        lastRemoteSnapshotVersion: SnapshotVersion.min().toTimestamp(),
        targetCount: 0
    };
    return txn.store(DbTargetGlobalStore).put(DbTargetGlobalKey, emptyMetadata);
}
/** Creates the store tracking per-tab client metadata (for multi-tab). */
function createClientMetadataStore(db) {
    db.createObjectStore(DbClientMetadataStore, {
        keyPath: DbClientMetadataKeyPath
    });
}
/** Creates the store holding loaded bundle metadata. */
function createBundlesStore(db) {
    db.createObjectStore(DbBundleStore, {
        keyPath: DbBundleKeyPath
    });
}
/** Creates the store holding named queries read from bundles. */
function createNamedQueriesStore(db) {
    db.createObjectStore(DbNamedQueryStore, {
        keyPath: DbNamedQueryKeyPath
    });
}
/**
 * Creates the three stores backing client-side field indexes: index
 * configuration, per-user index state, and the index entries themselves.
 */
function createFieldIndex(db) {
    // Index definitions, keyed by an auto-generated index ID and searchable
    // by collection group.
    const configStore = db.createObjectStore(DbIndexConfigurationStore, {
        keyPath: DbIndexConfigurationKeyPath,
        autoIncrement: true
    });
    configStore.createIndex(DbIndexConfigurationCollectionGroupIndex, DbIndexConfigurationCollectionGroupIndexPath, { unique: false });
    // Per-user backfill state for each index.
    const stateStore = db.createObjectStore(DbIndexStateStore, {
        keyPath: DbIndexStateKeyPath
    });
    stateStore.createIndex(DbIndexStateSequenceNumberIndex, DbIndexStateSequenceNumberIndexPath, { unique: false });
    // The materialized index entries.
    const entryStore = db.createObjectStore(DbIndexEntryStore, {
        keyPath: DbIndexEntryKeyPath
    });
    entryStore.createIndex(DbIndexEntryDocumentKeyIndex, DbIndexEntryDocumentKeyIndexPath, { unique: false });
}
/**
 * Creates the document overlay store with its collection-path and
 * collection-group indexes.
 */
function createDocumentOverlayStore(db) {
    const overlayStore = db.createObjectStore(DbDocumentOverlayStore, {
        keyPath: DbDocumentOverlayKeyPath
    });
    overlayStore.createIndex(DbDocumentOverlayCollectionPathOverlayIndex, DbDocumentOverlayCollectionPathOverlayIndexPath, { unique: false });
    overlayStore.createIndex(DbDocumentOverlayCollectionGroupOverlayIndex, DbDocumentOverlayCollectionGroupOverlayIndexPath, { unique: false });
}
/**
 * Derives the DocumentKey for a legacy remote-document row from whichever of
 * its three mutually-exclusive payload fields is populated.
 */
function extractKey(remoteDoc) {
    if (remoteDoc.document) {
        // Drop the 5 leading resource-name segments to obtain the document
        // path (assumes the usual `projects/../databases/../documents` prefix
        // — TODO confirm against the serializer).
        const resourceName = ResourcePath.fromString(remoteDoc.document.name);
        return new DocumentKey(resourceName.popFirst(5));
    }
    if (remoteDoc.noDocument) {
        return DocumentKey.fromSegments(remoteDoc.noDocument.path);
    }
    if (remoteDoc.unknownDocument) {
        return DocumentKey.fromSegments(remoteDoc.unknownDocument.path);
    }
    // A legacy row must carry exactly one of the three payloads.
    return fail();
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Log tag used by all IndexedDbPersistence debug/error output. */
const LOG_TAG$c = 'IndexedDbPersistence';
/**
 * Oldest acceptable age in milliseconds for client metadata before the client
 * is considered inactive and its associated data is garbage collected.
 */
const MAX_CLIENT_AGE_MS = 30 * 60 * 1000; // 30 minutes
/**
 * Oldest acceptable metadata age for clients that may participate in the
 * primary lease election. Clients that have not updated their client metadata
 * within 5 seconds are not eligible to receive a primary lease.
 */
const MAX_PRIMARY_ELIGIBLE_AGE_MS = 5000;
/**
 * The interval at which clients will update their metadata, including
 * refreshing their primary lease if held or potentially trying to acquire it if
 * not held.
 *
 * Primary clients may opportunistically refresh their metadata earlier
 * if they're already performing an IndexedDB operation.
 */
const CLIENT_METADATA_REFRESH_INTERVAL_MS = 4000;
/** User-facing error when the primary lease is required but not available. */
const PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG = 'Failed to obtain exclusive access to the persistence layer. To allow ' +
    'shared access, multi-tab synchronization has to be enabled in all tabs. ' +
    'If you are using `experimentalForceOwningTab:true`, make sure that only ' +
    'one tab has persistence enabled at any given time.';
/** User-facing error when IndexedDB is missing or known-broken. */
const UNSUPPORTED_PLATFORM_ERROR_MSG = 'This platform is either missing IndexedDB or is known to have ' +
    'an incomplete implementation. Offline persistence has been disabled.';
// The format of the LocalStorage key that stores zombied client is:
//     firestore_zombie_<persistence_prefix>_<instance_key>
const ZOMBIED_CLIENTS_KEY_PREFIX = 'firestore_zombie';
/**
 * The name of the main (and currently only) IndexedDB database. This name is
 * appended to the prefix provided to the IndexedDbPersistence constructor.
 */
const MAIN_DATABASE = 'main';
/**
* An IndexedDB-backed instance of Persistence. Data is stored persistently
* across sessions.
*
* On Web only, the Firestore SDKs support shared access to its persistence
* layer. This allows multiple browser tabs to read and write to IndexedDb and
* to synchronize state even without network connectivity. Shared access is
* currently optional and not enabled unless all clients invoke
* `enablePersistence()` with `{synchronizeTabs:true}`.
*
* In multi-tab mode, if multiple clients are active at the same time, the SDK
* will designate one client as the primary client. An effort is made to pick
* a visible, network-connected and active client, and this client is
* responsible for letting other clients know about its presence. The primary
* client writes a unique client-generated identifier (the client ID) to
* IndexedDbs owner store every 4 seconds. If the primary client fails to
* update this entry, another client can acquire the lease and take over as
* primary.
*
* Some persistence operations in the SDK are designated as primary-client only
* operations. This includes the acknowledgment of mutations and all updates of
* remote documents. The effects of these operations are written to persistence
* and then broadcast to other tabs via LocalStorage (see
* `WebStorageSharedClientState`), which then refresh their state from
* persistence.
*
* Similarly, the primary client listens to notifications sent by secondary
* clients to discover persistence changes written by secondary clients, such as
* the addition of new mutations and query targets.
*
* If multi-tab is not enabled and another tab already obtained the primary
* lease, IndexedDbPersistence enters a failed state and all subsequent
* operations will automatically fail.
*
* Additionally, there is an optimization so that when a tab is closed, the
* primary lease is released immediately (this is especially important to make
* sure that a refreshed tab is able to immediately re-acquire the primary
* lease). Unfortunately, IndexedDB cannot be reliably used in window.unload
* since it is an asynchronous API. So in addition to attempting to give up the
* lease, the leaseholder writes its client ID to a "zombiedClient" entry in
* LocalStorage which acts as an indicator that another tab should go ahead and
* take the primary lease immediately regardless of the current lease timestamp.
*
* TODO(b/114226234): Remove `synchronizeTabs` section when multi-tab is no
* longer optional.
*/
class IndexedDbPersistence {
    constructor(
    /**
     * Whether to synchronize the in-memory state of multiple tabs and share
     * access to local persistence.
     */
    allowTabSynchronization, persistenceKey, clientId, lruParams, queue, window, document, serializer, sequenceNumberSyncer,
    /**
     * If set to true, forcefully obtains database access. Existing tabs will
     * no longer be able to access IndexedDB.
     */
    forceOwningTab, schemaVersion = SCHEMA_VERSION) {
        this.allowTabSynchronization = allowTabSynchronization;
        this.persistenceKey = persistenceKey;
        this.clientId = clientId;
        this.queue = queue;
        this.window = window;
        this.document = document;
        this.sequenceNumberSyncer = sequenceNumberSyncer;
        this.forceOwningTab = forceOwningTab;
        this.schemaVersion = schemaVersion;
        this.listenSequence = null;
        this._started = false;
        this.isPrimary = false;
        this.networkEnabled = true;
        /** Our window.unload handler, if registered. */
        this.windowUnloadHandler = null;
        this.inForeground = false;
        /** Our 'visibilitychange' listener if registered. */
        this.documentVisibilityHandler = null;
        /** The client metadata refresh task. */
        this.clientMetadataRefresher = null;
        /** The last time we garbage collected the client metadata object store. */
        this.lastGarbageCollectionTime = Number.NEGATIVE_INFINITY;
        /** A listener to notify on primary state changes. */
        this.primaryStateListener = _ => Promise.resolve();
        if (!IndexedDbPersistence.isAvailable()) {
            throw new FirestoreError(Code.UNIMPLEMENTED, UNSUPPORTED_PLATFORM_ERROR_MSG);
        }
        this.referenceDelegate = new IndexedDbLruDelegateImpl(this, lruParams);
        this.dbName = persistenceKey + MAIN_DATABASE;
        this.serializer = new LocalSerializer(serializer);
        this.simpleDb = new SimpleDb(this.dbName, this.schemaVersion, new SchemaConverter(this.serializer));
        this.targetCache = new IndexedDbTargetCache(this.referenceDelegate, this.serializer);
        this.remoteDocumentCache = newIndexedDbRemoteDocumentCache(this.serializer);
        this.bundleCache = new IndexedDbBundleCache();
        if (this.window && this.window.localStorage) {
            this.webStorage = this.window.localStorage;
        }
        else {
            this.webStorage = null;
            if (forceOwningTab === false) {
                logError(LOG_TAG$c, 'LocalStorage is unavailable. As a result, persistence may not work ' +
                    'reliably. In particular enablePersistence() could fail immediately ' +
                    'after refreshing the page.');
            }
        }
    }
    /**
     * Attempt to start IndexedDb persistence.
     *
     * @returns Whether persistence was enabled.
     */
    start() {
        // NOTE: This is expected to fail sometimes (in the case of another tab
        // already having the persistence lock), so it's the first thing we should
        // do.
        return this.updateClientMetadataAndTryBecomePrimary()
            .then(() => {
            if (!this.isPrimary && !this.allowTabSynchronization) {
                // Fail `start()` if `synchronizeTabs` is disabled and we cannot
                // obtain the primary lease.
                throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG);
            }
            this.attachVisibilityHandler();
            this.attachWindowUnloadHook();
            this.scheduleClientMetadataAndPrimaryLeaseRefreshes();
            return this.runTransaction('getHighestListenSequenceNumber', 'readonly', txn => this.targetCache.getHighestSequenceNumber(txn));
        })
            .then(highestListenSequenceNumber => {
            this.listenSequence = new ListenSequence(highestListenSequenceNumber, this.sequenceNumberSyncer);
        })
            .then(() => {
            this._started = true;
        })
            .catch(reason => {
            // Close the database on any startup failure so a retry can re-open it.
            this.simpleDb && this.simpleDb.close();
            return Promise.reject(reason);
        });
    }
    /**
     * Registers a listener that gets called when the primary state of the
     * instance changes. Upon registering, this listener is invoked immediately
     * with the current primary state.
     *
     * PORTING NOTE: This is only used for Web multi-tab.
     */
    setPrimaryStateListener(primaryStateListener) {
        this.primaryStateListener = async (primaryState) => {
            if (this.started) {
                return primaryStateListener(primaryState);
            }
        };
        return primaryStateListener(this.isPrimary);
    }
    /**
     * Registers a listener that gets called when the database receives a
     * version change event indicating that it has deleted.
     *
     * PORTING NOTE: This is only used for Web multi-tab.
     */
    setDatabaseDeletedListener(databaseDeletedListener) {
        this.simpleDb.setVersionChangeListener(async (event) => {
            // Check if an attempt is made to delete IndexedDB.
            if (event.newVersion === null) {
                await databaseDeletedListener();
            }
        });
    }
    /**
     * Adjusts the current network state in the client's metadata, potentially
     * affecting the primary lease.
     *
     * PORTING NOTE: This is only used for Web multi-tab.
     */
    setNetworkEnabled(networkEnabled) {
        if (this.networkEnabled !== networkEnabled) {
            this.networkEnabled = networkEnabled;
            // Schedule a primary lease refresh for immediate execution. The eventual
            // lease update will be propagated via `primaryStateListener`.
            this.queue.enqueueAndForget(async () => {
                if (this.started) {
                    await this.updateClientMetadataAndTryBecomePrimary();
                }
            });
        }
    }
    /**
     * Updates the client metadata in IndexedDb and attempts to either obtain or
     * extend the primary lease for the local client. Asynchronously notifies the
     * primary state listener if the client either newly obtained or released its
     * primary lease.
     */
    updateClientMetadataAndTryBecomePrimary() {
        return this.runTransaction('updateClientMetadataAndTryBecomePrimary', 'readwrite', txn => {
            const metadataStore = clientMetadataStore(txn);
            return metadataStore
                .put({
                clientId: this.clientId,
                updateTimeMs: Date.now(),
                networkEnabled: this.networkEnabled,
                inForeground: this.inForeground
            })
                .next(() => {
                if (this.isPrimary) {
                    return this.verifyPrimaryLease(txn).next(success => {
                        if (!success) {
                            this.isPrimary = false;
                            this.queue.enqueueRetryable(() => this.primaryStateListener(false));
                        }
                    });
                }
            })
                .next(() => this.canActAsPrimary(txn))
                .next(canActAsPrimary => {
                if (this.isPrimary && !canActAsPrimary) {
                    return this.releasePrimaryLeaseIfHeld(txn).next(() => false);
                }
                else if (canActAsPrimary) {
                    return this.acquireOrExtendPrimaryLease(txn).next(() => true);
                }
                else {
                    return /* canActAsPrimary= */ false;
                }
            });
        })
            .catch(e => {
            if (isIndexedDbTransactionError(e)) {
                logDebug(LOG_TAG$c, 'Failed to extend owner lease: ', e);
                // Proceed with the existing state. Any subsequent access to
                // IndexedDB will verify the lease.
                return this.isPrimary;
            }
            if (!this.allowTabSynchronization) {
                throw e;
            }
            logDebug(LOG_TAG$c, 'Releasing owner lease after error during lease refresh', e);
            return /* isPrimary= */ false;
        })
            .then(isPrimary => {
            if (this.isPrimary !== isPrimary) {
                this.queue.enqueueRetryable(() => this.primaryStateListener(isPrimary));
            }
            this.isPrimary = isPrimary;
        });
    }
    /** Returns whether the primary lease row belongs to the local client. */
    verifyPrimaryLease(txn) {
        const store = primaryClientStore(txn);
        return store.get(DbPrimaryClientKey).next(primaryClient => {
            return PersistencePromise.resolve(this.isLocalClient(primaryClient));
        });
    }
    /** Deletes the local client's row from the client metadata store. */
    removeClientMetadata(txn) {
        const metadataStore = clientMetadataStore(txn);
        return metadataStore.delete(this.clientId);
    }
    /**
     * If the garbage collection threshold has passed, prunes the
     * RemoteDocumentChanges and the ClientMetadata store based on the last update
     * time of all clients.
     */
    async maybeGarbageCollectMultiClientState() {
        if (this.isPrimary &&
            !this.isWithinAge(this.lastGarbageCollectionTime, MAX_CLIENT_AGE_MS)) {
            this.lastGarbageCollectionTime = Date.now();
            const inactiveClients = await this.runTransaction('maybeGarbageCollectMultiClientState', 'readwrite-primary', txn => {
                const metadataStore = getStore(txn, DbClientMetadataStore);
                return metadataStore.loadAll().next(existingClients => {
                    const active = this.filterActiveClients(existingClients, MAX_CLIENT_AGE_MS);
                    const inactive = existingClients.filter(client => active.indexOf(client) === -1);
                    // Delete metadata for clients that are no longer considered active.
                    return PersistencePromise.forEach(inactive, (inactiveClient) => metadataStore.delete(inactiveClient.clientId)).next(() => inactive);
                });
            }).catch(() => {
                // Ignore primary lease violations or any other type of error. The next
                // primary will run `maybeGarbageCollectMultiClientState()` again.
                // We don't use `ignoreIfPrimaryLeaseLoss()` since we don't want to depend
                // on LocalStore.
                return [];
            });
            // Delete potential leftover entries that may continue to mark the
            // inactive clients as zombied in LocalStorage.
            // Ideally we'd delete the IndexedDb and LocalStorage zombie entries for
            // the client atomically, but we can't. So we opt to delete the IndexedDb
            // entries first to avoid potentially reviving a zombied client.
            if (this.webStorage) {
                for (const inactiveClient of inactiveClients) {
                    this.webStorage.removeItem(this.zombiedClientLocalStorageKey(inactiveClient.clientId));
                }
            }
        }
    }
    /**
     * Schedules a recurring timer to update the client metadata and to either
     * extend or acquire the primary lease if the client is eligible.
     */
    scheduleClientMetadataAndPrimaryLeaseRefreshes() {
        this.clientMetadataRefresher = this.queue.enqueueAfterDelay("client_metadata_refresh" /* TimerId.ClientMetadataRefresh */, CLIENT_METADATA_REFRESH_INTERVAL_MS, () => {
            return this.updateClientMetadataAndTryBecomePrimary()
                .then(() => this.maybeGarbageCollectMultiClientState())
                .then(() => this.scheduleClientMetadataAndPrimaryLeaseRefreshes());
        });
    }
    /** Checks whether `client` is the local client. */
    isLocalClient(client) {
        return client ? client.ownerId === this.clientId : false;
    }
    /**
     * Evaluate the state of all active clients and determine whether the local
     * client is or can act as the holder of the primary lease. Returns whether
     * the client is eligible for the lease, but does not actually acquire it.
     * May return 'false' even if there is no active leaseholder and another
     * (foreground) client should become leaseholder instead.
     */
    canActAsPrimary(txn) {
        if (this.forceOwningTab) {
            return PersistencePromise.resolve(true);
        }
        const store = primaryClientStore(txn);
        return store
            .get(DbPrimaryClientKey)
            .next(currentPrimary => {
            const currentLeaseIsValid = currentPrimary !== null &&
                this.isWithinAge(currentPrimary.leaseTimestampMs, MAX_PRIMARY_ELIGIBLE_AGE_MS) &&
                !this.isClientZombied(currentPrimary.ownerId);
            // A client is eligible for the primary lease if:
            // - its network is enabled and the client's tab is in the foreground.
            // - its network is enabled and no other client's tab is in the
            //   foreground.
            // - every clients network is disabled and the client's tab is in the
            //   foreground.
            // - every clients network is disabled and no other client's tab is in
            //   the foreground.
            // - the `forceOwningTab` setting was passed in.
            if (currentLeaseIsValid) {
                if (this.isLocalClient(currentPrimary) && this.networkEnabled) {
                    return true;
                }
                if (!this.isLocalClient(currentPrimary)) {
                    if (!currentPrimary.allowTabSynchronization) {
                        // Fail the `canActAsPrimary` check if the current leaseholder has
                        // not opted into multi-tab synchronization. If this happens at
                        // client startup, we reject the Promise returned by
                        // `enablePersistence()` and the user can continue to use Firestore
                        // with in-memory persistence.
                        // If this fails during a lease refresh, we will instead block the
                        // AsyncQueue from executing further operations. Note that this is
                        // acceptable since mixing & matching different `synchronizeTabs`
                        // settings is not supported.
                        //
                        // TODO(b/114226234): Remove this check when `synchronizeTabs` can
                        // no longer be turned off.
                        throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG);
                    }
                    return false;
                }
            }
            if (this.networkEnabled && this.inForeground) {
                return true;
            }
            return clientMetadataStore(txn)
                .loadAll()
                .next(existingClients => {
                // Process all existing clients and determine whether at least one of
                // them is better suited to obtain the primary lease.
                const preferredCandidate = this.filterActiveClients(existingClients, MAX_PRIMARY_ELIGIBLE_AGE_MS).find(otherClient => {
                    if (this.clientId !== otherClient.clientId) {
                        const otherClientHasBetterNetworkState = !this.networkEnabled && otherClient.networkEnabled;
                        const otherClientHasBetterVisibility = !this.inForeground && otherClient.inForeground;
                        const otherClientHasSameNetworkState = this.networkEnabled === otherClient.networkEnabled;
                        if (otherClientHasBetterNetworkState ||
                            (otherClientHasBetterVisibility &&
                                otherClientHasSameNetworkState)) {
                            return true;
                        }
                    }
                    return false;
                });
                return preferredCandidate === undefined;
            });
        })
            .next(canActAsPrimary => {
            if (this.isPrimary !== canActAsPrimary) {
                logDebug(LOG_TAG$c, `Client ${canActAsPrimary ? 'is' : 'is not'} eligible for a primary lease.`);
            }
            return canActAsPrimary;
        });
    }
    async shutdown() {
        // The shutdown() operations are idempotent and can be called even when
        // start() aborted (e.g. because it couldn't acquire the persistence lease).
        this._started = false;
        this.markClientZombied();
        if (this.clientMetadataRefresher) {
            this.clientMetadataRefresher.cancel();
            this.clientMetadataRefresher = null;
        }
        this.detachVisibilityHandler();
        this.detachWindowUnloadHook();
        // Use `SimpleDb.runTransaction` directly to avoid failing if another tab
        // has obtained the primary lease.
        await this.simpleDb.runTransaction('shutdown', 'readwrite', [DbPrimaryClientStore, DbClientMetadataStore], simpleDbTxn => {
            const persistenceTransaction = new IndexedDbTransaction(simpleDbTxn, ListenSequence.INVALID);
            return this.releasePrimaryLeaseIfHeld(persistenceTransaction).next(() => this.removeClientMetadata(persistenceTransaction));
        });
        this.simpleDb.close();
        // Remove the entry marking the client as zombied from LocalStorage since
        // we successfully deleted its metadata from IndexedDb.
        this.removeClientZombiedEntry();
    }
    /**
     * Returns clients that are not zombied and have an updateTime within the
     * provided threshold.
     */
    filterActiveClients(clients, activityThresholdMs) {
        return clients.filter(client => this.isWithinAge(client.updateTimeMs, activityThresholdMs) &&
            !this.isClientZombied(client.clientId));
    }
    /**
     * Returns the IDs of the clients that are currently active. If multi-tab
     * is not supported, returns an array that only contains the local client's
     * ID.
     *
     * PORTING NOTE: This is only used for Web multi-tab.
     */
    getActiveClients() {
        return this.runTransaction('getActiveClients', 'readonly', txn => {
            return clientMetadataStore(txn)
                .loadAll()
                .next(clients => this.filterActiveClients(clients, MAX_CLIENT_AGE_MS).map(clientMetadata => clientMetadata.clientId));
        });
    }
    /** Whether `start()` has completed successfully (and `shutdown()` has not run). */
    get started() {
        return this._started;
    }
    /** Returns the IndexedDB-backed mutation queue for `user`. */
    getMutationQueue(user, indexManager) {
        return IndexedDbMutationQueue.forUser(user, this.serializer, indexManager, this.referenceDelegate);
    }
    /** Returns the shared target cache. */
    getTargetCache() {
        return this.targetCache;
    }
    /** Returns the shared remote document cache. */
    getRemoteDocumentCache() {
        return this.remoteDocumentCache;
    }
    /** Returns a per-user IndexedDB-backed index manager. */
    getIndexManager(user) {
        return new IndexedDbIndexManager(user, this.serializer.remoteSerializer.databaseId);
    }
    /** Returns the per-user document overlay cache. */
    getDocumentOverlayCache(user) {
        return IndexedDbDocumentOverlayCache.forUser(this.serializer, user);
    }
    /** Returns the shared bundle cache. */
    getBundleCache() {
        return this.bundleCache;
    }
    runTransaction(action, mode, transactionOperation) {
        logDebug(LOG_TAG$c, 'Starting transaction:', action);
        const simpleDbMode = mode === 'readonly' ? 'readonly' : 'readwrite';
        const objectStores = getObjectStores(this.schemaVersion);
        let persistenceTransaction;
        // Do all transactions as readwrite against all object stores, since we
        // are the only reader/writer.
        return this.simpleDb
            .runTransaction(action, simpleDbMode, objectStores, simpleDbTxn => {
            persistenceTransaction = new IndexedDbTransaction(simpleDbTxn, this.listenSequence
                ? this.listenSequence.next()
                : ListenSequence.INVALID);
            if (mode === 'readwrite-primary') {
                // While we merely verify that we have (or can acquire) the lease
                // immediately, we wait to extend the primary lease until after
                // executing transactionOperation(). This ensures that even if the
                // transactionOperation takes a long time, we'll use a recent
                // leaseTimestampMs in the extended (or newly acquired) lease.
                return this.verifyPrimaryLease(persistenceTransaction)
                    .next(holdsPrimaryLease => {
                    if (holdsPrimaryLease) {
                        return /* holdsPrimaryLease= */ true;
                    }
                    return this.canActAsPrimary(persistenceTransaction);
                })
                    .next(holdsPrimaryLease => {
                    if (!holdsPrimaryLease) {
                        logError(LOG_TAG$c, `Failed to obtain primary lease for action '${action}'.`);
                        this.isPrimary = false;
                        this.queue.enqueueRetryable(() => this.primaryStateListener(false));
                        throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_LOST_ERROR_MSG);
                    }
                    return transactionOperation(persistenceTransaction);
                })
                    .next(result => {
                    return this.acquireOrExtendPrimaryLease(persistenceTransaction).next(() => result);
                });
            }
            else {
                return this.verifyAllowTabSynchronization(persistenceTransaction).next(() => transactionOperation(persistenceTransaction));
            }
        })
            .then(result => {
            persistenceTransaction.raiseOnCommittedEvent();
            return result;
        });
    }
    /**
     * Verifies that the current tab is the primary leaseholder or alternatively
     * that the leaseholder has opted into multi-tab synchronization.
     */
    // TODO(b/114226234): Remove this check when `synchronizeTabs` can no longer
    // be turned off.
    verifyAllowTabSynchronization(txn) {
        const store = primaryClientStore(txn);
        return store.get(DbPrimaryClientKey).next(currentPrimary => {
            const currentLeaseIsValid = currentPrimary !== null &&
                this.isWithinAge(currentPrimary.leaseTimestampMs, MAX_PRIMARY_ELIGIBLE_AGE_MS) &&
                !this.isClientZombied(currentPrimary.ownerId);
            if (currentLeaseIsValid && !this.isLocalClient(currentPrimary)) {
                if (!this.forceOwningTab &&
                    (!this.allowTabSynchronization ||
                        !currentPrimary.allowTabSynchronization)) {
                    throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG);
                }
            }
        });
    }
    /**
     * Obtains or extends the new primary lease for the local client. This
     * method does not verify that the client is eligible for this lease.
     */
    acquireOrExtendPrimaryLease(txn) {
        const newPrimary = {
            ownerId: this.clientId,
            allowTabSynchronization: this.allowTabSynchronization,
            leaseTimestampMs: Date.now()
        };
        return primaryClientStore(txn).put(DbPrimaryClientKey, newPrimary);
    }
    /** Whether IndexedDB persistence can be used on this platform. */
    static isAvailable() {
        return SimpleDb.isAvailable();
    }
    /** Checks the primary lease and removes it if we are the current primary. */
    releasePrimaryLeaseIfHeld(txn) {
        const store = primaryClientStore(txn);
        return store.get(DbPrimaryClientKey).next(primaryClient => {
            if (this.isLocalClient(primaryClient)) {
                logDebug(LOG_TAG$c, 'Releasing primary lease.');
                return store.delete(DbPrimaryClientKey);
            }
            else {
                return PersistencePromise.resolve();
            }
        });
    }
    /** Verifies that `updateTimeMs` is within `maxAgeMs`. */
    isWithinAge(updateTimeMs, maxAgeMs) {
        const now = Date.now();
        const minAcceptable = now - maxAgeMs;
        const maxAcceptable = now;
        if (updateTimeMs < minAcceptable) {
            return false;
        }
        else if (updateTimeMs > maxAcceptable) {
            logError(LOG_TAG$c, `Detected an update time that is in the future: ${updateTimeMs} > ${maxAcceptable}`);
            return false;
        }
        return true;
    }
    attachVisibilityHandler() {
        if (this.document !== null &&
            typeof this.document.addEventListener === 'function') {
            this.documentVisibilityHandler = () => {
                this.queue.enqueueAndForget(() => {
                    this.inForeground = this.document.visibilityState === 'visible';
                    return this.updateClientMetadataAndTryBecomePrimary();
                });
            };
            this.document.addEventListener('visibilitychange', this.documentVisibilityHandler);
            this.inForeground = this.document.visibilityState === 'visible';
        }
    }
    detachVisibilityHandler() {
        if (this.documentVisibilityHandler) {
            this.document.removeEventListener('visibilitychange', this.documentVisibilityHandler);
            this.documentVisibilityHandler = null;
        }
    }
    /**
     * Attaches a window.unload handler that will synchronously write our
     * clientId to a "zombie client id" location in LocalStorage. This can be used
     * by tabs trying to acquire the primary lease to determine that the lease
     * is no longer valid even if the timestamp is recent. This is particularly
     * important for the refresh case (so the tab correctly re-acquires the
     * primary lease). LocalStorage is used for this rather than IndexedDb because
     * it is a synchronous API and so can be used reliably from an unload
     * handler.
     */
    attachWindowUnloadHook() {
        var _a;
        if (typeof ((_a = this.window) === null || _a === void 0 ? void 0 : _a.addEventListener) === 'function') {
            this.windowUnloadHandler = () => {
                // Note: In theory, this should be scheduled on the AsyncQueue since it
                // accesses internal state. We execute this code directly during shutdown
                // to make sure it gets a chance to run.
                this.markClientZombied();
                const safariIndexdbBugVersionRegex = /(?:Version|Mobile)\/1[456]/;
                if (isSafari() &&
                    (navigator.appVersion.match(safariIndexdbBugVersionRegex) ||
                        navigator.userAgent.match(safariIndexdbBugVersionRegex))) {
                    // On Safari 14, 15, and 16, we do not run any cleanup actions as it might
                    // trigger a bug that prevents Safari from re-opening IndexedDB during
                    // the next page load.
                    // See https://bugs.webkit.org/show_bug.cgi?id=226547
                    this.queue.enterRestrictedMode(/* purgeExistingTasks= */ true);
                }
                this.queue.enqueueAndForget(() => {
                    // Attempt graceful shutdown (including releasing our primary lease),
                    // but there's no guarantee it will complete.
                    return this.shutdown();
                });
            };
            this.window.addEventListener('pagehide', this.windowUnloadHandler);
        }
    }
    detachWindowUnloadHook() {
        if (this.windowUnloadHandler) {
            this.window.removeEventListener('pagehide', this.windowUnloadHandler);
            this.windowUnloadHandler = null;
        }
    }
    /**
     * Returns whether a client is "zombied" based on its LocalStorage entry.
     * Clients become zombied when their tab closes without running all of the
     * cleanup logic in `shutdown()`.
     */
    isClientZombied(clientId) {
        var _a;
        try {
            const isZombied = ((_a = this.webStorage) === null || _a === void 0 ? void 0 : _a.getItem(this.zombiedClientLocalStorageKey(clientId))) !== null;
            logDebug(LOG_TAG$c, `Client '${clientId}' ${isZombied ? 'is' : 'is not'} zombied in LocalStorage`);
            return isZombied;
        }
        catch (e) {
            // Gracefully handle if LocalStorage isn't working.
            logError(LOG_TAG$c, 'Failed to get zombied client id.', e);
            return false;
        }
    }
    /**
     * Record client as zombied (a client that had its tab closed). Zombied
     * clients are ignored during primary tab selection.
     */
    markClientZombied() {
        if (!this.webStorage) {
            return;
        }
        try {
            this.webStorage.setItem(this.zombiedClientLocalStorageKey(this.clientId), String(Date.now()));
        }
        catch (e) {
            // Gracefully handle if LocalStorage isn't available / working.
            // Consistency fix: pass the log tag like every other log call in this
            // class so the message is attributable and filterable.
            logError(LOG_TAG$c, 'Failed to set zombie client id.', e);
        }
    }
    /** Removes the zombied client entry if it exists. */
    removeClientZombiedEntry() {
        if (!this.webStorage) {
            return;
        }
        try {
            this.webStorage.removeItem(this.zombiedClientLocalStorageKey(this.clientId));
        }
        catch (e) {
            // Ignore
        }
    }
    /** Builds the LocalStorage key under which this client's zombie marker lives. */
    zombiedClientLocalStorageKey(clientId) {
        return `${ZOMBIED_CLIENTS_KEY_PREFIX}_${this.persistenceKey}_${clientId}`;
    }
}
/**
* Helper to get a typed SimpleDbStore for the primary client object store.
*/
function primaryClientStore(txn) {
    // Typed accessor for the primary-client object store of this transaction.
    const store = getStore(txn, DbPrimaryClientStore);
    return store;
}
/**
* Helper to get a typed SimpleDbStore for the client metadata object store.
*/
function clientMetadataStore(txn) {
    // Typed accessor for the client-metadata object store of this transaction.
    const store = getStore(txn, DbClientMetadataStore);
    return store;
}
/**
* Generates a string used as a prefix when storing data in IndexedDB and
* LocalStorage.
*/
function indexedDbStoragePrefix(databaseId, persistenceKey) {
    // Two prefix formats are produced:
    //
    //   * firestore / persistenceKey / projectID . databaseID / ...
    //   * firestore / persistenceKey / projectID / ...
    //
    // Project IDs are DNS-compatible names and cannot contain dots, so the
    // two formats cannot collide.
    const database = databaseId.isDefaultDatabase
        ? databaseId.projectId
        : `${databaseId.projectId}.${databaseId.database}`;
    return `firestore/${persistenceKey}/${database}/`;
}
async function indexedDbClearPersistence(persistenceKey) {
    // No-op when IndexedDB is unavailable in this environment.
    if (!SimpleDb.isAvailable()) {
        return;
    }
    // Delete the main Firestore database for this persistence key.
    await SimpleDb.delete(persistenceKey + MAIN_DATABASE);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Compares two array for equality using comparator. The method computes the
* intersection and invokes `onAdd` for every element that is in `after` but not
* `before`. `onRemove` is invoked for every element in `before` but missing
* from `after`.
*
* The method creates a copy of both `before` and `after` and runs in O(n log
* n), where n is the size of the two lists.
*
* @param before - The elements that exist in the original array.
* @param after - The elements to diff against the original array.
* @param comparator - The comparator for the elements in before and after.
* @param onAdd - A function to invoke for every element that is part of `
* after` but not `before`.
* @param onRemove - A function to invoke for every element that is part of
* `before` but not `after`.
*/
function diffArrays(before, after, comparator, onAdd, onRemove) {
    // Work on sorted copies so the caller's arrays are left untouched.
    const sortedBefore = [...before].sort(comparator);
    const sortedAfter = [...after].sort(comparator);
    let beforeIdx = 0;
    let afterIdx = 0;
    // Merge-walk both sorted lists, reporting elements present on one side
    // only: `onRemove` for before-only, `onAdd` for after-only.
    while (afterIdx < sortedAfter.length && beforeIdx < sortedBefore.length) {
        const cmp = comparator(sortedBefore[beforeIdx], sortedAfter[afterIdx]);
        if (cmp < 0) {
            // Only in `before` -> removed.
            onRemove(sortedBefore[beforeIdx++]);
        }
        else if (cmp > 0) {
            // Only in `after` -> added.
            onAdd(sortedAfter[afterIdx++]);
        }
        else {
            // Present in both -> unchanged; step past it on both sides.
            afterIdx++;
            beforeIdx++;
        }
    }
    // Flush whatever remains on either side.
    while (afterIdx < sortedAfter.length) {
        onAdd(sortedAfter[afterIdx++]);
    }
    while (beforeIdx < sortedBefore.length) {
        onRemove(sortedBefore[beforeIdx++]);
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Tag used for all LocalStore debug/error log output below.
const LOG_TAG$b = 'LocalStore';
/**
 * The maximum time to leave a resume token buffered without writing it out.
 * This value is arbitrary: it's long enough to avoid several writes
 * (possibly indefinitely if updates come more frequently than this) but
 * short enough that restarting after crashing will still have a pretty
 * recent resume token.
 */
const RESUME_TOKEN_MAX_AGE_MICROS = 5 * 60 * 1e6; // 5 minutes, in microseconds.
/**
* Implements `LocalStore` interface.
*
* Note: some field defined in this class might have public access level, but
* the class is not exported so they are only accessible from this module.
* This is useful to implement optional features (like bundles) in free
* functions, such that they are tree-shakeable.
*/
class LocalStoreImpl {
    constructor(
    /** Manages our in-memory or durable persistence. */
    persistence, queryEngine, initialUser, serializer) {
        this.persistence = persistence;
        this.queryEngine = queryEngine;
        this.serializer = serializer;
        /**
         * Maps a targetID to data about its target.
         *
         * PORTING NOTE: We are using an immutable data structure on Web to make re-runs
         * of `applyRemoteEvent()` idempotent.
         */
        this.targetDataByTarget = new SortedMap(primitiveComparator);
        /** Maps a target to its targetID. */
        // TODO(wuandy): Evaluate if TargetId can be part of Target.
        this.targetIdByTarget = new ObjectMap(t => canonifyTarget(t), targetEquals);
        /**
         * A per collection group index of the last read time processed by
         * `getNewDocumentChanges()`.
         *
         * PORTING NOTE: This is only used for multi-tab synchronization.
         */
        this.collectionGroupReadTime = new Map();
        this.remoteDocuments = persistence.getRemoteDocumentCache();
        this.targetCache = persistence.getTargetCache();
        this.bundleCache = persistence.getBundleCache();
        // Must run after `this.remoteDocuments` is assigned above:
        // initializeUserComponents reads it.
        this.initializeUserComponents(initialUser);
    }
    // (Re)creates all user-scoped components — overlay cache, index manager,
    // mutation queue — and the local-documents view built on top of them.
    // Called from the constructor and again on every user change.
    initializeUserComponents(user) {
        // TODO(indexing): Add spec tests that test these components change after a
        // user change
        this.documentOverlayCache = this.persistence.getDocumentOverlayCache(user);
        this.indexManager = this.persistence.getIndexManager(user);
        this.mutationQueue = this.persistence.getMutationQueue(user, this.indexManager);
        this.localDocuments = new LocalDocumentsView(this.remoteDocuments, this.mutationQueue, this.documentOverlayCache, this.indexManager);
        this.remoteDocuments.setIndexManager(this.indexManager);
        this.queryEngine.initialize(this.localDocuments, this.indexManager);
    }
    // Runs the given garbage collector over the currently tracked targets
    // inside a primary-lease readwrite transaction.
    collectGarbage(garbageCollector) {
        return this.persistence.runTransaction('Collect garbage', 'readwrite-primary', txn => garbageCollector.collect(txn, this.targetDataByTarget));
    }
}
function newLocalStore(
/** Manages our in-memory or durable persistence. */
persistence, queryEngine, initialUser, serializer) {
    // Thin factory wrapper; LocalStoreImpl itself stays unexported so its
    // optional features remain tree-shakeable.
    const store = new LocalStoreImpl(persistence, queryEngine, initialUser, serializer);
    return store;
}
/**
* Tells the LocalStore that the currently authenticated user has changed.
*
* In response the local store switches the mutation queue to the new user and
* returns any resulting document changes.
*/
// PORTING NOTE: Android and iOS only return the documents affected by the
// change.
async function localStoreHandleUserChange(localStore, user) {
    const localStoreImpl = debugCast(localStore);
    const result = await localStoreImpl.persistence.runTransaction('Handle user change', 'readonly', txn => {
        // Swap out the mutation queue, grabbing the pending mutation batches
        // before and after.
        let oldBatches;
        return localStoreImpl.mutationQueue
            .getAllMutationBatches(txn)
            .next(promisedOldBatches => {
            oldBatches = promisedOldBatches;
            // Re-bind all user-scoped components (mutation queue, overlays,
            // index manager) to the new user before reading again.
            localStoreImpl.initializeUserComponents(user);
            return localStoreImpl.mutationQueue.getAllMutationBatches(txn);
        })
            .next(newBatches => {
            const removedBatchIds = [];
            const addedBatchIds = [];
            // Union the old/new changed keys.
            let changedKeys = documentKeySet();
            // Old-user batches are reported as removed...
            for (const batch of oldBatches) {
                removedBatchIds.push(batch.batchId);
                for (const mutation of batch.mutations) {
                    changedKeys = changedKeys.add(mutation.key);
                }
            }
            // ...and new-user batches as added.
            for (const batch of newBatches) {
                addedBatchIds.push(batch.batchId);
                for (const mutation of batch.mutations) {
                    changedKeys = changedKeys.add(mutation.key);
                }
            }
            // Return the set of all (potentially) changed documents and the list
            // of mutation batch IDs that were affected by change.
            return localStoreImpl.localDocuments
                .getDocuments(txn, changedKeys)
                .next(affectedDocuments => {
                return {
                    affectedDocuments,
                    removedBatchIds,
                    addedBatchIds
                };
            });
        });
    });
    return result;
}
/**
 * Accepts locally generated Mutations and commits them to storage.
 *
 * Returns a promise with the assigned `batchId` and the document `changes`
 * produced by applying the mutations locally.
 */
function localStoreWriteLocally(localStore, mutations) {
    const localStoreImpl = debugCast(localStore);
    const localWriteTime = Timestamp.now();
    // All document keys touched by any of the mutations.
    const keys = mutations.reduce((keys, m) => keys.add(m.key), documentKeySet());
    let overlayedDocuments;
    let mutationBatch;
    return localStoreImpl.persistence
        .runTransaction('Locally write mutations', 'readwrite', txn => {
        // Figure out which keys do not have a remote version in the cache, this
        // is needed to create the right overlay mutation: if no remote version
        // presents, we do not need to create overlays as patch mutations.
        // TODO(Overlay): Is there a better way to determine this? Using the
        // document version does not work because local mutations set them back
        // to 0.
        let remoteDocs = mutableDocumentMap();
        let docsWithoutRemoteVersion = documentKeySet();
        return localStoreImpl.remoteDocuments
            .getEntries(txn, keys)
            .next(docs => {
            remoteDocs = docs;
            remoteDocs.forEach((key, doc) => {
                if (!doc.isValidDocument()) {
                    docsWithoutRemoteVersion = docsWithoutRemoteVersion.add(key);
                }
            });
        })
            .next(() => {
            // Load and apply all existing mutations. This lets us compute the
            // current base state for all non-idempotent transforms before applying
            // any additional user-provided writes.
            return localStoreImpl.localDocuments.getOverlayedDocuments(txn, remoteDocs);
        })
            .next((docs) => {
            overlayedDocuments = docs;
            // For non-idempotent mutations (such as `FieldValue.increment()`),
            // we record the base state in a separate patch mutation. This is
            // later used to guarantee consistent values and prevents flicker
            // even if the backend sends us an update that already includes our
            // transform.
            const baseMutations = [];
            for (const mutation of mutations) {
                const baseValue = mutationExtractBaseValue(mutation, overlayedDocuments.get(mutation.key).overlayedDocument);
                if (baseValue != null) {
                    // NOTE: The base state should only be applied if there's some
                    // existing document to override, so use a Precondition of
                    // exists=true
                    baseMutations.push(new PatchMutation(mutation.key, baseValue, extractFieldMask(baseValue.value.mapValue), Precondition.exists(true)));
                }
            }
            return localStoreImpl.mutationQueue.addMutationBatch(txn, localWriteTime, baseMutations, mutations);
        })
            .next(batch => {
            // Persist the overlays implied by the new batch so reads can be
            // served without replaying the whole mutation queue.
            mutationBatch = batch;
            const overlays = batch.applyToLocalDocumentSet(overlayedDocuments, docsWithoutRemoteVersion);
            return localStoreImpl.documentOverlayCache.saveOverlays(txn, batch.batchId, overlays);
        });
    })
        .then(() => ({
        batchId: mutationBatch.batchId,
        changes: convertOverlayedDocumentMapToDocumentMap(overlayedDocuments)
    }));
}
/**
* Acknowledges the given batch.
*
* On the happy path when a batch is acknowledged, the local store will
*
* + remove the batch from the mutation queue;
* + apply the changes to the remote document cache;
* + recalculate the latency compensated view implied by those changes (there
* may be mutations in the queue that affect the documents but haven't been
* acknowledged yet); and
* + give the changed documents back the sync engine
*
* @returns The resulting (modified) documents.
*/
function localStoreAcknowledgeBatch(localStore, batchResult) {
    const localStoreImpl = debugCast(localStore);
    return localStoreImpl.persistence.runTransaction('Acknowledge batch', 'readwrite-primary', txn => {
        const affected = batchResult.batch.keys();
        const documentBuffer = localStoreImpl.remoteDocuments.newChangeBuffer({
            trackRemovals: true // Make sure document removals show up in `getNewDocumentChanges()`
        });
        // Step order matters: apply the acknowledged write to the remote
        // document cache, remove the batch's overlays, then recalculate the
        // latency-compensated view for keys with transform results before
        // reading the affected documents back.
        return applyWriteToRemoteDocuments(localStoreImpl, txn, batchResult, documentBuffer)
            .next(() => documentBuffer.apply(txn))
            .next(() => localStoreImpl.mutationQueue.performConsistencyCheck(txn))
            .next(() => localStoreImpl.documentOverlayCache.removeOverlaysForBatchId(txn, affected, batchResult.batch.batchId))
            .next(() => localStoreImpl.localDocuments.recalculateAndSaveOverlaysForDocumentKeys(txn, getKeysWithTransformResults(batchResult)))
            .next(() => localStoreImpl.localDocuments.getDocuments(txn, affected));
    });
}
function getKeysWithTransformResults(batchResult) {
    // Collect the keys of all mutations in the batch whose acknowledged
    // results include server-applied transform values.
    let keysWithTransforms = documentKeySet();
    batchResult.mutationResults.forEach((mutationResult, i) => {
        if (mutationResult.transformResults.length > 0) {
            keysWithTransforms = keysWithTransforms.add(batchResult.batch.mutations[i].key);
        }
    });
    return keysWithTransforms;
}
/**
* Removes mutations from the MutationQueue for the specified batch;
* LocalDocuments will be recalculated.
*
* @returns The resulting modified documents.
*/
function localStoreRejectBatch(localStore, batchId) {
    const localStoreImpl = debugCast(localStore);
    return localStoreImpl.persistence.runTransaction('Reject batch', 'readwrite-primary', txn => {
        let affectedKeys;
        return localStoreImpl.mutationQueue
            .lookupMutationBatch(txn, batchId)
            .next((batch) => {
            // The batch must exist; rejecting an unknown batch id is a
            // programming error.
            hardAssert(batch !== null);
            affectedKeys = batch.keys();
            return localStoreImpl.mutationQueue.removeMutationBatch(txn, batch);
        })
            .next(() => localStoreImpl.mutationQueue.performConsistencyCheck(txn))
            .next(() => localStoreImpl.documentOverlayCache.removeOverlaysForBatchId(txn, affectedKeys, batchId))
            .next(() => localStoreImpl.localDocuments.recalculateAndSaveOverlaysForDocumentKeys(txn, affectedKeys))
            .next(() => localStoreImpl.localDocuments.getDocuments(txn, affectedKeys));
    });
}
/**
* Returns the largest (latest) batch id in mutation queue that is pending
* server response.
*
* Returns `BATCHID_UNKNOWN` if the queue is empty.
*/
function localStoreGetHighestUnacknowledgedBatchId(localStore) {
    // Delegates to the mutation queue inside a read-only transaction.
    const localStoreImpl = debugCast(localStore);
    const { persistence, mutationQueue } = localStoreImpl;
    return persistence.runTransaction('Get highest unacknowledged batch id', 'readonly', txn => mutationQueue.getHighestUnacknowledgedBatchId(txn));
}
/**
* Returns the last consistent snapshot processed (used by the RemoteStore to
* determine whether to buffer incoming snapshots from the backend).
*/
function localStoreGetLastRemoteSnapshotVersion(localStore) {
    // Reads the last remote snapshot version from the target cache inside a
    // read-only transaction.
    const localStoreImpl = debugCast(localStore);
    const { persistence, targetCache } = localStoreImpl;
    return persistence.runTransaction('Get last remote snapshot version', 'readonly', txn => targetCache.getLastRemoteSnapshotVersion(txn));
}
/**
* Updates the "ground-state" (remote) documents. We assume that the remote
* event reflects any write batches that have been acknowledged or rejected
* (i.e. we do not re-apply local mutations to updates from this event).
*
* LocalDocuments are re-calculated if there are remaining mutations in the
* queue.
*/
function localStoreApplyRemoteEventToLocalCache(localStore, remoteEvent) {
    const localStoreImpl = debugCast(localStore);
    const remoteVersion = remoteEvent.snapshotVersion;
    let newTargetDataByTargetMap = localStoreImpl.targetDataByTarget;
    return localStoreImpl.persistence
        .runTransaction('Apply remote event', 'readwrite-primary', txn => {
        const documentBuffer = localStoreImpl.remoteDocuments.newChangeBuffer({
            trackRemovals: true // Make sure document removals show up in `getNewDocumentChanges()`
        });
        // Reset newTargetDataByTargetMap in case this transaction gets re-run.
        newTargetDataByTargetMap = localStoreImpl.targetDataByTarget;
        const promises = [];
        remoteEvent.targetChanges.forEach((change, targetId) => {
            const oldTargetData = newTargetDataByTargetMap.get(targetId);
            if (!oldTargetData) {
                // The target is no longer tracked locally; ignore its changes.
                return;
            }
            // Only update the remote keys if the target is still active. This
            // ensures that we can persist the updated target data along with
            // the updated assignment.
            promises.push(localStoreImpl.targetCache
                .removeMatchingKeys(txn, change.removedDocuments, targetId)
                .next(() => {
                return localStoreImpl.targetCache.addMatchingKeys(txn, change.addedDocuments, targetId);
            }));
            let newTargetData = oldTargetData.withSequenceNumber(txn.currentSequenceNumber);
            if (remoteEvent.targetMismatches.get(targetId) !== null) {
                // The backend reported a target mismatch: clear the resume token
                // and limbo-free snapshot version so the target re-syncs.
                newTargetData = newTargetData
                    .withResumeToken(ByteString.EMPTY_BYTE_STRING, SnapshotVersion.min())
                    .withLastLimboFreeSnapshotVersion(SnapshotVersion.min());
            }
            else if (change.resumeToken.approximateByteSize() > 0) {
                newTargetData = newTargetData.withResumeToken(change.resumeToken, remoteVersion);
            }
            newTargetDataByTargetMap = newTargetDataByTargetMap.insert(targetId, newTargetData);
            // Update the target data if there are target changes (or if
            // sufficient time has passed since the last update).
            if (shouldPersistTargetData(oldTargetData, newTargetData, change)) {
                promises.push(localStoreImpl.targetCache.updateTargetData(txn, newTargetData));
            }
        });
        let changedDocs = mutableDocumentMap();
        let existenceChangedKeys = documentKeySet();
        remoteEvent.documentUpdates.forEach(key => {
            if (remoteEvent.resolvedLimboDocuments.has(key)) {
                promises.push(localStoreImpl.persistence.referenceDelegate.updateLimboDocument(txn, key));
            }
        });
        // Each loop iteration only affects its "own" doc, so it's safe to get all
        // the remote documents in advance in a single call.
        promises.push(populateDocumentChangeBuffer(txn, documentBuffer, remoteEvent.documentUpdates).next(result => {
            changedDocs = result.changedDocuments;
            existenceChangedKeys = result.existenceChangedKeys;
        }));
        // HACK: The only reason we allow a null snapshot version is so that we
        // can synthesize remote events when we get permission denied errors while
        // trying to resolve the state of a locally cached document that is in
        // limbo.
        if (!remoteVersion.isEqual(SnapshotVersion.min())) {
            // NOTE(review): `lastRemoteSnapshotVersion` is read but unused here;
            // `setTargetsMetadata` only receives `remoteVersion` — presumably the
            // read exists for ordering; confirm before simplifying.
            const updateRemoteVersion = localStoreImpl.targetCache
                .getLastRemoteSnapshotVersion(txn)
                .next(lastRemoteSnapshotVersion => {
                return localStoreImpl.targetCache.setTargetsMetadata(txn, txn.currentSequenceNumber, remoteVersion);
            });
            promises.push(updateRemoteVersion);
        }
        return PersistencePromise.waitFor(promises)
            .next(() => documentBuffer.apply(txn))
            .next(() => localStoreImpl.localDocuments.getLocalViewOfDocuments(txn, changedDocs, existenceChangedKeys))
            .next(() => changedDocs);
    })
        .then(changedDocs => {
        // Commit the new in-memory target map only after the transaction
        // succeeded.
        localStoreImpl.targetDataByTarget = newTargetDataByTargetMap;
        return changedDocs;
    });
}
/**
* Populates document change buffer with documents from backend or a bundle.
* Returns the document changes resulting from applying those documents, and
* also a set of documents whose existence state are changed as a result.
*
* @param txn - Transaction to use to read existing documents from storage.
* @param documentBuffer - Document buffer to collect the resulted changes to be
* applied to storage.
* @param documents - Documents to be applied.
*/
function populateDocumentChangeBuffer(txn, documentBuffer, documents) {
    let updatedKeys = documentKeySet();
    let existenceChangedKeys = documentKeySet();
    documents.forEach(k => (updatedKeys = updatedKeys.add(k)));
    // Read all existing cache entries up front in a single call.
    return documentBuffer.getEntries(txn, updatedKeys).next(existingDocs => {
        let changedDocuments = mutableDocumentMap();
        documents.forEach((key, doc) => {
            const existingDoc = existingDocs.get(key);
            // Check whether there is an existence state change for this document.
            if (doc.isFoundDocument() !== existingDoc.isFoundDocument()) {
                existenceChangedKeys = existenceChangedKeys.add(key);
            }
            // Note: The order of the steps below is important, since we want
            // to ensure that rejected limbo resolutions (which fabricate
            // NoDocuments with SnapshotVersion.min()) never add documents to
            // cache.
            if (doc.isNoDocument() && doc.version.isEqual(SnapshotVersion.min())) {
                // NoDocuments with SnapshotVersion.min() are used in manufactured
                // events. We remove these documents from cache since we lost
                // access.
                documentBuffer.removeEntry(key, doc.readTime);
                changedDocuments = changedDocuments.insert(key, doc);
            }
            else if (!existingDoc.isValidDocument() ||
                doc.version.compareTo(existingDoc.version) > 0 ||
                (doc.version.compareTo(existingDoc.version) === 0 &&
                    existingDoc.hasPendingWrites)) {
                // The incoming document is newer (or the cached copy is invalid
                // or has pending writes at the same version): accept it.
                documentBuffer.addEntry(doc);
                changedDocuments = changedDocuments.insert(key, doc);
            }
            else {
                logDebug(LOG_TAG$b, 'Ignoring outdated watch update for ', key, '. Current version:', existingDoc.version, ' Watch version:', doc.version);
            }
        });
        return { changedDocuments, existenceChangedKeys };
    });
}
/**
* Returns true if the newTargetData should be persisted during an update of
* an active target. TargetData should always be persisted when a target is
* being released and should not call this function.
*
* While the target is active, TargetData updates can be omitted when nothing
* about the target has changed except metadata like the resume token or
* snapshot version. Occasionally it's worth the extra write to prevent these
* values from getting too stale after a crash, but this doesn't have to be
* too frequent.
*/
function shouldPersistTargetData(oldTargetData, newTargetData, change) {
    // A target with no resume token at all must always be persisted.
    if (oldTargetData.resumeToken.approximateByteSize() === 0) {
        return true;
    }
    // Don't allow resume token changes to be buffered indefinitely. This
    // allows us to be reasonably up-to-date after a crash and avoids needing
    // to loop over all active queries on shutdown. Especially in the browser
    // we may not get time to do anything interesting while the current tab is
    // closing.
    const elapsedMicros = newTargetData.snapshotVersion.toMicroseconds() -
        oldTargetData.snapshotVersion.toMicroseconds();
    if (elapsedMicros >= RESUME_TOKEN_MAX_AGE_MICROS) {
        return true;
    }
    // Otherwise if the only thing that has changed about a target is its resume
    // token it's not worth persisting. Note that the RemoteStore keeps an
    // in-memory view of the currently active targets which includes the current
    // resume token, so stream failure or user changes will still use an
    // up-to-date resume token regardless of what we do here.
    const docChangeCount = change.addedDocuments.size +
        change.modifiedDocuments.size +
        change.removedDocuments.size;
    return docChangeCount > 0;
}
/**
* Notifies local store of the changed views to locally pin documents.
*/
async function localStoreNotifyLocalViewChanges(localStore, viewChanges) {
    const localStoreImpl = debugCast(localStore);
    try {
        // Pin/unpin documents by adding/removing references for each view
        // change inside a single readwrite transaction.
        await localStoreImpl.persistence.runTransaction('notifyLocalViewChanges', 'readwrite', txn => {
            return PersistencePromise.forEach(viewChanges, (viewChange) => {
                return PersistencePromise.forEach(viewChange.addedKeys, (key) => localStoreImpl.persistence.referenceDelegate.addReference(txn, viewChange.targetId, key)).next(() => PersistencePromise.forEach(viewChange.removedKeys, (key) => localStoreImpl.persistence.referenceDelegate.removeReference(txn, viewChange.targetId, key)));
            });
        });
    }
    catch (e) {
        if (isIndexedDbTransactionError(e)) {
            // If `notifyLocalViewChanges` fails, we did not advance the sequence
            // number for the documents that were included in this transaction.
            // This might trigger them to be deleted earlier than they otherwise
            // would have, but it should not invalidate the integrity of the data.
            // This swallow is deliberate best-effort handling; do not rethrow.
            logDebug(LOG_TAG$b, 'Failed to update sequence numbers: ' + e);
        }
        else {
            throw e;
        }
    }
    for (const viewChange of viewChanges) {
        const targetId = viewChange.targetId;
        if (!viewChange.fromCache) {
            // NOTE(review): assumes the target is present in targetDataByTarget
            // whenever `fromCache` is false — confirm callers guarantee this.
            const targetData = localStoreImpl.targetDataByTarget.get(targetId);
            // Advance the last limbo free snapshot version
            const lastLimboFreeSnapshotVersion = targetData.snapshotVersion;
            const updatedTargetData = targetData.withLastLimboFreeSnapshotVersion(lastLimboFreeSnapshotVersion);
            localStoreImpl.targetDataByTarget =
                localStoreImpl.targetDataByTarget.insert(targetId, updatedTargetData);
            // TODO(b/272564316): Apply the optimization done on other platforms.
            // This is a problem for web because saving the updated targetData from
            // non-primary client conflicts with what primary client saved.
        }
    }
}
/**
* Gets the mutation batch after the passed in batchId in the mutation queue
* or null if empty.
* @param afterBatchId - If provided, the batch to search after.
* @returns The next mutation or null if there wasn't one.
*/
function localStoreGetNextMutationBatch(localStore, afterBatchId) {
    const localStoreImpl = debugCast(localStore);
    return localStoreImpl.persistence.runTransaction('Get next mutation batch', 'readonly', txn => {
        // Search from the start of the queue when no batch id was supplied.
        const startAfter = afterBatchId === undefined ? BATCHID_UNKNOWN : afterBatchId;
        return localStoreImpl.mutationQueue.getNextMutationBatchAfterBatchId(txn, startAfter);
    });
}
/**
* Reads the current value of a Document with a given key or null if not
* found - used for testing.
*/
function localStoreReadDocument(localStore, key) {
    // Read the local (latency-compensated) view of one document.
    const localStoreImpl = debugCast(localStore);
    const { persistence, localDocuments } = localStoreImpl;
    return persistence.runTransaction('read document', 'readonly', txn => localDocuments.getDocument(txn, key));
}
/**
* Assigns the given target an internal ID so that its results can be pinned so
* they don't get GC'd. A target must be allocated in the local store before
* the store can be used to manage its view.
*
* Allocating an already allocated `Target` will return the existing `TargetData`
* for that `Target`.
*/
function localStoreAllocateTarget(localStore, target) {
    const localStoreImpl = debugCast(localStore);
    return localStoreImpl.persistence
        .runTransaction('Allocate target', 'readwrite', txn => {
        let targetData;
        return localStoreImpl.targetCache
            .getTargetData(txn, target)
            .next((cached) => {
            if (cached) {
                // This target has been listened to previously, so reuse the
                // previous targetID.
                // TODO(mcg): freshen last accessed date?
                targetData = cached;
                return PersistencePromise.resolve(targetData);
            }
            else {
                // First time this target is listened to: allocate a fresh id
                // and persist the new TargetData.
                return localStoreImpl.targetCache
                    .allocateTargetId(txn)
                    .next(targetId => {
                    targetData = new TargetData(target, targetId, "TargetPurposeListen" /* TargetPurpose.Listen */, txn.currentSequenceNumber);
                    return localStoreImpl.targetCache
                        .addTargetData(txn, targetData)
                        .next(() => targetData);
                });
            }
        });
    })
        .then(targetData => {
        // If Multi-Tab is enabled, the existing target data may be newer than
        // the in-memory data
        const cachedTargetData = localStoreImpl.targetDataByTarget.get(targetData.targetId);
        if (cachedTargetData === null ||
            targetData.snapshotVersion.compareTo(cachedTargetData.snapshotVersion) >
                0) {
            localStoreImpl.targetDataByTarget =
                localStoreImpl.targetDataByTarget.insert(targetData.targetId, targetData);
            localStoreImpl.targetIdByTarget.set(target, targetData.targetId);
        }
        return targetData;
    });
}
/**
* Returns the TargetData as seen by the LocalStore, including updates that may
* have not yet been persisted to the TargetCache.
*/
// Visible for testing.
function localStoreGetTargetData(localStore, transaction, target) {
    const localStoreImpl = debugCast(localStore);
    const targetId = localStoreImpl.targetIdByTarget.get(target);
    // Unknown target: fall back to the persisted target cache.
    if (targetId === undefined) {
        return localStoreImpl.targetCache.getTargetData(transaction, target);
    }
    // Known target: serve the in-memory copy, which may include updates that
    // have not yet been persisted to the TargetCache.
    return PersistencePromise.resolve(localStoreImpl.targetDataByTarget.get(targetId));
}
/**
* Unpins all the documents associated with the given target. If
* `keepPersistedTargetData` is set to false and Eager GC enabled, the method
* directly removes the associated target data from the target cache.
*
* Releasing a non-existing `Target` is a no-op.
*/
// PORTING NOTE: `keepPersistedTargetData` is multi-tab only.
async function localStoreReleaseTarget(localStore, targetId, keepPersistedTargetData) {
    const localStoreImpl = debugCast(localStore);
    // NOTE(review): assumes the target was previously allocated — `targetData`
    // would be undefined for an unknown targetId and the final `.target`
    // access below would throw; confirm callers guarantee this.
    const targetData = localStoreImpl.targetDataByTarget.get(targetId);
    const mode = keepPersistedTargetData ? 'readwrite' : 'readwrite-primary';
    try {
        if (!keepPersistedTargetData) {
            // Eagerly drop the persisted target data; requires the primary
            // lease ('readwrite-primary').
            await localStoreImpl.persistence.runTransaction('Release target', mode, txn => {
                return localStoreImpl.persistence.referenceDelegate.removeTarget(txn, targetData);
            });
        }
    }
    catch (e) {
        if (isIndexedDbTransactionError(e)) {
            // All `releaseTarget` does is record the final metadata state for the
            // target, but we've been recording this periodically during target
            // activity. If we lose this write this could cause a very slight
            // difference in the order of target deletion during GC, but we
            // don't define exact LRU semantics so this is acceptable.
            logDebug(LOG_TAG$b, `Failed to update sequence numbers for target ${targetId}: ${e}`);
        }
        else {
            throw e;
        }
    }
    // Always remove the in-memory bookkeeping, even if the transaction above
    // failed with a tolerated IndexedDB error.
    localStoreImpl.targetDataByTarget =
        localStoreImpl.targetDataByTarget.remove(targetId);
    localStoreImpl.targetIdByTarget.delete(targetData.target);
}
/**
* Runs the specified query against the local store and returns the results,
* potentially taking advantage of query data from previous executions (such
* as the set of remote keys).
*
* @param usePreviousResults - Whether results from previous executions can
* be used to optimize this query execution.
*/
function localStoreExecuteQuery(localStore, query, usePreviousResults) {
    const localStoreImpl = debugCast(localStore);
    let lastLimboFreeSnapshotVersion = SnapshotVersion.min();
    let remoteKeys = documentKeySet();
    return localStoreImpl.persistence.runTransaction('Execute query', 'readwrite', // Use readwrite instead of readonly so indexes can be created
    txn => {
        return localStoreGetTargetData(localStoreImpl, txn, queryToTarget(query))
            .next(targetData => {
            // If the target was executed before, reuse its limbo-free version
            // and remote key set to optimize this execution.
            if (targetData) {
                lastLimboFreeSnapshotVersion =
                    targetData.lastLimboFreeSnapshotVersion;
                return localStoreImpl.targetCache
                    .getMatchingKeysForTargetId(txn, targetData.targetId)
                    .next(result => {
                    remoteKeys = result;
                });
            }
        })
            .next(() => localStoreImpl.queryEngine.getDocumentsMatchingQuery(txn, query, usePreviousResults
            ? lastLimboFreeSnapshotVersion
            : SnapshotVersion.min(), usePreviousResults ? remoteKeys : documentKeySet()))
            .next(documents => {
            // Record the max read time for the query's collection group so
            // later change detection can resume from there.
            setMaxReadTime(localStoreImpl, queryCollectionGroup(query), documents);
            return { documents, remoteKeys };
        });
    });
}
/**
 * Applies an acknowledged batch's documents to the remote document cache via
 * `documentBuffer`, then removes the batch from the mutation queue.
 */
function applyWriteToRemoteDocuments(localStoreImpl, txn, batchResult, documentBuffer) {
    const batch = batchResult.batch;
    const docKeys = batch.keys();
    let promiseChain = PersistencePromise.resolve();
    docKeys.forEach(docKey => {
        promiseChain = promiseChain
            .next(() => documentBuffer.getEntry(txn, docKey))
            .next(doc => {
            const ackVersion = batchResult.docVersions.get(docKey);
            hardAssert(ackVersion !== null);
            // Only apply the write when the cached document is older than the
            // acknowledged version; a newer cache entry already reflects it.
            if (doc.version.compareTo(ackVersion) < 0) {
                batch.applyToRemoteDocument(doc, batchResult);
                if (doc.isValidDocument()) {
                    // We use the commitVersion as the readTime rather than the
                    // document's updateTime since the updateTime is not advanced
                    // for updates that do not modify the underlying document.
                    doc.setReadTime(batchResult.commitVersion);
                    documentBuffer.addEntry(doc);
                }
            }
        });
    });
    return promiseChain.next(() => localStoreImpl.mutationQueue.removeMutationBatch(txn, batch));
}
/** Returns the local view of the documents affected by a mutation batch. */
// PORTING NOTE: Multi-Tab only.
function localStoreLookupMutationDocuments(localStore, batchId) {
    const localStoreImpl = debugCast(localStore);
    const mutationQueueImpl = debugCast(localStoreImpl.mutationQueue);
    return localStoreImpl.persistence.runTransaction('Lookup mutation documents', 'readonly', txn => {
        return mutationQueueImpl.lookupMutationKeys(txn, batchId).next(keys => {
            // The batch may already be gone (e.g. removed by another tab);
            // report null rather than an empty document set in that case.
            if (!keys) {
                return PersistencePromise.resolve(null);
            }
            return localStoreImpl.localDocuments.getDocuments(txn, keys);
        });
    });
}
/** Drops the cached key metadata for the given mutation batch. */
// PORTING NOTE: Multi-Tab only.
function localStoreRemoveCachedMutationBatchMetadata(localStore, batchId) {
    const localStoreImpl = debugCast(localStore, LocalStoreImpl);
    debugCast(localStoreImpl.mutationQueue).removeCachedMutationKeys(batchId);
}
/** Returns the IDs of the clients that are currently active. */
// PORTING NOTE: Multi-Tab only.
function localStoreGetActiveClients(localStore) {
    const localStoreImpl = debugCast(localStore, LocalStoreImpl);
    return debugCast(localStoreImpl.persistence).getActiveClients();
}
/**
 * Resolves to the `Target` for the given target ID, served from the in-memory
 * target cache when possible and from persistence otherwise (or `null` when
 * unknown).
 */
// PORTING NOTE: Multi-Tab only.
function localStoreGetCachedTarget(localStore, targetId) {
    const localStoreImpl = debugCast(localStore);
    const targetCacheImpl = debugCast(localStoreImpl.targetCache);
    // Fast path: the target data is already held in memory.
    const inMemoryTargetData = localStoreImpl.targetDataByTarget.get(targetId);
    if (inMemoryTargetData) {
        return Promise.resolve(inMemoryTargetData.target);
    }
    // Slow path: look the target up in persistent storage.
    return localStoreImpl.persistence.runTransaction('Get target data', 'readonly', transaction => targetCacheImpl
        .getTargetDataForTarget(transaction, targetId)
        .next(targetData => (targetData ? targetData.target : null)));
}
/**
 * Returns the set of documents that have been updated since the last call.
 * If this is the first call, returns the set of changes since client
 * initialization. Further invocations will return document that have changed
 * since the prior call.
 */
// PORTING NOTE: Multi-Tab only.
function localStoreGetNewDocumentChanges(localStore, collectionGroup) {
    const localStoreImpl = debugCast(localStore);
    // Get the current maximum read time for the collection. This should always
    // exist, but to reduce the chance for regressions we default to
    // SnapshotVersion.Min()
    // TODO(indexing): Consider removing the default value.
    const readTime = localStoreImpl.collectionGroupReadTime.get(collectionGroup) ||
        SnapshotVersion.min();
    return localStoreImpl.persistence
        .runTransaction('Get new document changes', 'readonly', txn => localStoreImpl.remoteDocuments.getAllFromCollectionGroup(txn, collectionGroup, newIndexOffsetSuccessorFromReadTime(readTime, INITIAL_LARGEST_BATCH_ID),
    /* limit= */ Number.MAX_SAFE_INTEGER))
        .then(changedDocs => {
        // Record the highest read time seen so the next invocation only
        // returns documents changed after this point.
        setMaxReadTime(localStoreImpl, collectionGroup, changedDocs);
        return changedDocs;
    });
}
/**
 * Records the largest read time observed in `changedDocs` as the collection
 * group's maximum read time.
 */
// PORTING NOTE: Multi-Tab only.
function setMaxReadTime(localStoreImpl, collectionGroup, changedDocs) {
    let maxReadTime = localStoreImpl.collectionGroupReadTime.get(collectionGroup) ||
        SnapshotVersion.min();
    changedDocs.forEach((_, doc) => {
        maxReadTime =
            doc.readTime.compareTo(maxReadTime) > 0 ? doc.readTime : maxReadTime;
    });
    localStoreImpl.collectionGroupReadTime.set(collectionGroup, maxReadTime);
}
/**
 * Creates a new target using the given bundle name, which will be used to
 * hold the keys of all documents from the bundle in query-document mappings.
 * This ensures that the loaded documents do not get garbage collected
 * right away.
 */
function umbrellaTarget(bundleName) {
    // The path is never read or queried, so it does not need to be a valid
    // document path — it only has to be unique per bundle.
    const umbrellaPath = ResourcePath.fromString(`__bundle__/docs/${bundleName}`);
    return queryToTarget(newQueryForPath(umbrellaPath));
}
/**
 * Applies the documents from a bundle to the "ground-state" (remote)
 * documents.
 *
 * LocalDocuments are re-calculated if there are remaining mutations in the
 * queue.
 */
async function localStoreApplyBundledDocuments(localStore, bundleConverter, documents, bundleName) {
    const localStoreImpl = debugCast(localStore);
    let documentKeys = documentKeySet();
    let documentMap = mutableDocumentMap();
    // Convert every bundled document and collect the keys of documents that
    // actually exist (deleted documents are converted but not tracked as keys).
    for (const bundleDoc of documents) {
        const documentKey = bundleConverter.toDocumentKey(bundleDoc.metadata.name);
        if (bundleDoc.document) {
            documentKeys = documentKeys.add(documentKey);
        }
        const doc = bundleConverter.toMutableDocument(bundleDoc);
        doc.setReadTime(bundleConverter.toSnapshotVersion(bundleDoc.metadata.readTime));
        documentMap = documentMap.insert(documentKey, doc);
    }
    const documentBuffer = localStoreImpl.remoteDocuments.newChangeBuffer({
        trackRemovals: true // Make sure document removals show up in `getNewDocumentChanges()`
    });
    // Allocates a target to hold all document keys from the bundle, such that
    // they will not get garbage collected right away.
    const umbrellaTargetData = await localStoreAllocateTarget(localStoreImpl, umbrellaTarget(bundleName));
    return localStoreImpl.persistence.runTransaction('Apply bundle documents', 'readwrite', txn => {
        return populateDocumentChangeBuffer(txn, documentBuffer, documentMap)
            .next(documentChangeResult => {
            // Commit the buffered remote-document changes within this transaction.
            documentBuffer.apply(txn);
            return documentChangeResult;
        })
            .next(documentChangeResult => {
            // Re-point the umbrella target at exactly the bundle's document keys,
            // then recompute the local view over the changed documents.
            return localStoreImpl.targetCache
                .removeMatchingKeysForTargetId(txn, umbrellaTargetData.targetId)
                .next(() => localStoreImpl.targetCache.addMatchingKeys(txn, documentKeys, umbrellaTargetData.targetId))
                .next(() => localStoreImpl.localDocuments.getLocalViewOfDocuments(txn, documentChangeResult.changedDocuments, documentChangeResult.existenceChangedKeys))
                .next(() => documentChangeResult.changedDocuments);
        });
    });
}
/**
 * Resolves to `true` when a bundle with the given ID has already been loaded
 * and its persisted create time is at least as new as `bundleMetadata`'s.
 */
function localStoreHasNewerBundle(localStore, bundleMetadata) {
    const localStoreImpl = debugCast(localStore);
    const incomingCreateTime = fromVersion(bundleMetadata.createTime);
    return localStoreImpl.persistence
        .runTransaction('hasNewerBundle', 'readonly', transaction => localStoreImpl.bundleCache.getBundleMetadata(transaction, bundleMetadata.id))
        .then(cached => !!cached && cached.createTime.compareTo(incomingCreateTime) >= 0);
}
/**
 * Persists the given `BundleMetadata` in the local bundle cache.
 */
function localStoreSaveBundle(localStore, bundleMetadata) {
    const localStoreImpl = debugCast(localStore);
    return localStoreImpl.persistence.runTransaction('Save bundle', 'readwrite', transaction => localStoreImpl.bundleCache.saveBundleMetadata(transaction, bundleMetadata));
}
/**
 * Resolves to the `NamedQuery` persisted under `queryName`, or `undefined`
 * when no persisted data can be found.
 */
function localStoreGetNamedQuery(localStore, queryName) {
    const localStoreImpl = debugCast(localStore);
    return localStoreImpl.persistence.runTransaction('Get named query', 'readonly', transaction => {
        return localStoreImpl.bundleCache.getNamedQuery(transaction, queryName);
    });
}
/**
 * Saves the given `NamedQuery` to local persistence.
 *
 * @param localStore - The local store to write to.
 * @param query - The named query (including its bundled query and read time).
 * @param documents - The keys of the documents that matched the query in the
 * bundle; defaults to the empty set.
 */
async function localStoreSaveNamedQuery(localStore, query, documents = documentKeySet()) {
    // Allocate a target for the named query such that it can be resumed
    // from associated read time if users use it to listen.
    // NOTE: this also means if no corresponding target exists, the new target
    // will remain active and will not get collected, unless users happen to
    // unlisten the query somehow.
    const allocated = await localStoreAllocateTarget(localStore, queryToTarget(fromBundledQuery(query.bundledQuery)));
    const localStoreImpl = debugCast(localStore);
    return localStoreImpl.persistence.runTransaction('Save named query', 'readwrite', transaction => {
        const readTime = fromVersion(query.readTime);
        // Simply save the query itself if it is older than what the SDK already
        // has.
        if (allocated.snapshotVersion.compareTo(readTime) >= 0) {
            return localStoreImpl.bundleCache.saveNamedQuery(transaction, query);
        }
        // Update existing target data because the query from the bundle is newer.
        const newTargetData = allocated.withResumeToken(ByteString.EMPTY_BYTE_STRING, readTime);
        // Keep the in-memory target map in sync with the persisted target data.
        localStoreImpl.targetDataByTarget =
            localStoreImpl.targetDataByTarget.insert(newTargetData.targetId, newTargetData);
        // Replace the target's key mapping with the bundle's documents before
        // persisting the named query itself.
        return localStoreImpl.targetCache
            .updateTargetData(transaction, newTargetData)
            .next(() => localStoreImpl.targetCache.removeMatchingKeysForTargetId(transaction, allocated.targetId))
            .next(() => localStoreImpl.targetCache.addMatchingKeys(transaction, documents, allocated.targetId))
            .next(() => localStoreImpl.bundleCache.saveNamedQuery(transaction, query));
    });
}
/**
 * Reconciles the persisted field indexes with `newFieldIndexes`: indexes that
 * only appear in the new configuration are added, and indexes that only exist
 * in persistence are deleted.
 */
async function localStoreConfigureFieldIndexes(localStore, newFieldIndexes) {
    const localStoreImpl = debugCast(localStore);
    const indexManager = localStoreImpl.indexManager;
    // Collects the add/delete operations emitted by the diff callbacks below.
    const promises = [];
    return localStoreImpl.persistence.runTransaction('Configure indexes', 'readwrite', transaction => indexManager
        .getFieldIndexes(transaction)
        .next(oldFieldIndexes => diffArrays(oldFieldIndexes, newFieldIndexes, fieldIndexSemanticComparator, fieldIndex => {
        // Present only in the new configuration: create it.
        promises.push(indexManager.addFieldIndex(transaction, fieldIndex));
    }, fieldIndex => {
        // Present only in persistence: remove it.
        promises.push(indexManager.deleteFieldIndex(transaction, fieldIndex));
    }))
        .next(() => PersistencePromise.waitFor(promises)));
}
/** Enables or disables automatic cache-index creation in the query engine. */
function localStoreSetIndexAutoCreationEnabled(localStore, isEnabled) {
    debugCast(localStore).queryEngine.indexAutoCreationEnabled = isEnabled;
}
/** Deletes every persisted field index in a single readwrite transaction. */
function localStoreDeleteAllFieldIndexes(localStore) {
    const localStoreImpl = debugCast(localStore);
    const indexManager = localStoreImpl.indexManager;
    return localStoreImpl.persistence.runTransaction('Delete All Indexes', 'readwrite', transaction => {
        return indexManager.deleteAllFieldIndexes(transaction);
    });
}
/**
* @license
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Tracks details observed while a query executes against the local cache,
 * currently just how many documents the execution read.
 */
class QueryContext {
    constructor() {
        // Running total of documents scanned during local query execution.
        this._documentReadCount = 0;
    }
    /** Number of documents passed through during local query execution. */
    get documentReadCount() {
        return this._documentReadCount;
    }
    /** Adds `amount` scanned documents to the running total. */
    incrementDocumentReadCount(amount) {
        this._documentReadCount = this._documentReadCount + amount;
    }
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const DEFAULT_INDEX_AUTO_CREATION_MIN_COLLECTION_SIZE = 100;
/**
 * Relative cost of serving one result document via an index
 * (([index, docKey] + [docKey, docContent]) per document in the result set)
 * compared to a full collection scan
 * ([docKey, docContent] per document in the collection).
 */
function getDefaultRelativeIndexReadCostPerDocument() {
    // Values derived from a performance experiment run by the Firestore SDK
    // team across several environments. Googlers can see b/299284287 for
    // details.
    if (isSafari()) {
        return 8;
    }
    if (SimpleDb.getAndroidVersion(getUA()) > 0) {
        return 6;
    }
    return 4;
}
/**
* The Firestore query engine.
*
* Firestore queries can be executed in three modes. The Query Engine determines
* what mode to use based on what data is persisted. The mode only determines
* the runtime complexity of the query - the result set is equivalent across all
* implementations.
*
* The Query engine will use indexed-based execution if a user has configured
* any index that can be used to execute query (via `setIndexConfiguration()`).
* Otherwise, the engine will try to optimize the query by re-using a previously
* persisted query result. If that is not possible, the query will be executed
* via a full collection scan.
*
* Index-based execution is the default when available. The query engine
* supports partial indexed execution and merges the result from the index
* lookup with documents that have not yet been indexed. The index evaluation
* matches the backend's format and as such, the SDK can use indexing for all
* queries that the backend supports.
*
* If no index exists, the query engine tries to take advantage of the target
* document mapping in the TargetCache. These mappings exists for all queries
* that have been synced with the backend at least once and allow the query
* engine to only read documents that previously matched a query plus any
* documents that were edited after the query was last listened to.
*
* There are some cases when this optimization is not guaranteed to produce
* the same results as full collection scans. In these cases, query
* processing falls back to full scans. These cases are:
*
* - Limit queries where a document that matched the query previously no longer
* matches the query.
*
* - Limit queries where a document edit may cause the document to sort below
* another document that is in the local cache.
*
* - Queries that have never been CURRENT or free of limbo documents.
*/
class QueryEngine {
    constructor() {
        // Flipped to true once `initialize()` wires up the document view.
        this.initialized = false;
        // Toggled externally (see `localStoreSetIndexAutoCreationEnabled`).
        this.indexAutoCreationEnabled = false;
        /**
         * SDK only decides whether it should create index when collection size is
         * larger than this.
         */
        this.indexAutoCreationMinCollectionSize = DEFAULT_INDEX_AUTO_CREATION_MIN_COLLECTION_SIZE;
        this.relativeIndexReadCostPerDocument = getDefaultRelativeIndexReadCostPerDocument();
    }
    /** Sets the document view to query against. */
    initialize(localDocuments, indexManager) {
        this.localDocumentsView = localDocuments;
        this.indexManager = indexManager;
        this.initialized = true;
    }
    /** Returns all local documents matching the specified query. */
    getDocumentsMatchingQuery(transaction, query, lastLimboFreeSnapshotVersion, remoteKeys) {
        // Stores the result from executing the query; using this object is more
        // convenient than passing the result between steps of the persistence
        // transaction and improves readability comparatively.
        const queryResult = { result: null };
        // Strategy 1: serve the query from a client-side index, if available.
        return this.performQueryUsingIndex(transaction, query)
            .next(result => {
            queryResult.result = result;
        })
            .next(() => {
            if (queryResult.result) {
                return;
            }
            // Strategy 2: re-use the keys of the target's last synced result set.
            return this.performQueryUsingRemoteKeys(transaction, query, remoteKeys, lastLimboFreeSnapshotVersion).next(result => {
                queryResult.result = result;
            });
        })
            .next(() => {
            if (queryResult.result) {
                return;
            }
            // Strategy 3: fall back to a full collection scan, optionally creating
            // cache indexes to speed up future executions of this query.
            const context = new QueryContext();
            return this.executeFullCollectionScan(transaction, query, context).next(result => {
                queryResult.result = result;
                if (this.indexAutoCreationEnabled) {
                    return this.createCacheIndexes(transaction, query, context, result.size);
                }
            });
        })
            .next(() => queryResult.result);
    }
    /**
     * Heuristically decides whether creating cache indexes for `query` is
     * worthwhile — based on how many documents the scan read (`context`)
     * versus how many it returned (`resultSize`) — and creates them if so.
     */
    createCacheIndexes(transaction, query, context, resultSize) {
        if (context.documentReadCount < this.indexAutoCreationMinCollectionSize) {
            if (getLogLevel() <= LogLevel.DEBUG) {
                logDebug('QueryEngine', 'SDK will not create cache indexes for query:', stringifyQuery(query), 'since it only creates cache indexes for collection contains', 'more than or equal to', this.indexAutoCreationMinCollectionSize, 'documents');
            }
            return PersistencePromise.resolve();
        }
        if (getLogLevel() <= LogLevel.DEBUG) {
            logDebug('QueryEngine', 'Query:', stringifyQuery(query), 'scans', context.documentReadCount, 'local documents and returns', resultSize, 'documents as results.');
        }
        // Only create indexes when the scan read disproportionately more
        // documents than it returned.
        if (context.documentReadCount >
            this.relativeIndexReadCostPerDocument * resultSize) {
            if (getLogLevel() <= LogLevel.DEBUG) {
                logDebug('QueryEngine', 'The SDK decides to create cache indexes for query:', stringifyQuery(query), 'as using cache indexes may help improve performance.');
            }
            return this.indexManager.createTargetIndexes(transaction, queryToTarget(query));
        }
        return PersistencePromise.resolve();
    }
    /**
     * Performs an indexed query that evaluates the query based on a collection's
     * persisted index values. Returns `null` if an index is not available.
     */
    performQueryUsingIndex(transaction, query) {
        if (queryMatchesAllDocuments(query)) {
            // Queries that match all documents don't benefit from using
            // key-based lookups. It is more efficient to scan all documents in a
            // collection, rather than to perform individual lookups.
            return PersistencePromise.resolve(null);
        }
        let target = queryToTarget(query);
        return this.indexManager
            .getIndexType(transaction, target)
            .next(indexType => {
            if (indexType === 0 /* IndexType.NONE */) {
                // The target cannot be served from any index.
                return null;
            }
            if (query.limit !== null && indexType === 1 /* IndexType.PARTIAL */) {
                // We cannot apply a limit for targets that are served using a partial
                // index. If a partial index will be used to serve the target, the
                // query may return a superset of documents that match the target
                // (e.g. if the index doesn't include all the target's filters), or
                // may return the correct set of documents in the wrong order (e.g. if
                // the index doesn't include a segment for one of the orderBys).
                // Therefore, a limit should not be applied in such cases.
                query = queryWithLimit(query, null, "F" /* LimitType.First */);
                target = queryToTarget(query);
            }
            return this.indexManager
                .getDocumentsMatchingTarget(transaction, target)
                .next(keys => {
                const sortedKeys = documentKeySet(...keys);
                return this.localDocumentsView
                    .getDocuments(transaction, sortedKeys)
                    .next(indexedDocuments => {
                    return this.indexManager
                        .getMinOffset(transaction, target)
                        .next(offset => {
                        const previousResults = this.applyQuery(query, indexedDocuments);
                        if (this.needsRefill(query, previousResults, sortedKeys, offset.readTime)) {
                            // A limit query whose boundaries change due to local
                            // edits can be re-run against the cache by excluding the
                            // limit. This ensures that all documents that match the
                            // query's filters are included in the result set. The SDK
                            // can then apply the limit once all local edits are
                            // incorporated.
                            return this.performQueryUsingIndex(transaction, queryWithLimit(query, null, "F" /* LimitType.First */));
                        }
                        return this.appendRemainingResults(transaction, previousResults, query, offset);
                    });
                });
            });
        });
    }
    /**
     * Performs a query based on the target's persisted query mapping. Returns
     * `null` if the mapping is not available or cannot be used.
     */
    performQueryUsingRemoteKeys(transaction, query, remoteKeys, lastLimboFreeSnapshotVersion) {
        if (queryMatchesAllDocuments(query)) {
            // Queries that match all documents don't benefit from using
            // key-based lookups. It is more efficient to scan all documents in a
            // collection, rather than to perform individual lookups.
            return PersistencePromise.resolve(null);
        }
        // Queries that have never seen a snapshot without limbo free documents
        // should also be run as a full collection scan.
        if (lastLimboFreeSnapshotVersion.isEqual(SnapshotVersion.min())) {
            return PersistencePromise.resolve(null);
        }
        return this.localDocumentsView.getDocuments(transaction, remoteKeys).next(documents => {
            const previousResults = this.applyQuery(query, documents);
            if (this.needsRefill(query, previousResults, remoteKeys, lastLimboFreeSnapshotVersion)) {
                return PersistencePromise.resolve(null);
            }
            if (getLogLevel() <= LogLevel.DEBUG) {
                logDebug('QueryEngine', 'Re-using previous result from %s to execute query: %s', lastLimboFreeSnapshotVersion.toString(), stringifyQuery(query));
            }
            // Retrieve all results for documents that were updated since the last
            // limbo-document free remote snapshot.
            return this.appendRemainingResults(transaction, previousResults, query, newIndexOffsetSuccessorFromReadTime(lastLimboFreeSnapshotVersion, INITIAL_LARGEST_BATCH_ID)).next(results => results);
        });
    }
    /** Applies the query filter and sorting to the provided documents. */
    applyQuery(query, documents) {
        // Sort the documents and re-apply the query filter since previously
        // matching documents do not necessarily still match the query.
        let queryResults = new SortedSet(newQueryComparator(query));
        documents.forEach((_, maybeDoc) => {
            if (queryMatches(query, maybeDoc)) {
                queryResults = queryResults.add(maybeDoc);
            }
        });
        return queryResults;
    }
    /**
     * Determines if a limit query needs to be refilled from cache, making it
     * ineligible for index-free execution.
     *
     * @param query - The query.
     * @param sortedPreviousResults - The documents that matched the query when it
     * was last synchronized, sorted by the query's comparator.
     * @param remoteKeys - The document keys that matched the query at the last
     * snapshot.
     * @param limboFreeSnapshotVersion - The version of the snapshot when the
     * query was last synchronized.
     */
    needsRefill(query, sortedPreviousResults, remoteKeys, limboFreeSnapshotVersion) {
        if (query.limit === null) {
            // Queries without limits do not need to be refilled.
            return false;
        }
        if (remoteKeys.size !== sortedPreviousResults.size) {
            // The query needs to be refilled if a previously matching document no
            // longer matches.
            return true;
        }
        // Limit queries are not eligible for index-free query execution if there is
        // a potential that an older document from cache now sorts before a document
        // that was previously part of the limit. This, however, can only happen if
        // the document at the edge of the limit goes out of limit.
        // If a document that is not the limit boundary sorts differently,
        // the boundary of the limit itself did not change and documents from cache
        // will continue to be "rejected" by this boundary. Therefore, we can ignore
        // any modifications that don't affect the last document.
        const docAtLimitEdge = query.limitType === "F" /* LimitType.First */
            ? sortedPreviousResults.last()
            : sortedPreviousResults.first();
        if (!docAtLimitEdge) {
            // We don't need to refill the query if there were already no documents.
            return false;
        }
        return (docAtLimitEdge.hasPendingWrites ||
            docAtLimitEdge.version.compareTo(limboFreeSnapshotVersion) > 0);
    }
    /** Executes the query over the entire local document collection. */
    executeFullCollectionScan(transaction, query, context) {
        if (getLogLevel() <= LogLevel.DEBUG) {
            logDebug('QueryEngine', 'Using full collection scan to execute query:', stringifyQuery(query));
        }
        return this.localDocumentsView.getDocumentsMatchingQuery(transaction, query, IndexOffset.min(), context);
    }
    /**
     * Combines the results from an indexed execution with the remaining documents
     * that have not yet been indexed.
     */
    appendRemainingResults(transaction, indexedResults, query, offset) {
        // Retrieve all results for documents that were updated since the offset.
        return this.localDocumentsView
            .getDocumentsMatchingQuery(transaction, query, offset)
            .next(remainingResults => {
            // Merge with existing results
            indexedResults.forEach(d => {
                remainingResults = remainingResults.insert(d.key, d);
            });
            return remainingResults;
        });
    }
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Client state lives in WebStorage under:
//     firestore_clients_<persistence_prefix>_<instance_key>
const CLIENT_STATE_KEY_PREFIX = 'firestore_clients';
/** Assembles the key for a client state in WebStorage */
function createWebStorageClientStateKey(persistenceKey, clientId) {
    return [CLIENT_STATE_KEY_PREFIX, persistenceKey, clientId].join('_');
}
// Mutation state lives in WebStorage under:
//     firestore_mutations_<persistence_prefix>_<batch_id>
// (for unauthenticated users)
// or: firestore_mutations_<persistence_prefix>_<batch_id>_<user_uid>
//
// 'user_uid' is last to avoid needing to escape '_' characters that it might
// contain.
const MUTATION_BATCH_KEY_PREFIX = 'firestore_mutations';
/** Assembles the key for a mutation batch in WebStorage */
function createWebStorageMutationBatchKey(persistenceKey, user, batchId) {
    const base = `${MUTATION_BATCH_KEY_PREFIX}_${persistenceKey}_${batchId}`;
    return user.isAuthenticated() ? `${base}_${user.uid}` : base;
}
// A query target's metadata lives in WebStorage under:
//     firestore_targets_<persistence_prefix>_<target_id>
const QUERY_TARGET_KEY_PREFIX = 'firestore_targets';
/** Assembles the key for a query state in WebStorage */
function createWebStorageQueryTargetMetadataKey(persistenceKey, targetId) {
    return [QUERY_TARGET_KEY_PREFIX, persistenceKey, targetId].join('_');
}
// The primary tab's online state lives in WebStorage under:
//     firestore_online_state_<persistence_prefix>
const ONLINE_STATE_KEY_PREFIX = 'firestore_online_state';
/** Assembles the key for the online state of the primary tab. */
function createWebStorageOnlineStateKey(persistenceKey) {
    return [ONLINE_STATE_KEY_PREFIX, persistenceKey].join('_');
}
// WebStorage key used as an event signalling that remote documents may have
// changed because a secondary tab loaded a bundle:
//     firestore_bundle_loaded_v2_<persistenceKey>
// The "v2" suffix marks the variant that stores the list of modified
// collection groups.
const BUNDLE_LOADED_KEY_PREFIX = 'firestore_bundle_loaded_v2';
/** Assembles the bundle-loaded notification key for WebStorage. */
function createBundleLoadedKey(persistenceKey) {
    return [BUNDLE_LOADED_KEY_PREFIX, persistenceKey].join('_');
}
// The last allocated sequence number lives in WebStorage under:
//     firestore_sequence_number_<persistence_prefix>
const SEQUENCE_NUMBER_KEY_PREFIX = 'firestore_sequence_number';
/** Assembles the key for the current sequence number. */
function createWebStorageSequenceNumberKey(persistenceKey) {
    return [SEQUENCE_NUMBER_KEY_PREFIX, persistenceKey].join('_');
}
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Log tag shared by the multi-tab shared-client-state classes below. */
const LOG_TAG$a = 'SharedClientState';
/**
 * Holds the state of a mutation batch: its user, batch ID, whether the batch
 * is 'pending', 'acknowledged' or 'rejected', and — for rejected batches —
 * the rejection error.
 */
// Visible for testing
class MutationMetadata {
    constructor(user, batchId, state, error) {
        this.user = user;
        this.batchId = batchId;
        this.state = state;
        this.error = error;
    }
    /**
     * Parses a MutationMetadata from its JSON representation in WebStorage.
     * Logs a warning and returns null if the format of the data is not valid.
     */
    static fromWebStorageEntry(user, batchId, value) {
        const parsed = JSON.parse(value);
        let isValid = typeof parsed === 'object' &&
            ['pending', 'acknowledged', 'rejected'].includes(parsed.state) &&
            (parsed.error === undefined || typeof parsed.error === 'object');
        let parsedError = undefined;
        if (isValid && parsed.error) {
            isValid =
                typeof parsed.error.message === 'string' &&
                typeof parsed.error.code === 'string';
            if (isValid) {
                parsedError = new FirestoreError(parsed.error.code, parsed.error.message);
            }
        }
        if (!isValid) {
            logError(LOG_TAG$a, `Failed to parse mutation state for ID '${batchId}': ${value}`);
            return null;
        }
        return new MutationMetadata(user, batchId, parsed.state, parsedError);
    }
    /** Serializes this metadata into its WebStorage JSON representation. */
    toWebStorageJSON() {
        const payload = {
            state: this.state,
            updateTimeMs: Date.now() // Modify the existing value to trigger update.
        };
        if (this.error) {
            payload.error = {
                code: this.error.code,
                message: this.error.message
            };
        }
        return JSON.stringify(payload);
    }
}
/**
 * Holds the state of a query target: its target ID, whether the target is
 * 'not-current', 'current' or 'rejected', and — for rejected targets — the
 * rejection error.
 */
// Visible for testing
class QueryTargetMetadata {
    constructor(targetId, state, error) {
        this.targetId = targetId;
        this.state = state;
        this.error = error;
    }
    /**
     * Parses a QueryTargetMetadata from its JSON representation in WebStorage.
     * Logs a warning and returns null if the format of the data is not valid.
     */
    static fromWebStorageEntry(targetId, value) {
        const parsed = JSON.parse(value);
        let isValid = typeof parsed === 'object' &&
            ['not-current', 'current', 'rejected'].includes(parsed.state) &&
            (parsed.error === undefined || typeof parsed.error === 'object');
        let parsedError = undefined;
        if (isValid && parsed.error) {
            isValid =
                typeof parsed.error.message === 'string' &&
                typeof parsed.error.code === 'string';
            if (isValid) {
                parsedError = new FirestoreError(parsed.error.code, parsed.error.message);
            }
        }
        if (!isValid) {
            logError(LOG_TAG$a, `Failed to parse target state for ID '${targetId}': ${value}`);
            return null;
        }
        return new QueryTargetMetadata(targetId, parsed.state, parsedError);
    }
    /** Serializes this metadata into its WebStorage JSON representation. */
    toWebStorageJSON() {
        const payload = {
            state: this.state,
            updateTimeMs: Date.now() // Modify the existing value to trigger update.
        };
        if (this.error) {
            payload.error = {
                code: this.error.code,
                message: this.error.message
            };
        }
        return JSON.stringify(payload);
    }
}
/**
 * The immutable ClientState for a remote client read from WebStorage: its
 * client ID plus the set of query targets it has active.
 */
class RemoteClientState {
    constructor(clientId, activeTargetIds) {
        this.clientId = clientId;
        this.activeTargetIds = activeTargetIds;
    }
    /**
     * Parses a RemoteClientState from the JSON representation in WebStorage.
     * Logs a warning and returns null if the format of the data is not valid.
     */
    static fromWebStorageEntry(clientId, value) {
        const parsed = JSON.parse(value);
        let isValid = typeof parsed === 'object' &&
            parsed.activeTargetIds instanceof Array;
        let targetIds = targetIdSet();
        for (let i = 0; isValid && i < parsed.activeTargetIds.length; ++i) {
            // NOTE: mirrors the original behavior — each id is added to the set
            // before validity stops the loop on the next iteration.
            isValid = isSafeInteger(parsed.activeTargetIds[i]);
            targetIds = targetIds.add(parsed.activeTargetIds[i]);
        }
        if (!isValid) {
            logError(LOG_TAG$a, `Failed to parse client data for instance '${clientId}': ${value}`);
            return null;
        }
        return new RemoteClientState(clientId, targetIds);
    }
}
/**
 * The online state shared between all multi-tab clients. Only the primary
 * client writes this value; secondary clients read it to update their query
 * views.
 */
class SharedOnlineState {
    constructor(clientId, onlineState) {
        this.clientId = clientId;
        this.onlineState = onlineState;
    }
    /**
     * Parses a SharedOnlineState from its JSON representation in WebStorage.
     * Logs a warning and returns null if the format of the data is not valid.
     */
    static fromWebStorageEntry(value) {
        const parsed = JSON.parse(value);
        const isValid = typeof parsed === 'object' &&
            ['Unknown', 'Online', 'Offline'].includes(parsed.onlineState) &&
            typeof parsed.clientId === 'string';
        if (!isValid) {
            logError(LOG_TAG$a, `Failed to parse online state: ${value}`);
            return null;
        }
        return new SharedOnlineState(parsed.clientId, parsed.onlineState);
    }
}
/**
* Metadata state of the local client. Unlike `RemoteClientState`, this class is
* mutable and keeps track of all pending mutations, which allows us to
* update the range of pending mutation batch IDs as new mutations are added or
* removed.
*
* The data in `LocalClientState` is not read from WebStorage and instead
* updated via its instance methods. The updated state can be serialized via
* `toWebStorageJSON()`.
*/
// Visible for testing.
class LocalClientState {
    constructor() {
        this.activeTargetIds = targetIdSet();
    }
    /** Registers `targetId` as active for this client. */
    addQueryTarget(targetId) {
        const withTarget = this.activeTargetIds.add(targetId);
        this.activeTargetIds = withTarget;
    }
    /** Marks `targetId` as no longer active for this client. */
    removeQueryTarget(targetId) {
        const withoutTarget = this.activeTargetIds.delete(targetId);
        this.activeTargetIds = withoutTarget;
    }
    /**
     * Converts this entry into a JSON-encoded format we can use for WebStorage.
     * Does not encode `clientId` as it is part of the key in WebStorage.
     */
    toWebStorageJSON() {
        return JSON.stringify({
            activeTargetIds: this.activeTargetIds.toArray(),
            // Refresh the timestamp so writing an otherwise-unchanged value
            // still triggers a storage event in other tabs.
            updateTimeMs: Date.now()
        });
    }
}
/**
* `WebStorageSharedClientState` uses WebStorage (window.localStorage) as the
* backing store for the SharedClientState. It keeps track of all active
* clients and supports modifications of the local client's data.
*/
class WebStorageSharedClientState {
constructor(window, queue, persistenceKey, localClientId, initialUser) {
this.window = window;
this.queue = queue;
this.persistenceKey = persistenceKey;
this.localClientId = localClientId;
this.syncEngine = null;
this.onlineStateHandler = null;
this.sequenceNumberHandler = null;
this.storageListener = this.handleWebStorageEvent.bind(this);
this.activeClients = new SortedMap(primitiveComparator);
this.started = false;
/**
* Captures WebStorage events that occur before `start()` is called. These
* events are replayed once `WebStorageSharedClientState` is started.
*/
this.earlyEvents = [];
// Escape the special characters mentioned here:
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions
const escapedPersistenceKey = persistenceKey.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
this.storage = this.window.localStorage;
this.currentUser = initialUser;
this.localClientStorageKey = createWebStorageClientStateKey(this.persistenceKey, this.localClientId);
this.sequenceNumberKey = createWebStorageSequenceNumberKey(this.persistenceKey);
this.activeClients = this.activeClients.insert(this.localClientId, new LocalClientState());
this.clientStateKeyRe = new RegExp(`^${CLIENT_STATE_KEY_PREFIX}_${escapedPersistenceKey}_([^_]*)$`);
this.mutationBatchKeyRe = new RegExp(`^${MUTATION_BATCH_KEY_PREFIX}_${escapedPersistenceKey}_(\\d+)(?:_(.*))?$`);
this.queryTargetKeyRe = new RegExp(`^${QUERY_TARGET_KEY_PREFIX}_${escapedPersistenceKey}_(\\d+)$`);
this.onlineStateKey = createWebStorageOnlineStateKey(this.persistenceKey);
this.bundleLoadedKey = createBundleLoadedKey(this.persistenceKey);
// Rather than adding the storage observer during start(), we add the
// storage observer during initialization. This ensures that we collect
// events before other components populate their initial state (during their
// respective start() calls). Otherwise, we might for example miss a
// mutation that is added after LocalStore's start() processed the existing
// mutations but before we observe WebStorage events.
this.window.addEventListener('storage', this.storageListener);
}
/** Returns 'true' if WebStorage is available in the current environment. */
static isAvailable(window) {
return !!(window && window.localStorage);
}
async start() {
// Retrieve the list of existing clients to backfill the data in
// SharedClientState.
const existingClients = await this.syncEngine.getActiveClients();
for (const clientId of existingClients) {
if (clientId === this.localClientId) {
continue;
}
const storageItem = this.getItem(createWebStorageClientStateKey(this.persistenceKey, clientId));
if (storageItem) {
const clientState = RemoteClientState.fromWebStorageEntry(clientId, storageItem);
if (clientState) {
this.activeClients = this.activeClients.insert(clientState.clientId, clientState);
}
}
}
this.persistClientState();
// Check if there is an existing online state and call the callback handler
// if applicable.
const onlineStateJSON = this.storage.getItem(this.onlineStateKey);
if (onlineStateJSON) {
const onlineState = this.fromWebStorageOnlineState(onlineStateJSON);
if (onlineState) {
this.handleOnlineStateEvent(onlineState);
}
}
for (const event of this.earlyEvents) {
this.handleWebStorageEvent(event);
}
this.earlyEvents = [];
// Register a window unload hook to remove the client metadata entry from
// WebStorage even if `shutdown()` was not called.
this.window.addEventListener('pagehide', () => this.shutdown());
this.started = true;
}
writeSequenceNumber(sequenceNumber) {
this.setItem(this.sequenceNumberKey, JSON.stringify(sequenceNumber));
}
getAllActiveQueryTargets() {
return this.extractActiveQueryTargets(this.activeClients);
}
isActiveQueryTarget(targetId) {
let found = false;
this.activeClients.forEach((key, value) => {
if (value.activeTargetIds.has(targetId)) {
found = true;
}
});
return found;
}
addPendingMutation(batchId) {
this.persistMutationState(batchId, 'pending');
}
updateMutationState(batchId, state, error) {
this.persistMutationState(batchId, state, error);
// Once a final mutation result is observed by other clients, they no longer
// access the mutation's metadata entry. Since WebStorage replays events
// in order, it is safe to delete the entry right after updating it.
this.removeMutationState(batchId);
}
addLocalQueryTarget(targetId) {
let queryState = 'not-current';
// Lookup an existing query state if the target ID was already registered
// by another tab
if (this.isActiveQueryTarget(targetId)) {
const storageItem = this.storage.getItem(createWebStorageQueryTargetMetadataKey(this.persistenceKey, targetId));
if (storageItem) {
const metadata = QueryTargetMetadata.fromWebStorageEntry(targetId, storageItem);
if (metadata) {
queryState = metadata.state;
}
}
}
this.localClientState.addQueryTarget(targetId);
this.persistClientState();
return queryState;
}
removeLocalQueryTarget(targetId) {
this.localClientState.removeQueryTarget(targetId);
this.persistClientState();
}
isLocalQueryTarget(targetId) {
return this.localClientState.activeTargetIds.has(targetId);
}
clearQueryState(targetId) {
this.removeItem(createWebStorageQueryTargetMetadataKey(this.persistenceKey, targetId));
}
updateQueryState(targetId, state, error) {
this.persistQueryTargetState(targetId, state, error);
}
handleUserChange(user, removedBatchIds, addedBatchIds) {
removedBatchIds.forEach(batchId => {
this.removeMutationState(batchId);
});
this.currentUser = user;
addedBatchIds.forEach(batchId => {
this.addPendingMutation(batchId);
});
}
setOnlineState(onlineState) {
this.persistOnlineState(onlineState);
}
notifyBundleLoaded(collectionGroups) {
this.persistBundleLoadedState(collectionGroups);
}
shutdown() {
if (this.started) {
this.window.removeEventListener('storage', this.storageListener);
this.removeItem(this.localClientStorageKey);
this.started = false;
}
}
getItem(key) {
const value = this.storage.getItem(key);
logDebug(LOG_TAG$a, 'READ', key, value);
return value;
}
setItem(key, value) {
logDebug(LOG_TAG$a, 'SET', key, value);
this.storage.setItem(key, value);
}
removeItem(key) {
logDebug(LOG_TAG$a, 'REMOVE', key);
this.storage.removeItem(key);
}
handleWebStorageEvent(event) {
// Note: The function is typed to take Event to be interface-compatible with
// `Window.addEventListener`.
const storageEvent = event;
if (storageEvent.storageArea === this.storage) {
logDebug(LOG_TAG$a, 'EVENT', storageEvent.key, storageEvent.newValue);
if (storageEvent.key === this.localClientStorageKey) {
logError('Received WebStorage notification for local change. Another client might have ' +
'garbage-collected our state');
return;
}
this.queue.enqueueRetryable(async () => {
if (!this.started) {
this.earlyEvents.push(storageEvent);
return;
}
if (storageEvent.key === null) {
return;
}
if (this.clientStateKeyRe.test(storageEvent.key)) {
if (storageEvent.newValue != null) {
const clientState = this.fromWebStorageClientState(storageEvent.key, storageEvent.newValue);
if (clientState) {
return this.handleClientStateEvent(clientState.clientId, clientState);
}
}
else {
const clientId = this.fromWebStorageClientStateKey(storageEvent.key);
return this.handleClientStateEvent(clientId, null);
}
}
else if (this.mutationBatchKeyRe.test(storageEvent.key)) {
if (storageEvent.newValue !== null) {
const mutationMetadata = this.fromWebStorageMutationMetadata(storageEvent.key, storageEvent.newValue);
if (mutationMetadata) {
return this.handleMutationBatchEvent(mutationMetadata);
}
}
}
else if (this.queryTargetKeyRe.test(storageEvent.key)) {
if (storageEvent.newValue !== null) {
const queryTargetMetadata = this.fromWebStorageQueryTargetMetadata(storageEvent.key, storageEvent.newValue);
if (queryTargetMetadata) {
return this.handleQueryTargetEvent(queryTargetMetadata);
}
}
}
else if (storageEvent.key === this.onlineStateKey) {
if (storageEvent.newValue !== null) {
const onlineState = this.fromWebStorageOnlineState(storageEvent.newValue);
if (onlineState) {
return this.handleOnlineStateEvent(onlineState);
}
}
}
else if (storageEvent.key === this.sequenceNumberKey) {
const sequenceNumber = fromWebStorageSequenceNumber(storageEvent.newValue);
if (sequenceNumber !== ListenSequence.INVALID) {
this.sequenceNumberHandler(sequenceNumber);
}
}
else if (storageEvent.key === this.bundleLoadedKey) {
const collectionGroups = this.fromWebStoreBundleLoadedState(storageEvent.newValue);
await Promise.all(collectionGroups.map(cg => this.syncEngine.synchronizeWithChangedDocuments(cg)));
}
});
}
}
get localClientState() {
return this.activeClients.get(this.localClientId);
}
persistClientState() {
this.setItem(this.localClientStorageKey, this.localClientState.toWebStorageJSON());
}
persistMutationState(batchId, state, error) {
const mutationState = new MutationMetadata(this.currentUser, batchId, state, error);
const mutationKey = createWebStorageMutationBatchKey(this.persistenceKey, this.currentUser, batchId);
this.setItem(mutationKey, mutationState.toWebStorageJSON());
}
removeMutationState(batchId) {
const mutationKey = createWebStorageMutationBatchKey(this.persistenceKey, this.currentUser, batchId);
this.removeItem(mutationKey);
}
persistOnlineState(onlineState) {
const entry = {
clientId: this.localClientId,
onlineState
};
this.storage.setItem(this.onlineStateKey, JSON.stringify(entry));
}
persistQueryTargetState(targetId, state, error) {
const targetKey = createWebStorageQueryTargetMetadataKey(this.persistenceKey, targetId);
const targetMetadata = new QueryTargetMetadata(targetId, state, error);
this.setItem(targetKey, targetMetadata.toWebStorageJSON());
}
persistBundleLoadedState(collectionGroups) {
const json = JSON.stringify(Array.from(collectionGroups));
this.setItem(this.bundleLoadedKey, json);
}
/**
* Parses a client state key in WebStorage. Returns null if the key does not
* match the expected key format.
*/
fromWebStorageClientStateKey(key) {
const match = this.clientStateKeyRe.exec(key);
return match ? match[1] : null;
}
/**
* Parses a client state in WebStorage. Returns 'null' if the value could not
* be parsed.
*/
fromWebStorageClientState(key, value) {
const clientId = this.fromWebStorageClientStateKey(key);
return RemoteClientState.fromWebStorageEntry(clientId, value);
}
/**
* Parses a mutation batch state in WebStorage. Returns 'null' if the value
* could not be parsed.
*/
fromWebStorageMutationMetadata(key, value) {
const match = this.mutationBatchKeyRe.exec(key);
const batchId = Number(match[1]);
const userId = match[2] !== undefined ? match[2] : null;
return MutationMetadata.fromWebStorageEntry(new User(userId), batchId, value);
}
/**
* Parses a query target state from WebStorage. Returns 'null' if the value
* could not be parsed.
*/
fromWebStorageQueryTargetMetadata(key, value) {
const match = this.queryTargetKeyRe.exec(key);
const targetId = Number(match[1]);
return QueryTargetMetadata.fromWebStorageEntry(targetId, value);
}
/**
* Parses an online state from WebStorage. Returns 'null' if the value
* could not be parsed.
*/
fromWebStorageOnlineState(value) {
return SharedOnlineState.fromWebStorageEntry(value);
}
fromWebStoreBundleLoadedState(value) {
return JSON.parse(value);
}
async handleMutationBatchEvent(mutationBatch) {
if (mutationBatch.user.uid !== this.currentUser.uid) {
logDebug(LOG_TAG$a, `Ignoring mutation for non-active user ${mutationBatch.user.uid}`);
return;
}
return this.syncEngine.applyBatchState(mutationBatch.batchId, mutationBatch.state, mutationBatch.error);
}
handleQueryTargetEvent(targetMetadata) {
return this.syncEngine.applyTargetState(targetMetadata.targetId, targetMetadata.state, targetMetadata.error);
}
handleClientStateEvent(clientId, clientState) {
const updatedClients = clientState
? this.activeClients.insert(clientId, clientState)
: this.activeClients.remove(clientId);
const existingTargets = this.extractActiveQueryTargets(this.activeClients);
const newTargets = this.extractActiveQueryTargets(updatedClients);
const addedTargets = [];
const removedTargets = [];
newTargets.forEach(targetId => {
if (!existingTargets.has(targetId)) {
addedTargets.push(targetId);
}
});
existingTargets.forEach(targetId => {
if (!newTargets.has(targetId)) {
removedTargets.push(targetId);
}
});
return this.syncEngine.applyActiveTargetsChange(addedTargets, removedTargets).then(() => {
this.activeClients = updatedClients;
});
}
handleOnlineStateEvent(onlineState) {
// We check whether the client that wrote this online state is still active
// by comparing its client ID to the list of clients kept active in
// IndexedDb. If a client does not update their IndexedDb client state
// within 5 seconds, it is considered inactive and we don't emit an online
// state event.
if (this.activeClients.get(onlineState.clientId)) {
this.onlineStateHandler(onlineState.onlineState);
}
}
extractActiveQueryTargets(clients) {
let activeTargets = targetIdSet();
clients.forEach((kev, value) => {
activeTargets = activeTargets.unionWith(value.activeTargetIds);
});
return activeTargets;
}
}
/**
 * Reads a ListenSequence number from its WebStorage JSON representation.
 * Returns `ListenSequence.INVALID` when the value is absent or unreadable.
 */
function fromWebStorageSequenceNumber(seqString) {
    if (seqString == null) {
        return ListenSequence.INVALID;
    }
    try {
        const parsed = JSON.parse(seqString);
        hardAssert(typeof parsed === 'number');
        return parsed;
    }
    catch (e) {
        // Malformed JSON or a non-numeric payload: log and fall back.
        logError(LOG_TAG$a, 'Failed to read sequence number from WebStorage', e);
        return ListenSequence.INVALID;
    }
}
/**
* `MemorySharedClientState` is a simple implementation of SharedClientState for
* clients using memory persistence. The state in this class remains fully
* isolated and no synchronization is performed.
*/
class MemorySharedClientState {
    constructor() {
        this.localState = new LocalClientState();
        this.queryState = {};
        this.onlineStateHandler = null;
        this.sequenceNumberHandler = null;
    }
    /** No-op: mutation state is never shared with other clients. */
    addPendingMutation(batchId) { }
    /** No-op: mutation state is never shared with other clients. */
    updateMutationState(batchId, state, error) { }
    /**
     * Registers the target locally and returns its last recorded query state,
     * defaulting to 'not-current' when none exists.
     */
    addLocalQueryTarget(targetId) {
        this.localState.addQueryTarget(targetId);
        const recorded = this.queryState[targetId];
        return recorded || 'not-current';
    }
    /** Records the latest state for `targetId` (error is unused here). */
    updateQueryState(targetId, state, error) {
        this.queryState[targetId] = state;
    }
    removeLocalQueryTarget(targetId) {
        this.localState.removeQueryTarget(targetId);
    }
    isLocalQueryTarget(targetId) {
        return this.localState.activeTargetIds.has(targetId);
    }
    clearQueryState(targetId) {
        delete this.queryState[targetId];
    }
    getAllActiveQueryTargets() {
        return this.localState.activeTargetIds;
    }
    isActiveQueryTarget(targetId) {
        return this.localState.activeTargetIds.has(targetId);
    }
    /** Resets the local target set; nothing to restore from storage. */
    start() {
        this.localState = new LocalClientState();
        return Promise.resolve();
    }
    /** No-op: there are no other clients to notify. */
    handleUserChange(user, removedBatchIds, addedBatchIds) { }
    /** No-op: there are no other clients to notify. */
    setOnlineState(onlineState) { }
    shutdown() { }
    writeSequenceNumber(sequenceNumber) { }
    /** No-op: there are no other clients to notify. */
    notifyBundleLoaded(collectionGroups) { }
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class NoopConnectivityMonitor {
    /** Intentionally ignores the callback: no connectivity events are emitted. */
    addCallback(callback) { }
    /** Nothing to tear down. */
    shutdown() { }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides a simple helper class that implements the Stream interface to
* bridge to other implementations that are streams but do not implement the
* interface. The stream callbacks are invoked with the callOn... methods.
*/
class StreamBridge {
    constructor(args) {
        const { sendFn, closeFn } = args;
        this.sendFn = sendFn;
        this.closeFn = closeFn;
    }
    /** Registers the handler invoked by `callOnOpen()`. */
    onOpen(callback) {
        this.wrappedOnOpen = callback;
    }
    /** Registers the handler invoked by `callOnClose()`. */
    onClose(callback) {
        this.wrappedOnClose = callback;
    }
    /** Registers the handler invoked by `callOnMessage()`. */
    onMessage(callback) {
        this.wrappedOnMessage = callback;
    }
    /** Closes the underlying stream via the configured `closeFn`. */
    close() {
        this.closeFn();
    }
    /** Sends `msg` on the underlying stream via the configured `sendFn`. */
    send(msg) {
        this.sendFn(msg);
    }
    /** Invokes the registered open handler. */
    callOnOpen() {
        this.wrappedOnOpen();
    }
    /** Invokes the registered close handler with an optional error. */
    callOnClose(err) {
        this.wrappedOnClose(err);
    }
    /** Invokes the registered message handler. */
    callOnMessage(msg) {
        this.wrappedOnMessage(msg);
    }
}
/**
* @license
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The value returned from the most recent invocation of
* `generateUniqueDebugId()`, or null if it has never been invoked.
*/
let lastUniqueDebugId = null;
/**
 * Generates and returns an initial value for `lastUniqueDebugId`.
 *
 * The result is drawn at random from [0x10000000, 0x90000000] so that it, and
 * (within reason) values produced by repeatedly incrementing it by 1, render
 * as exactly 8 hexadecimal digits. Fixed-width IDs keep log output aligned,
 * and the random starting point distinguishes logs of different executions.
 */
function generateInitialUniqueDebugId() {
    const lowerBound = 0x10000000;
    const upperBound = 0x90000000;
    return lowerBound + Math.round((upperBound - lowerBound) * Math.random());
}
/**
 * Generates and returns a unique ID as a hexadecimal string.
 *
 * Intended for debug logging: embedding the same "debug ID" in related log
 * messages (e.g. all messages for one network connection) makes them easy to
 * correlate even when they are spatially separated in the logs.
 *
 * @return the 10-character generated ID (e.g. "0xa1b2c3d4").
 */
function generateUniqueDebugId() {
    lastUniqueDebugId =
        lastUniqueDebugId === null
            ? generateInitialUniqueDebugId()
            : lastUniqueDebugId + 1;
    return '0x' + lastUniqueDebugId.toString(16);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Utilities for dealing with node.js-style APIs. See nodePromise for more
* details.
*/
/**
* Creates a node-style callback that resolves or rejects a new Promise. The
* callback is passed to the given action which can then use the callback as
* a parameter to a node-style function.
*
* The intent is to directly bridge a node-style function (which takes a
* callback) into a Promise without manually converting between the node-style
* callback and the promise at each call.
*
* In effect it allows you to convert:
*
* @example
* new Promise((resolve: (value?: fs.Stats) => void,
* reject: (error?: any) => void) => {
* fs.stat(path, (error?: any, stat?: fs.Stats) => {
* if (error) {
* reject(error);
* } else {
* resolve(stat);
* }
* });
* });
*
* Into
* @example
* nodePromise((callback: NodeCallback<fs.Stats>) => {
* fs.stat(path, callback);
* });
*
* @param action - a function that takes a node-style callback as an argument
* and then uses that callback to invoke some node-style API.
* @returns a new Promise which will be rejected if the callback is given the
* first Error parameter or will resolve to the value given otherwise.
*/
function nodePromise(action) {
    return new Promise((resolve, reject) => {
        // Node-style callback: a truthy first argument signals an error,
        // otherwise the second argument is the result.
        const callback = (error, value) => {
            if (error) {
                reject(error);
            }
            else {
                resolve(value);
            }
        };
        action(callback);
    });
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// TODO: Fetch runtime version from grpc-js/package.json instead
// when there's a cleaner way to dynamic require JSON in both Node ESM and CJS
// Hard-coded @grpc/grpc-js version reported in the API-client header below.
const grpcVersion = '1.9.1';
// Log tag used by GrpcConnection's debug/warn logging.
const LOG_TAG$9 = 'GrpcConnection';
// Value for the 'X-Goog-Api-Client' header: Node runtime version, Firestore
// SDK version, and gRPC version.
const X_GOOG_API_CLIENT_VALUE = `gl-node/${process.versions.node} fire/${SDK_VERSION} grpc/${grpcVersion}`;
/**
 * Builds the gRPC request metadata (headers) for a Firestore RPC: auth and
 * App Check token headers, the app ID, the API-client header, and the
 * routing headers derived from `databasePath`.
 */
function createMetadata(databasePath, authToken, appCheckToken, appId) {
    hardAssert(authToken === null || authToken.type === 'OAuth');
    const metadata = new grpc.Metadata();
    // Copy the headers of each available token into the gRPC metadata.
    for (const token of [authToken, appCheckToken]) {
        if (token) {
            token.headers.forEach((value, key) => metadata.set(key, value));
        }
    }
    if (appId) {
        metadata.set('X-Firebase-GMPID', appId);
    }
    metadata.set('X-Goog-Api-Client', X_GOOG_API_CLIENT_VALUE);
    // These headers are used to improve routing and project isolation by the
    // backend.
    // TODO(b/199767712): We are keeping 'Google-Cloud-Resource-Prefix' until Emulators can be
    // released with cl/428820046. Currently blocked because Emulators are now built with Java
    // 11 from Google3.
    metadata.set('Google-Cloud-Resource-Prefix', databasePath);
    metadata.set('x-goog-request-params', databasePath);
    return metadata;
}
/**
* A Connection implemented by GRPC-Node.
*/
class GrpcConnection {
    /**
     * @param protos - Loaded protobuf definitions; the `google.firestore.v1`
     *   service namespace is extracted from them.
     * @param databaseInfo - Connection settings (host, ssl, database ID, appId).
     */
    constructor(protos, databaseInfo) {
        this.databaseInfo = databaseInfo;
        // We cache stubs for the most-recently-used token.
        this.cachedStub = null;
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        this.firestore = protos['google']['firestore']['v1'];
        this.databasePath = `projects/${databaseInfo.databaseId.projectId}/databases/${databaseInfo.databaseId.database}`;
    }
    get shouldResourcePathBeIncludedInRequest() {
        // Both `invokeRPC()` and `invokeStreamingRPC()` ignore their `path` arguments, and expect
        // the "path" to be part of the given `request`.
        return true;
    }
    /** Lazily creates, caches, and returns the gRPC Firestore stub. */
    ensureActiveStub() {
        if (!this.cachedStub) {
            logDebug(LOG_TAG$9, 'Creating Firestore stub.');
            const credentials = this.databaseInfo.ssl
                ? grpc.credentials.createSsl()
                : grpc.credentials.createInsecure();
            this.cachedStub = new this.firestore.Firestore(this.databaseInfo.host, credentials);
        }
        return this.cachedStub;
    }
    /**
     * Performs a unary RPC. The `path` parameter is ignored (see
     * `shouldResourcePathBeIncludedInRequest`); the database resource name is
     * merged into the request instead. Resolves with the response value or
     * rejects with a FirestoreError mapped from the gRPC status code.
     */
    invokeRPC(rpcName, path, request, authToken, appCheckToken) {
        const streamId = generateUniqueDebugId();
        const stub = this.ensureActiveStub();
        const metadata = createMetadata(this.databasePath, authToken, appCheckToken, this.databaseInfo.appId);
        const jsonRequest = Object.assign({ database: this.databasePath }, request);
        return nodePromise((callback) => {
            logDebug(LOG_TAG$9, `RPC '${rpcName}' ${streamId} invoked with request:`, request);
            return stub[rpcName](jsonRequest, metadata, (grpcError, value) => {
                if (grpcError) {
                    logDebug(LOG_TAG$9, `RPC '${rpcName}' ${streamId} failed with error:`, grpcError);
                    callback(new FirestoreError(mapCodeFromRpcCode(grpcError.code), grpcError.message));
                }
                else {
                    logDebug(LOG_TAG$9, `RPC '${rpcName}' ${streamId} completed with response:`, value);
                    callback(undefined, value);
                }
            });
        });
    }
    /**
     * Performs a server-streaming RPC and collects all responses into an
     * array. Resolves early once `expectedResponseCount` responses have been
     * received (when provided), otherwise on stream end; rejects with a
     * FirestoreError on stream error.
     */
    invokeStreamingRPC(rpcName, path, request, authToken, appCheckToken, expectedResponseCount) {
        const streamId = generateUniqueDebugId();
        const results = [];
        const responseDeferred = new Deferred();
        logDebug(LOG_TAG$9, `RPC '${rpcName}' ${streamId} invoked (streaming) with request:`, request);
        const stub = this.ensureActiveStub();
        const metadata = createMetadata(this.databasePath, authToken, appCheckToken, this.databaseInfo.appId);
        const jsonRequest = Object.assign(Object.assign({}, request), { database: this.databasePath });
        const stream = stub[rpcName](jsonRequest, metadata);
        // Guards against resolving the deferred twice (early resolve + 'end').
        let callbackFired = false;
        stream.on('data', (response) => {
            logDebug(LOG_TAG$9, `RPC ${rpcName} ${streamId} received result:`, response);
            results.push(response);
            if (expectedResponseCount !== undefined &&
                results.length === expectedResponseCount) {
                callbackFired = true;
                responseDeferred.resolve(results);
            }
        });
        stream.on('end', () => {
            logDebug(LOG_TAG$9, `RPC '${rpcName}' ${streamId} completed.`);
            if (!callbackFired) {
                callbackFired = true;
                responseDeferred.resolve(results);
            }
        });
        stream.on('error', (grpcError) => {
            logDebug(LOG_TAG$9, `RPC '${rpcName}' ${streamId} failed with error:`, grpcError);
            const code = mapCodeFromRpcCode(grpcError.code);
            // A reject after an earlier resolve is a no-op on the settled promise.
            responseDeferred.reject(new FirestoreError(code, grpcError.message));
        });
        return responseDeferred.promise;
    }
    // TODO(mikelehen): This "method" is a monster. Should be refactored.
    /**
     * Opens a bidirectional gRPC stream and wraps it in a StreamBridge. The
     * `closed` flag ensures close is signalled to listeners at most once,
     * whether triggered locally (closeFn), by stream end, or by an error.
     * `onOpen` is simulated asynchronously since gRPC has no open event.
     */
    openStream(rpcName, authToken, appCheckToken) {
        const streamId = generateUniqueDebugId();
        const stub = this.ensureActiveStub();
        const metadata = createMetadata(this.databasePath, authToken, appCheckToken, this.databaseInfo.appId);
        const grpcStream = stub[rpcName](metadata);
        let closed = false;
        const close = (err) => {
            if (!closed) {
                closed = true;
                stream.callOnClose(err);
                grpcStream.end();
            }
        };
        const stream = new StreamBridge({
            sendFn: (msg) => {
                if (!closed) {
                    logDebug(LOG_TAG$9, `RPC '${rpcName}' stream ${streamId} sending:`, msg);
                    try {
                        grpcStream.write(msg);
                    }
                    catch (e) {
                        // This probably means we didn't conform to the proto. Make sure to
                        // log the message we sent.
                        logError('Failure sending:', msg);
                        logError('Error:', e);
                        throw e;
                    }
                }
                else {
                    logDebug(LOG_TAG$9, `RPC '${rpcName}' stream ${streamId} ` +
                        'not sending because gRPC stream is closed:', msg);
                }
            },
            closeFn: () => {
                logDebug(LOG_TAG$9, `RPC '${rpcName}' stream ${streamId} closed locally via close().`);
                close();
            }
        });
        grpcStream.on('data', (msg) => {
            if (!closed) {
                logDebug(LOG_TAG$9, `RPC '${rpcName}' stream ${streamId} received:`, msg);
                stream.callOnMessage(msg);
            }
        });
        grpcStream.on('end', () => {
            logDebug(LOG_TAG$9, `RPC '${rpcName}' stream ${streamId} ended.`);
            close();
        });
        grpcStream.on('error', (grpcError) => {
            if (!closed) {
                logWarn(LOG_TAG$9, `RPC '${rpcName}' stream ${streamId} error. Code:`, grpcError.code, 'Message:', grpcError.message);
                const code = mapCodeFromRpcCode(grpcError.code);
                close(new FirestoreError(code, grpcError.message));
            }
        });
        logDebug(LOG_TAG$9, `Opening RPC '${rpcName}' stream ${streamId} ` +
            `to ${this.databaseInfo.host}`);
        // TODO(dimond): Since grpc has no explicit open status (or does it?) we
        // simulate an onOpen in the next loop after the stream had it's listeners
        // registered
        setTimeout(() => {
            stream.callOnOpen();
        }, 0);
        return stream;
    }
    /**
     * Closes and cleans up any resources associated with the GrpcConnection.
     * If a gRPC client has been generated for this connection, the gRPC client
     * is closed. Failure to call terminate on a GrpcConnection can result
     * in leaked resources of the gRPC client.
     */
    terminate() {
        if (this.cachedStub) {
            this.cachedStub.close();
            this.cachedStub = undefined;
        }
    }
}
const nested = {
google: {
nested: {
protobuf: {
options: {
csharp_namespace: "Google.Protobuf.WellKnownTypes",
go_package: "github.com/golang/protobuf/ptypes/wrappers",
java_package: "com.google.protobuf",
java_outer_classname: "WrappersProto",
java_multiple_files: true,
objc_class_prefix: "GPB",
cc_enable_arenas: true,
optimize_for: "SPEED"
},
nested: {
Timestamp: {
fields: {
seconds: {
type: "int64",
id: 1
},
nanos: {
type: "int32",
id: 2
}
}
},
FileDescriptorSet: {
fields: {
file: {
rule: "repeated",
type: "FileDescriptorProto",
id: 1
}
}
},
FileDescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
"package": {
type: "string",
id: 2
},
dependency: {
rule: "repeated",
type: "string",
id: 3
},
publicDependency: {
rule: "repeated",
type: "int32",
id: 10,
options: {
packed: false
}
},
weakDependency: {
rule: "repeated",
type: "int32",
id: 11,
options: {
packed: false
}
},
messageType: {
rule: "repeated",
type: "DescriptorProto",
id: 4
},
enumType: {
rule: "repeated",
type: "EnumDescriptorProto",
id: 5
},
service: {
rule: "repeated",
type: "ServiceDescriptorProto",
id: 6
},
extension: {
rule: "repeated",
type: "FieldDescriptorProto",
id: 7
},
options: {
type: "FileOptions",
id: 8
},
sourceCodeInfo: {
type: "SourceCodeInfo",
id: 9
},
syntax: {
type: "string",
id: 12
}
}
},
DescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
field: {
rule: "repeated",
type: "FieldDescriptorProto",
id: 2
},
extension: {
rule: "repeated",
type: "FieldDescriptorProto",
id: 6
},
nestedType: {
rule: "repeated",
type: "DescriptorProto",
id: 3
},
enumType: {
rule: "repeated",
type: "EnumDescriptorProto",
id: 4
},
extensionRange: {
rule: "repeated",
type: "ExtensionRange",
id: 5
},
oneofDecl: {
rule: "repeated",
type: "OneofDescriptorProto",
id: 8
},
options: {
type: "MessageOptions",
id: 7
},
reservedRange: {
rule: "repeated",
type: "ReservedRange",
id: 9
},
reservedName: {
rule: "repeated",
type: "string",
id: 10
}
},
nested: {
ExtensionRange: {
fields: {
start: {
type: "int32",
id: 1
},
end: {
type: "int32",
id: 2
}
}
},
ReservedRange: {
fields: {
start: {
type: "int32",
id: 1
},
end: {
type: "int32",
id: 2
}
}
}
}
},
FieldDescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
number: {
type: "int32",
id: 3
},
label: {
type: "Label",
id: 4
},
type: {
type: "Type",
id: 5
},
typeName: {
type: "string",
id: 6
},
extendee: {
type: "string",
id: 2
},
defaultValue: {
type: "string",
id: 7
},
oneofIndex: {
type: "int32",
id: 9
},
jsonName: {
type: "string",
id: 10
},
options: {
type: "FieldOptions",
id: 8
}
},
nested: {
Type: {
values: {
TYPE_DOUBLE: 1,
TYPE_FLOAT: 2,
TYPE_INT64: 3,
TYPE_UINT64: 4,
TYPE_INT32: 5,
TYPE_FIXED64: 6,
TYPE_FIXED32: 7,
TYPE_BOOL: 8,
TYPE_STRING: 9,
TYPE_GROUP: 10,
TYPE_MESSAGE: 11,
TYPE_BYTES: 12,
TYPE_UINT32: 13,
TYPE_ENUM: 14,
TYPE_SFIXED32: 15,
TYPE_SFIXED64: 16,
TYPE_SINT32: 17,
TYPE_SINT64: 18
}
},
Label: {
values: {
LABEL_OPTIONAL: 1,
LABEL_REQUIRED: 2,
LABEL_REPEATED: 3
}
}
}
},
OneofDescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
options: {
type: "OneofOptions",
id: 2
}
}
},
EnumDescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
value: {
rule: "repeated",
type: "EnumValueDescriptorProto",
id: 2
},
options: {
type: "EnumOptions",
id: 3
}
}
},
EnumValueDescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
number: {
type: "int32",
id: 2
},
options: {
type: "EnumValueOptions",
id: 3
}
}
},
ServiceDescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
method: {
rule: "repeated",
type: "MethodDescriptorProto",
id: 2
},
options: {
type: "ServiceOptions",
id: 3
}
}
},
MethodDescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
inputType: {
type: "string",
id: 2
},
outputType: {
type: "string",
id: 3
},
options: {
type: "MethodOptions",
id: 4
},
clientStreaming: {
type: "bool",
id: 5
},
serverStreaming: {
type: "bool",
id: 6
}
}
},
FileOptions: {
fields: {
javaPackage: {
type: "string",
id: 1
},
javaOuterClassname: {
type: "string",
id: 8
},
javaMultipleFiles: {
type: "bool",
id: 10
},
javaGenerateEqualsAndHash: {
type: "bool",
id: 20,
options: {
deprecated: true
}
},
javaStringCheckUtf8: {
type: "bool",
id: 27
},
optimizeFor: {
type: "OptimizeMode",
id: 9,
options: {
"default": "SPEED"
}
},
goPackage: {
type: "string",
id: 11
},
ccGenericServices: {
type: "bool",
id: 16
},
javaGenericServices: {
type: "bool",
id: 17
},
pyGenericServices: {
type: "bool",
id: 18
},
deprecated: {
type: "bool",
id: 23
},
ccEnableArenas: {
type: "bool",
id: 31
},
objcClassPrefix: {
type: "string",
id: 36
},
csharpNamespace: {
type: "string",
id: 37
},
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
],
reserved: [
[
38,
38
]
],
nested: {
OptimizeMode: {
values: {
SPEED: 1,
CODE_SIZE: 2,
LITE_RUNTIME: 3
}
}
}
},
MessageOptions: {
fields: {
messageSetWireFormat: {
type: "bool",
id: 1
},
noStandardDescriptorAccessor: {
type: "bool",
id: 2
},
deprecated: {
type: "bool",
id: 3
},
mapEntry: {
type: "bool",
id: 7
},
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
],
reserved: [
[
8,
8
]
]
},
FieldOptions: {
fields: {
ctype: {
type: "CType",
id: 1,
options: {
"default": "STRING"
}
},
packed: {
type: "bool",
id: 2
},
jstype: {
type: "JSType",
id: 6,
options: {
"default": "JS_NORMAL"
}
},
lazy: {
type: "bool",
id: 5
},
deprecated: {
type: "bool",
id: 3
},
weak: {
type: "bool",
id: 10
},
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
],
reserved: [
[
4,
4
]
],
nested: {
CType: {
values: {
STRING: 0,
CORD: 1,
STRING_PIECE: 2
}
},
JSType: {
values: {
JS_NORMAL: 0,
JS_STRING: 1,
JS_NUMBER: 2
}
}
}
},
OneofOptions: {
fields: {
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
]
},
EnumOptions: {
fields: {
allowAlias: {
type: "bool",
id: 2
},
deprecated: {
type: "bool",
id: 3
},
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
]
},
EnumValueOptions: {
fields: {
deprecated: {
type: "bool",
id: 1
},
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
]
},
ServiceOptions: {
fields: {
deprecated: {
type: "bool",
id: 33
},
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
]
},
MethodOptions: {
fields: {
deprecated: {
type: "bool",
id: 33
},
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
]
},
UninterpretedOption: {
fields: {
name: {
rule: "repeated",
type: "NamePart",
id: 2
},
identifierValue: {
type: "string",
id: 3
},
positiveIntValue: {
type: "uint64",
id: 4
},
negativeIntValue: {
type: "int64",
id: 5
},
doubleValue: {
type: "double",
id: 6
},
stringValue: {
type: "bytes",
id: 7
},
aggregateValue: {
type: "string",
id: 8
}
},
nested: {
NamePart: {
fields: {
namePart: {
rule: "required",
type: "string",
id: 1
},
isExtension: {
rule: "required",
type: "bool",
id: 2
}
}
}
}
},
SourceCodeInfo: {
fields: {
location: {
rule: "repeated",
type: "Location",
id: 1
}
},
nested: {
Location: {
fields: {
path: {
rule: "repeated",
type: "int32",
id: 1
},
span: {
rule: "repeated",
type: "int32",
id: 2
},
leadingComments: {
type: "string",
id: 3
},
trailingComments: {
type: "string",
id: 4
},
leadingDetachedComments: {
rule: "repeated",
type: "string",
id: 6
}
}
}
}
},
GeneratedCodeInfo: {
fields: {
annotation: {
rule: "repeated",
type: "Annotation",
id: 1
}
},
nested: {
Annotation: {
fields: {
path: {
rule: "repeated",
type: "int32",
id: 1
},
sourceFile: {
type: "string",
id: 2
},
begin: {
type: "int32",
id: 3
},
end: {
type: "int32",
id: 4
}
}
}
}
},
Struct: {
fields: {
fields: {
keyType: "string",
type: "Value",
id: 1
}
}
},
Value: {
oneofs: {
kind: {
oneof: [
"nullValue",
"numberValue",
"stringValue",
"boolValue",
"structValue",
"listValue"
]
}
},
fields: {
nullValue: {
type: "NullValue",
id: 1
},
numberValue: {
type: "double",
id: 2
},
stringValue: {
type: "string",
id: 3
},
boolValue: {
type: "bool",
id: 4
},
structValue: {
type: "Struct",
id: 5
},
listValue: {
type: "ListValue",
id: 6
}
}
},
NullValue: {
values: {
NULL_VALUE: 0
}
},
ListValue: {
fields: {
values: {
rule: "repeated",
type: "Value",
id: 1
}
}
},
Empty: {
fields: {
}
},
DoubleValue: {
fields: {
value: {
type: "double",
id: 1
}
}
},
FloatValue: {
fields: {
value: {
type: "float",
id: 1
}
}
},
Int64Value: {
fields: {
value: {
type: "int64",
id: 1
}
}
},
UInt64Value: {
fields: {
value: {
type: "uint64",
id: 1
}
}
},
Int32Value: {
fields: {
value: {
type: "int32",
id: 1
}
}
},
UInt32Value: {
fields: {
value: {
type: "uint32",
id: 1
}
}
},
BoolValue: {
fields: {
value: {
type: "bool",
id: 1
}
}
},
StringValue: {
fields: {
value: {
type: "string",
id: 1
}
}
},
BytesValue: {
fields: {
value: {
type: "bytes",
id: 1
}
}
},
Any: {
fields: {
typeUrl: {
type: "string",
id: 1
},
value: {
type: "bytes",
id: 2
}
}
}
}
},
firestore: {
nested: {
v1: {
options: {
csharp_namespace: "Google.Cloud.Firestore.V1",
go_package: "google.golang.org/genproto/googleapis/firestore/v1;firestore",
java_multiple_files: true,
java_outer_classname: "WriteProto",
java_package: "com.google.firestore.v1",
objc_class_prefix: "GCFS",
php_namespace: "Google\\Cloud\\Firestore\\V1",
ruby_package: "Google::Cloud::Firestore::V1"
},
nested: {
AggregationResult: {
fields: {
aggregateFields: {
keyType: "string",
type: "Value",
id: 2
}
}
},
BitSequence: {
fields: {
bitmap: {
type: "bytes",
id: 1
},
padding: {
type: "int32",
id: 2
}
}
},
BloomFilter: {
fields: {
bits: {
type: "BitSequence",
id: 1
},
hashCount: {
type: "int32",
id: 2
}
}
},
DocumentMask: {
fields: {
fieldPaths: {
rule: "repeated",
type: "string",
id: 1
}
}
},
Precondition: {
oneofs: {
conditionType: {
oneof: [
"exists",
"updateTime"
]
}
},
fields: {
exists: {
type: "bool",
id: 1
},
updateTime: {
type: "google.protobuf.Timestamp",
id: 2
}
}
},
TransactionOptions: {
oneofs: {
mode: {
oneof: [
"readOnly",
"readWrite"
]
}
},
fields: {
readOnly: {
type: "ReadOnly",
id: 2
},
readWrite: {
type: "ReadWrite",
id: 3
}
},
nested: {
ReadWrite: {
fields: {
retryTransaction: {
type: "bytes",
id: 1
}
}
},
ReadOnly: {
oneofs: {
consistencySelector: {
oneof: [
"readTime"
]
}
},
fields: {
readTime: {
type: "google.protobuf.Timestamp",
id: 2
}
}
}
}
},
Document: {
fields: {
name: {
type: "string",
id: 1
},
fields: {
keyType: "string",
type: "Value",
id: 2
},
createTime: {
type: "google.protobuf.Timestamp",
id: 3
},
updateTime: {
type: "google.protobuf.Timestamp",
id: 4
}
}
},
Value: {
oneofs: {
valueType: {
oneof: [
"nullValue",
"booleanValue",
"integerValue",
"doubleValue",
"timestampValue",
"stringValue",
"bytesValue",
"referenceValue",
"geoPointValue",
"arrayValue",
"mapValue"
]
}
},
fields: {
nullValue: {
type: "google.protobuf.NullValue",
id: 11
},
booleanValue: {
type: "bool",
id: 1
},
integerValue: {
type: "int64",
id: 2
},
doubleValue: {
type: "double",
id: 3
},
timestampValue: {
type: "google.protobuf.Timestamp",
id: 10
},
stringValue: {
type: "string",
id: 17
},
bytesValue: {
type: "bytes",
id: 18
},
referenceValue: {
type: "string",
id: 5
},
geoPointValue: {
type: "google.type.LatLng",
id: 8
},
arrayValue: {
type: "ArrayValue",
id: 9
},
mapValue: {
type: "MapValue",
id: 6
}
}
},
ArrayValue: {
fields: {
values: {
rule: "repeated",
type: "Value",
id: 1
}
}
},
MapValue: {
fields: {
fields: {
keyType: "string",
type: "Value",
id: 1
}
}
},
Firestore: {
options: {
"(google.api.default_host)": "firestore.googleapis.com",
"(google.api.oauth_scopes)": "https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/datastore"
},
methods: {
GetDocument: {
requestType: "GetDocumentRequest",
responseType: "Document",
options: {
"(google.api.http).get": "/v1/{name=projects/*/databases/*/documents/*/**}"
},
parsedOptions: [
{
"(google.api.http)": {
get: "/v1/{name=projects/*/databases/*/documents/*/**}"
}
}
]
},
ListDocuments: {
requestType: "ListDocumentsRequest",
responseType: "ListDocumentsResponse",
options: {
"(google.api.http).get": "/v1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}"
},
parsedOptions: [
{
"(google.api.http)": {
get: "/v1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}"
}
}
]
},
UpdateDocument: {
requestType: "UpdateDocumentRequest",
responseType: "Document",
options: {
"(google.api.http).patch": "/v1/{document.name=projects/*/databases/*/documents/*/**}",
"(google.api.http).body": "document",
"(google.api.method_signature)": "document,update_mask"
},
parsedOptions: [
{
"(google.api.http)": {
patch: "/v1/{document.name=projects/*/databases/*/documents/*/**}",
body: "document"
}
},
{
"(google.api.method_signature)": "document,update_mask"
}
]
},
DeleteDocument: {
requestType: "DeleteDocumentRequest",
responseType: "google.protobuf.Empty",
options: {
"(google.api.http).delete": "/v1/{name=projects/*/databases/*/documents/*/**}",
"(google.api.method_signature)": "name"
},
parsedOptions: [
{
"(google.api.http)": {
"delete": "/v1/{name=projects/*/databases/*/documents/*/**}"
}
},
{
"(google.api.method_signature)": "name"
}
]
},
BatchGetDocuments: {
requestType: "BatchGetDocumentsRequest",
responseType: "BatchGetDocumentsResponse",
responseStream: true,
options: {
"(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:batchGet",
"(google.api.http).body": "*"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{database=projects/*/databases/*}/documents:batchGet",
body: "*"
}
}
]
},
BeginTransaction: {
requestType: "BeginTransactionRequest",
responseType: "BeginTransactionResponse",
options: {
"(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:beginTransaction",
"(google.api.http).body": "*",
"(google.api.method_signature)": "database"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{database=projects/*/databases/*}/documents:beginTransaction",
body: "*"
}
},
{
"(google.api.method_signature)": "database"
}
]
},
Commit: {
requestType: "CommitRequest",
responseType: "CommitResponse",
options: {
"(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:commit",
"(google.api.http).body": "*",
"(google.api.method_signature)": "database,writes"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{database=projects/*/databases/*}/documents:commit",
body: "*"
}
},
{
"(google.api.method_signature)": "database,writes"
}
]
},
Rollback: {
requestType: "RollbackRequest",
responseType: "google.protobuf.Empty",
options: {
"(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:rollback",
"(google.api.http).body": "*",
"(google.api.method_signature)": "database,transaction"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{database=projects/*/databases/*}/documents:rollback",
body: "*"
}
},
{
"(google.api.method_signature)": "database,transaction"
}
]
},
RunQuery: {
requestType: "RunQueryRequest",
responseType: "RunQueryResponse",
responseStream: true,
options: {
"(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:runQuery",
"(google.api.http).body": "*",
"(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:runQuery",
"(google.api.http).additional_bindings.body": "*"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{parent=projects/*/databases/*/documents}:runQuery",
body: "*",
additional_bindings: {
post: "/v1/{parent=projects/*/databases/*/documents/*/**}:runQuery",
body: "*"
}
}
}
]
},
RunAggregationQuery: {
requestType: "RunAggregationQueryRequest",
responseType: "RunAggregationQueryResponse",
responseStream: true,
options: {
"(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:runAggregationQuery",
"(google.api.http).body": "*",
"(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:runAggregationQuery",
"(google.api.http).additional_bindings.body": "*"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{parent=projects/*/databases/*/documents}:runAggregationQuery",
body: "*",
additional_bindings: {
post: "/v1/{parent=projects/*/databases/*/documents/*/**}:runAggregationQuery",
body: "*"
}
}
}
]
},
PartitionQuery: {
requestType: "PartitionQueryRequest",
responseType: "PartitionQueryResponse",
options: {
"(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:partitionQuery",
"(google.api.http).body": "*",
"(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:partitionQuery",
"(google.api.http).additional_bindings.body": "*"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{parent=projects/*/databases/*/documents}:partitionQuery",
body: "*",
additional_bindings: {
post: "/v1/{parent=projects/*/databases/*/documents/*/**}:partitionQuery",
body: "*"
}
}
}
]
},
Write: {
requestType: "WriteRequest",
requestStream: true,
responseType: "WriteResponse",
responseStream: true,
options: {
"(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:write",
"(google.api.http).body": "*"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{database=projects/*/databases/*}/documents:write",
body: "*"
}
}
]
},
Listen: {
requestType: "ListenRequest",
requestStream: true,
responseType: "ListenResponse",
responseStream: true,
options: {
"(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:listen",
"(google.api.http).body": "*"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{database=projects/*/databases/*}/documents:listen",
body: "*"
}
}
]
},
ListCollectionIds: {
requestType: "ListCollectionIdsRequest",
responseType: "ListCollectionIdsResponse",
options: {
"(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:listCollectionIds",
"(google.api.http).body": "*",
"(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds",
"(google.api.http).additional_bindings.body": "*",
"(google.api.method_signature)": "parent"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{parent=projects/*/databases/*/documents}:listCollectionIds",
body: "*",
additional_bindings: {
post: "/v1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds",
body: "*"
}
}
},
{
"(google.api.method_signature)": "parent"
}
]
},
BatchWrite: {
requestType: "BatchWriteRequest",
responseType: "BatchWriteResponse",
options: {
"(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:batchWrite",
"(google.api.http).body": "*"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{database=projects/*/databases/*}/documents:batchWrite",
body: "*"
}
}
]
},
CreateDocument: {
requestType: "CreateDocumentRequest",
responseType: "Document",
options: {
"(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents/**}/{collection_id}",
"(google.api.http).body": "document"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{parent=projects/*/databases/*/documents/**}/{collection_id}",
body: "document"
}
}
]
}
}
},
GetDocumentRequest: {
oneofs: {
consistencySelector: {
oneof: [
"transaction",
"readTime"
]
}
},
fields: {
name: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
mask: {
type: "DocumentMask",
id: 2
},
transaction: {
type: "bytes",
id: 3
},
readTime: {
type: "google.protobuf.Timestamp",
id: 5
}
}
},
ListDocumentsRequest: {
oneofs: {
consistencySelector: {
oneof: [
"transaction",
"readTime"
]
}
},
fields: {
parent: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
collectionId: {
type: "string",
id: 2,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
pageSize: {
type: "int32",
id: 3
},
pageToken: {
type: "string",
id: 4
},
orderBy: {
type: "string",
id: 6
},
mask: {
type: "DocumentMask",
id: 7
},
transaction: {
type: "bytes",
id: 8
},
readTime: {
type: "google.protobuf.Timestamp",
id: 10
},
showMissing: {
type: "bool",
id: 12
}
}
},
ListDocumentsResponse: {
fields: {
documents: {
rule: "repeated",
type: "Document",
id: 1
},
nextPageToken: {
type: "string",
id: 2
}
}
},
CreateDocumentRequest: {
fields: {
parent: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
collectionId: {
type: "string",
id: 2,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
documentId: {
type: "string",
id: 3
},
document: {
type: "Document",
id: 4,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
mask: {
type: "DocumentMask",
id: 5
}
}
},
UpdateDocumentRequest: {
fields: {
document: {
type: "Document",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
updateMask: {
type: "DocumentMask",
id: 2
},
mask: {
type: "DocumentMask",
id: 3
},
currentDocument: {
type: "Precondition",
id: 4
}
}
},
DeleteDocumentRequest: {
fields: {
name: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
currentDocument: {
type: "Precondition",
id: 2
}
}
},
BatchGetDocumentsRequest: {
oneofs: {
consistencySelector: {
oneof: [
"transaction",
"newTransaction",
"readTime"
]
}
},
fields: {
database: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
documents: {
rule: "repeated",
type: "string",
id: 2
},
mask: {
type: "DocumentMask",
id: 3
},
transaction: {
type: "bytes",
id: 4
},
newTransaction: {
type: "TransactionOptions",
id: 5
},
readTime: {
type: "google.protobuf.Timestamp",
id: 7
}
}
},
BatchGetDocumentsResponse: {
oneofs: {
result: {
oneof: [
"found",
"missing"
]
}
},
fields: {
found: {
type: "Document",
id: 1
},
missing: {
type: "string",
id: 2
},
transaction: {
type: "bytes",
id: 3
},
readTime: {
type: "google.protobuf.Timestamp",
id: 4
}
}
},
BeginTransactionRequest: {
fields: {
database: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
options: {
type: "TransactionOptions",
id: 2
}
}
},
BeginTransactionResponse: {
fields: {
transaction: {
type: "bytes",
id: 1
}
}
},
CommitRequest: {
fields: {
database: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
writes: {
rule: "repeated",
type: "Write",
id: 2
},
transaction: {
type: "bytes",
id: 3
}
}
},
CommitResponse: {
fields: {
writeResults: {
rule: "repeated",
type: "WriteResult",
id: 1
},
commitTime: {
type: "google.protobuf.Timestamp",
id: 2
}
}
},
RollbackRequest: {
fields: {
database: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
transaction: {
type: "bytes",
id: 2,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
}
}
},
RunQueryRequest: {
oneofs: {
queryType: {
oneof: [
"structuredQuery"
]
},
consistencySelector: {
oneof: [
"transaction",
"newTransaction",
"readTime"
]
}
},
fields: {
parent: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
structuredQuery: {
type: "StructuredQuery",
id: 2
},
transaction: {
type: "bytes",
id: 5
},
newTransaction: {
type: "TransactionOptions",
id: 6
},
readTime: {
type: "google.protobuf.Timestamp",
id: 7
}
}
},
RunQueryResponse: {
fields: {
transaction: {
type: "bytes",
id: 2
},
document: {
type: "Document",
id: 1
},
readTime: {
type: "google.protobuf.Timestamp",
id: 3
},
skippedResults: {
type: "int32",
id: 4
}
}
},
RunAggregationQueryRequest: {
oneofs: {
queryType: {
oneof: [
"structuredAggregationQuery"
]
},
consistencySelector: {
oneof: [
"transaction",
"newTransaction",
"readTime"
]
}
},
fields: {
parent: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
structuredAggregationQuery: {
type: "StructuredAggregationQuery",
id: 2
},
transaction: {
type: "bytes",
id: 4
},
newTransaction: {
type: "TransactionOptions",
id: 5
},
readTime: {
type: "google.protobuf.Timestamp",
id: 6
}
}
},
RunAggregationQueryResponse: {
fields: {
result: {
type: "AggregationResult",
id: 1
},
transaction: {
type: "bytes",
id: 2
},
readTime: {
type: "google.protobuf.Timestamp",
id: 3
}
}
},
PartitionQueryRequest: {
oneofs: {
queryType: {
oneof: [
"structuredQuery"
]
}
},
fields: {
parent: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
structuredQuery: {
type: "StructuredQuery",
id: 2
},
partitionCount: {
type: "int64",
id: 3
},
pageToken: {
type: "string",
id: 4
},
pageSize: {
type: "int32",
id: 5
}
}
},
PartitionQueryResponse: {
fields: {
partitions: {
rule: "repeated",
type: "Cursor",
id: 1
},
nextPageToken: {
type: "string",
id: 2
}
}
},
WriteRequest: {
fields: {
database: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
streamId: {
type: "string",
id: 2
},
writes: {
rule: "repeated",
type: "Write",
id: 3
},
streamToken: {
type: "bytes",
id: 4
},
labels: {
keyType: "string",
type: "string",
id: 5
}
}
},
WriteResponse: {
fields: {
streamId: {
type: "string",
id: 1
},
streamToken: {
type: "bytes",
id: 2
},
writeResults: {
rule: "repeated",
type: "WriteResult",
id: 3
},
commitTime: {
type: "google.protobuf.Timestamp",
id: 4
}
}
},
ListenRequest: {
oneofs: {
targetChange: {
oneof: [
"addTarget",
"removeTarget"
]
}
},
fields: {
database: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
addTarget: {
type: "Target",
id: 2
},
removeTarget: {
type: "int32",
id: 3
},
labels: {
keyType: "string",
type: "string",
id: 4
}
}
},
ListenResponse: {
oneofs: {
responseType: {
oneof: [
"targetChange",
"documentChange",
"documentDelete",
"documentRemove",
"filter"
]
}
},
fields: {
targetChange: {
type: "TargetChange",
id: 2
},
documentChange: {
type: "DocumentChange",
id: 3
},
documentDelete: {
type: "DocumentDelete",
id: 4
},
documentRemove: {
type: "DocumentRemove",
id: 6
},
filter: {
type: "ExistenceFilter",
id: 5
}
}
},
Target: {
oneofs: {
targetType: {
oneof: [
"query",
"documents"
]
},
resumeType: {
oneof: [
"resumeToken",
"readTime"
]
}
},
fields: {
query: {
type: "QueryTarget",
id: 2
},
documents: {
type: "DocumentsTarget",
id: 3
},
resumeToken: {
type: "bytes",
id: 4
},
readTime: {
type: "google.protobuf.Timestamp",
id: 11
},
targetId: {
type: "int32",
id: 5
},
once: {
type: "bool",
id: 6
},
expectedCount: {
type: "google.protobuf.Int32Value",
id: 12
}
},
nested: {
DocumentsTarget: {
fields: {
documents: {
rule: "repeated",
type: "string",
id: 2
}
}
},
QueryTarget: {
oneofs: {
queryType: {
oneof: [
"structuredQuery"
]
}
},
fields: {
parent: {
type: "string",
id: 1
},
structuredQuery: {
type: "StructuredQuery",
id: 2
}
}
}
}
},
TargetChange: {
fields: {
targetChangeType: {
type: "TargetChangeType",
id: 1
},
targetIds: {
rule: "repeated",
type: "int32",
id: 2
},
cause: {
type: "google.rpc.Status",
id: 3
},
resumeToken: {
type: "bytes",
id: 4
},
readTime: {
type: "google.protobuf.Timestamp",
id: 6
}
},
nested: {
TargetChangeType: {
values: {
NO_CHANGE: 0,
ADD: 1,
REMOVE: 2,
CURRENT: 3,
RESET: 4
}
}
}
},
ListCollectionIdsRequest: {
fields: {
parent: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
pageSize: {
type: "int32",
id: 2
},
pageToken: {
type: "string",
id: 3
}
}
},
ListCollectionIdsResponse: {
fields: {
collectionIds: {
rule: "repeated",
type: "string",
id: 1
},
nextPageToken: {
type: "string",
id: 2
}
}
},
BatchWriteRequest: {
fields: {
database: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
writes: {
rule: "repeated",
type: "Write",
id: 2
},
labels: {
keyType: "string",
type: "string",
id: 3
}
}
},
BatchWriteResponse: {
fields: {
writeResults: {
rule: "repeated",
type: "WriteResult",
id: 1
},
status: {
rule: "repeated",
type: "google.rpc.Status",
id: 2
}
}
},
StructuredQuery: {
fields: {
select: {
type: "Projection",
id: 1
},
from: {
rule: "repeated",
type: "CollectionSelector",
id: 2
},
where: {
type: "Filter",
id: 3
},
orderBy: {
rule: "repeated",
type: "Order",
id: 4
},
startAt: {
type: "Cursor",
id: 7
},
endAt: {
type: "Cursor",
id: 8
},
offset: {
type: "int32",
id: 6
},
limit: {
type: "google.protobuf.Int32Value",
id: 5
}
},
nested: {
CollectionSelector: {
fields: {
collectionId: {
type: "string",
id: 2
},
allDescendants: {
type: "bool",
id: 3
}
}
},
Filter: {
oneofs: {
filterType: {
oneof: [
"compositeFilter",
"fieldFilter",
"unaryFilter"
]
}
},
fields: {
compositeFilter: {
type: "CompositeFilter",
id: 1
},
fieldFilter: {
type: "FieldFilter",
id: 2
},
unaryFilter: {
type: "UnaryFilter",
id: 3
}
}
},
CompositeFilter: {
fields: {
op: {
type: "Operator",
id: 1
},
filters: {
rule: "repeated",
type: "Filter",
id: 2
}
},
nested: {
Operator: {
values: {
OPERATOR_UNSPECIFIED: 0,
AND: 1,
OR: 2
}
}
}
},
FieldFilter: {
fields: {
field: {
type: "FieldReference",
id: 1
},
op: {
type: "Operator",
id: 2
},
value: {
type: "Value",
id: 3
}
},
nested: {
Operator: {
values: {
OPERATOR_UNSPECIFIED: 0,
LESS_THAN: 1,
LESS_THAN_OR_EQUAL: 2,
GREATER_THAN: 3,
GREATER_THAN_OR_EQUAL: 4,
EQUAL: 5,
NOT_EQUAL: 6,
ARRAY_CONTAINS: 7,
IN: 8,
ARRAY_CONTAINS_ANY: 9,
NOT_IN: 10
}
}
}
},
UnaryFilter: {
oneofs: {
operandType: {
oneof: [
"field"
]
}
},
fields: {
op: {
type: "Operator",
id: 1
},
field: {
type: "FieldReference",
id: 2
}
},
nested: {
Operator: {
values: {
OPERATOR_UNSPECIFIED: 0,
IS_NAN: 2,
IS_NULL: 3,
IS_NOT_NAN: 4,
IS_NOT_NULL: 5
}
}
}
},
Order: {
fields: {
field: {
type: "FieldReference",
id: 1
},
direction: {
type: "Direction",
id: 2
}
}
},
FieldReference: {
fields: {
fieldPath: {
type: "string",
id: 2
}
}
},
Projection: {
fields: {
fields: {
rule: "repeated",
type: "FieldReference",
id: 2
}
}
},
Direction: {
values: {
DIRECTION_UNSPECIFIED: 0,
ASCENDING: 1,
DESCENDING: 2
}
}
}
},
StructuredAggregationQuery: {
oneofs: {
queryType: {
oneof: [
"structuredQuery"
]
}
},
fields: {
structuredQuery: {
type: "StructuredQuery",
id: 1
},
aggregations: {
rule: "repeated",
type: "Aggregation",
id: 3
}
},
nested: {
Aggregation: {
oneofs: {
operator: {
oneof: [
"count",
"sum",
"avg"
]
}
},
fields: {
count: {
type: "Count",
id: 1
},
sum: {
type: "Sum",
id: 2
},
avg: {
type: "Avg",
id: 3
},
alias: {
type: "string",
id: 7
}
},
nested: {
Count: {
fields: {
upTo: {
type: "google.protobuf.Int64Value",
id: 1
}
}
},
Sum: {
fields: {
field: {
type: "FieldReference",
id: 1
}
}
},
Avg: {
fields: {
field: {
type: "FieldReference",
id: 1
}
}
}
}
}
}
},
Cursor: {
fields: {
values: {
rule: "repeated",
type: "Value",
id: 1
},
before: {
type: "bool",
id: 2
}
}
},
Write: {
oneofs: {
operation: {
oneof: [
"update",
"delete",
"verify",
"transform"
]
}
},
fields: {
update: {
type: "Document",
id: 1
},
"delete": {
type: "string",
id: 2
},
verify: {
type: "string",
id: 5
},
transform: {
type: "DocumentTransform",
id: 6
},
updateMask: {
type: "DocumentMask",
id: 3
},
updateTransforms: {
rule: "repeated",
type: "DocumentTransform.FieldTransform",
id: 7
},
currentDocument: {
type: "Precondition",
id: 4
}
}
},
DocumentTransform: {
fields: {
document: {
type: "string",
id: 1
},
fieldTransforms: {
rule: "repeated",
type: "FieldTransform",
id: 2
}
},
nested: {
FieldTransform: {
oneofs: {
transformType: {
oneof: [
"setToServerValue",
"increment",
"maximum",
"minimum",
"appendMissingElements",
"removeAllFromArray"
]
}
},
fields: {
fieldPath: {
type: "string",
id: 1
},
setToServerValue: {
type: "ServerValue",
id: 2
},
increment: {
type: "Value",
id: 3
},
maximum: {
type: "Value",
id: 4
},
minimum: {
type: "Value",
id: 5
},
appendMissingElements: {
type: "ArrayValue",
id: 6
},
removeAllFromArray: {
type: "ArrayValue",
id: 7
}
},
nested: {
ServerValue: {
values: {
SERVER_VALUE_UNSPECIFIED: 0,
REQUEST_TIME: 1
}
}
}
}
}
},
WriteResult: {
fields: {
updateTime: {
type: "google.protobuf.Timestamp",
id: 1
},
transformResults: {
rule: "repeated",
type: "Value",
id: 2
}
}
},
DocumentChange: {
fields: {
document: {
type: "Document",
id: 1
},
targetIds: {
rule: "repeated",
type: "int32",
id: 5
},
removedTargetIds: {
rule: "repeated",
type: "int32",
id: 6
}
}
},
DocumentDelete: {
fields: {
document: {
type: "string",
id: 1
},
removedTargetIds: {
rule: "repeated",
type: "int32",
id: 6
},
readTime: {
type: "google.protobuf.Timestamp",
id: 4
}
}
},
DocumentRemove: {
fields: {
document: {
type: "string",
id: 1
},
removedTargetIds: {
rule: "repeated",
type: "int32",
id: 2
},
readTime: {
type: "google.protobuf.Timestamp",
id: 4
}
}
},
ExistenceFilter: {
fields: {
targetId: {
type: "int32",
id: 1
},
count: {
type: "int32",
id: 2
},
unchangedNames: {
type: "BloomFilter",
id: 3
}
}
}
}
}
}
},
api: {
options: {
go_package: "google.golang.org/genproto/googleapis/api/annotations;annotations",
java_multiple_files: true,
java_outer_classname: "HttpProto",
java_package: "com.google.api",
objc_class_prefix: "GAPI",
cc_enable_arenas: true
},
nested: {
http: {
type: "HttpRule",
id: 72295728,
extend: "google.protobuf.MethodOptions"
},
Http: {
fields: {
rules: {
rule: "repeated",
type: "HttpRule",
id: 1
}
}
},
HttpRule: {
oneofs: {
pattern: {
oneof: [
"get",
"put",
"post",
"delete",
"patch",
"custom"
]
}
},
fields: {
get: {
type: "string",
id: 2
},
put: {
type: "string",
id: 3
},
post: {
type: "string",
id: 4
},
"delete": {
type: "string",
id: 5
},
patch: {
type: "string",
id: 6
},
custom: {
type: "CustomHttpPattern",
id: 8
},
selector: {
type: "string",
id: 1
},
body: {
type: "string",
id: 7
},
additionalBindings: {
rule: "repeated",
type: "HttpRule",
id: 11
}
}
},
CustomHttpPattern: {
fields: {
kind: {
type: "string",
id: 1
},
path: {
type: "string",
id: 2
}
}
},
methodSignature: {
rule: "repeated",
type: "string",
id: 1051,
extend: "google.protobuf.MethodOptions"
},
defaultHost: {
type: "string",
id: 1049,
extend: "google.protobuf.ServiceOptions"
},
oauthScopes: {
type: "string",
id: 1050,
extend: "google.protobuf.ServiceOptions"
},
fieldBehavior: {
rule: "repeated",
type: "google.api.FieldBehavior",
id: 1052,
extend: "google.protobuf.FieldOptions"
},
FieldBehavior: {
values: {
FIELD_BEHAVIOR_UNSPECIFIED: 0,
OPTIONAL: 1,
REQUIRED: 2,
OUTPUT_ONLY: 3,
INPUT_ONLY: 4,
IMMUTABLE: 5,
UNORDERED_LIST: 6,
NON_EMPTY_DEFAULT: 7
}
}
}
},
type: {
options: {
cc_enable_arenas: true,
go_package: "google.golang.org/genproto/googleapis/type/latlng;latlng",
java_multiple_files: true,
java_outer_classname: "LatLngProto",
java_package: "com.google.type",
objc_class_prefix: "GTP"
},
nested: {
LatLng: {
fields: {
latitude: {
type: "double",
id: 1
},
longitude: {
type: "double",
id: 2
}
}
}
}
},
rpc: {
options: {
cc_enable_arenas: true,
go_package: "google.golang.org/genproto/googleapis/rpc/status;status",
java_multiple_files: true,
java_outer_classname: "StatusProto",
java_package: "com.google.rpc",
objc_class_prefix: "RPC"
},
nested: {
Status: {
fields: {
code: {
type: "int32",
id: 1
},
message: {
type: "string",
id: 2
},
details: {
rule: "repeated",
type: "google.protobuf.Any",
id: 3
}
}
}
}
}
}
}
};
// CommonJS-style default export of the proto JSON, plus a frozen module
// namespace object (as emitted by the bundler) exposing both shapes.
var protos = { nested };
var protos$1 = /*#__PURE__*/ Object.freeze({
    __proto__: null,
    nested,
    'default': protos
});
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Used by tests so we can match @grpc/proto-loader behavior. */
const protoLoaderOptions = {
    longs: String,
    enums: String,
    defaults: true,
    oneofs: false
};
/**
 * Loads the protocol buffer definitions for Firestore.
 *
 * @returns The GrpcObject representing our protos.
 */
function loadProtos() {
    return grpc.loadPackageDefinition(protoLoader.fromJSON(protos$1, protoLoaderOptions));
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Loads the GRPC stack */
function newConnection(databaseInfo) {
    return new GrpcConnection(loadProtos(), databaseInfo);
}
/** Return the Platform-specific connectivity monitor. */
function newConnectivityMonitor() {
    // Node has no browser connectivity events, so a no-op monitor is used.
    const monitor = new NoopConnectivityMonitor();
    return monitor;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** The Platform's 'window' implementation or null if not available. */
function getWindow() {
    // Only mock-persistence test runs expose a global `window` on Node.
    if (process.env.USE_MOCK_PERSISTENCE !== 'YES') {
        return null;
    }
    // eslint-disable-next-line no-restricted-globals
    return window;
}
/** The Platform's 'document' implementation or null if not available. */
function getDocument() {
    // There is no DOM document in the Node environment.
    return null;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Creates the platform serializer; Node uses binary protos, not proto3 JSON. */
function newSerializer(databaseId) {
    const useProto3Json = false;
    return new JsonProtoSerializer(databaseId, useProto3Json);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$8 = 'ExponentialBackoff';
/**
 * Initial backoff time in milliseconds after an error.
 * Set to 1s according to https://cloud.google.com/apis/design/errors.
 */
const DEFAULT_BACKOFF_INITIAL_DELAY_MS = 1000;
/** Multiplier applied to the base delay after each failed attempt. */
const DEFAULT_BACKOFF_FACTOR = 1.5;
/** Maximum backoff time in milliseconds (one minute). */
const DEFAULT_BACKOFF_MAX_DELAY_MS = 60000;
/**
* A helper for running delayed tasks following an exponential backoff curve
* between attempts.
*
* Each delay is made up of a "base" delay which follows the exponential
* backoff curve, and a +/- 50% "jitter" that is calculated and added to the
* base delay. This prevents clients from accidentally synchronizing their
* delays causing spikes of load to the backend.
*/
/**
 * A helper for running delayed tasks following an exponential backoff curve
 * between attempts.
 *
 * Each delay is made up of a "base" delay which follows the exponential
 * backoff curve, and a +/- 50% "jitter" that is calculated and added to the
 * base delay. This prevents clients from accidentally synchronizing their
 * delays causing spikes of load to the backend.
 */
class ExponentialBackoff {
    /**
     * @param queue - The AsyncQueue to run backoff operations on.
     * @param timerId - The ID to use when scheduling backoff operations on the
     * AsyncQueue.
     * @param initialDelayMs - The base delay on the first retry attempt. Jitter
     * is still applied, so the actual delay can be as little as half of this.
     * @param backoffFactor - The multiplier used to extend the base delay after
     * each attempt.
     * @param maxDelayMs - The maximum base delay. Jitter is still applied, so
     * the actual delay can be as much as 1.5x this.
     */
    constructor(queue, timerId, initialDelayMs = DEFAULT_BACKOFF_INITIAL_DELAY_MS, backoffFactor = DEFAULT_BACKOFF_FACTOR, maxDelayMs = DEFAULT_BACKOFF_MAX_DELAY_MS) {
        this.queue = queue;
        this.timerId = timerId;
        this.initialDelayMs = initialDelayMs;
        this.backoffFactor = backoffFactor;
        this.maxDelayMs = maxDelayMs;
        // Current base delay; 0 means the next attempt runs without delay.
        this.currentBaseMs = 0;
        // The pending delayed operation, if a backoff is currently scheduled.
        this.timerPromise = null;
        /** The last backoff attempt, as epoch milliseconds. */
        this.lastAttemptTime = Date.now();
        this.reset();
    }
    /**
     * Resets the backoff delay.
     *
     * The very next backoffAndWait() will have no delay. If it is called again
     * (i.e. due to an error), initialDelayMs (plus jitter) will be used, and
     * subsequent ones will increase according to the backoffFactor.
     */
    reset() {
        this.currentBaseMs = 0;
    }
    /**
     * Resets the backoff delay to the maximum delay (e.g. for use after a
     * RESOURCE_EXHAUSTED error).
     */
    resetToMax() {
        this.currentBaseMs = this.maxDelayMs;
    }
    /**
     * Returns a promise that resolves after currentDelayMs, and increases the
     * delay for any subsequent attempts. If there was a pending backoff operation
     * already, it will be canceled.
     */
    backoffAndRun(op) {
        // Cancel any pending backoff operation.
        this.cancel();
        // Schedule using the current base (which may be 0 and should be honored
        // as such), plus random jitter.
        const delayWithJitterMs = Math.floor(this.currentBaseMs + this.jitterDelayMs());
        // Guard against lastAttemptTime being in the future due to a clock change.
        const elapsedSinceLastAttemptMs = Math.max(0, Date.now() - this.lastAttemptTime);
        // Guard against the backoff delay already being past.
        const remainingDelayMs = Math.max(0, delayWithJitterMs - elapsedSinceLastAttemptMs);
        if (remainingDelayMs > 0) {
            logDebug(LOG_TAG$8, `Backing off for ${remainingDelayMs} ms ` +
                `(base delay: ${this.currentBaseMs} ms, ` +
                `delay with jitter: ${delayWithJitterMs} ms, ` +
                `last attempt: ${elapsedSinceLastAttemptMs} ms ago)`);
        }
        this.timerPromise = this.queue.enqueueAfterDelay(this.timerId, remainingDelayMs, () => {
            this.lastAttemptTime = Date.now();
            return op();
        });
        // Extend the base delay for the next attempt and clamp it into
        // [initialDelayMs, maxDelayMs].
        this.currentBaseMs = Math.min(this.maxDelayMs, Math.max(this.initialDelayMs, this.currentBaseMs * this.backoffFactor));
    }
    /** Runs the pending backoff operation immediately, if one is scheduled. */
    skipBackoff() {
        if (this.timerPromise !== null) {
            this.timerPromise.skipDelay();
            this.timerPromise = null;
        }
    }
    /** Cancels any pending backoff operation. */
    cancel() {
        if (this.timerPromise !== null) {
            this.timerPromise.cancel();
            this.timerPromise = null;
        }
    }
    /** Returns a random value in the range [-currentBaseMs/2, currentBaseMs/2] */
    jitterDelayMs() {
        return (Math.random() - 0.5) * this.currentBaseMs;
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$7 = 'PersistentStream';
/** The time a stream stays open after it is marked idle (one minute). */
const IDLE_TIMEOUT_MS = 60000;
/** The time a stream stays open until we consider it healthy (ten seconds). */
const HEALTHY_TIMEOUT_MS = 10000;
/**
* A PersistentStream is an abstract base class that represents a streaming RPC
* to the Firestore backend. It's built on top of the connections own support
* for streaming RPCs, and adds several critical features for our clients:
*
* - Exponential backoff on failure
* - Authentication via CredentialsProvider
* - Dispatching all callbacks into the shared worker queue
* - Closing idle streams after 60 seconds of inactivity
*
* Subclasses of PersistentStream implement serialization of models to and
* from the JSON representation of the protocol buffers for a specific
* streaming RPC.
*
* ## Starting and Stopping
*
* Streaming RPCs are stateful and need to be start()ed before messages can
* be sent and received. The PersistentStream will call the onOpen() function
* of the listener once the stream is ready to accept requests.
*
* Should a start() fail, PersistentStream will call the registered onClose()
* listener with a FirestoreError indicating what went wrong.
*
* A PersistentStream can be started and stopped repeatedly.
*
* Generic types:
* SendType: The type of the outgoing message of the underlying
* connection stream
* ReceiveType: The type of the incoming message of the underlying
* connection stream
* ListenerType: The type of the listener that will be used for callbacks
*/
class PersistentStream {
    /**
     * @param queue - AsyncQueue onto which all listener callbacks and timers
     * are dispatched.
     * @param connectionTimerId - Timer ID for the connection backoff (consumed
     * by the ExponentialBackoff helper; not stored on the instance).
     * @param idleTimerId - Timer ID used when scheduling the idle-close timer.
     * @param healthTimerId - Timer ID used when scheduling the health check.
     * @param connection - Transport used by subclasses' startRpc() to open the
     * underlying stream.
     * @param authCredentialsProvider - Source of auth tokens; invalidated on
     * UNAUTHENTICATED close.
     * @param appCheckCredentialsProvider - Source of App Check tokens;
     * invalidated on UNAUTHENTICATED close.
     * @param listener - Receives onOpen/onClose (and subclass-specific)
     * callbacks.
     */
    constructor(queue, connectionTimerId, idleTimerId, healthTimerId, connection, authCredentialsProvider, appCheckCredentialsProvider, listener) {
        this.queue = queue;
        this.idleTimerId = idleTimerId;
        this.healthTimerId = healthTimerId;
        this.connection = connection;
        this.authCredentialsProvider = authCredentialsProvider;
        this.appCheckCredentialsProvider = appCheckCredentialsProvider;
        this.listener = listener;
        // Current lifecycle state (Initial/Starting/Open/Healthy/Error/Backoff,
        // per the inline enum comments below).
        this.state = 0 /* PersistentStreamState.Initial */;
        /**
         * A close count that's incremented every time the stream is closed; used by
         * getCloseGuardedDispatcher() to invalidate callbacks that happen after
         * close.
         */
        this.closeCount = 0;
        // Delayed operation that closes the stream after IDLE_TIMEOUT_MS;
        // scheduled by markIdle(), cleared by cancelIdleCheck().
        this.idleTimer = null;
        // Delayed operation that promotes the stream to Healthy
        // HEALTHY_TIMEOUT_MS after open; scheduled in startStream().
        this.healthCheck = null;
        // The underlying RPC stream; set in startStream(), cleared in close()
        // and handleStreamClose().
        this.stream = null;
        // Backoff helper used between connection attempts (see performBackoff()).
        this.backoff = new ExponentialBackoff(queue, connectionTimerId);
    }
    /**
     * Returns true if start() has been called and no error has occurred. True
     * indicates the stream is open or in the process of opening (which
     * encompasses respecting backoff, getting auth tokens, and starting the
     * actual RPC). Use isOpen() to determine if the stream is open and ready for
     * outbound requests.
     */
    isStarted() {
        return (this.state === 1 /* PersistentStreamState.Starting */ ||
            this.state === 5 /* PersistentStreamState.Backoff */ ||
            this.isOpen());
    }
    /**
     * Returns true if the underlying RPC is open (the onOpen() listener has been
     * called) and the stream is ready for outbound requests.
     */
    isOpen() {
        return (this.state === 2 /* PersistentStreamState.Open */ ||
            this.state === 3 /* PersistentStreamState.Healthy */);
    }
    /**
     * Starts the RPC. Only allowed if isStarted() returns false. The stream is
     * not immediately ready for use: onOpen() will be invoked when the RPC is
     * ready for outbound requests, at which point isOpen() will return true.
     *
     * When start returns, isStarted() will return true.
     */
    start() {
        // After an error, wait out the backoff delay before reconnecting.
        if (this.state === 4 /* PersistentStreamState.Error */) {
            this.performBackoff();
            return;
        }
        this.auth();
    }
    /**
     * Stops the RPC. This call is idempotent and allowed regardless of the
     * current isStarted() state.
     *
     * When stop returns, isStarted() and isOpen() will both return false.
     */
    async stop() {
        if (this.isStarted()) {
            await this.close(0 /* PersistentStreamState.Initial */);
        }
    }
    /**
     * After an error the stream will usually back off on the next attempt to
     * start it. If the error warrants an immediate restart of the stream, the
     * sender can use this to indicate that the receiver should not back off.
     *
     * Each error will call the onClose() listener. That function can decide to
     * inhibit backoff if required.
     */
    inhibitBackoff() {
        this.state = 0 /* PersistentStreamState.Initial */;
        this.backoff.reset();
    }
    /**
     * Marks this stream as idle. If no further actions are performed on the
     * stream for one minute, the stream will automatically close itself and
     * notify the stream's onClose() handler with Status.OK. The stream will then
     * be in a !isStarted() state, requiring the caller to start the stream again
     * before further use.
     *
     * Only streams that are in state 'Open' can be marked idle, as all other
     * states imply pending network operations.
     */
    markIdle() {
        // Starts the idle time if we are in state 'Open' and are not yet already
        // running a timer (in which case the previous idle timeout still applies).
        if (this.isOpen() && this.idleTimer === null) {
            this.idleTimer = this.queue.enqueueAfterDelay(this.idleTimerId, IDLE_TIMEOUT_MS, () => this.handleIdleCloseTimer());
        }
    }
    /** Sends a message to the underlying stream. */
    sendRequest(msg) {
        // Note: dereferences this.stream unconditionally, so the stream must
        // have been started (see startStream()).
        this.cancelIdleCheck();
        this.stream.send(msg);
    }
    /** Called by the idle timer when the stream should close due to inactivity. */
    async handleIdleCloseTimer() {
        if (this.isOpen()) {
            // When timing out an idle stream there's no reason to force the stream into backoff when
            // it restarts so set the stream state to Initial instead of Error.
            return this.close(0 /* PersistentStreamState.Initial */);
        }
    }
    /** Marks the stream as active again. */
    cancelIdleCheck() {
        if (this.idleTimer) {
            this.idleTimer.cancel();
            this.idleTimer = null;
        }
    }
    /** Cancels the health check delayed operation. */
    cancelHealthCheck() {
        if (this.healthCheck) {
            this.healthCheck.cancel();
            this.healthCheck = null;
        }
    }
    /**
     * Closes the stream and cleans up as necessary:
     *
     * * closes the underlying GRPC stream;
     * * calls the onClose handler with the given 'error';
     * * sets internal stream state to 'finalState';
     * * adjusts the backoff timer based on the error
     *
     * A new stream can be opened by calling start().
     *
     * @param finalState - the intended state of the stream after closing.
     * @param error - the error the connection was closed with.
     */
    async close(finalState, error) {
        // Cancel any outstanding timers (they're guaranteed not to execute).
        this.cancelIdleCheck();
        this.cancelHealthCheck();
        this.backoff.cancel();
        // Invalidates any stream-related callbacks (e.g. from auth or the
        // underlying stream), guaranteeing they won't execute.
        this.closeCount++;
        if (finalState !== 4 /* PersistentStreamState.Error */) {
            // If this is an intentional close ensure we don't delay our next connection attempt.
            this.backoff.reset();
        }
        else if (error && error.code === Code.RESOURCE_EXHAUSTED) {
            // Log the error. (Probably either 'quota exceeded' or 'max queue length reached'.)
            logError(error.toString());
            logError('Using maximum backoff delay to prevent overloading the backend.');
            this.backoff.resetToMax();
        }
        else if (error &&
            error.code === Code.UNAUTHENTICATED &&
            this.state !== 3 /* PersistentStreamState.Healthy */) {
            // "unauthenticated" error means the token was rejected. This should rarely
            // happen since both Auth and AppCheck ensure a sufficient TTL when we
            // request a token. If a user manually resets their system clock this can
            // fail, however. In this case, we should get a Code.UNAUTHENTICATED error
            // before we received the first message and we need to invalidate the token
            // to ensure that we fetch a new token.
            this.authCredentialsProvider.invalidateToken();
            this.appCheckCredentialsProvider.invalidateToken();
        }
        // Clean up the underlying stream because we are no longer interested in events.
        if (this.stream !== null) {
            this.tearDown();
            this.stream.close();
            this.stream = null;
        }
        // This state must be assigned before calling onClose() to allow the callback to
        // inhibit backoff or otherwise manipulate the state in its non-started state.
        this.state = finalState;
        // Notify the listener that the stream closed.
        await this.listener.onClose(error);
    }
    /**
     * Can be overridden to perform additional cleanup before the stream is closed.
     * Calling super.tearDown() is not required.
     */
    tearDown() { }
    /**
     * Fetches auth and App Check tokens in parallel, then opens the underlying
     * RPC via startStream(). Token failures are reported through
     * handleStreamClose().
     */
    auth() {
        this.state = 1 /* PersistentStreamState.Starting */;
        const dispatchIfNotClosed = this.getCloseGuardedDispatcher(this.closeCount);
        // TODO(mikelehen): Just use dispatchIfNotClosed, but see TODO below.
        const closeCount = this.closeCount;
        Promise.all([
            this.authCredentialsProvider.getToken(),
            this.appCheckCredentialsProvider.getToken()
        ]).then(([authToken, appCheckToken]) => {
            // Stream can be stopped while waiting for authentication.
            // TODO(mikelehen): We really should just use dispatchIfNotClosed
            // and let this dispatch onto the queue, but that opened a spec test can
            // of worms that I don't want to deal with in this PR.
            if (this.closeCount === closeCount) {
                // Normally we'd have to schedule the callback on the AsyncQueue.
                // However, the following calls are safe to be called outside the
                // AsyncQueue since they don't chain asynchronous calls
                this.startStream(authToken, appCheckToken);
            }
        }, (error) => {
            dispatchIfNotClosed(() => {
                const rpcError = new FirestoreError(Code.UNKNOWN, 'Fetching auth token failed: ' + error.message);
                return this.handleStreamClose(rpcError);
            });
        });
    }
    /**
     * Opens the underlying RPC (via the subclass's startRpc()) and wires its
     * onOpen/onClose/onMessage callbacks through the close-guarded dispatcher.
     * Also schedules the health check that promotes the stream to Healthy.
     */
    startStream(authToken, appCheckToken) {
        const dispatchIfNotClosed = this.getCloseGuardedDispatcher(this.closeCount);
        this.stream = this.startRpc(authToken, appCheckToken);
        this.stream.onOpen(() => {
            dispatchIfNotClosed(() => {
                this.state = 2 /* PersistentStreamState.Open */;
                this.healthCheck = this.queue.enqueueAfterDelay(this.healthTimerId, HEALTHY_TIMEOUT_MS, () => {
                    if (this.isOpen()) {
                        this.state = 3 /* PersistentStreamState.Healthy */;
                    }
                    return Promise.resolve();
                });
                return this.listener.onOpen();
            });
        });
        this.stream.onClose((error) => {
            dispatchIfNotClosed(() => {
                return this.handleStreamClose(error);
            });
        });
        this.stream.onMessage((msg) => {
            dispatchIfNotClosed(() => {
                return this.onMessage(msg);
            });
        });
    }
    /**
     * Transitions to the Backoff state and schedules a restart of the stream
     * after the current backoff delay.
     */
    performBackoff() {
        this.state = 5 /* PersistentStreamState.Backoff */;
        this.backoff.backoffAndRun(async () => {
            this.state = 0 /* PersistentStreamState.Initial */;
            this.start();
        });
    }
    // Visible for tests
    handleStreamClose(error) {
        logDebug(LOG_TAG$7, `close with error: ${error}`);
        this.stream = null;
        // In theory the stream could close cleanly, however, in our current model
        // we never expect this to happen because if we stop a stream ourselves,
        // this callback will never be called. To prevent cases where we retry
        // without a backoff accidentally, we set the stream to error in all cases.
        return this.close(4 /* PersistentStreamState.Error */, error);
    }
    /**
     * Returns a "dispatcher" function that dispatches operations onto the
     * AsyncQueue but only runs them if closeCount remains unchanged. This allows
     * us to turn auth / stream callbacks into no-ops if the stream is closed /
     * re-opened, etc.
     */
    getCloseGuardedDispatcher(startCloseCount) {
        return (fn) => {
            this.queue.enqueueAndForget(() => {
                if (this.closeCount === startCloseCount) {
                    return fn();
                }
                else {
                    logDebug(LOG_TAG$7, 'stream callback skipped by getCloseGuardedDispatcher.');
                    return Promise.resolve();
                }
            });
        };
    }
}
/**
* A PersistentStream that implements the Listen RPC.
*
* Once the Listen stream has called the onOpen() listener, any number of
* listen() and unlisten() calls can be made to control what changes will be
* sent from the server for ListenResponses.
*/
class PersistentListenStream extends PersistentStream {
    constructor(queue, connection, authCredentials, appCheckCredentials, serializer, listener) {
        super(queue, "listen_stream_connection_backoff" /* TimerId.ListenStreamConnectionBackoff */, "listen_stream_idle" /* TimerId.ListenStreamIdle */, "health_check_timeout" /* TimerId.HealthCheckTimeout */, connection, authCredentials, appCheckCredentials, listener);
        this.serializer = serializer;
    }
    startRpc(authToken, appCheckToken) {
        return this.connection.openStream('Listen', authToken, appCheckToken);
    }
    onMessage(watchChangeProto) {
        // A successful response means the stream is healthy.
        this.backoff.reset();
        const change = fromWatchChange(this.serializer, watchChangeProto);
        const snapshotVersion = versionFromListenResponse(watchChangeProto);
        return this.listener.onWatchChange(change, snapshotVersion);
    }
    /**
     * Registers interest in the results of the given target. If the target
     * includes a resumeToken it will be included in the request. Results that
     * affect the target will be streamed back as WatchChange messages that
     * reference the targetId.
     */
    watch(targetData) {
        const request = {
            database: getEncodedDatabaseId(this.serializer),
            addTarget: toTarget(this.serializer, targetData)
        };
        const labels = toListenRequestLabels(this.serializer, targetData);
        if (labels) {
            request.labels = labels;
        }
        this.sendRequest(request);
    }
    /**
     * Unregisters interest in the results of the target associated with the
     * given targetId.
     */
    unwatch(targetId) {
        this.sendRequest({
            database: getEncodedDatabaseId(this.serializer),
            removeTarget: targetId
        });
    }
}
/**
* A Stream that implements the Write RPC.
*
* The Write RPC requires the caller to maintain special streamToken
* state in between calls, to help the server understand which responses the
* client has processed by the time the next request is made. Every response
* will contain a streamToken; this value must be passed to the next
* request.
*
* After calling start() on this stream, the next request must be a handshake,
* containing whatever streamToken is on hand. Once a response to this
* request is received, all pending mutations may be submitted. When
* submitting multiple batches of mutations at the same time, it's
* okay to use the same streamToken for the calls to writeMutations.
*
* TODO(b/33271235): Use proto types
*/
class PersistentWriteStream extends PersistentStream {
    constructor(queue, connection, authCredentials, appCheckCredentials, serializer, listener) {
        super(queue, "write_stream_connection_backoff" /* TimerId.WriteStreamConnectionBackoff */, "write_stream_idle" /* TimerId.WriteStreamIdle */, "health_check_timeout" /* TimerId.HealthCheckTimeout */, connection, authCredentials, appCheckCredentials, listener);
        this.serializer = serializer;
        this.handshakeComplete_ = false;
    }
    /**
     * Tracks whether or not a handshake has been successfully exchanged and
     * the stream is ready to accept mutations.
     */
    get handshakeComplete() {
        return this.handshakeComplete_;
    }
    // Override of PersistentStream.start: each (re)start begins with a fresh
    // handshake and no stream token.
    start() {
        this.handshakeComplete_ = false;
        this.lastStreamToken = undefined;
        super.start();
    }
    tearDown() {
        if (!this.handshakeComplete_) {
            return;
        }
        // Send an empty write request (carrying the last stream token) before
        // the stream closes.
        this.writeMutations([]);
    }
    startRpc(authToken, appCheckToken) {
        return this.connection.openStream('Write', authToken, appCheckToken);
    }
    onMessage(responseProto) {
        // Always capture the last stream token.
        hardAssert(!!responseProto.streamToken);
        this.lastStreamToken = responseProto.streamToken;
        if (this.handshakeComplete_) {
            // A successful first write response means the stream is healthy.
            // Note, that we could consider a successful handshake healthy, however,
            // the write itself might be causing an error we want to back off from.
            this.backoff.reset();
            const writeResults = fromWriteResults(responseProto.writeResults, responseProto.commitTime);
            const commitVersion = fromVersion(responseProto.commitTime);
            return this.listener.onMutationResult(commitVersion, writeResults);
        }
        // The first response is always the handshake response.
        hardAssert(!responseProto.writeResults || responseProto.writeResults.length === 0);
        this.handshakeComplete_ = true;
        return this.listener.onHandshakeComplete();
    }
    /**
     * Sends an initial streamToken to the server, performing the handshake
     * required to make the StreamingWrite RPC work. Subsequent
     * calls should wait until onHandshakeComplete was called.
     */
    writeHandshake() {
        // TODO(dimond): Support stream resumption. We intentionally do not set the
        // stream token on the handshake, ignoring any stream token we might have.
        const handshakeRequest = { database: getEncodedDatabaseId(this.serializer) };
        this.sendRequest(handshakeRequest);
    }
    /** Sends a group of mutations to the Firestore backend to apply. */
    writeMutations(mutations) {
        this.sendRequest({
            streamToken: this.lastStreamToken,
            writes: mutations.map(mutation => toMutation(this.serializer, mutation))
        });
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Datastore and its related methods are a wrapper around the external Google
* Cloud Datastore grpc API, which provides an interface that is more convenient
* for the rest of the client SDK architecture to consume.
*/
// Marker base class only; all functionality lives in DatastoreImpl below.
class Datastore {
}
/**
* An implementation of Datastore that exposes additional state for internal
* consumption.
*/
class DatastoreImpl extends Datastore {
    constructor(authCredentials, appCheckCredentials, connection, serializer) {
        super();
        this.authCredentials = authCredentials;
        this.appCheckCredentials = appCheckCredentials;
        this.connection = connection;
        this.serializer = serializer;
        this.terminated = false;
    }
    /** Throws if terminate() has already been called on this instance. */
    verifyInitialized() {
        if (this.terminated) {
            throw new FirestoreError(Code.FAILED_PRECONDITION, 'The client has already been terminated.');
        }
    }
    /** Invokes the provided RPC with auth and AppCheck tokens. */
    invokeRPC(rpcName, databaseId, resourcePath, request) {
        this.verifyInitialized();
        return this.getTokens_()
            .then(([authToken, appCheckToken]) => this.connection.invokeRPC(rpcName, toResourcePath(databaseId, resourcePath), request, authToken, appCheckToken))
            .catch((error) => this.handleRpcError_(error));
    }
    /** Invokes the provided RPC with streamed results with auth and AppCheck tokens. */
    invokeStreamingRPC(rpcName, databaseId, resourcePath, request, expectedResponseCount) {
        this.verifyInitialized();
        return this.getTokens_()
            .then(([authToken, appCheckToken]) => this.connection.invokeStreamingRPC(rpcName, toResourcePath(databaseId, resourcePath), request, authToken, appCheckToken, expectedResponseCount))
            .catch((error) => this.handleRpcError_(error));
    }
    /** Fetches the auth and App Check tokens in parallel. */
    getTokens_() {
        return Promise.all([
            this.authCredentials.getToken(),
            this.appCheckCredentials.getToken()
        ]);
    }
    /**
     * Shared error handling for the invoke methods: invalidates both token
     * providers on UNAUTHENTICATED Firebase errors and rethrows; wraps any
     * non-Firebase error in a FirestoreError with code UNKNOWN.
     */
    handleRpcError_(error) {
        if (error.name !== 'FirebaseError') {
            throw new FirestoreError(Code.UNKNOWN, error.toString());
        }
        if (error.code === Code.UNAUTHENTICATED) {
            this.authCredentials.invalidateToken();
            this.appCheckCredentials.invalidateToken();
        }
        throw error;
    }
    terminate() {
        this.terminated = true;
        this.connection.terminate();
    }
}
// TODO(firestorexp): Make sure there is only one Datastore instance per
// firestore-exp client.
/** Creates a Datastore backed by the given connection and credential providers. */
function newDatastore(authCredentials, appCheckCredentials, connection, serializer) {
    const datastore = new DatastoreImpl(authCredentials, appCheckCredentials, connection, serializer);
    return datastore;
}
/** Commits the given mutations via the 'Commit' RPC. */
async function invokeCommitRpc(datastore, mutations) {
    const datastoreImpl = debugCast(datastore);
    const writes = mutations.map(mutation => toMutation(datastoreImpl.serializer, mutation));
    await datastoreImpl.invokeRPC('Commit', datastoreImpl.serializer.databaseId, ResourcePath.emptyPath(), { writes });
}
/**
 * Looks up the given document keys via the 'BatchGetDocuments' streaming RPC
 * and returns the documents in the same order as `keys`.
 */
async function invokeBatchGetDocumentsRpc(datastore, keys) {
    const datastoreImpl = debugCast(datastore);
    const request = {
        documents: keys.map(key => toName(datastoreImpl.serializer, key))
    };
    const response = await datastoreImpl.invokeStreamingRPC('BatchGetDocuments', datastoreImpl.serializer.databaseId, ResourcePath.emptyPath(), request, keys.length);
    // Index the (unordered) responses by document name.
    const docsByName = new Map();
    for (const proto of response) {
        const doc = fromBatchGetDocumentsResponse(datastoreImpl.serializer, proto);
        docsByName.set(doc.key.toString(), doc);
    }
    // Re-emit the documents in the order the keys were requested.
    const result = [];
    keys.forEach(key => {
        const doc = docsByName.get(key.toString());
        hardAssert(!!doc);
        result.push(doc);
    });
    return result;
}
/**
 * Runs the given aggregation query via a streaming RunAggregationQuery RPC
 * and returns the aggregate fields keyed by their client-side aliases.
 */
async function invokeRunAggregationQueryRpc(datastore, query, aggregates) {
    const datastoreImpl = debugCast(datastore);
    const { request, aliasMap, parent } = toRunAggregationQueryRequest(datastoreImpl.serializer, queryToAggregateTarget(query), aggregates);
    if (!datastoreImpl.connection.shouldResourcePathBeIncludedInRequest) {
        delete request.parent;
    }
    const response = await datastoreImpl.invokeStreamingRPC('RunAggregationQuery', datastoreImpl.serializer.databaseId, parent, request, /*expectedResponseCount=*/ 1);
    // Ignore RunAggregationQueryResponse messages that only carry a readTime.
    const withResults = response.filter(proto => !!proto.result);
    hardAssert(withResults.length === 1);
    // The filter above guarantees `result` is set on the surviving response.
    const serverFields = withResults[0].result.aggregateFields;
    // The server was sent short-form aliases; translate each back to the
    // client-side alias the caller will read results under.
    const remappedFields = {};
    for (const serverAlias of Object.keys(serverFields)) {
        remappedFields[aliasMap[serverAlias]] = serverFields[serverAlias];
    }
    return remappedFields;
}
/** Creates a new PersistentWriteStream backed by the given datastore. */
function newPersistentWriteStream(datastore, queue, listener) {
    const impl = debugCast(datastore);
    impl.verifyInitialized();
    return new PersistentWriteStream(queue, impl.connection, impl.authCredentials, impl.appCheckCredentials, impl.serializer, listener);
}
/** Creates a new PersistentListenStream backed by the given datastore. */
function newPersistentWatchStream(datastore, queue, listener) {
    const impl = debugCast(datastore);
    impl.verifyInitialized();
    return new PersistentListenStream(queue, impl.connection, impl.authCredentials, impl.appCheckCredentials, impl.serializer, listener);
}
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Log tag for OnlineStateTracker debug output.
const LOG_TAG$6 = 'OnlineStateTracker';
// To deal with transient failures, we allow multiple stream attempts before
// giving up and transitioning from OnlineState.Unknown to Offline.
// TODO(mikelehen): This used to be set to 2 as a mitigation for b/66228394.
// @jdimond thinks that bug is sufficiently fixed so that we can set this back
// to 1. If that works okay, we could potentially remove this logic entirely.
const MAX_WATCH_STREAM_FAILURES = 1;
// To deal with stream attempts that don't succeed or fail in a timely manner,
// we have a timeout for OnlineState to reach Online or Offline.
// If the timeout is reached, we transition to Offline rather than waiting
// indefinitely.
const ONLINE_STATE_TIMEOUT_MS = 10 * 1000;
/**
* A component used by the RemoteStore to track the OnlineState (that is,
* whether or not the client as a whole should be considered to be online or
* offline), implementing the appropriate heuristics.
*
* In particular, when the client is trying to connect to the backend, we
* allow up to MAX_WATCH_STREAM_FAILURES within ONLINE_STATE_TIMEOUT_MS for
* a connection to succeed. If we have too many failures or the timeout elapses,
* then we set the OnlineState to Offline, and the client will behave as if
* it is offline (get()s will return cached data, etc.).
*/
class OnlineStateTracker {
    constructor(asyncQueue, onlineStateHandler) {
        this.asyncQueue = asyncQueue;
        this.onlineStateHandler = onlineStateHandler;
        // Current OnlineState, broadcast to onlineStateHandler on change.
        this.state = "Unknown" /* OnlineState.Unknown */;
        // Count of consecutive failures to open the watch stream; once it
        // reaches MAX_WATCH_STREAM_FAILURES the state becomes Offline.
        this.watchStreamFailures = 0;
        // Timer forcing Unknown -> Offline after ONLINE_STATE_TIMEOUT_MS when
        // the stream neither succeeds nor fails in time.
        this.onlineStateTimer = null;
        // Whether a "client is offline" warning should still be logged:
        // initially true, cleared after the first warning or after the first
        // successful connection.
        this.shouldWarnClientIsOffline = true;
    }
    /**
     * Called when a watch stream attempt starts (including backoff retries).
     * On the first attempt this broadcasts Unknown and arms the
     * online-state timeout.
     */
    handleWatchStreamStart() {
        if (this.watchStreamFailures !== 0) {
            return;
        }
        this.setAndBroadcast("Unknown" /* OnlineState.Unknown */);
        this.onlineStateTimer = this.asyncQueue.enqueueAfterDelay("online_state_timeout" /* TimerId.OnlineStateTimeout */, ONLINE_STATE_TIMEOUT_MS, () => {
            this.onlineStateTimer = null;
            this.logClientOfflineWarningIfNecessary(`Backend didn't respond within ${ONLINE_STATE_TIMEOUT_MS / 1000} seconds.`);
            this.setAndBroadcast("Offline" /* OnlineState.Offline */);
            // handleWatchStreamFailure() may keep incrementing
            // watchStreamFailures after this point, which is harmless.
            return Promise.resolve();
        });
    }
    /**
     * Reacts to a watch stream failure: a failure while Online drops us to
     * Unknown; further failures count toward MAX_WATCH_STREAM_FAILURES, after
     * which Offline is broadcast.
     */
    handleWatchStreamFailure(error) {
        if (this.state === "Online" /* OnlineState.Online */) {
            this.setAndBroadcast("Unknown" /* OnlineState.Unknown */);
            return;
        }
        this.watchStreamFailures++;
        if (this.watchStreamFailures >= MAX_WATCH_STREAM_FAILURES) {
            this.clearOnlineStateTimer();
            this.logClientOfflineWarningIfNecessary(`Connection failed ${MAX_WATCH_STREAM_FAILURES} times. Most recent error: ${error.toString()}`);
            this.setAndBroadcast("Offline" /* OnlineState.Offline */);
        }
    }
    /**
     * Forces the OnlineState to `newState`, resetting the timeout and failure
     * counter. Must not be used in place of handleWatchStreamStart() /
     * handleWatchStreamFailure(), which drive the offline heuristics.
     */
    set(newState) {
        this.clearOnlineStateTimer();
        this.watchStreamFailures = 0;
        if (newState === "Online" /* OnlineState.Online */) {
            // We've connected at least once; suppress future offline warnings.
            this.shouldWarnClientIsOffline = false;
        }
        this.setAndBroadcast(newState);
    }
    /** Updates this.state and notifies the handler, but only on a change. */
    setAndBroadcast(newState) {
        if (newState === this.state) {
            return;
        }
        this.state = newState;
        this.onlineStateHandler(newState);
    }
    /** Logs the offline message: once as an error, afterwards at debug level. */
    logClientOfflineWarningIfNecessary(details) {
        const message = `Could not reach Cloud Firestore backend. ${details}\n` +
            `This typically indicates that your device does not have a healthy ` +
            `Internet connection at the moment. The client will operate in offline ` +
            `mode until it is able to successfully connect to the backend.`;
        if (this.shouldWarnClientIsOffline) {
            logError(message);
            this.shouldWarnClientIsOffline = false;
        }
        else {
            logDebug(LOG_TAG$6, message);
        }
    }
    /** Cancels and clears the pending online-state timeout, if any. */
    clearOnlineStateTimer() {
        if (this.onlineStateTimer === null) {
            return;
        }
        this.onlineStateTimer.cancel();
        this.onlineStateTimer = null;
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Log tag for RemoteStore debug output.
const LOG_TAG$5 = 'RemoteStore';
// TODO(b/35853402): Negotiate this with the stream.
// Maximum number of unacknowledged mutation batches kept in the write
// pipeline at any one time.
const MAX_PENDING_WRITES = 10;
class RemoteStoreImpl {
    constructor(
    /** Source of outbound mutations for the write pipeline. */
    localStore, 
    /** The client-side proxy for interacting with the backend. */
    datastore, asyncQueue, onlineStateHandler, connectivityMonitor) {
        this.localStore = localStore;
        this.datastore = datastore;
        this.asyncQueue = asyncQueue;
        this.remoteSyncer = {};
        // Up to MAX_PENDING_WRITES mutation batches fetched from the
        // LocalStore via fillWritePipeline() that have been (or will be) sent
        // on the write stream. Whenever the pipeline is non-empty the
        // RemoteStore tries to (re)start the write stream and sends the
        // batches in order once it is established.
        //
        // Batches stay here until the backend acknowledges them, so they are
        // automatically re-sent if the stream is interrupted/restarted first.
        // Responses are matched to requests purely by order, so acknowledged
        // batches are simply shift()ed off the front.
        this.writePipeline = [];
        // Targets the user has explicitly issued a 'listen' for, keyed by
        // target ID. They may or may not have been sent to / acknowledged by
        // the server yet; on re-establishing the listen stream every entry is
        // re-sent. Unlistened targets are removed eagerly without waiting for
        // confirmation from the listen stream.
        this.listenTargets = new Map();
        // Reasons why the RemoteStore must stay offline; when empty, network
        // connections may be started.
        this.offlineCauses = new Set();
        // Handlers invoked when the network is enabled or disabled.
        // PORTING NOTE: used on Web to lazily create the underlying streams
        // (keeping them tree-shakeable); Android/iOS create the streams when
        // the RemoteStore is constructed.
        this.onNetworkStatusChange = [];
        this.connectivityMonitor = connectivityMonitor;
        this.connectivityMonitor.addCallback(() => {
            asyncQueue.enqueueAndForget(async () => {
                // PORTING NOTE: unlike iOS, the streams are restarted even
                // when the network becomes unreachable, since that is the
                // only way we have to tear them down.
                if (canUseNetwork(this)) {
                    logDebug(LOG_TAG$5, 'Restarting streams for network reachability change.');
                    await restartNetwork(this);
                }
            });
        });
        this.onlineStateTracker = new OnlineStateTracker(asyncQueue, onlineStateHandler);
    }
}
/** Factory for the concrete RemoteStore implementation. */
function newRemoteStore(localStore, datastore, asyncQueue, onlineStateHandler, connectivityMonitor) {
    const store = new RemoteStoreImpl(localStore, datastore, asyncQueue, onlineStateHandler, connectivityMonitor);
    return store;
}
/** Re-enables the network by clearing the user-disabled cause. Idempotent. */
function remoteStoreEnableNetwork(remoteStore) {
    const impl = debugCast(remoteStore);
    impl.offlineCauses.delete(0 /* OfflineCause.UserDisabled */);
    return enableNetworkInternal(impl);
}
/**
 * Notifies all network-status handlers that the network is enabled — but
 * only when no offline causes remain.
 */
async function enableNetworkInternal(remoteStoreImpl) {
    if (!canUseNetwork(remoteStoreImpl)) {
        return;
    }
    for (const handler of remoteStoreImpl.onNetworkStatusChange) {
        await handler(/* enabled= */ true);
    }
}
/**
 * Temporarily disables the network until re-enabled via enableNetwork().
 * Broadcasts Offline so reads are served from cache.
 */
async function remoteStoreDisableNetwork(remoteStore) {
    const impl = debugCast(remoteStore);
    impl.offlineCauses.add(0 /* OfflineCause.UserDisabled */);
    await disableNetworkInternal(impl);
    // get()s should now be answered from cache.
    impl.onlineStateTracker.set("Offline" /* OnlineState.Offline */);
}
/** Notifies every registered network-status handler that the network is off. */
async function disableNetworkInternal(remoteStoreImpl) {
    for (const handler of remoteStoreImpl.onNetworkStatusChange) {
        await handler(/* enabled= */ false);
    }
}
/** Permanently shuts down the RemoteStore and its connectivity monitor. */
async function remoteStoreShutdown(remoteStore) {
    const impl = debugCast(remoteStore);
    logDebug(LOG_TAG$5, 'RemoteStore shutting down.');
    impl.offlineCauses.add(5 /* OfflineCause.Shutdown */);
    await disableNetworkInternal(impl);
    impl.connectivityMonitor.shutdown();
    // Unknown (rather than Offline) avoids potentially raising spurious
    // listener events with cached data.
    impl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
}
/**
 * Starts listening to the target in `targetData`, using its resume token if
 * present. No-op when the target is already being listened to.
 */
function remoteStoreListen(remoteStore, targetData) {
    const impl = debugCast(remoteStore);
    if (impl.listenTargets.has(targetData.targetId)) {
        return;
    }
    // Track the target locally before touching the stream.
    impl.listenTargets.set(targetData.targetId, targetData);
    if (shouldStartWatchStream(impl)) {
        // The listen request itself will be sent in onWatchStreamOpen.
        startWatchStream(impl);
    }
    else if (ensureWatchStream(impl).isOpen()) {
        sendWatchRequest(impl, targetData);
    }
}
/**
 * Stops listening to the given target. No-op when the target id is not
 * currently being listened to.
 */
function remoteStoreUnlisten(remoteStore, targetId) {
    const impl = debugCast(remoteStore);
    const watchStream = ensureWatchStream(impl);
    impl.listenTargets.delete(targetId);
    if (watchStream.isOpen()) {
        sendUnwatchRequest(impl, targetId);
    }
    if (impl.listenTargets.size !== 0) {
        return;
    }
    // That was the last listener.
    if (watchStream.isOpen()) {
        watchStream.markIdle();
    }
    else if (canUseNetwork(impl)) {
        // With the stream closed and no listens left to send, we cannot
        // confirm stream health, so fall back to Unknown rather than Online.
        impl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
    }
}
/**
 * Sends a watch request for `targetData`, first recording a pending target
 * request so that messages for this target wait for the server's ack.
 */
function sendWatchRequest(remoteStoreImpl, targetData) {
    remoteStoreImpl.watchChangeAggregator.recordPendingTargetRequest(targetData.targetId);
    const isResume = targetData.resumeToken.approximateByteSize() > 0 ||
        targetData.snapshotVersion.compareTo(SnapshotVersion.min()) > 0;
    if (isResume) {
        // For a resumed target, also attach the number of remote keys we
        // already hold (the expected count).
        const expectedCount = remoteStoreImpl.remoteSyncer.getRemoteKeysForTarget(targetData.targetId).size;
        targetData = targetData.withExpectedCount(expectedCount);
    }
    ensureWatchStream(remoteStoreImpl).watch(targetData);
}
/**
 * Sends an unwatch request, first recording a pending target request so that
 * messages for this target wait for the server to confirm the removal.
 */
function sendUnwatchRequest(remoteStoreImpl, targetId) {
    remoteStoreImpl.watchChangeAggregator.recordPendingTargetRequest(targetId);
    ensureWatchStream(remoteStoreImpl).unwatch(targetId);
}
/** Creates a fresh WatchChangeAggregator and starts the watch stream. */
function startWatchStream(remoteStoreImpl) {
    const metadataProvider = {
        getRemoteKeysForTarget: targetId => remoteStoreImpl.remoteSyncer.getRemoteKeysForTarget(targetId),
        getTargetDataForTarget: targetId => remoteStoreImpl.listenTargets.get(targetId) || null,
        getDatabaseId: () => remoteStoreImpl.datastore.serializer.databaseId
    };
    remoteStoreImpl.watchChangeAggregator = new WatchChangeAggregator(metadataProvider);
    ensureWatchStream(remoteStoreImpl).start();
    remoteStoreImpl.onlineStateTracker.handleWatchStreamStart();
}
/**
 * True when the watch stream is needed (network usable, at least one listen
 * target) but has not yet been started.
 */
function shouldStartWatchStream(remoteStoreImpl) {
    if (!canUseNetwork(remoteStoreImpl)) {
        return false;
    }
    if (ensureWatchStream(remoteStoreImpl).isStarted()) {
        return false;
    }
    return remoteStoreImpl.listenTargets.size > 0;
}
/** True when nothing currently forces the RemoteStore to stay offline. */
function canUseNetwork(remoteStore) {
    const impl = debugCast(remoteStore);
    return impl.offlineCauses.size === 0;
}
/** Drops the per-stream aggregation state when the watch stream goes away. */
function cleanUpWatchStreamState(remoteStoreImpl) {
    remoteStoreImpl.watchChangeAggregator = undefined;
}
/** Re-sends a watch request for every tracked listen target. */
async function onWatchStreamOpen(remoteStoreImpl) {
    for (const targetData of remoteStoreImpl.listenTargets.values()) {
        sendWatchRequest(remoteStoreImpl, targetData);
    }
}
/**
 * Handles the watch stream closing: records the failure and reconnects while
 * active listens remain, otherwise reverts to Unknown.
 */
async function onWatchStreamClose(remoteStoreImpl, error) {
    cleanUpWatchStreamState(remoteStoreImpl);
    if (shouldStartWatchStream(remoteStoreImpl)) {
        // Listens are still outstanding: feed the failure into the online
        // state heuristics and retry the connection.
        remoteStoreImpl.onlineStateTracker.handleWatchStreamFailure(error);
        startWatchStream(remoteStoreImpl);
    }
    else {
        // No active targets means no reconnect attempt; with no connection
        // attempt in flight the state is simply Unknown.
        remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
    }
}
/**
 * Routes a single message received on the watch stream into the
 * WatchChangeAggregator and, when the message carries a global snapshot
 * version at or past the last remote snapshot, raises a RemoteEvent.
 */
async function onWatchStreamChange(remoteStoreImpl, watchChange, snapshotVersion) {
    // Mark the client as online since we got a message from the server
    remoteStoreImpl.onlineStateTracker.set("Online" /* OnlineState.Online */);
    if (watchChange instanceof WatchTargetChange &&
        watchChange.state === 2 /* WatchTargetChangeState.Removed */ &&
        watchChange.cause) {
        // There was an error on a target, don't wait for a consistent snapshot
        // to raise events
        try {
            await handleTargetError(remoteStoreImpl, watchChange);
        }
        catch (e) {
            logDebug(LOG_TAG$5, 'Failed to remove targets %s: %s ', watchChange.targetIds.join(','), e);
            // disableNetworkUntilRecovery() rethrows anything that is not an
            // IndexedDB transaction error, so unexpected failures propagate.
            await disableNetworkUntilRecovery(remoteStoreImpl, e);
        }
        return;
    }
    // Dispatch on the concrete change type.
    if (watchChange instanceof DocumentWatchChange) {
        remoteStoreImpl.watchChangeAggregator.handleDocumentChange(watchChange);
    }
    else if (watchChange instanceof ExistenceFilterChange) {
        remoteStoreImpl.watchChangeAggregator.handleExistenceFilter(watchChange);
    }
    else {
        remoteStoreImpl.watchChangeAggregator.handleTargetChange(watchChange);
    }
    if (!snapshotVersion.isEqual(SnapshotVersion.min())) {
        try {
            const lastRemoteSnapshotVersion = await localStoreGetLastRemoteSnapshotVersion(remoteStoreImpl.localStore);
            if (snapshotVersion.compareTo(lastRemoteSnapshotVersion) >= 0) {
                // We have received a target change with a global snapshot if the snapshot
                // version is not equal to SnapshotVersion.min().
                await raiseWatchSnapshot(remoteStoreImpl, snapshotVersion);
            }
        }
        catch (e) {
            logDebug(LOG_TAG$5, 'Failed to raise snapshot:', e);
            await disableNetworkUntilRecovery(remoteStoreImpl, e);
        }
    }
}
/**
 * Recovery logic for IndexedDB errors that takes the network offline until
 * `op` succeeds. Retries are scheduled with backoff using
 * `enqueueRetryable()`. If `op()` is not provided, IndexedDB access is
 * validated via a generic operation.
 *
 * The returned Promise is resolved once the network is disabled and before
 * any retry attempt.
 *
 * Errors that are not IndexedDB transaction errors are rethrown unchanged.
 */
async function disableNetworkUntilRecovery(remoteStoreImpl, e, op) {
    if (isIndexedDbTransactionError(e)) {
        remoteStoreImpl.offlineCauses.add(1 /* OfflineCause.IndexedDbFailed */);
        // Disable network and raise offline snapshots
        await disableNetworkInternal(remoteStoreImpl);
        remoteStoreImpl.onlineStateTracker.set("Offline" /* OnlineState.Offline */);
        if (!op) {
            // Use a simple read operation to determine if IndexedDB recovered.
            // Ideally, we would expose a health check directly on SimpleDb, but
            // RemoteStore only has access to persistence through LocalStore.
            op = () => localStoreGetLastRemoteSnapshotVersion(remoteStoreImpl.localStore);
        }
        // Probe IndexedDB periodically and re-enable network
        // NOTE(review): enqueueRetryable presumably re-runs this task with
        // backoff until it completes without throwing — confirm in AsyncQueue.
        remoteStoreImpl.asyncQueue.enqueueRetryable(async () => {
            logDebug(LOG_TAG$5, 'Retrying IndexedDB access');
            await op();
            remoteStoreImpl.offlineCauses.delete(1 /* OfflineCause.IndexedDbFailed */);
            await enableNetworkInternal(remoteStoreImpl);
        });
    }
    else {
        throw e;
    }
}
/**
 * Runs `op`; if it rejects, hands the error to disableNetworkUntilRecovery so
 * the network stays offline until `op` eventually succeeds. Returns after the
 * first attempt.
 */
function executeWithRecovery(remoteStoreImpl, op) {
    const firstAttempt = op();
    return firstAttempt.catch(e => disableNetworkUntilRecovery(remoteStoreImpl, e, op));
}
/**
 * Takes a batch of changes from the Datastore, repackages them as a
 * RemoteEvent, and passes that on to the listener, which is typically the
 * SyncEngine.
 *
 * Also updates the in-memory resume tokens for all target changes in the
 * event, and re-issues listens for targets invalidated by existence filter
 * mismatches.
 */
function raiseWatchSnapshot(remoteStoreImpl, snapshotVersion) {
    const remoteEvent = remoteStoreImpl.watchChangeAggregator.createRemoteEvent(snapshotVersion);
    // Update in-memory resume tokens. LocalStore will update the
    // persistent view of these when applying the completed RemoteEvent.
    remoteEvent.targetChanges.forEach((change, targetId) => {
        if (change.resumeToken.approximateByteSize() > 0) {
            const targetData = remoteStoreImpl.listenTargets.get(targetId);
            // A watched target might have been removed already.
            if (targetData) {
                remoteStoreImpl.listenTargets.set(targetId, targetData.withResumeToken(change.resumeToken, snapshotVersion));
            }
        }
    });
    // Re-establish listens for the targets that have been invalidated by
    // existence filter mismatches.
    // NOTE(review): targetMismatches looks like a sorted map whose forEach
    // yields (key, value) = (targetId, targetPurpose) — confirm its type.
    remoteEvent.targetMismatches.forEach((targetId, targetPurpose) => {
        const targetData = remoteStoreImpl.listenTargets.get(targetId);
        if (!targetData) {
            // A watched target might have been removed already.
            return;
        }
        // Clear the resume token for the target, since we're in a known mismatch
        // state.
        remoteStoreImpl.listenTargets.set(targetId, targetData.withResumeToken(ByteString.EMPTY_BYTE_STRING, targetData.snapshotVersion));
        // Cause a hard reset by unwatching and rewatching immediately, but
        // deliberately don't send a resume token so that we get a full update.
        sendUnwatchRequest(remoteStoreImpl, targetId);
        // Mark the target we send as being on behalf of an existence filter
        // mismatch, but don't actually retain that in listenTargets. This ensures
        // that we flag the first re-listen this way without impacting future
        // listens of this target (that might happen e.g. on reconnect).
        const requestTargetData = new TargetData(targetData.target, targetId, targetPurpose, targetData.sequenceNumber);
        sendWatchRequest(remoteStoreImpl, requestTargetData);
    });
    return remoteStoreImpl.remoteSyncer.applyRemoteEvent(remoteEvent);
}
/**
 * Handles an error on a target: rejects the listen for every affected target
 * that is still tracked and stops tracking it.
 */
async function handleTargetError(remoteStoreImpl, watchChange) {
    const error = watchChange.cause;
    for (const targetId of watchChange.targetIds) {
        if (!remoteStoreImpl.listenTargets.has(targetId)) {
            // The watched target was already removed locally; nothing to do.
            continue;
        }
        await remoteStoreImpl.remoteSyncer.rejectListen(targetId, error);
        remoteStoreImpl.listenTargets.delete(targetId);
        remoteStoreImpl.watchChangeAggregator.removeTarget(targetId);
    }
}
/**
 * Attempts to fill our write pipeline with writes from the LocalStore.
 *
 * Called internally to bootstrap or refill the write pipeline and by
 * SyncEngine whenever there are new mutations to process.
 *
 * Starts the write stream if necessary.
 */
async function fillWritePipeline(remoteStore) {
    const remoteStoreImpl = debugCast(remoteStore);
    const writeStream = ensureWriteStream(remoteStoreImpl);
    // Resume fetching after the newest batch already in the pipeline;
    // BATCHID_UNKNOWN is the "start from the beginning" sentinel.
    let lastBatchIdRetrieved = remoteStoreImpl.writePipeline.length > 0
        ? remoteStoreImpl.writePipeline[remoteStoreImpl.writePipeline.length - 1]
            .batchId
        : BATCHID_UNKNOWN;
    // Keep pulling batches while the pipeline has room (MAX_PENDING_WRITES)
    // and the network is usable.
    while (canAddToWritePipeline(remoteStoreImpl)) {
        try {
            const batch = await localStoreGetNextMutationBatch(remoteStoreImpl.localStore, lastBatchIdRetrieved);
            if (batch === null) {
                // No more local mutations; idle the stream if nothing is pending.
                if (remoteStoreImpl.writePipeline.length === 0) {
                    writeStream.markIdle();
                }
                break;
            }
            else {
                lastBatchIdRetrieved = batch.batchId;
                addToWritePipeline(remoteStoreImpl, batch);
            }
        }
        catch (e) {
            // Treated as an IndexedDB failure; non-IndexedDB errors rethrow.
            await disableNetworkUntilRecovery(remoteStoreImpl, e);
        }
    }
    if (shouldStartWriteStream(remoteStoreImpl)) {
        startWriteStream(remoteStoreImpl);
    }
}
/**
 * True if the network is usable and the write pipeline still has room for
 * another batch (capacity MAX_PENDING_WRITES).
 */
function canAddToWritePipeline(remoteStoreImpl) {
    if (!canUseNetwork(remoteStoreImpl)) {
        return false;
    }
    return remoteStoreImpl.writePipeline.length < MAX_PENDING_WRITES;
}
/**
 * Appends `batch` to the write pipeline, sending it immediately when the
 * write stream is already open and past its handshake.
 */
function addToWritePipeline(remoteStoreImpl, batch) {
    remoteStoreImpl.writePipeline.push(batch);
    const stream = ensureWriteStream(remoteStoreImpl);
    if (stream.isOpen() && stream.handshakeComplete) {
        stream.writeMutations(batch.mutations);
    }
}
/**
 * True when pending writes exist and the network is usable, but the write
 * stream has not yet been started.
 */
function shouldStartWriteStream(remoteStoreImpl) {
    if (!canUseNetwork(remoteStoreImpl)) {
        return false;
    }
    if (ensureWriteStream(remoteStoreImpl).isStarted()) {
        return false;
    }
    return remoteStoreImpl.writePipeline.length > 0;
}
/** Starts (or restarts) the lazily-created write stream. */
function startWriteStream(remoteStoreImpl) {
    const stream = ensureWriteStream(remoteStoreImpl);
    stream.start();
}
/** Kicks off the write handshake as soon as the write stream opens. */
async function onWriteStreamOpen(remoteStoreImpl) {
    const stream = ensureWriteStream(remoteStoreImpl);
    stream.writeHandshake();
}
/** Flushes every pipelined batch once the write handshake has completed. */
async function onWriteHandshakeComplete(remoteStoreImpl) {
    const stream = ensureWriteStream(remoteStoreImpl);
    for (const batch of remoteStoreImpl.writePipeline) {
        stream.writeMutations(batch.mutations);
    }
}
/**
 * Pairs a write response with the oldest pipelined batch (responses arrive in
 * request order), applies the result, then refills the pipeline.
 */
async function onMutationResult(remoteStoreImpl, commitVersion, results) {
    // Responses are matched to requests purely by order.
    const batch = remoteStoreImpl.writePipeline.shift();
    const success = MutationBatchResult.from(batch, commitVersion, results);
    await executeWithRecovery(remoteStoreImpl, () => remoteStoreImpl.remoteSyncer.applySuccessfulWrite(success));
    // Completing this mutation may have freed up a pipeline slot.
    await fillWritePipeline(remoteStoreImpl);
}
/**
 * Handles the write stream closing: an error after the handshake means the
 * in-flight write itself failed. Restarts the stream if writes remain.
 */
async function onWriteStreamClose(remoteStoreImpl, error) {
    // Short-circuit on `error` first so the stream is not created lazily
    // just to read its handshake flag.
    if (error && ensureWriteStream(remoteStoreImpl).handshakeComplete) {
        // The failure happened after the handshake: it belongs to a write.
        await handleWriteError(remoteStoreImpl, error);
    }
    // Refilling the pipeline for failed writes may have made a restart
    // necessary.
    if (shouldStartWriteStream(remoteStoreImpl)) {
        startWriteStream(remoteStoreImpl);
    }
}
/**
 * Handles a write error by rejecting the head-of-line batch when the error is
 * permanent; transient errors are left to the stream's retry logic.
 */
async function handleWriteError(remoteStoreImpl, error) {
    if (!isPermanentWriteError(error.code)) {
        // Transient: the retry logic will take care of it.
        return;
    }
    // The request itself was the problem, so resending it cannot succeed.
    const batch = remoteStoreImpl.writePipeline.shift();
    // A bad request says nothing about server health, so skip backoff on the
    // next stream start.
    ensureWriteStream(remoteStoreImpl).inhibitBackoff();
    await executeWithRecovery(remoteStoreImpl, () => remoteStoreImpl.remoteSyncer.rejectFailedWrite(batch.batchId, error));
    // Rejecting this batch may have freed up a pipeline slot.
    await fillWritePipeline(remoteStoreImpl);
}
/** Bounces the network (disable then re-enable) after a connectivity change. */
async function restartNetwork(remoteStore) {
    const impl = debugCast(remoteStore);
    impl.offlineCauses.add(4 /* OfflineCause.ConnectivityChange */);
    await disableNetworkInternal(impl);
    impl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
    impl.offlineCauses.delete(4 /* OfflineCause.ConnectivityChange */);
    await enableNetworkInternal(impl);
}
/**
 * Reacts to a credential/user change: tears down and re-creates the network
 * streams so they pick up a fresh auth token and the new user's mutations,
 * and notifies the remoteSyncer of the new user in between.
 */
async function remoteStoreHandleCredentialChange(remoteStore, user) {
    const remoteStoreImpl = debugCast(remoteStore);
    remoteStoreImpl.asyncQueue.verifyOperationInProgress();
    logDebug(LOG_TAG$5, 'RemoteStore received new credentials');
    const usesNetwork = canUseNetwork(remoteStoreImpl);
    // Tear down and re-create our network streams. This will ensure we get a
    // fresh auth token for the new user and re-fill the write pipeline with
    // new mutations from the LocalStore (since mutations are per-user).
    remoteStoreImpl.offlineCauses.add(3 /* OfflineCause.CredentialChange */);
    await disableNetworkInternal(remoteStoreImpl);
    if (usesNetwork) {
        // Don't set the network status to Unknown if we are offline.
        remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
    }
    await remoteStoreImpl.remoteSyncer.handleCredentialChange(user);
    remoteStoreImpl.offlineCauses.delete(3 /* OfflineCause.CredentialChange */);
    await enableNetworkInternal(remoteStoreImpl);
}
/**
 * Toggles the network state when the client gains or loses its primary lease:
 * primary clients use the network, secondary clients go offline (broadcasting
 * Unknown rather than Offline).
 */
async function remoteStoreApplyPrimaryState(remoteStore, isPrimary) {
    const remoteStoreImpl = debugCast(remoteStore);
    if (isPrimary) {
        remoteStoreImpl.offlineCauses.delete(2 /* OfflineCause.IsSecondary */);
        await enableNetworkInternal(remoteStoreImpl);
    }
    else {
        // Plain `else` — the previous `else if (!isPrimary)` guard was
        // redundant (always true here).
        remoteStoreImpl.offlineCauses.add(2 /* OfflineCause.IsSecondary */);
        await disableNetworkInternal(remoteStoreImpl);
        remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
    }
}
/**
 * If not yet initialized, registers the WatchStream and its network state
 * callback with `remoteStoreImpl`. Returns the existing stream if one is
 * already available.
 *
 * PORTING NOTE: On iOS and Android, the WatchStream gets registered on startup.
 * This is not done on Web to allow it to be tree-shaken.
 */
function ensureWatchStream(remoteStoreImpl) {
    if (!remoteStoreImpl.watchStream) {
        // Create stream (but note that it is not started yet).
        remoteStoreImpl.watchStream = newPersistentWatchStream(remoteStoreImpl.datastore, remoteStoreImpl.asyncQueue, {
            onOpen: onWatchStreamOpen.bind(null, remoteStoreImpl),
            onClose: onWatchStreamClose.bind(null, remoteStoreImpl),
            onWatchChange: onWatchStreamChange.bind(null, remoteStoreImpl)
        });
        // React to network enable/disable for the lifetime of this stream.
        remoteStoreImpl.onNetworkStatusChange.push(async (enabled) => {
            if (enabled) {
                // Fresh network: skip backoff, then (re)start if listens exist.
                remoteStoreImpl.watchStream.inhibitBackoff();
                if (shouldStartWatchStream(remoteStoreImpl)) {
                    startWatchStream(remoteStoreImpl);
                }
                else {
                    remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
                }
            }
            else {
                // Network disabled: stop the stream and drop its aggregation
                // state.
                await remoteStoreImpl.watchStream.stop();
                cleanUpWatchStreamState(remoteStoreImpl);
            }
        });
    }
    return remoteStoreImpl.watchStream;
}
/**
 * If not yet initialized, registers the WriteStream and its network state
 * callback with `remoteStoreImpl`. Returns the existing stream if one is
 * already available.
 *
 * PORTING NOTE: On iOS and Android, the WriteStream gets registered on startup.
 * This is not done on Web to allow it to be tree-shaken.
 */
function ensureWriteStream(remoteStoreImpl) {
    if (!remoteStoreImpl.writeStream) {
        // Create stream (but note that it is not started yet).
        remoteStoreImpl.writeStream = newPersistentWriteStream(remoteStoreImpl.datastore, remoteStoreImpl.asyncQueue, {
            onOpen: onWriteStreamOpen.bind(null, remoteStoreImpl),
            onClose: onWriteStreamClose.bind(null, remoteStoreImpl),
            onHandshakeComplete: onWriteHandshakeComplete.bind(null, remoteStoreImpl),
            onMutationResult: onMutationResult.bind(null, remoteStoreImpl)
        });
        // React to network enable/disable for the lifetime of this stream.
        remoteStoreImpl.onNetworkStatusChange.push(async (enabled) => {
            if (enabled) {
                // Fresh network: skip backoff and refill the pipeline (which
                // starts the stream if there is anything to send).
                remoteStoreImpl.writeStream.inhibitBackoff();
                // This will start the write stream if necessary.
                await fillWritePipeline(remoteStoreImpl);
            }
            else {
                // Network disabled: stop the stream and drop unsent batches
                // (they remain in the LocalStore and will be re-fetched).
                await remoteStoreImpl.writeStream.stop();
                if (remoteStoreImpl.writePipeline.length > 0) {
                    logDebug(LOG_TAG$5, `Stopping write stream with ${remoteStoreImpl.writePipeline.length} pending writes`);
                    remoteStoreImpl.writePipeline = [];
                }
            }
        });
    }
    return remoteStoreImpl.writeStream;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$4 = 'AsyncQueue';
/**
* Represents an operation scheduled to be run in the future on an AsyncQueue.
*
* It is created via DelayedOperation.createAndSchedule().
*
* Supports cancellation (via cancel()) and early execution (via skipDelay()).
*
* Note: We implement `PromiseLike` instead of `Promise`, as the `Promise` type
* in newer versions of TypeScript defines `finally`, which is not available in
* IE.
*/
class DelayedOperation {
    /**
     * @param asyncQueue - Queue the operation is enqueued on when the timer
     * fires.
     * @param timerId - Identifies the type of operation this is.
     * @param targetTimeMs - Absolute epoch-ms time the operation should run at.
     * @param op - The function to run; may return a promise.
     * @param removalCallback - Invoked synchronously once the operation runs or
     * is canceled, so the AsyncQueue can drop it from its bookkeeping.
     */
    constructor(asyncQueue, timerId, targetTimeMs, op, removalCallback) {
        this.asyncQueue = asyncQueue;
        this.timerId = timerId;
        this.targetTimeMs = targetTimeMs;
        this.op = op;
        this.removalCallback = removalCallback;
        // Settled with the op's result when it runs, or rejected on cancel().
        this.deferred = new Deferred();
        // Binding `then` makes this object PromiseLike, so callers can await it
        // directly (see the class comment above on why not a full Promise).
        this.then = this.deferred.promise.then.bind(this.deferred.promise);
        // It's normal for the deferred promise to be canceled (due to cancellation)
        // and so we attach a dummy catch callback to avoid
        // 'UnhandledPromiseRejectionWarning' log spam.
        this.deferred.promise.catch(err => { });
    }
    /** The promise that settles when the operation runs or is canceled. */
    get promise() {
        return this.deferred.promise;
    }
    /**
     * Creates and returns a DelayedOperation that has been scheduled to be
     * executed on the provided asyncQueue after the provided delayMs.
     *
     * @param asyncQueue - The queue to schedule the operation on.
     * @param id - A Timer ID identifying the type of operation this is.
     * @param delayMs - The delay (ms) before the operation should be scheduled.
     * @param op - The operation to run.
     * @param removalCallback - A callback to be called synchronously once the
     * operation is executed or canceled, notifying the AsyncQueue to remove it
     * from its delayedOperations list.
     * PORTING NOTE: This exists to prevent making removeDelayedOperation() and
     * the DelayedOperation class public.
     */
    static createAndSchedule(asyncQueue, timerId, delayMs, op, removalCallback) {
        const targetTime = Date.now() + delayMs;
        const delayedOp = new DelayedOperation(asyncQueue, timerId, targetTime, op, removalCallback);
        delayedOp.start(delayMs);
        return delayedOp;
    }
    /**
     * Starts the timer. This is called immediately after construction by
     * createAndSchedule().
     *
     * NOTE: before start() runs, `timerHandle` is undefined (not null);
     * createAndSchedule() calls start() right away, so the `!== null` guards
     * below never observe that state.
     */
    start(delayMs) {
        this.timerHandle = setTimeout(() => this.handleDelayElapsed(), delayMs);
    }
    /**
     * Queues the operation to run immediately (if it hasn't already been run or
     * canceled).
     */
    skipDelay() {
        return this.handleDelayElapsed();
    }
    /**
     * Cancels the operation if it hasn't already been executed or canceled. The
     * promise will be rejected.
     *
     * As long as the operation has not yet been run, calling cancel() provides a
     * guarantee that the operation will not be run.
     */
    cancel(reason) {
        if (this.timerHandle !== null) {
            this.clearTimeout();
            // Reject so awaiting callers observe the cancellation.
            this.deferred.reject(new FirestoreError(Code.CANCELLED, 'Operation cancelled' + (reason ? ': ' + reason : '')));
        }
    }
    /**
     * Runs on timer expiry (or via skipDelay()): enqueues the operation on the
     * AsyncQueue and resolves the deferred with its result. If the operation
     * was canceled in the meantime (timerHandle already cleared), it is a
     * no-op.
     */
    handleDelayElapsed() {
        this.asyncQueue.enqueueAndForget(() => {
            if (this.timerHandle !== null) {
                this.clearTimeout();
                return this.op().then(result => {
                    return this.deferred.resolve(result);
                });
            }
            else {
                return Promise.resolve();
            }
        });
    }
    /**
     * Clears the pending timer (if any) and notifies the AsyncQueue via
     * removalCallback. NOTE: this method shadows the global clearTimeout; the
     * unqualified call inside still resolves to the global function.
     */
    clearTimeout() {
        if (this.timerHandle !== null) {
            this.removalCallback(this);
            clearTimeout(this.timerHandle);
            this.timerHandle = null;
        }
    }
}
/**
* Returns a FirestoreError that can be surfaced to the user if the provided
* error is an IndexedDbTransactionError. Re-throws the error otherwise.
*/
function wrapInUserErrorIfRecoverable(e, msg) {
    logError(LOG_TAG$4, `${msg}: ${e}`);
    // Only IndexedDB transaction failures are considered recoverable; anything
    // else propagates unchanged to the caller.
    if (!isIndexedDbTransactionError(e)) {
        throw e;
    }
    return new FirestoreError(Code.UNAVAILABLE, `${msg}: ${e}`);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* DocumentSet is an immutable (copy-on-write) collection that holds documents
* in order specified by the provided comparator. We always add a document key
* comparator on top of what is provided to guarantee document equality based on
* the key.
*/
class DocumentSet {
    /** The default ordering is by key if the comparator is omitted. */
    constructor(comp) {
        // The document-key comparator is always appended as a tie-breaker,
        // since the key is the only guaranteed-unique property of a document.
        this.comparator = comp
            ? (d1, d2) => comp(d1, d2) || DocumentKey.comparator(d1.key, d2.key)
            : (d1, d2) => DocumentKey.comparator(d1.key, d2.key);
        this.keyedMap = documentMap();
        this.sortedSet = new SortedMap(this.comparator);
    }
    /**
     * Returns an empty copy of the existing DocumentSet, using the same
     * comparator.
     */
    static emptySet(oldSet) {
        return new DocumentSet(oldSet.comparator);
    }
    has(key) {
        return this.keyedMap.get(key) != null;
    }
    get(key) {
        return this.keyedMap.get(key);
    }
    first() {
        return this.sortedSet.minKey();
    }
    last() {
        return this.sortedSet.maxKey();
    }
    isEmpty() {
        return this.sortedSet.isEmpty();
    }
    /**
     * Returns the index of the provided key in the document set, or -1 if the
     * document key is not present in the set.
     */
    indexOf(key) {
        const doc = this.keyedMap.get(key);
        return doc ? this.sortedSet.indexOf(doc) : -1;
    }
    get size() {
        return this.sortedSet.size;
    }
    /** Iterates documents in order defined by "comparator". */
    forEach(cb) {
        // Returning false from the traversal callback keeps iteration going.
        this.sortedSet.inorderTraversal((doc, _) => {
            cb(doc);
            return false;
        });
    }
    /** Inserts or updates a document with the same key. */
    add(doc) {
        // Drop any existing entry for this key first so the sorted set never
        // contains two documents with the same key.
        const pruned = this.delete(doc.key);
        return pruned.copy(pruned.keyedMap.insert(doc.key, doc), pruned.sortedSet.insert(doc, null));
    }
    /** Deletes a document with a given key. */
    delete(key) {
        const doc = this.get(key);
        return doc
            ? this.copy(this.keyedMap.remove(key), this.sortedSet.remove(doc))
            : this;
    }
    isEqual(other) {
        if (!(other instanceof DocumentSet) || this.size !== other.size) {
            return false;
        }
        // Sizes match, so a single parallel walk of both sorted sets suffices.
        const thisIt = this.sortedSet.getIterator();
        const otherIt = other.sortedSet.getIterator();
        while (thisIt.hasNext()) {
            if (!thisIt.getNext().key.isEqual(otherIt.getNext().key)) {
                return false;
            }
        }
        return true;
    }
    toString() {
        const docStrings = [];
        this.forEach(doc => {
            docStrings.push(doc.toString());
        });
        return docStrings.length === 0
            ? 'DocumentSet ()'
            : 'DocumentSet (\n ' + docStrings.join(' \n') + '\n)';
    }
    copy(keyedMap, sortedSet) {
        // Reuse this set's comparator but swap in the new backing collections.
        const newSet = new DocumentSet();
        newSet.comparator = this.comparator;
        newSet.keyedMap = keyedMap;
        newSet.sortedSet = sortedSet;
        return newSet;
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* DocumentChangeSet keeps track of a set of changes to docs in a query, merging
* duplicate events for the same doc.
*/
class DocumentChangeSet {
    constructor() {
        // One merged change per document key, ordered by key.
        this.changeMap = new SortedMap(DocumentKey.comparator);
    }
    /**
     * Records `change`, merging it with any change already tracked for the
     * same document so that at most one net change per document is reported.
     */
    track(change) {
        const key = change.doc.key;
        const oldChange = this.changeMap.get(key);
        if (!oldChange) {
            // First change seen for this document: record it as-is.
            this.changeMap = this.changeMap.insert(key, change);
            return;
        }
        // Merge the new change with the existing change.
        // Any non-Added change supersedes a pending Metadata-only change.
        if (change.type !== 0 /* ChangeType.Added */ &&
            oldChange.type === 3 /* ChangeType.Metadata */) {
            this.changeMap = this.changeMap.insert(key, change);
        }
        // A Metadata change keeps the prior change type but adopts the new doc
        // contents (unless the doc was already removed).
        else if (change.type === 3 /* ChangeType.Metadata */ &&
            oldChange.type !== 1 /* ChangeType.Removed */) {
            this.changeMap = this.changeMap.insert(key, {
                type: oldChange.type,
                doc: change.doc
            });
        }
        // Modified after Modified collapses into one Modified with the latest doc.
        else if (change.type === 2 /* ChangeType.Modified */ &&
            oldChange.type === 2 /* ChangeType.Modified */) {
            this.changeMap = this.changeMap.insert(key, {
                type: 2 /* ChangeType.Modified */,
                doc: change.doc
            });
        }
        // Modified after Added is still a net Added (with the latest doc).
        else if (change.type === 2 /* ChangeType.Modified */ &&
            oldChange.type === 0 /* ChangeType.Added */) {
            this.changeMap = this.changeMap.insert(key, {
                type: 0 /* ChangeType.Added */,
                doc: change.doc
            });
        }
        // Removed after Added cancels out entirely: drop the entry.
        else if (change.type === 1 /* ChangeType.Removed */ &&
            oldChange.type === 0 /* ChangeType.Added */) {
            this.changeMap = this.changeMap.remove(key);
        }
        // Removed after Modified is a net Removed, keeping the pre-removal doc.
        else if (change.type === 1 /* ChangeType.Removed */ &&
            oldChange.type === 2 /* ChangeType.Modified */) {
            this.changeMap = this.changeMap.insert(key, {
                type: 1 /* ChangeType.Removed */,
                doc: oldChange.doc
            });
        }
        // Added after Removed nets out to a Modified with the new doc.
        else if (change.type === 0 /* ChangeType.Added */ &&
            oldChange.type === 1 /* ChangeType.Removed */) {
            this.changeMap = this.changeMap.insert(key, {
                type: 2 /* ChangeType.Modified */,
                doc: change.doc
            });
        }
        else {
            // This includes these cases, which don't make sense:
            // Added->Added
            // Removed->Removed
            // Modified->Added
            // Removed->Modified
            // Metadata->Added
            // Removed->Metadata
            fail();
        }
    }
    /** Returns the merged changes in document-key order. */
    getChanges() {
        const changes = [];
        this.changeMap.inorderTraversal((key, change) => {
            changes.push(change);
        });
        return changes;
    }
}
class ViewSnapshot {
    /** Immutable record of one emitted query result set plus its change list. */
    constructor(query, docs, oldDocs, docChanges, mutatedKeys, fromCache, syncStateChanged, excludesMetadataChanges, hasCachedResults) {
        Object.assign(this, {
            query,
            docs,
            oldDocs,
            docChanges,
            mutatedKeys,
            fromCache,
            syncStateChanged,
            excludesMetadataChanges,
            hasCachedResults
        });
    }
    /** Returns a view snapshot as if all documents in the snapshot were added. */
    static fromInitialDocuments(query, documents, mutatedKeys, fromCache, hasCachedResults) {
        const addedChanges = [];
        documents.forEach(doc => {
            addedChanges.push({ type: 0 /* ChangeType.Added */, doc });
        });
        return new ViewSnapshot(query, documents, DocumentSet.emptySet(documents), addedChanges, mutatedKeys, fromCache,
        /* syncStateChanged= */ true,
        /* excludesMetadataChanges= */ false, hasCachedResults);
    }
    get hasPendingWrites() {
        return !this.mutatedKeys.isEmpty();
    }
    isEqual(other) {
        // Cheap scalar comparisons first, then the structural ones.
        const flagsMatch = this.fromCache === other.fromCache &&
            this.hasCachedResults === other.hasCachedResults &&
            this.syncStateChanged === other.syncStateChanged &&
            this.mutatedKeys.isEqual(other.mutatedKeys);
        if (!flagsMatch) {
            return false;
        }
        if (!queryEquals(this.query, other.query) ||
            !this.docs.isEqual(other.docs) ||
            !this.oldDocs.isEqual(other.oldDocs)) {
            return false;
        }
        if (this.docChanges.length !== other.docChanges.length) {
            return false;
        }
        return this.docChanges.every((change, i) => change.type === other.docChanges[i].type &&
            change.doc.isEqual(other.docChanges[i].doc));
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Holds the listeners and the last received ViewSnapshot for a query being
* tracked by EventManager.
*/
class QueryListenersInfo {
    constructor() {
        // The most recent snapshot delivered for this query, if any has been
        // received yet.
        this.viewSnap = undefined;
        // All QueryListeners currently attached to this query.
        this.listeners = [];
    }
}
/** Factory for the EventManager implementation. */
function newEventManager() {
    const eventManager = new EventManagerImpl();
    return eventManager;
}
class EventManagerImpl {
    constructor() {
        // Per-query listener bookkeeping, keyed by query (canonical form is
        // used for hashing, queryEquals for equality).
        this.queries = new ObjectMap(query => canonifyQuery(query), queryEquals);
        this.onlineState = "Unknown" /* OnlineState.Unknown */;
        // Observers notified whenever all active listeners are in sync.
        this.snapshotsInSyncListeners = new Set();
    }
}
async function eventManagerListen(eventManager, listener) {
    const eventManagerImpl = debugCast(eventManager);
    const query = listener.query;
    let queryInfo = eventManagerImpl.queries.get(query);
    const firstListen = !queryInfo;
    if (firstListen) {
        // No listener registered for this query yet: start the underlying
        // listen and capture its initial snapshot.
        queryInfo = new QueryListenersInfo();
        try {
            queryInfo.viewSnap = await eventManagerImpl.onListen(query);
        }
        catch (e) {
            const firestoreError = wrapInUserErrorIfRecoverable(e, `Initialization of query '${stringifyQuery(listener.query)}' failed`);
            listener.onError(firestoreError);
            return;
        }
    }
    eventManagerImpl.queries.set(query, queryInfo);
    queryInfo.listeners.push(listener);
    // Run global snapshot listeners if a consistent snapshot has been emitted.
    listener.applyOnlineStateChange(eventManagerImpl.onlineState);
    if (queryInfo.viewSnap && listener.onViewSnapshot(queryInfo.viewSnap)) {
        raiseSnapshotsInSyncEvent(eventManagerImpl);
    }
}
async function eventManagerUnlisten(eventManager, listener) {
    const eventManagerImpl = debugCast(eventManager);
    const query = listener.query;
    const queryInfo = eventManagerImpl.queries.get(query);
    if (!queryInfo) {
        return;
    }
    const index = queryInfo.listeners.indexOf(listener);
    if (index < 0) {
        return;
    }
    queryInfo.listeners.splice(index, 1);
    // Tear down the underlying listen once the last listener detaches.
    if (queryInfo.listeners.length === 0) {
        eventManagerImpl.queries.delete(query);
        return eventManagerImpl.onUnlisten(query);
    }
}
function eventManagerOnWatchChange(eventManager, viewSnaps) {
    const eventManagerImpl = debugCast(eventManager);
    let anyListenerRaised = false;
    for (const viewSnap of viewSnaps) {
        const queryInfo = eventManagerImpl.queries.get(viewSnap.query);
        if (!queryInfo) {
            continue;
        }
        for (const listener of queryInfo.listeners) {
            // onViewSnapshot must run for every listener, so keep it on the
            // left of the short-circuiting OR.
            anyListenerRaised = listener.onViewSnapshot(viewSnap) || anyListenerRaised;
        }
        queryInfo.viewSnap = viewSnap;
    }
    if (anyListenerRaised) {
        raiseSnapshotsInSyncEvent(eventManagerImpl);
    }
}
function eventManagerOnWatchError(eventManager, query, error) {
    const eventManagerImpl = debugCast(eventManager);
    const queryInfo = eventManagerImpl.queries.get(query);
    if (queryInfo) {
        queryInfo.listeners.forEach(listener => listener.onError(error));
    }
    // Remove all listeners. NOTE: We don't need to call syncEngine.unlisten()
    // after an error.
    eventManagerImpl.queries.delete(query);
}
function eventManagerOnOnlineStateChange(eventManager, onlineState) {
    const eventManagerImpl = debugCast(eventManager);
    eventManagerImpl.onlineState = onlineState;
    let anyListenerRaised = false;
    eventManagerImpl.queries.forEach((_, queryInfo) => {
        for (const listener of queryInfo.listeners) {
            // Run global snapshot listeners if a consistent snapshot has been emitted.
            anyListenerRaised = listener.applyOnlineStateChange(onlineState) || anyListenerRaised;
        }
    });
    if (anyListenerRaised) {
        raiseSnapshotsInSyncEvent(eventManagerImpl);
    }
}
function addSnapshotsInSyncListener(eventManager, observer) {
    const eventManagerImpl = debugCast(eventManager);
    eventManagerImpl.snapshotsInSyncListeners.add(observer);
    // Immediately fire an initial event, indicating all existing listeners
    // are in-sync.
    observer.next();
}
function removeSnapshotsInSyncListener(eventManager, observer) {
    // Deregister the observer; Set.delete is a no-op if it was never added.
    debugCast(eventManager).snapshotsInSyncListeners.delete(observer);
}
// Call all global snapshot listeners that have been set.
function raiseSnapshotsInSyncEvent(eventManagerImpl) {
    // Notify every registered global snapshots-in-sync observer.
    for (const observer of eventManagerImpl.snapshotsInSyncListeners) {
        observer.next();
    }
}
/**
* QueryListener takes a series of internal view snapshots and determines
* when to raise the event.
*
* It uses an Observer to dispatch events.
*/
class QueryListener {
    /**
     * @param query - The query being listened to.
     * @param queryObserver - Observer receiving user-facing snapshot and error
     * events.
     * @param options - Listen options (includeMetadataChanges,
     * waitForSyncWhenOnline); defaults to an empty options object.
     */
    constructor(query, queryObserver, options) {
        this.query = query;
        this.queryObserver = queryObserver;
        /**
         * Initial snapshots (e.g. from cache) may not be propagated to the wrapped
         * observer. This flag is set to true once we've actually raised an event.
         */
        this.raisedInitialEvent = false;
        // The last snapshot applied to this listener; used below to detect
        // hasPendingWrites transitions in shouldRaiseEvent().
        this.snap = null;
        this.onlineState = "Unknown" /* OnlineState.Unknown */;
        this.options = options || {};
    }
    /**
     * Applies the new ViewSnapshot to this listener, raising a user-facing event
     * if applicable (depending on what changed, whether the user has opted into
     * metadata-only changes, etc.). Returns true if a user-facing event was
     * indeed raised.
     */
    onViewSnapshot(snap) {
        if (!this.options.includeMetadataChanges) {
            // Remove the metadata only changes.
            const docChanges = [];
            for (const docChange of snap.docChanges) {
                if (docChange.type !== 3 /* ChangeType.Metadata */) {
                    docChanges.push(docChange);
                }
            }
            // Rebuild the snapshot with the filtered change list, flagging that
            // metadata-only changes were excluded.
            snap = new ViewSnapshot(snap.query, snap.docs, snap.oldDocs, docChanges, snap.mutatedKeys, snap.fromCache, snap.syncStateChanged,
            /* excludesMetadataChanges= */ true, snap.hasCachedResults);
        }
        let raisedEvent = false;
        if (!this.raisedInitialEvent) {
            if (this.shouldRaiseInitialEvent(snap, this.onlineState)) {
                this.raiseInitialEvent(snap);
                raisedEvent = true;
            }
        }
        else if (this.shouldRaiseEvent(snap)) {
            this.queryObserver.next(snap);
            raisedEvent = true;
        }
        // Record the snapshot last: shouldRaiseEvent() above compares against
        // the previous value of this.snap.
        this.snap = snap;
        return raisedEvent;
    }
    /** Forwards an error to the wrapped observer. */
    onError(error) {
        this.queryObserver.error(error);
    }
    /** Returns whether a snapshot was raised. */
    applyOnlineStateChange(onlineState) {
        this.onlineState = onlineState;
        let raisedEvent = false;
        // A previously withheld initial snapshot may become raisable once the
        // online state settles (e.g. we go Offline).
        if (this.snap &&
            !this.raisedInitialEvent &&
            this.shouldRaiseInitialEvent(this.snap, onlineState)) {
            this.raiseInitialEvent(this.snap);
            raisedEvent = true;
        }
        return raisedEvent;
    }
    /**
     * Decides whether the very first user-facing event should fire for `snap`
     * given the current online state.
     */
    shouldRaiseInitialEvent(snap, onlineState) {
        // Always raise the first event when we're synced
        if (!snap.fromCache) {
            return true;
        }
        // NOTE: We consider OnlineState.Unknown as online (it should become Offline
        // or Online if we wait long enough).
        const maybeOnline = onlineState !== "Offline" /* OnlineState.Offline */;
        // Don't raise the event if we're online, aren't synced yet (checked
        // above) and are waiting for a sync.
        if (this.options.waitForSyncWhenOnline && maybeOnline) {
            return false;
        }
        // Raise data from cache if we have any documents, have cached results before,
        // or we are offline.
        return (!snap.docs.isEmpty() ||
            snap.hasCachedResults ||
            onlineState === "Offline" /* OnlineState.Offline */);
    }
    /**
     * Decides whether a subsequent (non-initial) snapshot warrants a
     * user-facing event.
     */
    shouldRaiseEvent(snap) {
        // We don't need to handle includeDocumentMetadataChanges here because
        // the Metadata only changes have already been stripped out if needed.
        // At this point the only changes we will see are the ones we should
        // propagate.
        if (snap.docChanges.length > 0) {
            return true;
        }
        const hasPendingWritesChanged = this.snap && this.snap.hasPendingWrites !== snap.hasPendingWrites;
        if (snap.syncStateChanged || hasPendingWritesChanged) {
            return this.options.includeMetadataChanges === true;
        }
        // Generally we should have hit one of the cases above, but it's possible
        // to get here if there were only metadata docChanges and they got
        // stripped out.
        return false;
    }
    /**
     * Raises the first event for this listener, recast so every document in the
     * snapshot appears as an Added change.
     */
    raiseInitialEvent(snap) {
        snap = ViewSnapshot.fromInitialDocuments(snap.query, snap.docs, snap.mutatedKeys, snap.fromCache, snap.hasCachedResults);
        this.raisedInitialEvent = true;
        this.queryObserver.next(snap);
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A set of changes to what documents are currently in view and out of view for
* a given query. These changes are sent to the LocalStore by the View (via
* the SyncEngine) and are used to pin / unpin documents as appropriate.
*/
class LocalViewChanges {
    /**
     * @param targetId - The target the changes apply to.
     * @param fromCache - Whether the originating snapshot was served from cache.
     * @param addedKeys - Keys of documents that entered the view.
     * @param removedKeys - Keys of documents that left the view.
     */
    constructor(targetId, fromCache, addedKeys, removedKeys) {
        this.targetId = targetId;
        this.fromCache = fromCache;
        this.addedKeys = addedKeys;
        this.removedKeys = removedKeys;
    }
    /** Derives the view changes from a snapshot's document change list. */
    static fromSnapshot(targetId, viewSnapshot) {
        let addedKeys = documentKeySet();
        let removedKeys = documentKeySet();
        for (const docChange of viewSnapshot.docChanges) {
            if (docChange.type === 0 /* ChangeType.Added */) {
                addedKeys = addedKeys.add(docChange.doc.key);
            }
            else if (docChange.type === 1 /* ChangeType.Removed */) {
                removedKeys = removedKeys.add(docChange.doc.key);
            }
            // Modified/Metadata changes do not affect pinning: do nothing.
        }
        return new LocalViewChanges(targetId, viewSnapshot.fromCache, addedKeys, removedKeys);
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Helper to convert objects from bundles to model objects in the SDK.
*/
class BundleConverterImpl {
    constructor(serializer) {
        this.serializer = serializer;
    }
    /** Resolves a fully qualified document resource `name` to a DocumentKey. */
    toDocumentKey(name) {
        return fromName(this.serializer, name);
    }
    /**
     * Converts a BundleDocument to a MutableDocument.
     */
    toMutableDocument(bundledDoc) {
        if (!bundledDoc.metadata.exists) {
            // Missing documents become "no document" tombstones stamped with
            // the read time recorded in the bundle metadata.
            return MutableDocument.newNoDocument(this.toDocumentKey(bundledDoc.metadata.name), this.toSnapshotVersion(bundledDoc.metadata.readTime));
        }
        return fromDocument(this.serializer, bundledDoc.document, false);
    }
    toSnapshotVersion(time) {
        return fromVersion(time);
    }
}
/**
* A class to process the elements from a bundle, load them into local
* storage and provide progress update while loading.
*/
class BundleLoader {
    /**
     * @param bundleMetadata - Metadata describing the bundle being loaded.
     * @param localStore - LocalStore the bundle contents are persisted into.
     * @param serializer - Serializer used to convert bundle protos into model
     * objects.
     */
    constructor(bundleMetadata, localStore, serializer) {
        this.bundleMetadata = bundleMetadata;
        this.localStore = localStore;
        this.serializer = serializer;
        /** Batched queries to be saved into storage */
        this.queries = [];
        /** Batched documents to be saved into storage */
        this.documents = [];
        /** The collection groups affected by this bundle. */
        this.collectionGroups = new Set();
        // Running progress; mutated in place as elements are added.
        this.progress = bundleInitialProgress(bundleMetadata);
    }
    /**
     * Adds an element from the bundle to the loader.
     *
     * Returns a new progress if adding the element leads to a new progress,
     * otherwise returns null.
     */
    addSizedElement(element) {
        this.progress.bytesLoaded += element.byteLength;
        let documentsLoaded = this.progress.documentsLoaded;
        if (element.payload.namedQuery) {
            this.queries.push(element.payload.namedQuery);
        }
        else if (element.payload.documentMetadata) {
            this.documents.push({ metadata: element.payload.documentMetadata });
            // Non-existent docs count as loaded immediately; existing docs are
            // counted when their `document` payload arrives (branch below).
            if (!element.payload.documentMetadata.exists) {
                ++documentsLoaded;
            }
            const path = ResourcePath.fromString(element.payload.documentMetadata.name);
            // The second-to-last path segment is the document's parent
            // collection id.
            this.collectionGroups.add(path.get(path.length - 2));
        }
        else if (element.payload.document) {
            // Attach the contents to the most recently pushed entry — assumes
            // the bundle emits a document right after its documentMetadata.
            this.documents[this.documents.length - 1].document =
                element.payload.document;
            ++documentsLoaded;
        }
        if (documentsLoaded !== this.progress.documentsLoaded) {
            this.progress.documentsLoaded = documentsLoaded;
            // Return a snapshot copy so callers don't observe later mutations.
            return Object.assign({}, this.progress);
        }
        return null;
    }
    /**
     * Builds a map from named-query name to the set of document keys the
     * bundle associates with that query.
     */
    getQueryDocumentMapping(documents) {
        const queryDocumentMap = new Map();
        const bundleConverter = new BundleConverterImpl(this.serializer);
        for (const bundleDoc of documents) {
            if (bundleDoc.metadata.queries) {
                const documentKey = bundleConverter.toDocumentKey(bundleDoc.metadata.name);
                for (const queryName of bundleDoc.metadata.queries) {
                    const documentKeys = (queryDocumentMap.get(queryName) || documentKeySet()).add(documentKey);
                    queryDocumentMap.set(queryName, documentKeys);
                }
            }
        }
        return queryDocumentMap;
    }
    /**
     * Update the progress to 'Success' and return the updated progress.
     *
     * Applies the batched documents and named queries to the LocalStore, then
     * reports the changed docs and affected collection groups.
     */
    async complete() {
        const changedDocs = await localStoreApplyBundledDocuments(this.localStore, new BundleConverterImpl(this.serializer), this.documents, this.bundleMetadata.id);
        const queryDocumentMap = this.getQueryDocumentMapping(this.documents);
        for (const q of this.queries) {
            await localStoreSaveNamedQuery(this.localStore, q, queryDocumentMap.get(q.name));
        }
        this.progress.taskState = 'Success';
        return {
            progress: this.progress,
            changedCollectionGroups: this.collectionGroups,
            changedDocs
        };
    }
}
/**
* Returns a `LoadBundleTaskProgress` representing the initial progress of
* loading a bundle.
*/
function bundleInitialProgress(metadata) {
    const { totalDocuments, totalBytes } = metadata;
    // Nothing has been loaded yet; totals come straight from the metadata.
    return {
        taskState: 'Running',
        documentsLoaded: 0,
        bytesLoaded: 0,
        totalDocuments,
        totalBytes
    };
}
/**
* Returns a `LoadBundleTaskProgress` representing the progress that the loading
* has succeeded.
*/
function bundleSuccessProgress(metadata) {
    const { totalDocuments, totalBytes } = metadata;
    // On success the loaded counters equal the totals from the metadata.
    return {
        taskState: 'Success',
        documentsLoaded: totalDocuments,
        bytesLoaded: totalBytes,
        totalDocuments,
        totalBytes
    };
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Holder reporting a document key added to the limbo document set. */
class AddedLimboDocument {
    constructor(key) {
        this.key = key;
    }
}
/** Holder reporting a document key removed from the limbo document set. */
class RemovedLimboDocument {
    constructor(key) {
        this.key = key;
    }
}
/**
* View is responsible for computing the final merged truth of what docs are in
* a query. It gets notified of local and remote changes to docs, and applies
* the query filters and limits to determine the most correct possible results.
*/
class View {
constructor(query,
/** Documents included in the remote target */
_syncedDocuments) {
this.query = query;
this._syncedDocuments = _syncedDocuments;
this.syncState = null;
this.hasCachedResults = false;
/**
* A flag whether the view is current with the backend. A view is considered
* current after it has seen the current flag from the backend and did not
* lose consistency within the watch stream (e.g. because of an existence
* filter mismatch).
*/
this.current = false;
/** Documents in the view but not in the remote target */
this.limboDocuments = documentKeySet();
/** Document Keys that have local changes */
this.mutatedKeys = documentKeySet();
this.docComparator = newQueryComparator(query);
this.documentSet = new DocumentSet(this.docComparator);
}
/**
* The set of remote documents that the server has told us belongs to the target associated with
* this view.
*/
get syncedDocuments() {
return this._syncedDocuments;
}
/**
* Iterates over a set of doc changes, applies the query limit, and computes
* what the new results should be, what the changes were, and whether we may
* need to go back to the local cache for more results. Does not make any
* changes to the view.
* @param docChanges - The doc changes to apply to this view.
* @param previousChanges - If this is being called with a refill, then start
* with this set of docs and changes instead of the current view.
* @returns a new set of docs, changes, and refill flag.
*/
computeDocChanges(docChanges, previousChanges) {
const changeSet = previousChanges
? previousChanges.changeSet
: new DocumentChangeSet();
const oldDocumentSet = previousChanges
? previousChanges.documentSet
: this.documentSet;
let newMutatedKeys = previousChanges
? previousChanges.mutatedKeys
: this.mutatedKeys;
let newDocumentSet = oldDocumentSet;
let needsRefill = false;
// Track the last doc in a (full) limit. This is necessary, because some
// update (a delete, or an update moving a doc past the old limit) might
// mean there is some other document in the local cache that either should
// come (1) between the old last limit doc and the new last document, in the
// case of updates, or (2) after the new last document, in the case of
// deletes. So we keep this doc at the old limit to compare the updates to.
//
// Note that this should never get used in a refill (when previousChanges is
// set), because there will only be adds -- no deletes or updates.
const lastDocInLimit = this.query.limitType === "F" /* LimitType.First */ &&
oldDocumentSet.size === this.query.limit
? oldDocumentSet.last()
: null;
const firstDocInLimit = this.query.limitType === "L" /* LimitType.Last */ &&
oldDocumentSet.size === this.query.limit
? oldDocumentSet.first()
: null;
docChanges.inorderTraversal((key, entry) => {
const oldDoc = oldDocumentSet.get(key);
const newDoc = queryMatches(this.query, entry) ? entry : null;
const oldDocHadPendingMutations = oldDoc
? this.mutatedKeys.has(oldDoc.key)
: false;
const newDocHasPendingMutations = newDoc
? newDoc.hasLocalMutations ||
// We only consider committed mutations for documents that were
// mutated during the lifetime of the view.
(this.mutatedKeys.has(newDoc.key) && newDoc.hasCommittedMutations)
: false;
let changeApplied = false;
// Calculate change
if (oldDoc && newDoc) {
const docsEqual = oldDoc.data.isEqual(newDoc.data);
if (!docsEqual) {
if (!this.shouldWaitForSyncedDocument(oldDoc, newDoc)) {
changeSet.track({
type: 2 /* ChangeType.Modified */,
doc: newDoc
});
changeApplied = true;
if ((lastDocInLimit &&
this.docComparator(newDoc, lastDocInLimit) > 0) ||
(firstDocInLimit &&
this.docComparator(newDoc, firstDocInLimit) < 0)) {
// This doc moved from inside the limit to outside the limit.
// That means there may be some other doc in the local cache
// that should be included instead.
needsRefill = true;
}
}
}
else if (oldDocHadPendingMutations !== newDocHasPendingMutations) {
changeSet.track({ type: 3 /* ChangeType.Metadata */, doc: newDoc });
changeApplied = true;
}
}
else if (!oldDoc && newDoc) {
changeSet.track({ type: 0 /* ChangeType.Added */, doc: newDoc });
changeApplied = true;
}
else if (oldDoc && !newDoc) {
changeSet.track({ type: 1 /* ChangeType.Removed */, doc: oldDoc });
changeApplied = true;
if (lastDocInLimit || firstDocInLimit) {
// A doc was removed from a full limit query. We'll need to
// requery from the local cache to see if we know about some other
// doc that should be in the results.
needsRefill = true;
}
}
if (changeApplied) {
if (newDoc) {
newDocumentSet = newDocumentSet.add(newDoc);
if (newDocHasPendingMutations) {
newMutatedKeys = newMutatedKeys.add(key);
}
else {
newMutatedKeys = newMutatedKeys.delete(key);
}
}
else {
newDocumentSet = newDocumentSet.delete(key);
newMutatedKeys = newMutatedKeys.delete(key);
}
}
});
// Drop documents out to meet limit/limitToLast requirement.
if (this.query.limit !== null) {
while (newDocumentSet.size > this.query.limit) {
const oldDoc = this.query.limitType === "F" /* LimitType.First */
? newDocumentSet.last()
: newDocumentSet.first();
newDocumentSet = newDocumentSet.delete(oldDoc.key);
newMutatedKeys = newMutatedKeys.delete(oldDoc.key);
changeSet.track({ type: 1 /* ChangeType.Removed */, doc: oldDoc });
}
}
return {
documentSet: newDocumentSet,
changeSet,
needsRefill,
mutatedKeys: newMutatedKeys
};
}
shouldWaitForSyncedDocument(oldDoc, newDoc) {
// We suppress the initial change event for documents that were modified as
// part of a write acknowledgment (e.g. when the value of a server transform
// is applied) as Watch will send us the same document again.
// By suppressing the event, we only raise two user visible events (one with
// `hasPendingWrites` and the final state of the document) instead of three
// (one with `hasPendingWrites`, the modified document with
// `hasPendingWrites` and the final state of the document).
return (oldDoc.hasLocalMutations &&
newDoc.hasCommittedMutations &&
!newDoc.hasLocalMutations);
}
    /**
     * Updates the view with the given ViewDocumentChanges and optionally updates
     * limbo docs and sync state from the provided target change.
     * @param docChanges - The set of changes to make to the view's docs.
     * @param limboResolutionEnabled - Whether to update limbo documents based on
     * this change.
     * @param targetChange - A target change to apply for computing limbo docs and
     * sync state.
     * @param targetIsPendingReset - Whether the target is pending to reset due to
     * existence filter mismatch. If not explicitly specified, it is treated
     * equivalently to `false`.
     * @returns A new ViewChange with the given docs, changes, and sync state.
     */
    // PORTING NOTE: The iOS/Android clients always compute limbo document changes.
    applyChanges(docChanges, limboResolutionEnabled, targetChange, targetIsPendingReset) {
        const oldDocs = this.documentSet;
        this.documentSet = docChanges.documentSet;
        this.mutatedKeys = docChanges.mutatedKeys;
        // Sort changes based on type and query comparator
        const changes = docChanges.changeSet.getChanges();
        changes.sort((c1, c2) => {
            return (compareChangeType(c1.type, c2.type) ||
                this.docComparator(c1.doc, c2.doc));
        });
        // Must run before updateLimboDocuments(): limbo computation reads the
        // freshly updated `_syncedDocuments` and `current` flag.
        this.applyTargetChange(targetChange);
        // Transpiled `targetIsPendingReset ?? false` — normalize the optional arg.
        targetIsPendingReset = targetIsPendingReset !== null && targetIsPendingReset !== void 0 ? targetIsPendingReset : false;
        const limboChanges = limboResolutionEnabled && !targetIsPendingReset
            ? this.updateLimboDocuments()
            : [];
        // We are at synced state if there is no limbo docs are waiting to be resolved, view is current
        // with the backend, and the query is not pending to reset due to existence filter mismatch.
        const synced = this.limboDocuments.size === 0 && this.current && !targetIsPendingReset;
        const newSyncState = synced ? 1 /* SyncState.Synced */ : 0 /* SyncState.Local */;
        const syncStateChanged = newSyncState !== this.syncState;
        this.syncState = newSyncState;
        if (changes.length === 0 && !syncStateChanged) {
            // no changes
            return { limboChanges };
        }
        else {
            const snap = new ViewSnapshot(this.query, docChanges.documentSet, oldDocs, changes, docChanges.mutatedKeys, newSyncState === 0 /* SyncState.Local */, syncStateChanged,
            /* excludesMetadataChanges= */ false, targetChange
                ? targetChange.resumeToken.approximateByteSize() > 0
                : false);
            return {
                snapshot: snap,
                limboChanges
            };
        }
    }
/**
* Applies an OnlineState change to the view, potentially generating a
* ViewChange if the view's syncState changes as a result.
*/
applyOnlineStateChange(onlineState) {
if (this.current && onlineState === "Offline" /* OnlineState.Offline */) {
// If we're offline, set `current` to false and then call applyChanges()
// to refresh our syncState and generate a ViewChange as appropriate. We
// are guaranteed to get a new TargetChange that sets `current` back to
// true once the client is back online.
this.current = false;
return this.applyChanges({
documentSet: this.documentSet,
changeSet: new DocumentChangeSet(),
mutatedKeys: this.mutatedKeys,
needsRefill: false
},
/* limboResolutionEnabled= */ false);
}
else {
// No effect, just return a no-op ViewChange.
return { limboChanges: [] };
}
}
/**
* Returns whether the doc for the given key should be in limbo.
*/
shouldBeInLimbo(key) {
// If the remote end says it's part of this query, it's not in limbo.
if (this._syncedDocuments.has(key)) {
return false;
}
// The local store doesn't think it's a result, so it shouldn't be in limbo.
if (!this.documentSet.has(key)) {
return false;
}
// If there are local changes to the doc, they might explain why the server
// doesn't know that it's part of the query. So don't put it in limbo.
// TODO(klimt): Ideally, we would only consider changes that might actually
// affect this specific query.
if (this.documentSet.get(key).hasLocalMutations) {
return false;
}
// Everything else is in limbo.
return true;
}
/**
* Updates syncedDocuments, current, and limbo docs based on the given change.
* Returns the list of changes to which docs are in limbo.
*/
applyTargetChange(targetChange) {
if (targetChange) {
targetChange.addedDocuments.forEach(key => (this._syncedDocuments = this._syncedDocuments.add(key)));
targetChange.modifiedDocuments.forEach(key => {
});
targetChange.removedDocuments.forEach(key => (this._syncedDocuments = this._syncedDocuments.delete(key)));
this.current = targetChange.current;
}
}
updateLimboDocuments() {
// We can only determine limbo documents when we're in-sync with the server.
if (!this.current) {
return [];
}
// TODO(klimt): Do this incrementally so that it's not quadratic when
// updating many documents.
const oldLimboDocuments = this.limboDocuments;
this.limboDocuments = documentKeySet();
this.documentSet.forEach(doc => {
if (this.shouldBeInLimbo(doc.key)) {
this.limboDocuments = this.limboDocuments.add(doc.key);
}
});
// Diff the new limbo docs with the old limbo docs.
const changes = [];
oldLimboDocuments.forEach(key => {
if (!this.limboDocuments.has(key)) {
changes.push(new RemovedLimboDocument(key));
}
});
this.limboDocuments.forEach(key => {
if (!oldLimboDocuments.has(key)) {
changes.push(new AddedLimboDocument(key));
}
});
return changes;
}
    /**
     * Update the in-memory state of the current view with the state read from
     * persistence.
     *
     * We update the query view whenever a client's primary status changes:
     * - When a client transitions from primary to secondary, it can miss
     *   LocalStorage updates and its query views may temporarily not be
     *   synchronized with the state on disk.
     * - For secondary to primary transitions, the client needs to update the list
     *   of `syncedDocuments` since secondary clients update their query views
     *   based purely on synthesized RemoteEvents.
     *
     * @param queryResult.documents - The documents that match the query according
     * to the LocalStore.
     * @param queryResult.remoteKeys - The keys of the documents that match the
     * query according to the backend.
     *
     * @returns The ViewChange that resulted from this synchronization.
     */
    // PORTING NOTE: Multi-tab only.
    synchronizeWithPersistedState(queryResult) {
        // Adopt the backend's notion of which documents match, reset the limbo
        // set, and replay the persisted documents through the normal
        // change-computation path so limbo docs are recomputed.
        this._syncedDocuments = queryResult.remoteKeys;
        this.limboDocuments = documentKeySet();
        const docChanges = this.computeDocChanges(queryResult.documents);
        return this.applyChanges(docChanges, /* limboResolutionEnabled= */ true);
    }
/**
* Returns a view snapshot as if this query was just listened to. Contains
* a document add for every existing document and the `fromCache` and
* `hasPendingWrites` status of the already established view.
*/
// PORTING NOTE: Multi-tab only.
computeInitialSnapshot() {
return ViewSnapshot.fromInitialDocuments(this.query, this.documentSet, this.mutatedKeys, this.syncState === 0 /* SyncState.Local */, this.hasCachedResults);
}
}
/**
 * Orders change types for snapshot emission: Removed sorts first, then Added,
 * then Modified/Metadata (which sort equivalently).
 */
function compareChangeType(c1, c2) {
    const rank = (change) => {
        if (change === 1 /* ChangeType.Removed */) {
            return 0;
        }
        if (change === 0 /* ChangeType.Added */) {
            return 1;
        }
        if (change === 2 /* ChangeType.Modified */ ||
            change === 3 /* ChangeType.Metadata */) {
            // A metadata change is converted to a modified change at the public
            // api layer. Since we sort by document key and then change type,
            // metadata and modified changes must be sorted equivalently.
            return 2;
        }
        return fail();
    };
    return rank(c1) - rank(c2);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Log tag used for debug output from the SyncEngine functions in this module. */
const LOG_TAG$3 = 'SyncEngine';
/**
 * QueryView contains all of the data that SyncEngine needs to keep track of for
 * a particular query.
 */
class QueryView {
    /**
     * @param query - The query itself.
     * @param targetId - The target number created by the client that is used
     * in the watch stream to identify this query.
     * @param view - The view is responsible for computing the final merged
     * truth of what docs are in the query. It gets notified of local and
     * remote changes, and applies the query filters and limits to determine
     * the most correct possible results.
     */
    constructor(query, targetId, view) {
        this.query = query;
        this.targetId = targetId;
        this.view = view;
    }
}
/** Tracks a limbo resolution. */
class LimboResolution {
    /** @param key - The DocumentKey of the document being resolved. */
    constructor(key) {
        this.key = key;
        /**
         * Set to true once we've received a document. This is used in
         * getRemoteKeysForTarget() and ultimately used by WatchChangeAggregator
         * to decide whether it needs to manufacture a delete event for the
         * target once the target is CURRENT.
         */
        this.receivedDocument = false;
    }
}
/**
 * An implementation of `SyncEngine` coordinating with other parts of SDK.
 *
 * The parts of SyncEngine that act as a callback to RemoteStore need to be
 * registered individually. This is done in `syncEngineWrite()` and
 * `syncEngineListen()` (as well as `applyPrimaryState()`) as these methods
 * serve as entry points to RemoteStore's functionality.
 *
 * Note: some field defined in this class might have public access level, but
 * the class is not exported so they are only accessible from this module.
 * This is useful to implement optional features (like bundles) in free
 * functions, such that they are tree-shakeable.
 */
class SyncEngineImpl {
    constructor(localStore, remoteStore, eventManager,
    // PORTING NOTE: Manages state synchronization in multi-tab environments.
    sharedClientState, currentUser, maxConcurrentLimboResolutions) {
        this.localStore = localStore;
        this.remoteStore = remoteStore;
        this.eventManager = eventManager;
        this.sharedClientState = sharedClientState;
        this.currentUser = currentUser;
        this.maxConcurrentLimboResolutions = maxConcurrentLimboResolutions;
        // Starts empty; callbacks are attached later (see ensureWatchCallbacks /
        // syncEngineEnsureWriteCallbacks referenced by the free functions below).
        this.syncEngineListener = {};
        // Active views indexed by query, plus the reverse mapping from target
        // ID to the queries served by that target.
        this.queryViewsByQuery = new ObjectMap(q => canonifyQuery(q), queryEquals);
        this.queriesByTarget = new Map();
        /**
         * The keys of documents that are in limbo for which we haven't yet started a
         * limbo resolution query. The strings in this set are the result of calling
         * `key.path.canonicalString()` where `key` is a `DocumentKey` object.
         *
         * The `Set` type was chosen because it provides efficient lookup and removal
         * of arbitrary elements and it also maintains insertion order, providing the
         * desired queue-like FIFO semantics.
         */
        this.enqueuedLimboResolutions = new Set();
        /**
         * Keeps track of the target ID for each document that is in limbo with an
         * active target.
         */
        this.activeLimboTargetsByKey = new SortedMap(DocumentKey.comparator);
        /**
         * Keeps track of the information about an active limbo resolution for each
         * active target ID that was started for the purpose of limbo resolution.
         */
        this.activeLimboResolutionsByTarget = new Map();
        this.limboDocumentRefs = new ReferenceSet();
        /** Stores user completion handlers, indexed by User and BatchId. */
        this.mutationUserCallbacks = {};
        /** Stores user callbacks waiting for all pending writes to be acknowledged. */
        this.pendingWritesCallbacks = new Map();
        this.limboTargetIdGenerator = TargetIdGenerator.forSyncEngine();
        this.onlineState = "Unknown" /* OnlineState.Unknown */;
        // The primary state is set to `true` or `false` immediately after Firestore
        // startup. In the interim, a client should only be considered primary if
        // `isPrimary` is true.
        this._isPrimaryClient = undefined;
    }
    /** Whether this client has been confirmed as the primary tab. */
    get isPrimaryClient() {
        return this._isPrimaryClient === true;
    }
}
/**
 * Factory for SyncEngineImpl. When `isPrimary` is true the engine is marked
 * as the confirmed primary client immediately.
 */
function newSyncEngine(localStore, remoteStore, eventManager,
// PORTING NOTE: Manages state synchronization in multi-tab environments.
sharedClientState, currentUser, maxConcurrentLimboResolutions, isPrimary) {
    const engine = new SyncEngineImpl(localStore, remoteStore, eventManager, sharedClientState, currentUser, maxConcurrentLimboResolutions);
    if (isPrimary) {
        engine._isPrimaryClient = true;
    }
    return engine;
}
/**
 * Initiates the new listen, resolves promise when listen enqueued to the
 * server. All the subsequent view snapshots or errors are sent to the
 * subscribed handlers. Returns the initial snapshot.
 */
async function syncEngineListen(syncEngine, query) {
    const syncEngineImpl = ensureWatchCallbacks(syncEngine);
    const existingView = syncEngineImpl.queryViewsByQuery.get(query);
    if (existingView) {
        // PORTING NOTE: With Multi-Tab Web, it is possible that a query view
        // already exists when EventManager calls us for the first time. This
        // happens when the primary tab is already listening to this query on
        // behalf of another tab and the user of the primary also starts
        // listening to the query. EventManager will not have an assigned
        // target ID in this case and calls `listen` to obtain this ID.
        syncEngineImpl.sharedClientState.addLocalQueryTarget(existingView.targetId);
        return existingView.view.computeInitialSnapshot();
    }
    // First listener for this query: allocate a target, register it with the
    // shared client state, and build the initial view.
    const targetData = await localStoreAllocateTarget(syncEngineImpl.localStore, queryToTarget(query));
    const status = syncEngineImpl.sharedClientState.addLocalQueryTarget(targetData.targetId);
    const viewSnapshot = await initializeViewAndComputeSnapshot(syncEngineImpl, query, targetData.targetId, status === 'current', targetData.resumeToken);
    if (syncEngineImpl.isPrimaryClient) {
        remoteStoreListen(syncEngineImpl.remoteStore, targetData);
    }
    return viewSnapshot;
}
/**
 * Registers a view for a previously unknown query and computes its initial
 * snapshot.
 */
async function initializeViewAndComputeSnapshot(syncEngineImpl, query, targetId, current, resumeToken) {
    // PORTING NOTE: On Web only, we inject the code that registers new Limbo
    // targets based on view changes. This allows us to only depend on Limbo
    // changes when user code includes queries.
    syncEngineImpl.applyDocChanges = (queryView, changes, remoteEvent) => applyDocChanges(syncEngineImpl, queryView, changes, remoteEvent);
    const queryResult = await localStoreExecuteQuery(syncEngineImpl.localStore, query,
    /* usePreviousResults= */ true);
    // Seed the view with the keys the backend is known to match, then apply
    // the locally stored documents plus a synthesized target change so the
    // initial snapshot carries the correct `current` / resume-token state.
    const view = new View(query, queryResult.remoteKeys);
    const viewDocChanges = view.computeDocChanges(queryResult.documents);
    const synthesizedTargetChange = TargetChange.createSynthesizedTargetChangeForCurrentChange(targetId, current && syncEngineImpl.onlineState !== "Offline" /* OnlineState.Offline */, resumeToken);
    const viewChange = view.applyChanges(viewDocChanges,
    /* limboResolutionEnabled= */ syncEngineImpl.isPrimaryClient, synthesizedTargetChange);
    updateTrackedLimbos(syncEngineImpl, targetId, viewChange.limboChanges);
    // Record the view under its query and its target; a target may serve
    // multiple queries, so append when it already exists.
    const data = new QueryView(query, targetId, view);
    syncEngineImpl.queryViewsByQuery.set(query, data);
    if (syncEngineImpl.queriesByTarget.has(targetId)) {
        syncEngineImpl.queriesByTarget.get(targetId).push(query);
    }
    else {
        syncEngineImpl.queriesByTarget.set(targetId, [query]);
    }
    return viewChange.snapshot;
}
/** Stops listening to the query. */
async function syncEngineUnlisten(syncEngine, query) {
    const syncEngineImpl = debugCast(syncEngine);
    // NOTE(review): assumes a QueryView exists for `query` — presumably
    // EventManager only unlistens queries it previously listened to; verify
    // against callers.
    const queryView = syncEngineImpl.queryViewsByQuery.get(query);
    // Only clean up the query view and target if this is the only query mapped
    // to the target.
    const queries = syncEngineImpl.queriesByTarget.get(queryView.targetId);
    if (queries.length > 1) {
        // Other queries still share this target: drop only this query's
        // mappings and keep the target alive.
        syncEngineImpl.queriesByTarget.set(queryView.targetId, queries.filter(q => !queryEquals(q, query)));
        syncEngineImpl.queryViewsByQuery.delete(query);
        return;
    }
    // No other queries are mapped to the target, clean up the query and the target.
    if (syncEngineImpl.isPrimaryClient) {
        // We need to remove the local query target first to allow us to verify
        // whether any other client is still interested in this target.
        syncEngineImpl.sharedClientState.removeLocalQueryTarget(queryView.targetId);
        const targetRemainsActive = syncEngineImpl.sharedClientState.isActiveQueryTarget(queryView.targetId);
        if (!targetRemainsActive) {
            await localStoreReleaseTarget(syncEngineImpl.localStore, queryView.targetId,
            /*keepPersistedTargetData=*/ false)
                .then(() => {
                syncEngineImpl.sharedClientState.clearQueryState(queryView.targetId);
                remoteStoreUnlisten(syncEngineImpl.remoteStore, queryView.targetId);
                removeAndCleanupTarget(syncEngineImpl, queryView.targetId);
            })
                .catch(ignoreIfPrimaryLeaseLoss);
        }
    }
    else {
        // Secondary client: clean up local bookkeeping but keep the persisted
        // target data (keepPersistedTargetData=true) for the primary tab.
        removeAndCleanupTarget(syncEngineImpl, queryView.targetId);
        await localStoreReleaseTarget(syncEngineImpl.localStore, queryView.targetId,
        /*keepPersistedTargetData=*/ true);
    }
}
/**
 * Initiates the write of local mutation batch which involves adding the
 * writes to the mutation queue, notifying the remote store about new
 * mutations and raising events for any changes this write caused.
 *
 * The promise returned by this call is resolved when the above steps
 * have completed, *not* when the write was acked by the backend. The
 * userCallback is resolved once the write was acked/rejected by the
 * backend (or failed locally for any other reason).
 */
async function syncEngineWrite(syncEngine, batch, userCallback) {
    const syncEngineImpl = syncEngineEnsureWriteCallbacks(syncEngine);
    try {
        // Persist the batch locally, then surface its effects: register the
        // pending mutation with other tabs, remember the user callback for
        // later ack/rejection, raise snapshots, and kick the write pipeline.
        const writeResult = await localStoreWriteLocally(syncEngineImpl.localStore, batch);
        syncEngineImpl.sharedClientState.addPendingMutation(writeResult.batchId);
        addMutationCallback(syncEngineImpl, writeResult.batchId, userCallback);
        await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, writeResult.changes);
        await fillWritePipeline(syncEngineImpl.remoteStore);
    }
    catch (err) {
        // If we can't persist the mutation, we reject the user callback and
        // don't send the mutation. The user can then retry the write.
        const error = wrapInUserErrorIfRecoverable(err, `Failed to persist write`);
        userCallback.reject(error);
    }
}
/**
 * Applies one remote event to the sync engine, notifying any views of the
 * changes, and releasing any pending mutation batches that would become
 * visible because of the snapshot version the remote event contains.
 */
async function syncEngineApplyRemoteEvent(syncEngine, remoteEvent) {
    const syncEngineImpl = debugCast(syncEngine);
    try {
        const changes = await localStoreApplyRemoteEventToLocalCache(syncEngineImpl.localStore, remoteEvent);
        // Update `receivedDocument` as appropriate for any limbo targets.
        remoteEvent.targetChanges.forEach((targetChange, targetId) => {
            const limboResolution = syncEngineImpl.activeLimboResolutionsByTarget.get(targetId);
            if (limboResolution) {
                // Since this is a limbo resolution lookup, it's for a single document
                // and it could be added, modified, or removed, but not a combination.
                hardAssert(targetChange.addedDocuments.size +
                    targetChange.modifiedDocuments.size +
                    targetChange.removedDocuments.size <=
                    1);
                if (targetChange.addedDocuments.size > 0) {
                    limboResolution.receivedDocument = true;
                }
                else if (targetChange.modifiedDocuments.size > 0) {
                    // A modification implies the document was received earlier.
                    hardAssert(limboResolution.receivedDocument);
                }
                else if (targetChange.removedDocuments.size > 0) {
                    // A removal implies prior receipt; the document is now gone.
                    hardAssert(limboResolution.receivedDocument);
                    limboResolution.receivedDocument = false;
                }
                else {
                    // This was probably just a CURRENT targetChange or similar.
                }
            }
        });
        await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes, remoteEvent);
    }
    catch (error) {
        // Presumably only primary-lease-loss errors are swallowed here — see
        // ignoreIfPrimaryLeaseLoss for the exact behavior.
        await ignoreIfPrimaryLeaseLoss(error);
    }
}
/**
 * Applies an OnlineState change to the sync engine and notifies any views of
 * the change.
 */
function syncEngineApplyOnlineStateChange(syncEngine, onlineState, source) {
    const syncEngineImpl = debugCast(syncEngine);
    // If we are the secondary client, we explicitly ignore the remote store's
    // online state (the local client may go offline, even though the primary
    // tab remains online) and only apply the primary tab's online state from
    // SharedClientState.
    const fromRemoteStore = source === 0 /* OnlineStateSource.RemoteStore */;
    const fromSharedClientState = source === 1 /* OnlineStateSource.SharedClientState */;
    const shouldApply = syncEngineImpl.isPrimaryClient
        ? fromRemoteStore
        : fromSharedClientState;
    if (!shouldApply) {
        return;
    }
    // Let every view react to the state change and collect any snapshots
    // that result.
    const updatedSnapshots = [];
    syncEngineImpl.queryViewsByQuery.forEach((query, queryView) => {
        const viewChange = queryView.view.applyOnlineStateChange(onlineState);
        if (viewChange.snapshot) {
            updatedSnapshots.push(viewChange.snapshot);
        }
    });
    eventManagerOnOnlineStateChange(syncEngineImpl.eventManager, onlineState);
    if (updatedSnapshots.length) {
        syncEngineImpl.syncEngineListener.onWatchChange(updatedSnapshots);
    }
    syncEngineImpl.onlineState = onlineState;
    if (syncEngineImpl.isPrimaryClient) {
        syncEngineImpl.sharedClientState.setOnlineState(onlineState);
    }
}
/**
 * Rejects the listen for the given targetID. This can be triggered by the
 * backend for any active target.
 *
 * @param syncEngine - The sync engine implementation.
 * @param targetId - The targetID corresponds to one previously initiated by the
 * user as part of TargetData passed to listen() on RemoteStore.
 * @param err - A description of the condition that has forced the rejection.
 * Nearly always this will be an indication that the user is no longer
 * authorized to see the data matching the target.
 */
async function syncEngineRejectListen(syncEngine, targetId, err) {
    const syncEngineImpl = debugCast(syncEngine);
    // PORTING NOTE: Multi-tab only.
    syncEngineImpl.sharedClientState.updateQueryState(targetId, 'rejected', err);
    const limboResolution = syncEngineImpl.activeLimboResolutionsByTarget.get(targetId);
    const limboKey = limboResolution && limboResolution.key;
    if (limboKey) {
        // TODO(klimt): We really only should do the following on permission
        // denied errors, but we don't have the cause code here.
        // It's a limbo doc. Create a synthetic event saying it was deleted.
        // This is kind of a hack. Ideally, we would have a method in the local
        // store to purge a document. However, it would be tricky to keep all of
        // the local store's invariants with another method.
        let documentUpdates = new SortedMap(DocumentKey.comparator);
        // TODO(b/217189216): This limbo document should ideally have a read time,
        // so that it is picked up by any read-time based scans. The backend,
        // however, does not send a read time for target removals.
        documentUpdates = documentUpdates.insert(limboKey, MutableDocument.newNoDocument(limboKey, SnapshotVersion.min()));
        const resolvedLimboDocuments = documentKeySet().add(limboKey);
        const event = new RemoteEvent(SnapshotVersion.min(),
        /* targetChanges= */ new Map(),
        /* targetMismatches= */ new SortedMap(primitiveComparator), documentUpdates, resolvedLimboDocuments);
        await syncEngineApplyRemoteEvent(syncEngineImpl, event);
        // Since this query failed, we won't want to manually unlisten to it.
        // We only remove it from bookkeeping after we successfully applied the
        // RemoteEvent. If `applyRemoteEvent()` throws, we want to re-listen to
        // this query when the RemoteStore restarts the Watch stream, which should
        // re-trigger the target failure.
        syncEngineImpl.activeLimboTargetsByKey =
            syncEngineImpl.activeLimboTargetsByKey.remove(limboKey);
        syncEngineImpl.activeLimboResolutionsByTarget.delete(targetId);
        // A resolution slot freed up; start the next enqueued limbo query.
        pumpEnqueuedLimboResolutions(syncEngineImpl);
    }
    else {
        // Not a limbo target: release it locally and tear down its views,
        // surfacing `err` to the listeners via removeAndCleanupTarget.
        await localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
        /* keepPersistedTargetData */ false)
            .then(() => removeAndCleanupTarget(syncEngineImpl, targetId, err))
            .catch(ignoreIfPrimaryLeaseLoss);
    }
}
/**
 * Marks the given batch as acknowledged by the backend: applies the result to
 * the local store, fires the user-level callbacks for the batch, publishes the
 * new mutation state to other tabs, and finally raises snapshot events.
 */
async function syncEngineApplySuccessfulWrite(syncEngine, mutationBatchResult) {
    const syncEngineImpl = debugCast(syncEngine);
    const ackedBatchId = mutationBatchResult.batch.batchId;
    try {
        const localChanges = await localStoreAcknowledgeBatch(syncEngineImpl.localStore, mutationBatchResult);
        // The local store may or may not be able to apply the write result and
        // raise events immediately (depending on whether the watcher is caught
        // up), so user callbacks are fired first so that they consistently
        // happen before listen events.
        processUserCallback(syncEngineImpl, ackedBatchId, /*error=*/ null);
        triggerPendingWritesCallbacks(syncEngineImpl, ackedBatchId);
        syncEngineImpl.sharedClientState.updateMutationState(ackedBatchId, 'acknowledged');
        await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, localChanges);
    }
    catch (err) {
        await ignoreIfPrimaryLeaseLoss(err);
    }
}
/**
 * Marks the given batch as rejected by the backend: rolls it back in the
 * local store, fires the user-level callbacks with the error, publishes the
 * new mutation state to other tabs, and finally raises snapshot events.
 */
async function syncEngineRejectFailedWrite(syncEngine, batchId, error) {
    const syncEngineImpl = debugCast(syncEngine);
    try {
        const rollbackChanges = await localStoreRejectBatch(syncEngineImpl.localStore, batchId);
        // The local store may or may not be able to apply the write result and
        // raise events immediately (depending on whether the watcher is caught
        // up), so user callbacks are fired first so that they consistently
        // happen before listen events.
        processUserCallback(syncEngineImpl, batchId, error);
        triggerPendingWritesCallbacks(syncEngineImpl, batchId);
        syncEngineImpl.sharedClientState.updateMutationState(batchId, 'rejected', error);
        await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, rollbackChanges);
    }
    catch (err) {
        await ignoreIfPrimaryLeaseLoss(err);
    }
}
/**
 * Registers a user callback that resolves when all mutations pending at the
 * moment of calling have been acknowledged by the backend.
 */
async function syncEngineRegisterPendingWritesCallback(syncEngine, callback) {
    const syncEngineImpl = debugCast(syncEngine);
    if (!canUseNetwork(syncEngineImpl.remoteStore)) {
        logDebug(LOG_TAG$3, 'The network is disabled. The task returned by ' +
            "'awaitPendingWrites()' will not complete until the network is enabled.");
    }
    try {
        const highestBatchId = await localStoreGetHighestUnacknowledgedBatchId(syncEngineImpl.localStore);
        if (highestBatchId === BATCHID_UNKNOWN) {
            // Nothing is pending right now, so resolve immediately.
            callback.resolve();
            return;
        }
        // Queue the callback behind the most recent pending batch; it fires
        // when that batch is acknowledged or rejected.
        const waiters = syncEngineImpl.pendingWritesCallbacks.get(highestBatchId) || [];
        waiters.push(callback);
        syncEngineImpl.pendingWritesCallbacks.set(highestBatchId, waiters);
    }
    catch (e) {
        const firestoreError = wrapInUserErrorIfRecoverable(e, 'Initialization of waitForPendingWrites() operation failed');
        callback.reject(firestoreError);
    }
}
/**
 * Triggers the callbacks that are waiting for this batch id to get
 * acknowledged by server, if there are any.
 */
function triggerPendingWritesCallbacks(syncEngineImpl, batchId) {
    const waiters = syncEngineImpl.pendingWritesCallbacks.get(batchId);
    if (waiters) {
        for (const callback of waiters) {
            callback.resolve();
        }
    }
    syncEngineImpl.pendingWritesCallbacks.delete(batchId);
}
/** Reject all outstanding callbacks waiting for pending writes to complete. */
function rejectOutstandingPendingWritesCallbacks(syncEngineImpl, errorMessage) {
syncEngineImpl.pendingWritesCallbacks.forEach(callbacks => {
callbacks.forEach(callback => {
callback.reject(new FirestoreError(Code.CANCELLED, errorMessage));
});
});
syncEngineImpl.pendingWritesCallbacks.clear();
}
/**
 * Remembers `callback` for `batchId` under the current user so it can be
 * resolved or rejected once the backend acks/rejects the batch.
 */
function addMutationCallback(syncEngineImpl, batchId, callback) {
    const userKey = syncEngineImpl.currentUser.toKey();
    const existing = syncEngineImpl.mutationUserCallbacks[userKey];
    const callbacks = existing || new SortedMap(primitiveComparator);
    syncEngineImpl.mutationUserCallbacks[userKey] = callbacks.insert(batchId, callback);
}
/**
 * Resolves or rejects the user callback for the given batch and then discards
 * it.
 */
function processUserCallback(syncEngine, batchId, error) {
    const syncEngineImpl = debugCast(syncEngine);
    const userKey = syncEngineImpl.currentUser.toKey();
    let callbacks = syncEngineImpl.mutationUserCallbacks[userKey];
    // NOTE: Mutations restored from persistence won't have callbacks, so it's
    // okay for there to be no callback for this ID.
    if (!callbacks) {
        return;
    }
    const callback = callbacks.get(batchId);
    if (callback) {
        if (error) {
            callback.reject(error);
        }
        else {
            callback.resolve();
        }
        callbacks = callbacks.remove(batchId);
    }
    syncEngineImpl.mutationUserCallbacks[userKey] = callbacks;
}
/**
 * Drops all bookkeeping for `targetId`: removes its local query target,
 * deletes every view mapped to it (surfacing `error` to listeners when
 * given), and — on the primary client — releases any limbo targets that
 * were only referenced by this target.
 */
function removeAndCleanupTarget(syncEngineImpl, targetId, error = null) {
    syncEngineImpl.sharedClientState.removeLocalQueryTarget(targetId);
    const queries = syncEngineImpl.queriesByTarget.get(targetId);
    for (const query of queries) {
        syncEngineImpl.queryViewsByQuery.delete(query);
        if (error) {
            syncEngineImpl.syncEngineListener.onWatchError(query, error);
        }
    }
    syncEngineImpl.queriesByTarget.delete(targetId);
    if (!syncEngineImpl.isPrimaryClient) {
        return;
    }
    // Primary clients also own limbo bookkeeping: drop every limbo reference
    // held by this target and tear down limbo targets with no references left.
    const releasedKeys = syncEngineImpl.limboDocumentRefs.removeReferencesForId(targetId);
    releasedKeys.forEach(limboKey => {
        const stillReferenced = syncEngineImpl.limboDocumentRefs.containsKey(limboKey);
        if (!stillReferenced) {
            // We removed the last reference for this key.
            removeLimboTarget(syncEngineImpl, limboKey);
        }
    });
}
/**
 * Stops the limbo resolution for `key`, if one is enqueued or active, and
 * pumps the queue so a waiting resolution can take the freed slot.
 */
function removeLimboTarget(syncEngineImpl, key) {
    const keyString = key.path.canonicalString();
    syncEngineImpl.enqueuedLimboResolutions.delete(keyString);
    // The target may already have been removed because its query failed; in
    // that case the key is absent from `activeLimboTargetsByKey` and there is
    // nothing left to clean up.
    const limboTargetId = syncEngineImpl.activeLimboTargetsByKey.get(key);
    if (limboTargetId === null) {
        return;
    }
    remoteStoreUnlisten(syncEngineImpl.remoteStore, limboTargetId);
    syncEngineImpl.activeLimboTargetsByKey =
        syncEngineImpl.activeLimboTargetsByKey.remove(key);
    syncEngineImpl.activeLimboResolutionsByTarget.delete(limboTargetId);
    pumpEnqueuedLimboResolutions(syncEngineImpl);
}
/**
 * Applies a view's limbo-document changes to the engine-wide limbo
 * bookkeeping for `targetId`.
 */
function updateTrackedLimbos(syncEngineImpl, targetId, limboChanges) {
    for (const limboChange of limboChanges) {
        if (limboChange instanceof AddedLimboDocument) {
            // New limbo doc: reference it from this target and, if it isn't
            // already tracked, enqueue a resolution query.
            syncEngineImpl.limboDocumentRefs.addReference(limboChange.key, targetId);
            trackLimboChange(syncEngineImpl, limboChange);
        }
        else if (limboChange instanceof RemovedLimboDocument) {
            logDebug(LOG_TAG$3, 'Document no longer in limbo: ' + limboChange.key);
            syncEngineImpl.limboDocumentRefs.removeReference(limboChange.key, targetId);
            const stillReferenced = syncEngineImpl.limboDocumentRefs.containsKey(limboChange.key);
            if (!stillReferenced) {
                // That was the last reference for this key; tear down the
                // limbo target.
                removeLimboTarget(syncEngineImpl, limboChange.key);
            }
        }
        else {
            fail();
        }
    }
}
/**
 * Enqueues a limbo resolution for the changed document unless one is already
 * active or queued for it.
 */
function trackLimboChange(syncEngineImpl, limboChange) {
    const key = limboChange.key;
    const keyString = key.path.canonicalString();
    const alreadyActive = syncEngineImpl.activeLimboTargetsByKey.get(key);
    const alreadyQueued = syncEngineImpl.enqueuedLimboResolutions.has(keyString);
    if (!alreadyActive && !alreadyQueued) {
        logDebug(LOG_TAG$3, 'New document in limbo: ' + key);
        syncEngineImpl.enqueuedLimboResolutions.add(keyString);
        pumpEnqueuedLimboResolutions(syncEngineImpl);
    }
}
/**
 * Starts listens for documents in limbo that are enqueued for resolution,
 * subject to a maximum number of concurrent resolutions.
 *
 * Without bounding the number of concurrent resolutions, the server can fail
 * with "resource exhausted" errors which can lead to pathological client
 * behavior as seen in https://github.com/firebase/firebase-js-sdk/issues/2683.
 */
function pumpEnqueuedLimboResolutions(syncEngineImpl) {
    while (syncEngineImpl.enqueuedLimboResolutions.size > 0 &&
        syncEngineImpl.activeLimboTargetsByKey.size <
            syncEngineImpl.maxConcurrentLimboResolutions) {
        // Dequeue the oldest pending key; `Set` iteration preserves insertion
        // order, giving the desired FIFO behavior.
        const keyString = syncEngineImpl.enqueuedLimboResolutions
            .values()
            .next().value;
        syncEngineImpl.enqueuedLimboResolutions.delete(keyString);
        const key = new DocumentKey(ResourcePath.fromString(keyString));
        const limboTargetId = syncEngineImpl.limboTargetIdGenerator.next();
        // Track the resolution and start a watch on the single document.
        syncEngineImpl.activeLimboResolutionsByTarget.set(limboTargetId, new LimboResolution(key));
        syncEngineImpl.activeLimboTargetsByKey =
            syncEngineImpl.activeLimboTargetsByKey.insert(key, limboTargetId);
        remoteStoreListen(syncEngineImpl.remoteStore, new TargetData(queryToTarget(newQueryForPath(key.path)), limboTargetId, "TargetPurposeLimboResolution" /* TargetPurpose.LimboResolution */, ListenSequence.INVALID));
    }
}
/**
 * Recomputes every active view against `changes` (and the optional
 * `remoteEvent`), emits the resulting snapshots to the listener, and notifies
 * the local store of the per-view document changes.
 */
async function syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngine, changes, remoteEvent) {
    const syncEngineImpl = debugCast(syncEngine);
    const newSnaps = [];
    const docChangesInAllViews = [];
    const queriesProcessed = [];
    if (syncEngineImpl.queryViewsByQuery.isEmpty()) {
        // Return early since `onWatchChange()` might not have been assigned yet.
        return;
    }
    syncEngineImpl.queryViewsByQuery.forEach((_, queryView) => {
        // Each view is recomputed asynchronously; all results are awaited
        // below before any snapshots are emitted.
        queriesProcessed.push(syncEngineImpl
            .applyDocChanges(queryView, changes, remoteEvent)
            .then(viewSnapshot => {
            // If there are changes, or we are handling a global snapshot, notify
            // secondary clients to update query state.
            if (viewSnapshot || remoteEvent) {
                if (syncEngineImpl.isPrimaryClient) {
                    syncEngineImpl.sharedClientState.updateQueryState(queryView.targetId, (viewSnapshot === null || viewSnapshot === void 0 ? void 0 : viewSnapshot.fromCache) ? 'not-current' : 'current');
                }
            }
            // Update views if there are actual changes.
            if (!!viewSnapshot) {
                newSnaps.push(viewSnapshot);
                const docChanges = LocalViewChanges.fromSnapshot(queryView.targetId, viewSnapshot);
                docChangesInAllViews.push(docChanges);
            }
        }));
    });
    await Promise.all(queriesProcessed);
    syncEngineImpl.syncEngineListener.onWatchChange(newSnaps);
    await localStoreNotifyLocalViewChanges(syncEngineImpl.localStore, docChangesInAllViews);
}
/**
 * Computes the new view snapshot for a single query view after applying
 * `changes`, re-running the query against the local store first if a limited
 * view lost documents. Limbo-document tracking is updated from the resulting
 * view change.
 *
 * @param syncEngineImpl - The sync engine implementation.
 * @param queryView - The query view to update.
 * @param changes - The changed documents to apply.
 * @param remoteEvent - Optional remote event providing per-target changes.
 * @returns The new view snapshot; may be falsy when there is nothing to raise
 * (callers check the result for truthiness).
 */
async function applyDocChanges(syncEngineImpl, queryView, changes, remoteEvent) {
    let viewDocChanges = queryView.view.computeDocChanges(changes);
    if (viewDocChanges.needsRefill) {
        // The query has a limit and some docs were removed, so we need
        // to re-run the query against the local store to make sure we
        // didn't lose any good docs that had been past the limit.
        viewDocChanges = await localStoreExecuteQuery(syncEngineImpl.localStore, queryView.query,
        /* usePreviousResults= */ false).then(({ documents }) => {
            return queryView.view.computeDocChanges(documents, viewDocChanges);
        });
    }
    // Target metadata is only present when a remote event accompanies the change.
    const targetChange = remoteEvent && remoteEvent.targetChanges.get(queryView.targetId);
    const targetIsPendingReset = remoteEvent && remoteEvent.targetMismatches.get(queryView.targetId) != null;
    const viewChange = queryView.view.applyChanges(viewDocChanges,
    /* limboResolutionEnabled= */ syncEngineImpl.isPrimaryClient, targetChange, targetIsPendingReset);
    updateTrackedLimbos(syncEngineImpl, queryView.targetId, viewChange.limboChanges);
    return viewChange.snapshot;
}
/**
 * Handles a credential (user) change: swaps out per-user local state,
 * rejects `waitForPendingWrites` promises issued for the previous user, and
 * re-raises snapshots for the documents affected by the user switch.
 */
async function syncEngineHandleCredentialChange(syncEngine, user) {
    const syncEngineImpl = debugCast(syncEngine);
    if (syncEngineImpl.currentUser.isEqual(user)) {
        // Same user — nothing to do.
        return;
    }
    logDebug(LOG_TAG$3, 'User change. New user:', user.toKey());
    const result = await localStoreHandleUserChange(syncEngineImpl.localStore, user);
    syncEngineImpl.currentUser = user;
    // Fails tasks waiting for pending writes requested by previous user.
    rejectOutstandingPendingWritesCallbacks(syncEngineImpl, "'waitForPendingWrites' promise is rejected due to a user change.");
    // TODO(b/114226417): Consider calling this only in the primary tab.
    syncEngineImpl.sharedClientState.handleUserChange(user, result.removedBatchIds, result.addedBatchIds);
    await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, result.affectedDocuments);
}
/**
 * Returns the set of document keys the server considers part of `targetId`.
 * For a limbo-resolution target this is the single limbo document (once it
 * has been received); otherwise it is the union of the synced documents of
 * every view attached to the target.
 */
function syncEngineGetRemoteKeysForTarget(syncEngine, targetId) {
    const syncEngineImpl = debugCast(syncEngine);
    const limboResolution = syncEngineImpl.activeLimboResolutionsByTarget.get(targetId);
    if (limboResolution && limboResolution.receivedDocument) {
        // A limbo target tracks exactly one document.
        return documentKeySet().add(limboResolution.key);
    }
    let remoteKeys = documentKeySet();
    const queries = syncEngineImpl.queriesByTarget.get(targetId);
    if (queries) {
        for (const query of queries) {
            const queryView = syncEngineImpl.queryViewsByQuery.get(query);
            remoteKeys = remoteKeys.unionWith(queryView.view.syncedDocuments);
        }
    }
    return remoteKeys;
}
/**
 * Reconcile the list of synced documents in an existing view with those
 * from persistence, returning the resulting view snapshot. On the primary
 * client, limbo-document tracking is also updated.
 */
async function synchronizeViewAndComputeSnapshot(syncEngine, queryView) {
    const syncEngineImpl = debugCast(syncEngine);
    const queryResult = await localStoreExecuteQuery(syncEngineImpl.localStore, queryView.query,
    /* usePreviousResults= */ true);
    const snapshot = queryView.view.synchronizeWithPersistedState(queryResult);
    if (syncEngineImpl.isPrimaryClient) {
        // Only the primary client resolves limbo documents.
        updateTrackedLimbos(syncEngineImpl, queryView.targetId, snapshot.limboChanges);
    }
    return snapshot;
}
/**
 * Retrieves newly changed documents from remote document cache and raises
 * snapshots if needed.
 */
// PORTING NOTE: Multi-Tab only.
async function syncEngineSynchronizeWithChangedDocuments(syncEngine, collectionGroup) {
    const syncEngineImpl = debugCast(syncEngine);
    const changes = await localStoreGetNewDocumentChanges(syncEngineImpl.localStore, collectionGroup);
    return syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes);
}
/**
 * Applies a mutation batch state change observed from a different tab.
 *
 * @param syncEngine - SyncEngine to use.
 * @param batchId - The id of the mutation batch whose state changed.
 * @param batchState - One of 'pending', 'acknowledged' or 'rejected'; any
 * other value is a hard failure.
 * @param error - The rejection error, when `batchState` is 'rejected'.
 */
// PORTING NOTE: Multi-Tab only.
async function syncEngineApplyBatchState(syncEngine, batchId, batchState, error) {
    const syncEngineImpl = debugCast(syncEngine);
    const documents = await localStoreLookupMutationDocuments(syncEngineImpl.localStore, batchId);
    if (documents === null) {
        // A throttled tab may not have seen the mutation before it was completed
        // and removed from the mutation queue, in which case we won't have cached
        // the affected documents. In this case we can safely ignore the update
        // since that means we didn't apply the mutation locally at all (if we
        // had, we would have cached the affected documents), and so we will just
        // see any resulting document changes via normal remote document updates
        // as applicable.
        logDebug(LOG_TAG$3, 'Cannot apply mutation batch with id: ' + batchId);
        return;
    }
    if (batchState === 'pending') {
        // If we are the primary client, we need to send this write to the
        // backend. Secondary clients will ignore these writes since their remote
        // connection is disabled.
        await fillWritePipeline(syncEngineImpl.remoteStore);
    }
    else if (batchState === 'acknowledged' || batchState === 'rejected') {
        // NOTE: Both these methods are no-ops for batches that originated from
        // other clients.
        processUserCallback(syncEngineImpl, batchId, error ? error : null);
        triggerPendingWritesCallbacks(syncEngineImpl, batchId);
        localStoreRemoveCachedMutationBatchMetadata(syncEngineImpl.localStore, batchId);
    }
    else {
        // Unknown batch state — indicates a programming error.
        fail();
    }
    // Re-raise snapshots for all documents touched by the batch.
    await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, documents);
}
/**
 * Applies a primary-lease state change: promotes this tab to primary (taking
 * over all active targets and reconciling views with persisted state) or
 * demotes it to secondary (releasing non-local targets and stopping limbo
 * resolution).
 */
// PORTING NOTE: Multi-Tab only.
async function syncEngineApplyPrimaryState(syncEngine, isPrimary) {
    const syncEngineImpl = debugCast(syncEngine);
    ensureWatchCallbacks(syncEngineImpl);
    syncEngineEnsureWriteCallbacks(syncEngineImpl);
    if (isPrimary === true && syncEngineImpl._isPrimaryClient !== true) {
        // Secondary tabs only maintain Views for their local listeners and the
        // Views internal state may not be 100% populated (in particular
        // secondary tabs don't track syncedDocuments, the set of documents the
        // server considers to be in the target). So when a secondary becomes
        // primary, we need to make sure that all views for all targets
        // match the state on disk.
        const activeTargets = syncEngineImpl.sharedClientState.getAllActiveQueryTargets();
        const activeQueries = await synchronizeQueryViewsAndRaiseSnapshots(syncEngineImpl, activeTargets.toArray());
        syncEngineImpl._isPrimaryClient = true;
        await remoteStoreApplyPrimaryState(syncEngineImpl.remoteStore, true);
        // Start watching every target that was active across all tabs.
        for (const targetData of activeQueries) {
            remoteStoreListen(syncEngineImpl.remoteStore, targetData);
        }
    }
    else if (isPrimary === false && syncEngineImpl._isPrimaryClient !== false) {
        const activeTargets = [];
        let p = Promise.resolve();
        // Keep targets this tab listens to locally; release all others
        // (sequentially, chaining onto `p`).
        syncEngineImpl.queriesByTarget.forEach((_, targetId) => {
            if (syncEngineImpl.sharedClientState.isLocalQueryTarget(targetId)) {
                activeTargets.push(targetId);
            }
            else {
                p = p.then(() => {
                    removeAndCleanupTarget(syncEngineImpl, targetId);
                    return localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
                    /*keepPersistedTargetData=*/ true);
                });
            }
            remoteStoreUnlisten(syncEngineImpl.remoteStore, targetId);
        });
        await p;
        await synchronizeQueryViewsAndRaiseSnapshots(syncEngineImpl, activeTargets);
        // Secondary tabs do not perform limbo resolution.
        resetLimboDocuments(syncEngineImpl);
        syncEngineImpl._isPrimaryClient = false;
        await remoteStoreApplyPrimaryState(syncEngineImpl.remoteStore, false);
    }
}
/**
 * Stops all in-flight limbo resolutions and clears the limbo bookkeeping.
 * Called when this tab loses the primary lease.
 */
// PORTING NOTE: Multi-Tab only.
function resetLimboDocuments(syncEngine) {
    const syncEngineImpl = debugCast(syncEngine);
    for (const targetId of syncEngineImpl.activeLimboResolutionsByTarget.keys()) {
        remoteStoreUnlisten(syncEngineImpl.remoteStore, targetId);
    }
    syncEngineImpl.limboDocumentRefs.removeAllReferences();
    syncEngineImpl.activeLimboResolutionsByTarget = new Map();
    syncEngineImpl.activeLimboTargetsByKey = new SortedMap(DocumentKey.comparator);
}
/**
 * Reconcile the query views of the provided query targets with the state from
 * persistence. Raises snapshots for any changes that affect the local
 * client and returns the updated state of all target's query data.
 *
 * @param syncEngine - The sync engine implementation
 * @param targets - the list of targets with views that need to be recomputed
 * @param transitionToPrimary - `true` iff the tab transitions from a secondary
 * tab to a primary tab
 *
 * NOTE(review): `transitionToPrimary` is not referenced anywhere in the body
 * below — callers in this file invoke the function with only two arguments.
 */
// PORTING NOTE: Multi-Tab only.
async function synchronizeQueryViewsAndRaiseSnapshots(syncEngine, targets, transitionToPrimary) {
    const syncEngineImpl = debugCast(syncEngine);
    const activeQueries = [];
    const newViewSnapshots = [];
    for (const targetId of targets) {
        let targetData;
        const queries = syncEngineImpl.queriesByTarget.get(targetId);
        if (queries && queries.length !== 0) {
            // For queries that have a local View, we fetch their current state
            // from LocalStore (as the resume token and the snapshot version
            // might have changed) and reconcile their views with the persisted
            // state (the list of syncedDocuments may have gotten out of sync).
            targetData = await localStoreAllocateTarget(syncEngineImpl.localStore, queryToTarget(queries[0]));
            for (const query of queries) {
                const queryView = syncEngineImpl.queryViewsByQuery.get(query);
                const viewChange = await synchronizeViewAndComputeSnapshot(syncEngineImpl, queryView);
                if (viewChange.snapshot) {
                    newViewSnapshots.push(viewChange.snapshot);
                }
            }
        }
        else {
            // For queries that never executed on this client, we need to
            // allocate the target in LocalStore and initialize a new View.
            const target = await localStoreGetCachedTarget(syncEngineImpl.localStore, targetId);
            targetData = await localStoreAllocateTarget(syncEngineImpl.localStore, target);
            await initializeViewAndComputeSnapshot(syncEngineImpl, synthesizeTargetToQuery(target), targetId,
            /*current=*/ false, targetData.resumeToken);
        }
        activeQueries.push(targetData);
    }
    // Raise all recomputed snapshots in a single notification.
    syncEngineImpl.syncEngineListener.onWatchChange(newViewSnapshots);
    return activeQueries;
}
/**
 * Creates a `Query` object from the specified `Target`. There is no way to
 * obtain the original `Query`, so we synthesize a `Query` from the `Target`
 * object.
 *
 * The synthesized result might be different from the original `Query`, but
 * since the synthesized `Query` should return the same results as the
 * original one (only the presentation of results might differ), the potential
 * difference will not cause issues.
 */
// PORTING NOTE: Multi-Tab only.
function synthesizeTargetToQuery(target) {
    const { path, collectionGroup, orderBy, filters, limit, startAt, endAt } = target;
    return newQuery(path, collectionGroup, orderBy, filters, limit, "F" /* LimitType.First */, startAt, endAt);
}
/** Returns the IDs of the clients that are currently active. */
// PORTING NOTE: Multi-Tab only.
function syncEngineGetActiveClients(syncEngine) {
    // Delegate straight to the local store, which owns client metadata.
    const impl = debugCast(syncEngine);
    return localStoreGetActiveClients(impl.localStore);
}
/**
 * Applies a query target state change received from a different tab. Only
 * processed by secondary clients; the primary ignores these notifications.
 *
 * @param syncEngine - SyncEngine to use.
 * @param targetId - The target whose state changed.
 * @param state - One of 'current', 'not-current' or 'rejected'; any other
 * value is a hard failure.
 * @param error - The rejection error, when `state` is 'rejected'.
 */
// PORTING NOTE: Multi-Tab only.
async function syncEngineApplyTargetState(syncEngine, targetId, state, error) {
    const syncEngineImpl = debugCast(syncEngine);
    if (syncEngineImpl._isPrimaryClient) {
        // If we receive a target state notification via WebStorage, we are
        // either already secondary or another tab has taken the primary lease.
        logDebug(LOG_TAG$3, 'Ignoring unexpected query state notification.');
        return;
    }
    const query = syncEngineImpl.queriesByTarget.get(targetId);
    if (query && query.length > 0) {
        switch (state) {
            case 'current':
            case 'not-current': {
                // Synthesize a remote event from cached changes so local views
                // reflect the state the primary tab observed.
                const changes = await localStoreGetNewDocumentChanges(syncEngineImpl.localStore, queryCollectionGroup(query[0]));
                const synthesizedRemoteEvent = RemoteEvent.createSynthesizedRemoteEventForCurrentChange(targetId, state === 'current', ByteString.EMPTY_BYTE_STRING);
                await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes, synthesizedRemoteEvent);
                break;
            }
            case 'rejected': {
                await localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
                /* keepPersistedTargetData */ true);
                removeAndCleanupTarget(syncEngineImpl, targetId, error);
                break;
            }
            default:
                fail();
        }
    }
}
/**
 * Adds or removes Watch targets for queries from different tabs. Only the
 * primary client acts on these notifications.
 *
 * @param syncEngine - SyncEngine to use.
 * @param added - Target ids that became active in another tab.
 * @param removed - Target ids that are no longer active in any tab.
 */
async function syncEngineApplyActiveTargetsChange(syncEngine, added, removed) {
    const syncEngineImpl = ensureWatchCallbacks(syncEngine);
    if (!syncEngineImpl._isPrimaryClient) {
        return;
    }
    // Sequential awaits are deliberate: each target is allocated and listened
    // to one at a time to preserve ordering in the local store.
    for (const targetId of added) {
        if (syncEngineImpl.queriesByTarget.has(targetId)) {
            // A target might have been added in a previous attempt
            logDebug(LOG_TAG$3, 'Adding an already active target ' + targetId);
            continue;
        }
        const target = await localStoreGetCachedTarget(syncEngineImpl.localStore, targetId);
        const targetData = await localStoreAllocateTarget(syncEngineImpl.localStore, target);
        await initializeViewAndComputeSnapshot(syncEngineImpl, synthesizeTargetToQuery(target), targetData.targetId,
        /*current=*/ false, targetData.resumeToken);
        remoteStoreListen(syncEngineImpl.remoteStore, targetData);
    }
    for (const targetId of removed) {
        // Check that the target is still active since the target might have been
        // removed if it has been rejected by the backend.
        if (!syncEngineImpl.queriesByTarget.has(targetId)) {
            continue;
        }
        // Release queries that are still active.
        await localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
        /* keepPersistedTargetData */ false)
            .then(() => {
            remoteStoreUnlisten(syncEngineImpl.remoteStore, targetId);
            removeAndCleanupTarget(syncEngineImpl, targetId);
        })
            .catch(ignoreIfPrimaryLeaseLoss);
    }
}
/**
 * Wires the sync engine's watch-stream callbacks into the remote store and
 * the event manager. Idempotent; returns the implementation instance.
 */
function ensureWatchCallbacks(syncEngine) {
    const syncEngineImpl = debugCast(syncEngine);
    const remoteSyncer = syncEngineImpl.remoteStore.remoteSyncer;
    remoteSyncer.applyRemoteEvent = syncEngineApplyRemoteEvent.bind(null, syncEngineImpl);
    remoteSyncer.getRemoteKeysForTarget = syncEngineGetRemoteKeysForTarget.bind(null, syncEngineImpl);
    remoteSyncer.rejectListen = syncEngineRejectListen.bind(null, syncEngineImpl);
    const listener = syncEngineImpl.syncEngineListener;
    listener.onWatchChange = eventManagerOnWatchChange.bind(null, syncEngineImpl.eventManager);
    listener.onWatchError = eventManagerOnWatchError.bind(null, syncEngineImpl.eventManager);
    return syncEngineImpl;
}
/**
 * Wires the sync engine's write-stream callbacks into the remote store.
 * Idempotent; returns the implementation instance.
 */
function syncEngineEnsureWriteCallbacks(syncEngine) {
    const syncEngineImpl = debugCast(syncEngine);
    const remoteSyncer = syncEngineImpl.remoteStore.remoteSyncer;
    remoteSyncer.applySuccessfulWrite = syncEngineApplySuccessfulWrite.bind(null, syncEngineImpl);
    remoteSyncer.rejectFailedWrite = syncEngineRejectFailedWrite.bind(null, syncEngineImpl);
    return syncEngineImpl;
}
/**
 * Loads a Firestore bundle into the SDK. The returned promise resolves when
 * the bundle finished loading.
 *
 * @param syncEngine - SyncEngine to use.
 * @param bundleReader - Bundle to load into the SDK.
 * @param task - LoadBundleTask used to update the loading progress to public API.
 */
function syncEngineLoadBundle(syncEngine, bundleReader, task) {
    const syncEngineImpl = debugCast(syncEngine);
    // Once loading completes, notify other tabs which collection groups changed.
    // eslint-disable-next-line @typescript-eslint/no-floating-promises
    loadBundleImpl(syncEngineImpl, bundleReader, task).then(collectionGroups => {
        syncEngineImpl.sharedClientState.notifyBundleLoaded(collectionGroups);
    });
}
/**
 * Loads a bundle and returns the list of affected collection groups.
 *
 * Skips loading (and completes `task` immediately) when a newer copy of the
 * same bundle has already been saved. On any error the task is failed and an
 * empty set is returned — the error is not rethrown.
 *
 * @param syncEngine - The sync engine implementation.
 * @param reader - The bundle reader to consume.
 * @param task - LoadBundleTask to report progress/completion/failure on.
 * @returns The set of collection groups changed by the bundle.
 */
async function loadBundleImpl(syncEngine, reader, task) {
    try {
        const metadata = await reader.getMetadata();
        const skip = await localStoreHasNewerBundle(syncEngine.localStore, metadata);
        if (skip) {
            await reader.close();
            task._completeWith(bundleSuccessProgress(metadata));
            return Promise.resolve(new Set());
        }
        task._updateProgress(bundleInitialProgress(metadata));
        const loader = new BundleLoader(metadata, syncEngine.localStore, reader.serializer);
        let element = await reader.nextElement();
        // Consume bundle elements one at a time, reporting progress as we go.
        while (element) {
            ;
            const progress = await loader.addSizedElement(element);
            if (progress) {
                task._updateProgress(progress);
            }
            element = await reader.nextElement();
        }
        const result = await loader.complete();
        await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngine, result.changedDocs,
        /* remoteEvent */ undefined);
        // Save metadata, so loading the same bundle will skip.
        await localStoreSaveBundle(syncEngine.localStore, metadata);
        task._completeWith(result.progress);
        return Promise.resolve(result.changedCollectionGroups);
    }
    catch (e) {
        logWarn(LOG_TAG$3, `Loading bundle failed with ${e}`);
        task._failWith(e);
        return Promise.resolve(new Set());
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Provides all components needed for Firestore with in-memory persistence.
 * Uses EagerGC garbage collection.
 */
class MemoryOfflineComponentProvider {
    constructor() {
        // In-memory persistence never synchronizes state across tabs.
        this.synchronizeTabs = false;
    }
    async initialize(cfg) {
        this.serializer = newSerializer(cfg.databaseInfo.databaseId);
        this.sharedClientState = this.createSharedClientState(cfg);
        this.persistence = this.createPersistence(cfg);
        await this.persistence.start();
        this.localStore = this.createLocalStore(cfg);
        this.gcScheduler = this.createGarbageCollectionScheduler(cfg, this.localStore);
        this.indexBackfillerScheduler = this.createIndexBackfillerScheduler(cfg, this.localStore);
    }
    createGarbageCollectionScheduler(cfg, localStore) {
        // Eager GC requires no background scheduler.
        return null;
    }
    createIndexBackfillerScheduler(cfg, localStore) {
        // Memory persistence performs no index backfill.
        return null;
    }
    createLocalStore(cfg) {
        return newLocalStore(this.persistence, new QueryEngine(), cfg.initialUser, this.serializer);
    }
    createPersistence(cfg) {
        return new MemoryPersistence(MemoryEagerDelegate.factory, this.serializer);
    }
    createSharedClientState(cfg) {
        return new MemorySharedClientState();
    }
    async terminate() {
        if (this.gcScheduler) {
            this.gcScheduler.stop();
        }
        if (this.indexBackfillerScheduler) {
            this.indexBackfillerScheduler.stop();
        }
        this.sharedClientState.shutdown();
        await this.persistence.shutdown();
    }
}
/**
 * Memory component provider variant that uses LRU garbage collection with an
 * optional cache-size limit instead of eager GC.
 */
class LruGcMemoryOfflineComponentProvider extends MemoryOfflineComponentProvider {
    constructor(cacheSizeBytes) {
        super();
        this.cacheSizeBytes = cacheSizeBytes;
    }
    createGarbageCollectionScheduler(cfg, localStore) {
        hardAssert(this.persistence.referenceDelegate instanceof MemoryLruDelegate);
        const garbageCollector = this.persistence.referenceDelegate.garbageCollector;
        return new LruScheduler(garbageCollector, cfg.asyncQueue, localStore);
    }
    createPersistence(cfg) {
        const lruParams = this.cacheSizeBytes === undefined
            ? LruParams.DEFAULT
            : LruParams.withCacheSize(this.cacheSizeBytes);
        return new MemoryPersistence(p => MemoryLruDelegate.factory(p, lruParams), this.serializer);
    }
}
/**
 * Provides all components needed for Firestore with IndexedDB persistence.
 */
class IndexedDbOfflineComponentProvider extends MemoryOfflineComponentProvider {
    constructor(onlineComponentProvider, cacheSizeBytes, forceOwnership) {
        super();
        this.onlineComponentProvider = onlineComponentProvider;
        this.cacheSizeBytes = cacheSizeBytes;
        this.forceOwnership = forceOwnership;
        // Single-tab provider; see MultiTabOfflineComponentProvider for multi-tab.
        this.synchronizeTabs = false;
    }
    async initialize(cfg) {
        await super.initialize(cfg);
        await this.onlineComponentProvider.initialize(this, cfg);
        // Enqueue writes from a previous session
        await syncEngineEnsureWriteCallbacks(this.onlineComponentProvider.syncEngine);
        await fillWritePipeline(this.onlineComponentProvider.remoteStore);
        // NOTE: This will immediately call the listener, so we make sure to
        // set it after localStore / remoteStore are started.
        await this.persistence.setPrimaryStateListener(() => {
            // Start background schedulers once this client holds the lease.
            if (this.gcScheduler && !this.gcScheduler.started) {
                this.gcScheduler.start();
            }
            if (this.indexBackfillerScheduler &&
                !this.indexBackfillerScheduler.started) {
                this.indexBackfillerScheduler.start();
            }
            return Promise.resolve();
        });
    }
    createLocalStore(cfg) {
        return newLocalStore(this.persistence, new QueryEngine(), cfg.initialUser, this.serializer);
    }
    createGarbageCollectionScheduler(cfg, localStore) {
        // LRU garbage collection for the IndexedDB cache.
        const garbageCollector = this.persistence.referenceDelegate.garbageCollector;
        return new LruScheduler(garbageCollector, cfg.asyncQueue, localStore);
    }
    createIndexBackfillerScheduler(cfg, localStore) {
        const indexBackfiller = new IndexBackfiller(localStore, this.persistence);
        return new IndexBackfillerScheduler(cfg.asyncQueue, indexBackfiller);
    }
    createPersistence(cfg) {
        const persistenceKey = indexedDbStoragePrefix(cfg.databaseInfo.databaseId, cfg.databaseInfo.persistenceKey);
        const lruParams = this.cacheSizeBytes !== undefined
            ? LruParams.withCacheSize(this.cacheSizeBytes)
            : LruParams.DEFAULT;
        return new IndexedDbPersistence(this.synchronizeTabs, persistenceKey, cfg.clientId, lruParams, cfg.asyncQueue, getWindow(), getDocument(), this.serializer, this.sharedClientState, !!this.forceOwnership);
    }
    createSharedClientState(cfg) {
        // Single-tab mode keeps client state in memory only.
        return new MemorySharedClientState();
    }
}
/**
 * Provides all components needed for Firestore with multi-tab IndexedDB
 * persistence.
 *
 * In the legacy client, this provider is used to provide both multi-tab and
 * non-multi-tab persistence since we cannot tell at build time whether
 * `synchronizeTabs` will be enabled.
 */
class MultiTabOfflineComponentProvider extends IndexedDbOfflineComponentProvider {
    constructor(onlineComponentProvider, cacheSizeBytes) {
        super(onlineComponentProvider, cacheSizeBytes, /* forceOwnership= */ false);
        this.onlineComponentProvider = onlineComponentProvider;
        this.cacheSizeBytes = cacheSizeBytes;
        this.synchronizeTabs = true;
    }
    async initialize(cfg) {
        await super.initialize(cfg);
        const syncEngine = this.onlineComponentProvider.syncEngine;
        if (this.sharedClientState instanceof WebStorageSharedClientState) {
            // Route cross-tab notifications from WebStorage into the sync engine.
            this.sharedClientState.syncEngine = {
                applyBatchState: syncEngineApplyBatchState.bind(null, syncEngine),
                applyTargetState: syncEngineApplyTargetState.bind(null, syncEngine),
                applyActiveTargetsChange: syncEngineApplyActiveTargetsChange.bind(null, syncEngine),
                getActiveClients: syncEngineGetActiveClients.bind(null, syncEngine),
                synchronizeWithChangedDocuments: syncEngineSynchronizeWithChangedDocuments.bind(null, syncEngine)
            };
            await this.sharedClientState.start();
        }
        // NOTE: This will immediately call the listener, so we make sure to
        // set it after localStore / remoteStore are started.
        await this.persistence.setPrimaryStateListener(async (isPrimary) => {
            await syncEngineApplyPrimaryState(this.onlineComponentProvider.syncEngine, isPrimary);
            // Background schedulers only run in the primary tab.
            if (this.gcScheduler) {
                if (isPrimary && !this.gcScheduler.started) {
                    this.gcScheduler.start();
                }
                else if (!isPrimary) {
                    this.gcScheduler.stop();
                }
            }
            if (this.indexBackfillerScheduler) {
                if (isPrimary && !this.indexBackfillerScheduler.started) {
                    this.indexBackfillerScheduler.start();
                }
                else if (!isPrimary) {
                    this.indexBackfillerScheduler.stop();
                }
            }
        });
    }
    createSharedClientState(cfg) {
        const window = getWindow();
        if (!WebStorageSharedClientState.isAvailable(window)) {
            throw new FirestoreError(Code.UNIMPLEMENTED, 'IndexedDB persistence is only available on platforms that support LocalStorage.');
        }
        const persistenceKey = indexedDbStoragePrefix(cfg.databaseInfo.databaseId, cfg.databaseInfo.persistenceKey);
        return new WebStorageSharedClientState(window, cfg.asyncQueue, persistenceKey, cfg.clientId, cfg.initialUser);
    }
}
/**
 * Initializes and wires the components that are needed to interface with the
 * network.
 */
class OnlineComponentProvider {
    async initialize(offlineComponentProvider, cfg) {
        if (this.localStore) {
            // OnlineComponentProvider may get initialized multiple times if
            // multi-tab persistence is used.
            return;
        }
        // Creation order matters: datastore -> remoteStore -> eventManager ->
        // syncEngine, since each depends on the previous ones.
        this.localStore = offlineComponentProvider.localStore;
        this.sharedClientState = offlineComponentProvider.sharedClientState;
        this.datastore = this.createDatastore(cfg);
        this.remoteStore = this.createRemoteStore(cfg);
        this.eventManager = this.createEventManager(cfg);
        this.syncEngine = this.createSyncEngine(cfg,
        /* startAsPrimary=*/ !offlineComponentProvider.synchronizeTabs);
        this.sharedClientState.onlineStateHandler = onlineState => syncEngineApplyOnlineStateChange(this.syncEngine, onlineState, 1 /* OnlineStateSource.SharedClientState */);
        this.remoteStore.remoteSyncer.handleCredentialChange =
            syncEngineHandleCredentialChange.bind(null, this.syncEngine);
        await remoteStoreApplyPrimaryState(this.remoteStore, this.syncEngine.isPrimaryClient);
    }
    createEventManager(cfg) {
        return newEventManager();
    }
    createDatastore(cfg) {
        const serializer = newSerializer(cfg.databaseInfo.databaseId);
        const connection = newConnection(cfg.databaseInfo);
        return newDatastore(cfg.authCredentials, cfg.appCheckCredentials, connection, serializer);
    }
    createRemoteStore(cfg) {
        return newRemoteStore(this.localStore, this.datastore, cfg.asyncQueue, onlineState => syncEngineApplyOnlineStateChange(this.syncEngine, onlineState, 0 /* OnlineStateSource.RemoteStore */), newConnectivityMonitor());
    }
    createSyncEngine(cfg, startAsPrimary) {
        return newSyncEngine(this.localStore, this.remoteStore, this.eventManager, this.sharedClientState, cfg.initialUser, cfg.maxConcurrentLimboResolutions, startAsPrimary);
    }
    async terminate() {
        var _a;
        // Shut down the remote store first, then the datastore (if created).
        await remoteStoreShutdown(this.remoteStore);
        (_a = this.datastore) === null || _a === void 0 ? void 0 : _a.terminate();
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * How many bytes to read each time when `ReadableStreamReader.read()` is
 * called. Only applicable for byte streams that we control (e.g. those backed
 * by an UInt8Array).
 */
const DEFAULT_BYTES_PER_READ = 10240;
/**
 * Builds a `ByteStreamReader` from a UInt8Array.
 * @param source - The data source to use.
 * @param bytesPerRead - How many bytes each `read()` from the returned reader
 * will read.
 */
function toByteStreamReaderHelper(source, bytesPerRead = DEFAULT_BYTES_PER_READ) {
    let position = 0;
    // The TypeScript definition for ReadableStreamReader changed. We use
    // `any` here to allow this code to compile with different versions.
    // See https://github.com/microsoft/TypeScript/issues/42970
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    return {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        async read() {
            if (position >= source.byteLength) {
                return { done: true };
            }
            const value = source.slice(position, position + bytesPerRead);
            position += bytesPerRead;
            // Note: `done` is false even on the final chunk; the next read()
            // reports completion, mirroring ReadableStreamDefaultReader.
            return { value, done: false };
        },
        async cancel() { },
        releaseLock() { },
        closed: Promise.resolve()
    };
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Validates that `argument` is a non-empty (truthy) value, throwing an
 * INVALID_ARGUMENT FirestoreError naming the offending function otherwise.
 */
function validateNonEmptyArgument(functionName, argumentName, argument) {
    if (argument) {
        return;
    }
    throw new FirestoreError(Code.INVALID_ARGUMENT, `Function ${functionName}() cannot be called with an empty ${argumentName}.`);
}
/**
 * Validates that two boolean options are not set at the same time.
 * @internal
 */
function validateIsNotUsedTogether(optionName1, argument1, optionName2, argument2) {
    // Only an explicit `true` on both sides is a conflict.
    if (argument1 !== true || argument2 !== true) {
        return;
    }
    throw new FirestoreError(Code.INVALID_ARGUMENT, `${optionName1} and ${optionName2} cannot be used together.`);
}
/**
 * Validates that `path` refers to a document (indicated by the fact it contains
 * an even numbers of segments).
 */
function validateDocumentPath(path) {
    if (DocumentKey.isDocumentKey(path)) {
        return;
    }
    throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid document reference. Document references must have an even number of segments, but ${path} has ${path.length}.`);
}
/**
 * Validates that `path` refers to a collection (indicated by the fact it
 * contains an odd numbers of segments).
 */
function validateCollectionPath(path) {
    if (!DocumentKey.isDocumentKey(path)) {
        return;
    }
    throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid collection reference. Collection references must have an odd number of segments, but ${path} has ${path.length}.`);
}
/**
 * Returns true if it's a non-null object without a custom prototype
 * (i.e. excludes Array, Date, etc.).
 */
function isPlainObject(input) {
    if (typeof input !== 'object' || input === null) {
        return false;
    }
    const proto = Object.getPrototypeOf(input);
    return proto === Object.prototype || proto === null;
}
/** Returns a string describing the type / value of the provided input. */
function valueDescription(input) {
    if (input === undefined) {
        return 'undefined';
    }
    if (input === null) {
        return 'null';
    }
    switch (typeof input) {
        case 'string': {
            // Truncate long strings to keep error messages readable.
            const shown = input.length > 20 ? `${input.substring(0, 20)}...` : input;
            return JSON.stringify(shown);
        }
        case 'number':
        case 'boolean':
            return '' + input;
        case 'object': {
            if (input instanceof Array) {
                return 'an array';
            }
            const customObjectName = tryGetCustomObjectType(input);
            return customObjectName
                ? `a custom ${customObjectName} object`
                : 'an object';
        }
        case 'function':
            return 'a function';
        default:
            // Unhandled typeof (e.g. symbol/bigint) — hard failure.
            return fail();
    }
}
/** try to get the constructor name for an object. */
function tryGetCustomObjectType(input) {
    // Objects created via `Object.create(null)` have no constructor.
    return input.constructor ? input.constructor.name : null;
}
/**
 * Casts `obj` to `T`, optionally unwrapping Compat types to expose the
 * underlying instance. Throws if `obj` is not an instance of `T`.
 *
 * This cast is used in the Lite and Full SDK to verify instance types for
 * arguments passed to the public API.
 * @internal
 */
function cast(obj,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
constructor) {
    if ('_delegate' in obj) {
        // Unwrap Compat types
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        obj = obj._delegate;
    }
    if (obj instanceof constructor) {
        return obj;
    }
    // Same class name but different identity usually means two SDK copies.
    if (constructor.name === obj.constructor.name) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, 'Type does not match the expected instance. Did you pass a ' +
            `reference from a different Firestore SDK?`);
    }
    const description = valueDescription(obj);
    throw new FirestoreError(Code.INVALID_ARGUMENT, `Expected type '${constructor.name}', but it was: ${description}`);
}
/**
 * Validates that `n` is a positive number (> 0), throwing an
 * INVALID_ARGUMENT FirestoreError otherwise.
 *
 * @param functionName - Public API name used in the error message.
 * @param n - The numeric value to validate.
 */
function validatePositiveNumber(functionName, n) {
    // `!(n > 0)` (rather than `n <= 0`) also rejects NaN, for which every
    // comparison is false and which would otherwise pass silently.
    if (!(n > 0)) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, `Function ${functionName}() requires a positive number, but it was: ${n}.`);
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * On Node, the only supported data source is a `Uint8Array` for now.
 *
 * @param source - The bundle content; must be a `Uint8Array`.
 * @param bytesPerRead - Chunk size handed to the byte-stream reader.
 */
function toByteStreamReader(source, bytesPerRead) {
    if (source instanceof Uint8Array) {
        return toByteStreamReaderHelper(source, bytesPerRead);
    }
    throw new FirestoreError(Code.INVALID_ARGUMENT, `NodePlatform.toByteStreamReader expects source to be Uint8Array, got ${valueDescription(source)}`);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
 * A wrapper implementation of Observer<T> that dispatches events
 * asynchronously (via setTimeout). To allow immediate silencing, a mute()
 * call is provided which prevents both future and already-scheduled events
 * from being raised.
 */
class AsyncObserver {
    constructor(observer) {
        this.observer = observer;
        /**
         * When set to true, will not raise future events. Necessary to deal with
         * async detachment of listener.
         */
        this.muted = false;
    }
    next(value) {
        const handler = this.observer.next;
        if (handler) {
            this.scheduleEvent(handler, value);
        }
    }
    error(error) {
        const handler = this.observer.error;
        if (handler) {
            this.scheduleEvent(handler, error);
        }
        else {
            logError('Uncaught Error in snapshot listener:', error.toString());
        }
    }
    mute() {
        this.muted = true;
    }
    scheduleEvent(eventHandler, event) {
        if (this.muted) {
            return;
        }
        setTimeout(() => {
            // Re-check: mute() may have been called between scheduling and firing.
            if (!this.muted) {
                eventHandler(event);
            }
        }, 0);
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * A complete element in the bundle stream, together with the byte length it
 * occupies in the stream.
 */
class SizedBundleElement {
    constructor(payload,
    // How many bytes this element takes to store in the bundle.
    byteLength) {
        this.payload = payload;
        this.byteLength = byteLength;
    }
    /** Whether this element is the bundle's leading metadata element. */
    isBundleMetadata() {
        const hasMetadataKey = 'metadata' in this.payload;
        return hasMetadataKey;
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * A class representing a bundle.
 *
 * Takes a bundle stream or buffer, and presents abstractions to read bundled
 * elements out of the underlying content.
 *
 * Wire format: each element is a decimal length prefix immediately followed
 * by that many bytes of JSON (the JSON always starts with '{').
 */
class BundleReaderImpl {
    constructor(
    /** The reader to read from underlying binary bundle data source. */
    reader, serializer) {
        this.reader = reader;
        this.serializer = serializer;
        /** Cached bundle metadata. */
        this.metadata = new Deferred();
        /**
         * Internal buffer to hold bundle content, accumulating incomplete element
         * content.
         */
        this.buffer = new Uint8Array();
        this.textDecoder = newTextDecoder();
        // Read the metadata (which is the first element).
        this.nextElementImpl().then(element => {
            if (element && element.isBundleMetadata()) {
                this.metadata.resolve(element.payload.metadata);
            }
            else {
                this.metadata.reject(new Error(`The first element of the bundle is not a metadata, it is
         ${JSON.stringify(element === null || element === void 0 ? void 0 : element.payload)}`));
            }
        }, error => this.metadata.reject(error));
    }
    /** Cancels the underlying reader, releasing the data source. */
    close() {
        return this.reader.cancel();
    }
    /** Resolves with the bundle's metadata once the first element was read. */
    async getMetadata() {
        return this.metadata.promise;
    }
    /** Returns the next element after the metadata, or null at end of stream. */
    async nextElement() {
        // Makes sure metadata is read before proceeding.
        await this.getMetadata();
        return this.nextElementImpl();
    }
    /**
     * Reads from the head of internal buffer, and pulling more data from
     * underlying stream if a complete element cannot be found, until an
     * element(including the prefixed length and the JSON string) is found.
     *
     * Once a complete element is read, it is dropped from internal buffer.
     *
     * Returns either the bundled element, or null if we have reached the end of
     * the stream.
     */
    async nextElementImpl() {
        const lengthBuffer = await this.readLength();
        if (lengthBuffer === null) {
            return null;
        }
        const lengthString = this.textDecoder.decode(lengthBuffer);
        const length = Number(lengthString);
        if (isNaN(length)) {
            // raiseError throws, so control never reaches the read below.
            this.raiseError(`length string (${lengthString}) is not valid number`);
        }
        const jsonString = await this.readJsonString(length);
        return new SizedBundleElement(JSON.parse(jsonString), lengthBuffer.length + length);
    }
    /** First index of '{' from the underlying buffer. */
    indexOfOpenBracket() {
        return this.buffer.findIndex(v => v === '{'.charCodeAt(0));
    }
    /**
     * Reads from the beginning of the internal buffer, until the first '{', and
     * return the content.
     *
     * If reached end of the stream, returns a null.
     */
    async readLength() {
        while (this.indexOfOpenBracket() < 0) {
            const done = await this.pullMoreDataToBuffer();
            if (done) {
                break;
            }
        }
        // Broke out of the loop because underlying stream is closed, and there
        // happens to be no more data to process.
        if (this.buffer.length === 0) {
            return null;
        }
        const position = this.indexOfOpenBracket();
        // Broke out of the loop because underlying stream is closed, but still
        // cannot find an open bracket.
        if (position < 0) {
            this.raiseError('Reached the end of bundle when a length string is expected.');
        }
        const result = this.buffer.slice(0, position);
        // Update the internal buffer to drop the read length.
        this.buffer = this.buffer.slice(position);
        return result;
    }
    /**
     * Reads from a specified position from the internal buffer, for a specified
     * number of bytes, pulling more data from the underlying stream if needed.
     *
     * Returns a string decoded from the read bytes.
     */
    async readJsonString(length) {
        while (this.buffer.length < length) {
            const done = await this.pullMoreDataToBuffer();
            if (done) {
                this.raiseError('Reached the end of bundle when more is expected.');
            }
        }
        const result = this.textDecoder.decode(this.buffer.slice(0, length));
        // Update the internal buffer to drop the read json string.
        this.buffer = this.buffer.slice(length);
        return result;
    }
    /** Cancels the reader and throws; never returns normally. */
    raiseError(message) {
        // eslint-disable-next-line @typescript-eslint/no-floating-promises
        this.reader.cancel();
        throw new Error(`Invalid bundle format: ${message}`);
    }
    /**
     * Pulls more data from underlying stream to internal buffer.
     * Returns a boolean indicating whether the stream is finished.
     */
    async pullMoreDataToBuffer() {
        const result = await this.reader.read();
        if (!result.done) {
            // Grow the buffer by concatenating the newly read chunk.
            const newBuffer = new Uint8Array(this.buffer.length + result.value.length);
            newBuffer.set(this.buffer);
            newBuffer.set(result.value, this.buffer.length);
            this.buffer = newBuffer;
        }
        return result.done;
    }
}
function newBundleReader(reader, serializer) {
return new BundleReaderImpl(reader, serializer);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Internal transaction object responsible for accumulating the mutations to
 * perform and the base versions for any documents read.
 */
class Transaction$2 {
    constructor(datastore) {
        this.datastore = datastore;
        // The version of each document that was read during this transaction.
        this.readVersions = new Map();
        this.mutations = [];
        this.committed = false;
        /**
         * A deferred usage error that occurred previously in this transaction that
         * will cause the transaction to fail once it actually commits.
         */
        this.lastTransactionError = null;
        /**
         * Set of documents that have been written in the transaction.
         *
         * When there's more than one write to the same key in a transaction, any
         * writes after the first are handled differently.
         */
        this.writtenDocs = new Set();
    }
    /**
     * Reads the given documents in a single RPC and records each result's
     * version. All reads must happen before any write; a read after a write
     * both throws and poisons the transaction via `lastTransactionError`.
     */
    async lookup(keys) {
        this.ensureCommitNotCalled();
        if (this.mutations.length > 0) {
            this.lastTransactionError = new FirestoreError(Code.INVALID_ARGUMENT, 'Firestore transactions require all reads to be executed before all writes.');
            throw this.lastTransactionError;
        }
        const docs = await invokeBatchGetDocumentsRpc(this.datastore, keys);
        docs.forEach(doc => this.recordVersion(doc));
        return docs;
    }
    // Stages a set mutation; marking the key as written makes later
    // preconditions for this key fall back to Precondition.none().
    set(key, data) {
        this.write(data.toMutation(key, this.precondition(key)));
        this.writtenDocs.add(key.toString());
    }
    // Stages an update mutation. A locally detected precondition failure
    // (e.g. updating a missing doc) is deferred and surfaces at commit().
    update(key, data) {
        try {
            this.write(data.toMutation(key, this.preconditionForUpdate(key)));
        }
        catch (e) {
            this.lastTransactionError = e;
        }
        this.writtenDocs.add(key.toString());
    }
    delete(key) {
        this.write(new DeleteMutation(key, this.precondition(key)));
        this.writtenDocs.add(key.toString());
    }
    /**
     * Commits all staged mutations, adding a `verify` mutation for every
     * document that was read but never written so the backend can confirm its
     * version is unchanged. Throws any deferred usage error first.
     */
    async commit() {
        this.ensureCommitNotCalled();
        if (this.lastTransactionError) {
            throw this.lastTransactionError;
        }
        const unwritten = this.readVersions;
        // For each mutation, note that the doc was written.
        this.mutations.forEach(mutation => {
            unwritten.delete(mutation.key.toString());
        });
        // For each document that was read but not written to, we want to perform
        // a `verify` operation.
        unwritten.forEach((_, path) => {
            const key = DocumentKey.fromPath(path);
            this.mutations.push(new VerifyMutation(key, this.precondition(key)));
        });
        await invokeCommitRpc(this.datastore, this.mutations);
        this.committed = true;
    }
    // Records the version a document had when first read; a second read that
    // observes a different version aborts the transaction immediately.
    recordVersion(doc) {
        let docVersion;
        if (doc.isFoundDocument()) {
            docVersion = doc.version;
        }
        else if (doc.isNoDocument()) {
            // Represent a deleted doc using SnapshotVersion.min().
            docVersion = SnapshotVersion.min();
        }
        else {
            throw fail();
        }
        const existingVersion = this.readVersions.get(doc.key.toString());
        if (existingVersion) {
            if (!docVersion.isEqual(existingVersion)) {
                // This transaction will fail no matter what.
                throw new FirestoreError(Code.ABORTED, 'Document version changed between two reads.');
            }
        }
        else {
            this.readVersions.set(doc.key.toString(), docVersion);
        }
    }
    /**
     * Returns the version of this document when it was read in this transaction,
     * as a precondition, or no precondition if it was not read.
     */
    precondition(key) {
        const version = this.readVersions.get(key.toString());
        if (!this.writtenDocs.has(key.toString()) && version) {
            if (version.isEqual(SnapshotVersion.min())) {
                return Precondition.exists(false);
            }
            else {
                return Precondition.updateTime(version);
            }
        }
        else {
            return Precondition.none();
        }
    }
    /**
     * Returns the precondition for a document if the operation is an update.
     */
    preconditionForUpdate(key) {
        const version = this.readVersions.get(key.toString());
        // The first time a document is written, we want to take into account the
        // read time and existence
        if (!this.writtenDocs.has(key.toString()) && version) {
            if (version.isEqual(SnapshotVersion.min())) {
                // The document doesn't exist, so fail the transaction.
                // This has to be validated locally because you can't send a
                // precondition that a document does not exist without changing the
                // semantics of the backend write to be an insert. This is the reverse
                // of what we want, since we want to assert that the document doesn't
                // exist but then send the update and have it fail. Since we can't
                // express that to the backend, we have to validate locally.
                // Note: this can change once we can send separate verify writes in the
                // transaction.
                throw new FirestoreError(Code.INVALID_ARGUMENT, "Can't update a document that doesn't exist.");
            }
            // Document exists, base precondition on document update time.
            return Precondition.updateTime(version);
        }
        else {
            // Document was not read, so we just use the preconditions for a blind
            // update.
            return Precondition.exists(true);
        }
    }
    write(mutation) {
        this.ensureCommitNotCalled();
        this.mutations.push(mutation);
    }
    // No-op in this release build: the debug assertion that `committed` is
    // still false was compiled out of the dist bundle.
    ensureCommitNotCalled() {
    }
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * TransactionRunner encapsulates the logic needed to run and retry transactions
 * with backoff.
 */
class TransactionRunner {
    constructor(asyncQueue, datastore, options, updateFunction, deferred) {
        this.asyncQueue = asyncQueue;
        this.datastore = datastore;
        this.options = options;
        this.updateFunction = updateFunction;
        this.deferred = deferred;
        // Retries remaining; decremented once in run() and once per retry in
        // handleTransactionError().
        this.attemptsRemaining = options.maxAttempts;
        this.backoff = new ExponentialBackoff(this.asyncQueue, "transaction_retry" /* TimerId.TransactionRetry */);
    }
    /** Runs the transaction and sets the result on deferred. */
    run() {
        this.attemptsRemaining -= 1;
        this.runWithBackOff();
    }
    // Schedules one attempt behind the exponential backoff: runs the user
    // update function against a fresh transaction, then commits it on the
    // async queue, routing commit failures to handleTransactionError().
    runWithBackOff() {
        this.backoff.backoffAndRun(async () => {
            const transaction = new Transaction$2(this.datastore);
            const userPromise = this.tryRunUpdateFunction(transaction);
            if (userPromise) {
                userPromise
                    .then(result => {
                    this.asyncQueue.enqueueAndForget(() => {
                        return transaction
                            .commit()
                            .then(() => {
                            this.deferred.resolve(result);
                        })
                            .catch(commitError => {
                            this.handleTransactionError(commitError);
                        });
                    });
                })
                    .catch(userPromiseError => {
                    this.handleTransactionError(userPromiseError);
                });
            }
        });
    }
    // Invokes the user-supplied update function. Rejects the overall
    // transaction (without retrying) and returns null when the callback
    // throws synchronously or does not return a thenable.
    tryRunUpdateFunction(transaction) {
        try {
            const userPromise = this.updateFunction(transaction);
            if (isNullOrUndefined(userPromise) ||
                !userPromise.catch ||
                !userPromise.then) {
                this.deferred.reject(Error('Transaction callback must return a Promise'));
                return null;
            }
            return userPromise;
        }
        catch (error) {
            // Do not retry errors thrown by user provided updateFunction.
            this.deferred.reject(error);
            return null;
        }
    }
    // Retries (consuming one attempt) when the error is retryable and
    // attempts remain; otherwise rejects the caller's deferred.
    handleTransactionError(error) {
        if (this.attemptsRemaining > 0 && this.isRetryableTransactionError(error)) {
            this.attemptsRemaining -= 1;
            this.asyncQueue.enqueueAndForget(() => {
                this.runWithBackOff();
                return Promise.resolve();
            });
        }
        else {
            this.deferred.reject(error);
        }
    }
    isRetryableTransactionError(error) {
        if (error.name === 'FirebaseError') {
            // In transactions, the backend will fail outdated reads with FAILED_PRECONDITION and
            // non-matching document versions with ABORTED. These errors should be retried.
            const code = error.code;
            return (code === 'aborted' ||
                code === 'failed-precondition' ||
                code === 'already-exists' ||
                !isPermanentError(code));
        }
        return false;
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Tag used for all log lines emitted by the client below.
const LOG_TAG$2 = 'FirestoreClient';
// Upper bound on concurrently-pending limbo document resolutions.
const MAX_CONCURRENT_LIMBO_RESOLUTIONS = 100;
/** DOMException error code constants. */
const DOM_EXCEPTION_INVALID_STATE = 11;
const DOM_EXCEPTION_ABORTED = 20;
const DOM_EXCEPTION_QUOTA_EXCEEDED = 22;
/**
 * FirestoreClient is a top-level class that constructs and owns all of the //
 * pieces of the client SDK architecture. It is responsible for creating the //
 * async queue that is shared by all of the other components in the system. //
 */
class FirestoreClient {
    constructor(authCredentials, appCheckCredentials,
    /**
     * Asynchronous queue responsible for all of our internal processing. When
     * we get incoming work from the user (via public API) or the network
     * (incoming GRPC messages), we should always schedule onto this queue.
     * This ensures all of our work is properly serialized (e.g. we don't
     * start processing a new operation while the previous one is waiting for
     * an async I/O to complete).
     */
    asyncQueue, databaseInfo) {
        this.authCredentials = authCredentials;
        this.appCheckCredentials = appCheckCredentials;
        this.asyncQueue = asyncQueue;
        this.databaseInfo = databaseInfo;
        this.user = User.UNAUTHENTICATED;
        this.clientId = AutoId.newId();
        // Replaced via setCredentialChangeListener / setAppCheckTokenChangeListener
        // once the component providers are installed.
        this.authCredentialListener = () => Promise.resolve();
        this.appCheckCredentialListener = () => Promise.resolve();
        this.authCredentials.start(asyncQueue, async (user) => {
            logDebug(LOG_TAG$2, 'Received user=', user.uid);
            // Note: the listener runs before `this.user` is updated, so the
            // listener observes the previous user via `this.user` if it reads it.
            await this.authCredentialListener(user);
            this.user = user;
        });
        this.appCheckCredentials.start(asyncQueue, newAppCheckToken => {
            logDebug(LOG_TAG$2, 'Received new app check token=', newAppCheckToken);
            return this.appCheckCredentialListener(newAppCheckToken, this.user);
        });
    }
    /** Snapshot of the configuration handed to component providers. */
    get configuration() {
        return {
            asyncQueue: this.asyncQueue,
            databaseInfo: this.databaseInfo,
            clientId: this.clientId,
            authCredentials: this.authCredentials,
            appCheckCredentials: this.appCheckCredentials,
            initialUser: this.user,
            maxConcurrentLimboResolutions: MAX_CONCURRENT_LIMBO_RESOLUTIONS
        };
    }
    setCredentialChangeListener(listener) {
        this.authCredentialListener = listener;
    }
    setAppCheckTokenChangeListener(listener) {
        this.appCheckCredentialListener = listener;
    }
    /**
     * Checks that the client has not been terminated. Ensures that other methods on //
     * this class cannot be called after the client is terminated. //
     */
    verifyNotTerminated() {
        if (this.asyncQueue.isShuttingDown) {
            throw new FirestoreError(Code.FAILED_PRECONDITION, 'The client has already been terminated.');
        }
    }
    /**
     * Shuts down online and offline components and then the credential
     * providers. Returns a promise that settles when shutdown completes.
     */
    terminate() {
        this.asyncQueue.enterRestrictedMode();
        const deferred = new Deferred();
        this.asyncQueue.enqueueAndForgetEvenWhileRestricted(async () => {
            try {
                if (this._onlineComponents) {
                    await this._onlineComponents.terminate();
                }
                if (this._offlineComponents) {
                    await this._offlineComponents.terminate();
                }
                // The credentials provider must be terminated after shutting down the
                // RemoteStore as it will prevent the RemoteStore from retrieving auth
                // tokens.
                this.authCredentials.shutdown();
                this.appCheckCredentials.shutdown();
                deferred.resolve();
            }
            catch (e) {
                const firestoreError = wrapInUserErrorIfRecoverable(e, `Failed to shutdown persistence`);
                deferred.reject(firestoreError);
            }
        });
        return deferred.promise;
    }
}
/**
 * Installs `offlineComponentProvider` on `client`: initializes it, wires
 * user-change propagation into the local store, and registers termination on
 * database deletion. Must run on the client's async queue.
 */
async function setOfflineComponentProvider(client, offlineComponentProvider) {
    client.asyncQueue.verifyOperationInProgress();
    logDebug(LOG_TAG$2, 'Initializing OfflineComponentProvider');
    const configuration = client.configuration;
    await offlineComponentProvider.initialize(configuration);
    // Track the last user forwarded to the local store so redundant
    // notifications for the same user are skipped.
    let currentUser = configuration.initialUser;
    client.setCredentialChangeListener(async (user) => {
        if (!currentUser.isEqual(user)) {
            await localStoreHandleUserChange(offlineComponentProvider.localStore, user);
            currentUser = user;
        }
    });
    // When a user calls clearPersistence() in one client, all other clients
    // need to be terminated to allow the delete to succeed.
    offlineComponentProvider.persistence.setDatabaseDeletedListener(() => client.terminate());
    client._offlineComponents = offlineComponentProvider;
}
/**
 * Installs `onlineComponentProvider` on `client`, first ensuring the offline
 * components exist. Re-points credential/App Check listeners at the remote
 * store. Must run on the client's async queue.
 */
async function setOnlineComponentProvider(client, onlineComponentProvider) {
    client.asyncQueue.verifyOperationInProgress();
    const offlineComponentProvider = await ensureOfflineComponents(client);
    logDebug(LOG_TAG$2, 'Initializing OnlineComponentProvider');
    await onlineComponentProvider.initialize(offlineComponentProvider, client.configuration);
    // The CredentialChangeListener of the online component provider takes
    // precedence over the offline component provider.
    client.setCredentialChangeListener(user => remoteStoreHandleCredentialChange(onlineComponentProvider.remoteStore, user));
    client.setAppCheckTokenChangeListener((_, user) => remoteStoreHandleCredentialChange(onlineComponentProvider.remoteStore, user));
    client._onlineComponents = onlineComponentProvider;
}
/**
 * Decides whether the provided error allows us to gracefully disable
 * persistence (as opposed to crashing the client).
 */
function canFallbackFromIndexedDbError(error) {
    if (error.name === 'FirebaseError') {
        return (error.code === Code.FAILED_PRECONDITION ||
            error.code === Code.UNIMPLEMENTED);
    }
    if (typeof DOMException !== 'undefined' && error instanceof DOMException) {
        // There are a few known circumstances where we can open IndexedDb but
        // trying to read/write will fail (e.g. quota exceeded). For
        // well-understood cases, we attempt to detect these and then gracefully
        // fall back to memory persistence.
        // NOTE: Rather than continue to add to this list, we could decide to
        // always fall back, with the risk that we might accidentally hide errors
        // representing actual SDK bugs.
        const fallbackCodes = [
            // When the browser is out of quota we could get either quota exceeded
            // or an aborted error depending on whether the error happened during
            // schema migration.
            DOM_EXCEPTION_QUOTA_EXCEEDED,
            DOM_EXCEPTION_ABORTED,
            // Firefox Private Browsing mode disables IndexedDb and returns
            // INVALID_STATE for any usage.
            DOM_EXCEPTION_INVALID_STATE
        ];
        return fallbackCodes.includes(error.code);
    }
    return true;
}
/**
 * Lazily initializes the offline components. A user-provided provider is
 * tried first; recoverable IndexedDb errors fall back to the in-memory
 * provider rather than failing the client.
 */
async function ensureOfflineComponents(client) {
    if (!client._offlineComponents) {
        if (client._uninitializedComponentsProvider) {
            logDebug(LOG_TAG$2, 'Using user provided OfflineComponentProvider');
            try {
                await setOfflineComponentProvider(client, client._uninitializedComponentsProvider._offline);
            }
            catch (e) {
                const error = e;
                // Only recoverable IndexedDb failures fall back; anything else is
                // rethrown as a real SDK error.
                if (!canFallbackFromIndexedDbError(error)) {
                    throw error;
                }
                logWarn('Error using user provided cache. Falling back to ' +
                    'memory cache: ' +
                    error);
                await setOfflineComponentProvider(client, new MemoryOfflineComponentProvider());
            }
        }
        else {
            logDebug(LOG_TAG$2, 'Using default OfflineComponentProvider');
            await setOfflineComponentProvider(client, new MemoryOfflineComponentProvider());
        }
    }
    return client._offlineComponents;
}
/**
 * Lazily initializes the online components, preferring a user-provided
 * provider before falling back to the default one.
 */
async function ensureOnlineComponents(client) {
    if (client._onlineComponents) {
        return client._onlineComponents;
    }
    const uninitialized = client._uninitializedComponentsProvider;
    if (uninitialized) {
        logDebug(LOG_TAG$2, 'Using user provided OnlineComponentProvider');
        await setOnlineComponentProvider(client, uninitialized._online);
    }
    else {
        logDebug(LOG_TAG$2, 'Using default OnlineComponentProvider');
        await setOnlineComponentProvider(client, new OnlineComponentProvider());
    }
    return client._onlineComponents;
}
/** Resolves with the client's persistence layer (initializing offline components if needed). */
async function getPersistence(client) {
    const components = await ensureOfflineComponents(client);
    return components.persistence;
}
/** Resolves with the client's local store (initializing offline components if needed). */
async function getLocalStore(client) {
    const components = await ensureOfflineComponents(client);
    return components.localStore;
}
/** Resolves with the client's remote store (initializing online components if needed). */
async function getRemoteStore(client) {
    const components = await ensureOnlineComponents(client);
    return components.remoteStore;
}
/** Resolves with the client's sync engine (initializing online components if needed). */
async function getSyncEngine(client) {
    const components = await ensureOnlineComponents(client);
    return components.syncEngine;
}
/** Resolves with the client's datastore (initializing online components if needed). */
async function getDatastore(client) {
    const components = await ensureOnlineComponents(client);
    return components.datastore;
}
/**
 * Resolves with the event manager, (re)binding its listen/unlisten hooks to
 * the current sync engine on every call.
 */
async function getEventManager(client) {
    const onlineComponentProvider = await ensureOnlineComponents(client);
    const eventManager = onlineComponentProvider.eventManager;
    eventManager.onListen = syncEngineListen.bind(null, onlineComponentProvider.syncEngine);
    eventManager.onUnlisten = syncEngineUnlisten.bind(null, onlineComponentProvider.syncEngine);
    return eventManager;
}
/** Enables the network connection and re-enqueues all pending operations. */
function firestoreClientEnableNetwork(client) {
    const enableTask = async () => {
        const localPersistence = await getPersistence(client);
        const store = await getRemoteStore(client);
        localPersistence.setNetworkEnabled(true);
        return remoteStoreEnableNetwork(store);
    };
    return client.asyncQueue.enqueue(enableTask);
}
/** Disables the network connection. Pending operations will not complete. */
function firestoreClientDisableNetwork(client) {
    const disableTask = async () => {
        const localPersistence = await getPersistence(client);
        const store = await getRemoteStore(client);
        localPersistence.setNetworkEnabled(false);
        return remoteStoreDisableNetwork(store);
    };
    return client.asyncQueue.enqueue(disableTask);
}
/**
 * Returns a Promise that resolves when all writes that were pending at the time
 * this method was called received server acknowledgement. An acknowledgement
 * can be either acceptance or rejection.
 */
function firestoreClientWaitForPendingWrites(client) {
    const result = new Deferred();
    client.asyncQueue.enqueueAndForget(async () => {
        return syncEngineRegisterPendingWritesCallback(await getSyncEngine(client), result);
    });
    return result.promise;
}
/**
 * Registers a snapshot listener for `query` and returns an unsubscribe
 * function. Unsubscribing mutes the observer immediately so no further
 * events reach user code while the unlisten is enqueued.
 */
function firestoreClientListen(client, query, options, observer) {
    const wrappedObserver = new AsyncObserver(observer);
    const listener = new QueryListener(query, wrappedObserver, options);
    client.asyncQueue.enqueueAndForget(async () => {
        return eventManagerListen(await getEventManager(client), listener);
    });
    const unsubscribe = () => {
        wrappedObserver.mute();
        client.asyncQueue.enqueueAndForget(async () => {
            return eventManagerUnlisten(await getEventManager(client), listener);
        });
    };
    return unsubscribe;
}
/** Resolves with the named document from the local cache (or null). */
function firestoreClientGetDocumentFromLocalCache(client, docKey) {
    const result = new Deferred();
    client.asyncQueue.enqueueAndForget(async () => {
        return readDocumentFromCache(await getLocalStore(client), docKey, result);
    });
    return result.promise;
}
/** Resolves with a latency-compensated snapshot of the named document. */
function firestoreClientGetDocumentViaSnapshotListener(client, key, options = {}) {
    const result = new Deferred();
    client.asyncQueue.enqueueAndForget(async () => {
        return readDocumentViaSnapshotListener(await getEventManager(client), client.asyncQueue, key, options, result);
    });
    return result.promise;
}
/** Resolves with the results of `query` evaluated against the local cache. */
function firestoreClientGetDocumentsFromLocalCache(client, query) {
    const result = new Deferred();
    client.asyncQueue.enqueueAndForget(async () => {
        return executeQueryFromCache(await getLocalStore(client), query, result);
    });
    return result.promise;
}
/** Resolves with a latency-compensated snapshot of the given query. */
function firestoreClientGetDocumentsViaSnapshotListener(client, query, options = {}) {
    const result = new Deferred();
    client.asyncQueue.enqueueAndForget(async () => {
        return executeQueryViaSnapshotListener(await getEventManager(client), client.asyncQueue, query, options, result);
    });
    return result.promise;
}
/** Runs `query`'s aggregations against the backend and resolves with the result. */
function firestoreClientRunAggregateQuery(client, query, aggregates) {
    const result = new Deferred();
    client.asyncQueue.enqueueAndForget(async () => {
        // TODO(b/277628384): check `canUseNetwork()` and handle multi-tab.
        try {
            const datastore = await getDatastore(client);
            // Resolving with the RPC promise lets its eventual rejection
            // propagate to `result.promise` via promise flattening.
            result.resolve(invokeRunAggregationQueryRpc(datastore, query, aggregates));
        }
        catch (e) {
            result.reject(e);
        }
    });
    return result.promise;
}
/** Enqueues `mutations` with the sync engine; resolves on server acknowledgement. */
function firestoreClientWrite(client, mutations) {
    const result = new Deferred();
    client.asyncQueue.enqueueAndForget(async () => {
        return syncEngineWrite(await getSyncEngine(client), mutations, result);
    });
    return result.promise;
}
/**
 * Registers a snapshots-in-sync observer and returns an unsubscribe function
 * that mutes the observer before enqueueing its removal.
 */
function firestoreClientAddSnapshotsInSyncListener(client, observer) {
    const wrappedObserver = new AsyncObserver(observer);
    client.asyncQueue.enqueueAndForget(async () => {
        return addSnapshotsInSyncListener(await getEventManager(client), wrappedObserver);
    });
    return () => {
        wrappedObserver.mute();
        client.asyncQueue.enqueueAndForget(async () => {
            return removeSnapshotsInSyncListener(await getEventManager(client), wrappedObserver);
        });
    };
}
/**
 * Takes an updateFunction in which a set of reads and writes can be performed
 * atomically. In the updateFunction, the client can read and write values
 * using the supplied transaction object. After the updateFunction, all
 * changes will be committed. If a retryable error occurs (ex: some other
 * client has changed any of the data referenced), then the updateFunction
 * will be called again after a backoff. If the updateFunction still fails
 * after all retries, then the transaction will be rejected.
 *
 * The transaction object passed to the updateFunction contains methods for
 * accessing documents and collections. Unlike other datastore access, data
 * accessed with the transaction will not reflect local changes that have not
 * been committed. For this reason, it is required that all reads are
 * performed before any writes. Transactions must be performed while online.
 */
function firestoreClientTransaction(client, updateFunction, options) {
    const result = new Deferred();
    client.asyncQueue.enqueueAndForget(async () => {
        const datastore = await getDatastore(client);
        const runner = new TransactionRunner(client.asyncQueue, datastore, options, updateFunction, result);
        runner.run();
    });
    return result.promise;
}
/**
 * Reads `docKey` from the local cache and settles `result`: a found document
 * resolves with the document, a known-missing document resolves with null,
 * and an unknown state rejects with UNAVAILABLE.
 */
async function readDocumentFromCache(localStore, docKey, result) {
    try {
        const document = await localStoreReadDocument(localStore, docKey);
        if (document.isFoundDocument()) {
            result.resolve(document);
        }
        else if (document.isNoDocument()) {
            result.resolve(null);
        }
        else {
            result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get document from cache. (However, this document may ' +
                "exist on the server. Run again without setting 'source' in " +
                'the GetOptions to attempt to retrieve the document from the ' +
                'server.)'));
        }
    }
    catch (e) {
        // Message fix: the original template read `'${docKey} from cache` with
        // an unbalanced quote; the key is now properly quoted.
        const firestoreError = wrapInUserErrorIfRecoverable(e, `Failed to get document '${docKey}' from cache`);
        result.reject(firestoreError);
    }
}
/**
 * Retrieves a latency-compensated document from the backend via a
 * SnapshotListener.
 */
function readDocumentViaSnapshotListener(eventManager, asyncQueue, key, options, result) {
    // Note: the observer's `next` closes over `listener`, which is declared
    // below; this is safe because the callback only fires after registration.
    const wrappedObserver = new AsyncObserver({
        next: (snap) => {
            // Remove query first before passing event to user to avoid
            // user actions affecting the now stale query.
            asyncQueue.enqueueAndForget(() => eventManagerUnlisten(eventManager, listener));
            const exists = snap.docs.has(key);
            if (!exists && snap.fromCache) {
                // TODO(dimond): If we're online and the document doesn't
                // exist then we resolve with a doc.exists set to false. If
                // we're offline however, we reject the Promise in this
                // case. Two options: 1) Cache the negative response from
                // the server so we can deliver that even when you're
                // offline 2) Actually reject the Promise in the online case
                // if the document doesn't exist.
                result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get document because the client is offline.'));
            }
            else if (exists &&
                snap.fromCache &&
                options &&
                options.source === 'server') {
                // Caller explicitly demanded a server result but only a cached
                // copy is available.
                result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get document from server. (However, this ' +
                    'document does exist in the local cache. Run again ' +
                    'without setting source to "server" to ' +
                    'retrieve the cached document.)'));
            }
            else {
                result.resolve(snap);
            }
        },
        error: e => result.reject(e)
    });
    // Wait for a synced, metadata-bearing snapshot so `fromCache` is accurate.
    const listener = new QueryListener(newQueryForPath(key.path), wrappedObserver, {
        includeMetadataChanges: true,
        waitForSyncWhenOnline: true
    });
    return eventManagerListen(eventManager, listener);
}
/**
 * Executes `query` against the local cache only and resolves `result` with a
 * view snapshot computed over the cached documents (no limbo resolution).
 */
async function executeQueryFromCache(localStore, query, result) {
    try {
        const queryResult = await localStoreExecuteQuery(localStore, query,
        /* usePreviousResults= */ true);
        const view = new View(query, queryResult.remoteKeys);
        const viewDocChanges = view.computeDocChanges(queryResult.documents);
        const viewChange = view.applyChanges(viewDocChanges,
        /* limboResolutionEnabled= */ false);
        result.resolve(viewChange.snapshot);
    }
    catch (e) {
        // Fix: the quote around the query was previously left unclosed.
        const firestoreError = wrapInUserErrorIfRecoverable(e, `Failed to execute query '${query}' against cache`);
        result.reject(firestoreError);
    }
}
/**
 * Retrieves a latency-compensated query snapshot from the backend via a
 * SnapshotListener.
 */
function executeQueryViaSnapshotListener(eventManager, asyncQueue, query, options, result) {
    // `listener` (declared below) is only referenced after listening starts.
    const observer = new AsyncObserver({
        next: snapshot => {
            // Detach the listener before surfacing the event so user code
            // cannot interact with the now stale query.
            asyncQueue.enqueueAndForget(() => eventManagerUnlisten(eventManager, listener));
            if (snapshot.fromCache && options.source === 'server') {
                result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get documents from server. (However, these ' +
                    'documents may exist in the local cache. Run again ' +
                    'without setting source to "server" to ' +
                    'retrieve the cached documents.)'));
                return;
            }
            result.resolve(snapshot);
        },
        error: e => result.reject(e)
    });
    const listener = new QueryListener(query, observer, {
        includeMetadataChanges: true,
        waitForSyncWhenOnline: true
    });
    return eventManagerListen(eventManager, listener);
}
/** Kicks off loading of a Firestore bundle on the client's async queue. */
function firestoreClientLoadBundle(client, databaseId, data, resultTask) {
    const reader = createBundleReader(data, newSerializer(databaseId));
    client.asyncQueue.enqueueAndForget(async () => {
        const syncEngine = await getSyncEngine(client);
        syncEngineLoadBundle(syncEngine, reader, resultTask);
    });
}
/** Resolves the named query stored by a previously loaded bundle, if any. */
function firestoreClientGetNamedQuery(client, queryName) {
    return client.asyncQueue.enqueue(async () => {
        const localStore = await getLocalStore(client);
        return localStoreGetNamedQuery(localStore, queryName);
    });
}
/**
 * Wraps bundle `data` (string or byte array) in a BundleReader, encoding
 * string input to UTF-8 bytes first.
 */
function createBundleReader(data, serializer) {
    const content = typeof data === 'string' ? newTextEncoder().encode(data) : data;
    return newBundleReader(toByteStreamReader(content), serializer);
}
/** Applies a field-index configuration to the client's local store. */
function firestoreClientSetIndexConfiguration(client, indexes) {
    return client.asyncQueue.enqueue(async () => {
        const localStore = await getLocalStore(client);
        return localStoreConfigureFieldIndexes(localStore, indexes);
    });
}
/** Toggles automatic creation of persistent-cache field indexes. */
function firestoreClientSetPersistentCacheIndexAutoCreationEnabled(client, isEnabled) {
    return client.asyncQueue.enqueue(async () => {
        const localStore = await getLocalStore(client);
        return localStoreSetIndexAutoCreationEnabled(localStore, isEnabled);
    });
}
/** Removes every field index from the client's local store. */
function firestoreClientDeleteAllFieldIndexes(client) {
    return client.asyncQueue.enqueue(async () => {
        const localStore = await getLocalStore(client);
        return localStoreDeleteAllFieldIndexes(localStore);
    });
}
/**
* @license
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Compares two `ExperimentalLongPollingOptions` objects for equality.
 * The only configurable option today is `timeoutSeconds`.
 */
function longPollingOptionsEqual(options1, options2) {
    const sameTimeout = options1.timeoutSeconds === options2.timeoutSeconds;
    return sameTimeout;
}
/**
 * Creates and returns a new `ExperimentalLongPollingOptions` with the same
 * option values as the given instance. `timeoutSeconds` is copied only when
 * it was explicitly set.
 */
function cloneLongPollingOptions(options) {
    const clone = {};
    const { timeoutSeconds } = options;
    if (timeoutSeconds !== undefined) {
        clone.timeoutSeconds = timeoutSeconds;
    }
    return clone;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Log tag used for component-provider debug messages.
const LOG_TAG$1 = 'ComponentProvider';
/**
 * An instance map that ensures only one Datastore exists per Firestore
 * instance.
 */
const datastoreInstances = new Map();
/**
 * Removes all components associated with the provided instance. Must be called
 * when the `Firestore` instance is terminated.
 */
function removeComponents(firestore) {
    const datastore = datastoreInstances.get(firestore);
    if (datastore) {
        logDebug(LOG_TAG$1, 'Removing Datastore');
        // Drop the map entry first so a re-entrant lookup cannot observe a
        // datastore that is being terminated.
        datastoreInstances.delete(firestore);
        datastore.terminate();
    }
}
/**
 * Builds a `DatabaseInfo` from the given identifiers and frozen settings,
 * defensively copying the long-polling options.
 */
function makeDatabaseInfo(databaseId, appId, persistenceKey, settings) {
    const longPollingOptions = cloneLongPollingOptions(settings.experimentalLongPollingOptions);
    return new DatabaseInfo(databaseId, appId, persistenceKey, settings.host, settings.ssl, settings.experimentalForceLongPolling, settings.experimentalAutoDetectLongPolling, longPollingOptions, settings.useFetchStreams);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// settings() defaults:
// Production backend host used when `settings.host` is not provided.
const DEFAULT_HOST = 'firestore.googleapis.com';
const DEFAULT_SSL = true;
// The minimum long-polling timeout is hardcoded on the server. The value here
// should be kept in sync with the value used by the server, as the server will
// silently ignore a value below the minimum and fall back to the default.
// Googlers see b/266868871 for relevant discussion.
const MIN_LONG_POLLING_TIMEOUT_SECONDS = 5;
// No maximum long-polling timeout is configured in the server, and defaults to
// 30 seconds, which is what Watch appears to use.
// Googlers see b/266868871 for relevant discussion.
const MAX_LONG_POLLING_TIMEOUT_SECONDS = 30;
// Whether long-polling auto-detected is enabled by default.
const DEFAULT_AUTO_DETECT_LONG_POLLING = true;
/**
 * A concrete type describing all the values that can be applied via a
 * user-supplied `FirestoreSettings` object. This is a separate type so that
 * defaults can be supplied and the value can be checked for equality.
 */
class FirestoreSettingsImpl {
    // Validates the user-supplied `settings`, fills in defaults, and throws
    // FirestoreError(INVALID_ARGUMENT) on inconsistent combinations.
    constructor(settings) {
        var _a, _b;
        // Host / SSL: `ssl` may only be customized together with `host`.
        if (settings.host === undefined) {
            if (settings.ssl !== undefined) {
                throw new FirestoreError(Code.INVALID_ARGUMENT, "Can't provide ssl option if host option is not set");
            }
            this.host = DEFAULT_HOST;
            this.ssl = DEFAULT_SSL;
        }
        else {
            this.host = settings.host;
            // Compiled form of `settings.ssl ?? DEFAULT_SSL`.
            this.ssl = (_a = settings.ssl) !== null && _a !== void 0 ? _a : DEFAULT_SSL;
        }
        this.credentials = settings.credentials;
        this.ignoreUndefinedProperties = !!settings.ignoreUndefinedProperties;
        this.localCache = settings.localCache;
        // Cache size: either the sentinel LRU_COLLECTION_DISABLED or a value of
        // at least LRU_MINIMUM_CACHE_SIZE_BYTES.
        if (settings.cacheSizeBytes === undefined) {
            this.cacheSizeBytes = LRU_DEFAULT_CACHE_SIZE_BYTES;
        }
        else {
            if (settings.cacheSizeBytes !== LRU_COLLECTION_DISABLED &&
                settings.cacheSizeBytes < LRU_MINIMUM_CACHE_SIZE_BYTES) {
                throw new FirestoreError(Code.INVALID_ARGUMENT, `cacheSizeBytes must be at least ${LRU_MINIMUM_CACHE_SIZE_BYTES}`);
            }
            else {
                this.cacheSizeBytes = settings.cacheSizeBytes;
            }
        }
        // Force and auto-detect long polling are mutually exclusive; forcing
        // long polling disables auto-detection outright.
        validateIsNotUsedTogether('experimentalForceLongPolling', settings.experimentalForceLongPolling, 'experimentalAutoDetectLongPolling', settings.experimentalAutoDetectLongPolling);
        this.experimentalForceLongPolling = !!settings.experimentalForceLongPolling;
        if (this.experimentalForceLongPolling) {
            this.experimentalAutoDetectLongPolling = false;
        }
        else if (settings.experimentalAutoDetectLongPolling === undefined) {
            this.experimentalAutoDetectLongPolling = DEFAULT_AUTO_DETECT_LONG_POLLING;
        }
        else {
            // For backwards compatibility, coerce the value to boolean even though
            // the TypeScript compiler has narrowed the type to boolean already.
            // noinspection PointlessBooleanExpressionJS
            this.experimentalAutoDetectLongPolling =
                !!settings.experimentalAutoDetectLongPolling;
        }
        // Compiled form of `settings.experimentalLongPollingOptions ?? {}`.
        this.experimentalLongPollingOptions = cloneLongPollingOptions((_b = settings.experimentalLongPollingOptions) !== null && _b !== void 0 ? _b : {});
        validateLongPollingOptions(this.experimentalLongPollingOptions);
        this.useFetchStreams = !!settings.useFetchStreams;
    }
    // Field-by-field equality over the frozen settings.
    // NOTE(review): `localCache` is not included in this comparison — confirm
    // whether that omission is intentional upstream.
    isEqual(other) {
        return (this.host === other.host &&
            this.ssl === other.ssl &&
            this.credentials === other.credentials &&
            this.cacheSizeBytes === other.cacheSizeBytes &&
            this.experimentalForceLongPolling ===
                other.experimentalForceLongPolling &&
            this.experimentalAutoDetectLongPolling ===
                other.experimentalAutoDetectLongPolling &&
            longPollingOptionsEqual(this.experimentalLongPollingOptions, other.experimentalLongPollingOptions) &&
            this.ignoreUndefinedProperties === other.ignoreUndefinedProperties &&
            this.useFetchStreams === other.useFetchStreams);
    }
}
/**
 * Validates `timeoutSeconds` of an `ExperimentalLongPollingOptions`: if set,
 * it must be a number between MIN_LONG_POLLING_TIMEOUT_SECONDS and
 * MAX_LONG_POLLING_TIMEOUT_SECONDS inclusive. Throws FirestoreError otherwise.
 */
function validateLongPollingOptions(options) {
    const timeout = options.timeoutSeconds;
    if (timeout === undefined) {
        return; // unset is always valid; server default applies
    }
    if (isNaN(timeout)) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, `invalid long polling timeout: ` +
            `${options.timeoutSeconds} (must not be NaN)`);
    }
    if (timeout < MIN_LONG_POLLING_TIMEOUT_SECONDS) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, `invalid long polling timeout: ${options.timeoutSeconds} ` +
            `(minimum allowed value is ${MIN_LONG_POLLING_TIMEOUT_SECONDS})`);
    }
    if (timeout > MAX_LONG_POLLING_TIMEOUT_SECONDS) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, `invalid long polling timeout: ${options.timeoutSeconds} ` +
            `(maximum allowed value is ${MAX_LONG_POLLING_TIMEOUT_SECONDS})`);
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * The Cloud Firestore service interface.
 *
 * Do not call this constructor directly. Instead, use {@link (getFirestore:1)}.
 */
class Firestore$1 {
    /** @hideconstructor */
    constructor(_authCredentials, _appCheckCredentials, _databaseId, _app) {
        this._authCredentials = _authCredentials;
        this._appCheckCredentials = _appCheckCredentials;
        this._databaseId = _databaseId;
        this._app = _app;
        /**
         * Whether it's a Firestore or Firestore Lite instance.
         */
        this.type = 'firestore-lite';
        this._persistenceKey = '(lite)';
        this._settings = new FirestoreSettingsImpl({});
        this._settingsFrozen = false;
    }
    /**
     * The {@link @firebase/app#FirebaseApp} associated with this `Firestore` service
     * instance. Throws if the instance was created without a FirebaseApp.
     */
    get app() {
        if (this._app) {
            return this._app;
        }
        throw new FirestoreError(Code.FAILED_PRECONDITION, "Firestore was not initialized using the Firebase SDK. 'app' is " +
            'not available');
    }
    /** True once settings have been frozen by the first operation. */
    get _initialized() {
        return this._settingsFrozen;
    }
    /** True once termination has started. */
    get _terminated() {
        return this._terminateTask !== undefined;
    }
    /**
     * Replaces the instance's settings. Only permitted before any other
     * method has been called (i.e. before settings are frozen).
     */
    _setSettings(settings) {
        if (this._settingsFrozen) {
            throw new FirestoreError(Code.FAILED_PRECONDITION, 'Firestore has already been started and its settings can no longer ' +
                'be changed. You can only modify settings before calling any other ' +
                'methods on a Firestore object.');
        }
        this._settings = new FirestoreSettingsImpl(settings);
        const { credentials } = settings;
        if (credentials !== undefined) {
            this._authCredentials = makeAuthCredentialsProvider(credentials);
        }
    }
    _getSettings() {
        return this._settings;
    }
    /** Freezes settings and returns them; called on first real operation. */
    _freezeSettings() {
        this._settingsFrozen = true;
        return this._settings;
    }
    /** Starts termination (idempotent) and returns the termination promise. */
    _delete() {
        if (this._terminateTask === undefined) {
            this._terminateTask = this._terminate();
        }
        return this._terminateTask;
    }
    /** Returns a JSON-serializable representation of this `Firestore` instance. */
    toJSON() {
        return {
            app: this._app,
            databaseId: this._databaseId,
            settings: this._settings
        };
    }
    /**
     * Terminates all components used by this client. Subclasses can override
     * this method to clean up their own dependencies, but must also call this
     * method.
     *
     * Only ever called once.
     */
    _terminate() {
        removeComponents(this);
        return Promise.resolve();
    }
}
/**
 * Modify this instance to communicate with the Cloud Firestore emulator.
 *
 * Note: This must be called before this instance has been used to do any
 * operations.
 *
 * @param firestore - The `Firestore` instance to configure to connect to the
 * emulator.
 * @param host - the emulator host (ex: localhost).
 * @param port - the emulator port (ex: 9000).
 * @param options.mockUserToken - the mock auth token to use for unit testing
 * Security Rules.
 */
function connectFirestoreEmulator(firestore, host, port, options = {}) {
    firestore = cast(firestore, Firestore$1);
    const settings = firestore._getSettings();
    const emulatorHost = `${host}:${port}`;
    if (settings.host !== DEFAULT_HOST && settings.host !== emulatorHost) {
        logWarn('Host has been set in both settings() and connectFirestoreEmulator(), emulator host ' +
            'will be used.');
    }
    // Point at the emulator; it never uses SSL.
    firestore._setSettings({ ...settings, host: emulatorHost, ssl: false });
    if (!options.mockUserToken) {
        return;
    }
    let token;
    let user;
    if (typeof options.mockUserToken === 'string') {
        token = options.mockUserToken;
        user = User.MOCK_USER;
    }
    else {
        // Let createMockUserToken validate first (catches common mistakes like
        // invalid field "uid" and missing field "sub" / "user_id".)
        token = createMockUserToken(options.mockUserToken, firestore._app?.options.projectId);
        const uid = options.mockUserToken.sub || options.mockUserToken.user_id;
        if (!uid) {
            throw new FirestoreError(Code.INVALID_ARGUMENT, "mockUserToken must contain 'sub' or 'user_id' field!");
        }
        user = new User(uid);
    }
    firestore._authCredentials = new EmulatorAuthCredentialsProvider(new OAuthToken(token, user));
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * A `Query` refers to a query which you can read or listen to. You can also
 * construct refined `Query` objects by adding filters and ordering.
 */
class Query {
    // This is the lite version of the Query class in the main SDK.
    /** @hideconstructor protected */
    constructor(firestore,
    /**
     * If provided, the `FirestoreDataConverter` associated with this instance.
     */
    converter, _query) {
        this.firestore = firestore;
        this.converter = converter;
        this._query = _query;
        /** The type of this Firestore reference. */
        this.type = 'query';
    }
    /** Returns a copy of this query that uses `converter` for its documents. */
    withConverter(converter) {
        return new Query(this.firestore, converter, this._query);
    }
}
/**
 * A `DocumentReference` refers to a document location in a Firestore database
 * and can be used to write, read, or listen to the location. The document at
 * the referenced location may or may not exist.
 */
class DocumentReference {
    /** @hideconstructor */
    constructor(firestore,
    /**
     * If provided, the `FirestoreDataConverter` associated with this instance.
     */
    converter, _key) {
        this.firestore = firestore;
        this.converter = converter;
        this._key = _key;
        /** The type of this Firestore reference. */
        this.type = 'document';
    }
    get _path() {
        return this._key.path;
    }
    /**
     * The document's identifier within its collection.
     */
    get id() {
        const path = this._key.path;
        return path.lastSegment();
    }
    /**
     * A string representing the path of the referenced document (relative
     * to the root of the database).
     */
    get path() {
        const path = this._key.path;
        return path.canonicalString();
    }
    /**
     * The collection this `DocumentReference` belongs to.
     */
    get parent() {
        const parentPath = this._key.path.popLast();
        return new CollectionReference(this.firestore, this.converter, parentPath);
    }
    /** Returns a copy of this reference that uses `converter` for its data. */
    withConverter(converter) {
        return new DocumentReference(this.firestore, converter, this._key);
    }
}
/**
 * A `CollectionReference` object can be used for adding documents, getting
 * document references, and querying for documents (using {@link (query:1)}).
 */
class CollectionReference extends Query {
    /** @hideconstructor */
    constructor(firestore, converter, _path) {
        super(firestore, converter, newQueryForPath(_path));
        this._path = _path;
        /** The type of this Firestore reference. */
        this.type = 'collection';
    }
    /** The collection's identifier. */
    get id() {
        return this._query.path.lastSegment();
    }
    /**
     * A string representing the path of the referenced collection (relative
     * to the root of the database).
     */
    get path() {
        return this._query.path.canonicalString();
    }
    /**
     * A reference to the containing `DocumentReference` if this is a
     * subcollection. If this isn't a subcollection, the reference is null.
     */
    get parent() {
        const parentPath = this._path.popLast();
        return parentPath.isEmpty()
            ? null
            : new DocumentReference(this.firestore,
            /* converter= */ null, new DocumentKey(parentPath));
    }
    /** Returns a copy of this reference that uses `converter` for its data. */
    withConverter(converter) {
        return new CollectionReference(this.firestore, converter, this._path);
    }
}
/**
 * Returns a `CollectionReference` for the collection at `path` (plus optional
 * extra segments), relative either to a Firestore instance or to an existing
 * document/collection reference.
 */
function collection(parent, path, ...pathSegments) {
    parent = getModularInstance(parent);
    validateNonEmptyArgument('collection', 'path', path);
    if (parent instanceof Firestore$1) {
        const absolutePath = ResourcePath.fromString(path, ...pathSegments);
        validateCollectionPath(absolutePath);
        return new CollectionReference(parent, /* converter= */ null, absolutePath);
    }
    if (!(parent instanceof DocumentReference) &&
        !(parent instanceof CollectionReference)) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, 'Expected first argument to collection() to be a CollectionReference, ' +
            'a DocumentReference or FirebaseFirestore');
    }
    const absolutePath = parent._path.child(ResourcePath.fromString(path, ...pathSegments));
    validateCollectionPath(absolutePath);
    return new CollectionReference(parent.firestore,
    /* converter= */ null, absolutePath);
}
// TODO(firestorelite): Consider using ErrorFactory -
// https://github.com/firebase/firebase-js-sdk/blob/0131e1f/packages/util/src/errors.ts#L106
/**
 * Creates and returns a new `Query` instance that includes all documents in the
 * database that are contained in a collection or subcollection with the
 * given `collectionId`.
 *
 * @param firestore - A reference to the root `Firestore` instance.
 * @param collectionId - Identifies the collections to query over. Every
 * collection or subcollection with this ID as the last segment of its path
 * will be included. Cannot contain a slash.
 * @returns The created `Query`.
 */
function collectionGroup(firestore, collectionId) {
    firestore = cast(firestore, Firestore$1);
    validateNonEmptyArgument('collectionGroup', 'collection id', collectionId);
    if (collectionId.includes('/')) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid collection ID '${collectionId}' passed to function ` +
            `collectionGroup(). Collection IDs must not contain '/'.`);
    }
    return new Query(firestore,
    /* converter= */ null, newQueryForCollectionGroup(collectionId));
}
/**
 * Returns a `DocumentReference` for the document at `path` (plus optional
 * extra segments), relative either to a Firestore instance or to an existing
 * document/collection reference. With a single argument, an auto-generated
 * document id is used.
 */
function doc(parent, path, ...pathSegments) {
    parent = getModularInstance(parent);
    // We allow omission of 'pathString' but explicitly prohibit passing in both
    // 'undefined' and 'null'.
    if (arguments.length === 1) {
        path = AutoId.newId();
    }
    validateNonEmptyArgument('doc', 'path', path);
    if (parent instanceof Firestore$1) {
        const absolutePath = ResourcePath.fromString(path, ...pathSegments);
        validateDocumentPath(absolutePath);
        return new DocumentReference(parent,
        /* converter= */ null, new DocumentKey(absolutePath));
    }
    else {
        if (!(parent instanceof DocumentReference) &&
            !(parent instanceof CollectionReference)) {
            // Fix: this message previously said "collection()" although it is
            // raised by doc() — a copy-paste of the collection() error.
            throw new FirestoreError(Code.INVALID_ARGUMENT, 'Expected first argument to doc() to be a CollectionReference, ' +
                'a DocumentReference or FirebaseFirestore');
        }
        const absolutePath = parent._path.child(ResourcePath.fromString(path, ...pathSegments));
        validateDocumentPath(absolutePath);
        // A document created directly under a collection inherits that
        // collection's converter.
        return new DocumentReference(parent.firestore, parent instanceof CollectionReference ? parent.converter : null, new DocumentKey(absolutePath));
    }
}
/**
 * Returns true if the provided references are equal.
 *
 * @param left - A reference to compare.
 * @param right - A reference to compare.
 * @returns true if the references point to the same location in the same
 * Firestore database.
 */
function refEqual(left, right) {
    left = getModularInstance(left);
    right = getModularInstance(right);
    const isReference = r => r instanceof DocumentReference || r instanceof CollectionReference;
    if (!isReference(left) || !isReference(right)) {
        return false;
    }
    return (left.firestore === right.firestore &&
        left.path === right.path &&
        left.converter === right.converter);
}
/**
 * Returns true if the provided queries point to the same collection and apply
 * the same constraints.
 *
 * @param left - A `Query` to compare.
 * @param right - A `Query` to compare.
 * @returns true if the references point to the same location in the same
 * Firestore database.
 */
function queryEqual(left, right) {
    left = getModularInstance(left);
    right = getModularInstance(right);
    if (!(left instanceof Query) || !(right instanceof Query)) {
        return false;
    }
    return (left.firestore === right.firestore &&
        queryEquals(left._query, right._query) &&
        left.converter === right.converter);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Log tag used for AsyncQueue diagnostics.
const LOG_TAG = 'AsyncQueue';
/**
 * Serializes all Firestore work onto a single promise chain ("tail"), with
 * support for delayed (timer-based) operations and retryable operations that
 * are retried with exponential backoff. Also implements restricted-mode
 * shutdown semantics.
 */
class AsyncQueueImpl {
    constructor() {
        // The last promise in the queue.
        this.tail = Promise.resolve();
        // A list of retryable operations. Retryable operations are run in order and
        // retried with backoff.
        this.retryableOps = [];
        // Is this AsyncQueue being shut down? Once it is set to true, it will not
        // be changed again.
        this._isShuttingDown = false;
        // Operations scheduled to be queued in the future. Operations are
        // automatically removed after they are run or canceled.
        this.delayedOperations = [];
        // visible for testing
        this.failure = null;
        // Flag set while there's an outstanding AsyncQueue operation, used for
        // assertion sanity-checks.
        this.operationInProgress = false;
        // Enabled during shutdown on Safari to prevent future access to IndexedDB.
        this.skipNonRestrictedTasks = false;
        // List of TimerIds to fast-forward delays for.
        this.timerIdsToSkip = [];
        // Backoff timer used to schedule retries for retryable operations
        this.backoff = new ExponentialBackoff(this, "async_queue_retry" /* TimerId.AsyncQueueRetry */);
        // Visibility handler that triggers an immediate retry of all retryable
        // operations. Meant to speed up recovery when we regain file system access
        // after page comes into foreground.
        this.visibilityHandler = () => {
            this.backoff.skipBackoff();
        };
    }
    get isShuttingDown() {
        return this._isShuttingDown;
    }
    /**
     * Adds a new operation to the queue without waiting for it to complete (i.e.
     * we ignore the Promise result).
     */
    enqueueAndForget(op) {
        // eslint-disable-next-line @typescript-eslint/no-floating-promises
        this.enqueue(op);
    }
    // Like enqueueAndForget, but bypasses the shutdown check so the operation
    // also runs while the queue is in restricted (shutting-down) mode.
    enqueueAndForgetEvenWhileRestricted(op) {
        this.verifyNotFailed();
        // eslint-disable-next-line @typescript-eslint/no-floating-promises
        this.enqueueInternal(op);
    }
    // Puts the queue into restricted mode; with `purgeExistingTasks`, already
    // enqueued non-restricted tasks are skipped when they come up.
    enterRestrictedMode(purgeExistingTasks) {
        if (!this._isShuttingDown) {
            this._isShuttingDown = true;
            this.skipNonRestrictedTasks = purgeExistingTasks || false;
        }
    }
    // Enqueues `op` and returns a promise for its result. During shutdown the
    // returned promise never settles, but the internal chain still advances.
    enqueue(op) {
        this.verifyNotFailed();
        if (this._isShuttingDown) {
            // Return a Promise which never resolves.
            return new Promise(() => { });
        }
        // Create a deferred Promise that we can return to the callee. This
        // allows us to return a "hanging Promise" only to the callee and still
        // advance the queue even when the operation is not run.
        const task = new Deferred();
        return this.enqueueInternal(() => {
            if (this._isShuttingDown && this.skipNonRestrictedTasks) {
                // We do not resolve 'task'
                return Promise.resolve();
            }
            op().then(task.resolve, task.reject);
            return task.promise;
        }).then(() => task.promise);
    }
    // Adds `op` to the retryable-operations list and starts processing it.
    enqueueRetryable(op) {
        this.enqueueAndForget(() => {
            this.retryableOps.push(op);
            return this.retryNextOp();
        });
    }
    /**
     * Runs the next operation from the retryable queue. If the operation fails,
     * reschedules with backoff.
     */
    async retryNextOp() {
        if (this.retryableOps.length === 0) {
            return;
        }
        try {
            await this.retryableOps[0]();
            this.retryableOps.shift();
            this.backoff.reset();
        }
        catch (e) {
            if (isIndexedDbTransactionError(e)) {
                logDebug(LOG_TAG, 'Operation failed with retryable error: ' + e);
            }
            else {
                throw e; // Failure will be handled by AsyncQueue
            }
        }
        if (this.retryableOps.length > 0) {
            // If there are additional operations, we re-schedule `retryNextOp()`.
            // This is necessary to run retryable operations that failed during
            // their initial attempt since we don't know whether they are already
            // enqueued. If, for example, `op1`, `op2`, `op3` are enqueued and `op1`
            // needs to be re-run, we will run `op1`, `op1`, `op2` using the
            // already enqueued calls to `retryNextOp()`. `op3()` will then run in the
            // call scheduled here.
            // Since `backoffAndRun()` cancels an existing backoff and schedules a
            // new backoff on every call, there is only ever a single additional
            // operation in the queue.
            this.backoff.backoffAndRun(() => this.retryNextOp());
        }
    }
    // Chains `op` onto the tail. On failure, `this.failure` is recorded and the
    // error is re-thrown so the tail stays rejected and later enqueues fail fast.
    enqueueInternal(op) {
        const newTail = this.tail.then(() => {
            this.operationInProgress = true;
            return op()
                .catch((error) => {
                this.failure = error;
                this.operationInProgress = false;
                const message = getMessageOrStack(error);
                logError('INTERNAL UNHANDLED ERROR: ', message);
                // Re-throw the error so that this.tail becomes a rejected Promise and
                // all further attempts to chain (via .then) will just short-circuit
                // and return the rejected Promise.
                throw error;
            })
                .then(result => {
                this.operationInProgress = false;
                return result;
            });
        });
        this.tail = newTail;
        return newTail;
    }
    // Schedules `op` to be enqueued after `delayMs` (0 if the timer id has been
    // marked for fast-forwarding). Returns the cancelable DelayedOperation.
    enqueueAfterDelay(timerId, delayMs, op) {
        this.verifyNotFailed();
        // Fast-forward delays for timerIds that have been overriden.
        if (this.timerIdsToSkip.indexOf(timerId) > -1) {
            delayMs = 0;
        }
        const delayedOp = DelayedOperation.createAndSchedule(this, timerId, delayMs, op, removedOp => this.removeDelayedOperation(removedOp));
        this.delayedOperations.push(delayedOp);
        return delayedOp;
    }
    // Asserts via fail() that no prior operation has left the queue in a failed
    // state.
    verifyNotFailed() {
        if (this.failure) {
            fail();
        }
    }
    // NOTE(review): intentionally empty here — this appears to be a debug
    // assertion whose body was stripped from this release build; confirm
    // against the SDK sources.
    verifyOperationInProgress() {
    }
    /**
     * Waits until all currently queued tasks are finished executing. Delayed
     * operations are not run.
     */
    async drain() {
        // Operations in the queue prior to draining may have enqueued additional
        // operations. Keep draining the queue until the tail is no longer advanced,
        // which indicates that no more new operations were enqueued and that all
        // operations were executed.
        let currentTail;
        do {
            currentTail = this.tail;
            await currentTail;
        } while (currentTail !== this.tail);
    }
    /**
     * For Tests: Determine if a delayed operation with a particular TimerId
     * exists.
     */
    containsDelayedOperation(timerId) {
        for (const op of this.delayedOperations) {
            if (op.timerId === timerId) {
                return true;
            }
        }
        return false;
    }
    /**
     * For Tests: Runs some or all delayed operations early.
     *
     * @param lastTimerId - Delayed operations up to and including this TimerId
     * will be drained. Pass TimerId.All to run all delayed operations.
     * @returns a Promise that resolves once all operations have been run.
     */
    runAllDelayedOperationsUntil(lastTimerId) {
        // Note that draining may generate more delayed ops, so we do that first.
        return this.drain().then(() => {
            // Run ops in the same order they'd run if they ran naturally.
            this.delayedOperations.sort((a, b) => a.targetTimeMs - b.targetTimeMs);
            for (const op of this.delayedOperations) {
                op.skipDelay();
                if (lastTimerId !== "all" /* TimerId.All */ && op.timerId === lastTimerId) {
                    break;
                }
            }
            return this.drain();
        });
    }
    /**
     * For Tests: Skip all subsequent delays for a timer id.
     */
    skipDelaysForTimerId(timerId) {
        this.timerIdsToSkip.push(timerId);
    }
    /** Called once a DelayedOperation is run or canceled. */
    removeDelayedOperation(op) {
        // NOTE: indexOf / slice are O(n), but delayedOperations is expected to be small.
        const index = this.delayedOperations.indexOf(op);
        this.delayedOperations.splice(index, 1);
    }
}
/** Factory for the default AsyncQueue implementation. */
function newAsyncQueue() {
    const queue = new AsyncQueueImpl();
    return queue;
}
/**
 * Chrome includes Error.message in Error.stack. Other browsers do not.
 * This returns expected output of message + stack when available.
 * @param error - Error or FirestoreError
 */
function getMessageOrStack(error) {
    const message = error.message || '';
    if (!error.stack) {
        return message;
    }
    return error.stack.includes(error.message)
        ? error.stack
        : error.message + '\n' + error.stack;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Represents the task of loading a Firestore bundle. It provides progress of
 * bundle loading, as well as task completion and error events.
 *
 * The API is compatible with `Promise<LoadBundleTaskProgress>`.
 */
class LoadBundleTask {
    constructor() {
        // Observer callbacks registered via onProgress(); empty until then.
        this._progressObserver = {};
        // Backs the Promise-like then()/catch() interface.
        this._taskCompletionResolver = new Deferred();
        // Most recent progress snapshot; starts out in the 'Running' state.
        this._lastProgress = {
            taskState: 'Running',
            totalBytes: 0,
            totalDocuments: 0,
            bytesLoaded: 0,
            documentsLoaded: 0
        };
    }
    /**
     * Registers functions to listen to bundle loading progress events.
     * @param next - Called when there is a progress update from bundle loading.
     * Typically `next` calls occur each time a Firestore document is loaded
     * from the bundle.
     * @param error - Called when an error occurs during bundle loading. The
     * task aborts after reporting the error, and there should be no more
     * updates after this.
     * @param complete - Called when the loading task is complete.
     */
    onProgress(next, error, complete) {
        this._progressObserver = { next, error, complete };
    }
    /**
     * Implements the `Promise<LoadBundleTaskProgress>.catch` interface.
     *
     * @param onRejected - Called when an error occurs during bundle loading.
     */
    catch(onRejected) {
        return this._taskCompletionResolver.promise.catch(onRejected);
    }
    /**
     * Implements the `Promise<LoadBundleTaskProgress>.then` interface.
     *
     * @param onFulfilled - Called on the completion of the loading task with a
     * final `LoadBundleTaskProgress` update. The update will always have its
     * `taskState` set to `"Success"`.
     * @param onRejected - Called when an error occurs during bundle loading.
     */
    then(onFulfilled, onRejected) {
        return this._taskCompletionResolver.promise.then(onFulfilled, onRejected);
    }
    /**
     * Notifies all observers that bundle loading has completed, with a provided
     * `LoadBundleTaskProgress` object.
     *
     * @private
     */
    _completeWith(progress) {
        this._updateProgress(progress);
        this._progressObserver.complete?.();
        this._taskCompletionResolver.resolve(progress);
    }
    /**
     * Notifies all observers that bundle loading has failed, with a provided
     * `Error` as the reason.
     *
     * @private
     */
    _failWith(error) {
        this._lastProgress.taskState = 'Error';
        this._progressObserver.next?.(this._lastProgress);
        this._progressObserver.error?.(error);
        this._taskCompletionResolver.reject(error);
    }
    /**
     * Records and broadcasts a progress update.
     * @param progress - The new progress.
     *
     * @private
     */
    _updateProgress(progress) {
        this._lastProgress = progress;
        this._progressObserver.next?.(progress);
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Constant used to indicate the LRU garbage collection should be disabled.
 * Set this value as the `cacheSizeBytes` on the settings passed to the
 * {@link Firestore} instance.
 */
// Aliases the internal sentinel; its concrete value is defined elsewhere in
// this file (see LRU_COLLECTION_DISABLED).
const CACHE_SIZE_UNLIMITED = LRU_COLLECTION_DISABLED;
/**
 * The Cloud Firestore service interface.
 *
 * Do not call this constructor directly. Instead, use {@link (getFirestore:1)}.
 */
class Firestore extends Firestore$1 {
    /** @hideconstructor */
    constructor(authCredentialsProvider, appCheckCredentialsProvider, databaseId, app) {
        super(authCredentialsProvider, appCheckCredentialsProvider, databaseId, app);
        /**
         * Whether it's a {@link Firestore} or Firestore Lite instance.
         */
        this.type = 'firestore';
        this._queue = newAsyncQueue();
        // Key used for persistence; defaults to '[DEFAULT]' when no app name exists.
        this._persistenceKey = app?.name || '[DEFAULT]';
    }
    _terminate() {
        if (!this._firestoreClient) {
            // The client must be initialized to ensure that all subsequent API
            // usage throws an exception.
            configureFirestore(this);
        }
        return this._firestoreClient.terminate();
    }
}
/**
 * Initializes a new instance of {@link Firestore} with the provided settings.
 * Can only be called before any other function, including
 * {@link (getFirestore:1)}. If the custom settings are empty, this function is
 * equivalent to calling {@link (getFirestore:1)}.
 *
 * @param app - The {@link @firebase/app#FirebaseApp} with which the {@link Firestore} instance will
 * be associated.
 * @param settings - A settings object to configure the {@link Firestore} instance.
 * @param databaseId - The name of the database.
 * @returns A newly initialized {@link Firestore} instance.
 * @throws FirestoreError if already initialized with different options, or if
 * the cache settings are contradictory or below the minimum size.
 */
function initializeFirestore(app, settings, databaseId) {
    if (!databaseId) {
        databaseId = DEFAULT_DATABASE_NAME;
    }
    const provider = _getProvider(app, 'firestore');
    if (provider.isInitialized(databaseId)) {
        // Re-initialization is only allowed with identical options.
        const existingInstance = provider.getImmediate({
            identifier: databaseId
        });
        const initialSettings = provider.getOptions(databaseId);
        if (deepEqual(initialSettings, settings)) {
            return existingInstance;
        }
        else {
            throw new FirestoreError(Code.FAILED_PRECONDITION, 'initializeFirestore() has already been called with ' +
                'different options. To avoid this error, call initializeFirestore() with the ' +
                'same options as when it was originally called, or call getFirestore() to return the' +
                ' already initialized instance.');
        }
    }
    if (settings.cacheSizeBytes !== undefined &&
        settings.localCache !== undefined) {
        // Bug fix: the two concatenated fragments previously rendered as
        // "cacheSizeBytes willbe deprecated" (missing space at the join).
        throw new FirestoreError(Code.INVALID_ARGUMENT, `cache and cacheSizeBytes cannot be specified at the same time as cacheSizeBytes will ` +
            `be deprecated. Instead, specify the cache size in the cache object`);
    }
    if (settings.cacheSizeBytes !== undefined &&
        settings.cacheSizeBytes !== CACHE_SIZE_UNLIMITED &&
        settings.cacheSizeBytes < LRU_MINIMUM_CACHE_SIZE_BYTES) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, `cacheSizeBytes must be at least ${LRU_MINIMUM_CACHE_SIZE_BYTES}`);
    }
    return provider.initialize({
        options: settings,
        instanceIdentifier: databaseId
    });
}
/**
 * Returns the {@link Firestore} instance for the given app / database id,
 * auto-connecting to the emulator (when configured via environment) on first use.
 */
function getFirestore(appOrDatabaseId, optionalDatabaseId) {
    const app = typeof appOrDatabaseId === 'object' ? appOrDatabaseId : getApp();
    let databaseId;
    if (typeof appOrDatabaseId === 'string') {
        databaseId = appOrDatabaseId;
    }
    else {
        databaseId = optionalDatabaseId || DEFAULT_DATABASE_NAME;
    }
    const db = _getProvider(app, 'firestore').getImmediate({ identifier: databaseId });
    if (!db._initialized) {
        // Honor a default emulator configuration before first use.
        const emulator = getDefaultEmulatorHostnameAndPort('firestore');
        if (emulator) {
            connectFirestoreEmulator(db, ...emulator);
        }
    }
    return db;
}
/**
 * Lazily configures the FirestoreClient for this instance, verifies it has
 * not been terminated, and returns it.
 * @internal
 */
function ensureFirestoreConfigured(firestore) {
    if (!firestore._firestoreClient) {
        configureFirestore(firestore);
    }
    const client = firestore._firestoreClient;
    client.verifyNotTerminated();
    return client;
}
/**
 * Builds the FirestoreClient from frozen settings and, when a local cache
 * supplies both component providers, records them for deferred initialization.
 */
function configureFirestore(firestore) {
    const settings = firestore._freezeSettings();
    const appId = firestore._app?.options.appId || '';
    const databaseInfo = makeDatabaseInfo(firestore._databaseId, appId, firestore._persistenceKey, settings);
    firestore._firestoreClient = new FirestoreClient(firestore._authCredentials, firestore._appCheckCredentials, firestore._queue, databaseInfo);
    const localCache = settings.localCache;
    if (localCache?._offlineComponentProvider && localCache?._onlineComponentProvider) {
        firestore._firestoreClient._uninitializedComponentsProvider = {
            _offlineKind: localCache.kind,
            _offline: localCache._offlineComponentProvider,
            _online: localCache._onlineComponentProvider
        };
    }
}
/**
* Attempts to enable persistent storage, if possible.
*
* On failure, `enableIndexedDbPersistence()` will reject the promise or
* throw an exception. There are several reasons why this can fail, which can be
* identified by the `code` on the error.
*
* * failed-precondition: The app is already open in another browser tab.
* * unimplemented: The browser is incompatible with the offline persistence
* implementation.
*
* Note that even after a failure, the {@link Firestore} instance will remain
* usable, however offline persistence will be disabled.
*
* Note: `enableIndexedDbPersistence()` must be called before any other functions
* (other than {@link initializeFirestore}, {@link (getFirestore:1)} or
* {@link clearIndexedDbPersistence}.
*
* Persistence cannot be used in a Node.js environment.
*
* @param firestore - The {@link Firestore} instance to enable persistence for.
* @param persistenceSettings - Optional settings object to configure
* persistence.
* @returns A `Promise` that represents successfully enabling persistent storage.
* @deprecated This function will be removed in a future major release. Instead, set
* `FirestoreSettings.localCache` to an instance of `PersistentLocalCache` to
* turn on IndexedDb cache. Calling this function when `FirestoreSettings.localCache`
* is already specified will throw an exception.
*/
function enableIndexedDbPersistence(firestore, persistenceSettings) {
    firestore = cast(firestore, Firestore);
    verifyNotInitialized(firestore);
    const client = ensureFirestoreConfigured(firestore);
    if (client._uninitializedComponentsProvider) {
        throw new FirestoreError(Code.FAILED_PRECONDITION, 'SDK cache is already specified.');
    }
    logWarn('enableIndexedDbPersistence() will be deprecated in the future, ' +
        'you can use `FirestoreSettings.cache` instead.');
    const settings = firestore._freezeSettings();
    // Wire up an online provider and an IndexedDb-backed offline provider.
    const onlineProvider = new OnlineComponentProvider();
    const offlineProvider = new IndexedDbOfflineComponentProvider(onlineProvider, settings.cacheSizeBytes, persistenceSettings?.forceOwnership);
    return setPersistenceProviders(client, onlineProvider, offlineProvider);
}
/**
* Attempts to enable multi-tab persistent storage, if possible. If enabled
* across all tabs, all operations share access to local persistence, including
* shared execution of queries and latency-compensated local document updates
* across all connected instances.
*
* On failure, `enableMultiTabIndexedDbPersistence()` will reject the promise or
* throw an exception. There are several reasons why this can fail, which can be
* identified by the `code` on the error.
*
* * failed-precondition: The app is already open in another browser tab and
* multi-tab is not enabled.
* * unimplemented: The browser is incompatible with the offline persistence
* implementation.
*
* Note that even after a failure, the {@link Firestore} instance will remain
* usable, however offline persistence will be disabled.
*
* @param firestore - The {@link Firestore} instance to enable persistence for.
* @returns A `Promise` that represents successfully enabling persistent
* storage.
* @deprecated This function will be removed in a future major release. Instead, set
* `FirestoreSettings.localCache` to an instance of `PersistentLocalCache` to
* turn on indexeddb cache. Calling this function when `FirestoreSettings.localCache`
* is already specified will throw an exception.
*/
function enableMultiTabIndexedDbPersistence(firestore) {
    firestore = cast(firestore, Firestore);
    verifyNotInitialized(firestore);
    const client = ensureFirestoreConfigured(firestore);
    if (client._uninitializedComponentsProvider) {
        throw new FirestoreError(Code.FAILED_PRECONDITION, 'SDK cache is already specified.');
    }
    logWarn('enableMultiTabIndexedDbPersistence() will be deprecated in the future, ' +
        'you can use `FirestoreSettings.cache` instead.');
    const settings = firestore._freezeSettings();
    // Wire up an online provider and a multi-tab IndexedDb offline provider.
    const onlineProvider = new OnlineComponentProvider();
    const offlineProvider = new MultiTabOfflineComponentProvider(onlineProvider, settings.cacheSizeBytes);
    return setPersistenceProviders(client, onlineProvider, offlineProvider);
}
/**
 * Registers both the `OfflineComponentProvider` and `OnlineComponentProvider`.
 * If the operation fails with a recoverable error (see
 * `canFallbackFromIndexedDbError()`), the returned Promise is rejected
 * but the client remains usable (it falls back to the memory cache).
 */
function setPersistenceProviders(client, onlineComponentProvider, offlineComponentProvider) {
    const persistenceResult = new Deferred();
    return client.asyncQueue
        .enqueue(async () => {
        try {
            // Install the offline components first, then the online components.
            await setOfflineComponentProvider(client, offlineComponentProvider);
            await setOnlineComponentProvider(client, onlineComponentProvider);
            persistenceResult.resolve();
        }
        catch (e) {
            const error = e;
            // Non-recoverable errors propagate and reject the queued task itself.
            if (!canFallbackFromIndexedDbError(error)) {
                throw error;
            }
            // Recoverable IndexedDB failure: warn and reject only the caller's
            // promise; the queued task completes so the client stays usable.
            logWarn('Error enabling indexeddb cache. Falling back to ' +
                'memory cache: ' +
                error);
            persistenceResult.reject(error);
        }
    })
        .then(() => persistenceResult.promise);
}
/**
* Clears the persistent storage. This includes pending writes and cached
* documents.
*
* Must be called while the {@link Firestore} instance is not started (after the app is
* terminated or when the app is first initialized). On startup, this function
* must be called before other functions (other than {@link
* initializeFirestore} or {@link (getFirestore:1)})). If the {@link Firestore}
* instance is still running, the promise will be rejected with the error code
* of `failed-precondition`.
*
* Note: `clearIndexedDbPersistence()` is primarily intended to help write
* reliable tests that use Cloud Firestore. It uses an efficient mechanism for
* dropping existing data but does not attempt to securely overwrite or
* otherwise make cached data unrecoverable. For applications that are sensitive
* to the disclosure of cached data in between user sessions, we strongly
* recommend not enabling persistence at all.
*
* @param firestore - The {@link Firestore} instance to clear persistence for.
* @returns A `Promise` that is resolved when the persistent storage is
* cleared. Otherwise, the promise is rejected with an error.
*/
function clearIndexedDbPersistence(firestore) {
    if (firestore._initialized && !firestore._terminated) {
        throw new FirestoreError(Code.FAILED_PRECONDITION, 'Persistence can only be cleared before a Firestore instance is ' +
            'initialized or after it is terminated.');
    }
    const result = new Deferred();
    // Clearing is allowed even while the queue is restricted (post-terminate).
    firestore._queue.enqueueAndForgetEvenWhileRestricted(async () => {
        try {
            const prefix = indexedDbStoragePrefix(firestore._databaseId, firestore._persistenceKey);
            await indexedDbClearPersistence(prefix);
            result.resolve();
        }
        catch (e) {
            result.reject(e);
        }
    });
    return result.promise;
}
/**
* Waits until all currently pending writes for the active user have been
* acknowledged by the backend.
*
* The returned promise resolves immediately if there are no outstanding writes.
* Otherwise, the promise waits for all previously issued writes (including
* those written in a previous app session), but it does not wait for writes
* that were added after the function is called. If you want to wait for
* additional writes, call `waitForPendingWrites()` again.
*
* Any outstanding `waitForPendingWrites()` promises are rejected during user
* changes.
*
* @returns A `Promise` which resolves when all currently pending writes have been
* acknowledged by the backend.
*/
function waitForPendingWrites(firestore) {
    const db = cast(firestore, Firestore);
    const client = ensureFirestoreConfigured(db);
    return firestoreClientWaitForPendingWrites(client);
}
/**
* Re-enables use of the network for this {@link Firestore} instance after a prior
* call to {@link disableNetwork}.
*
* @returns A `Promise` that is resolved once the network has been enabled.
*/
function enableNetwork(firestore) {
    const db = cast(firestore, Firestore);
    const client = ensureFirestoreConfigured(db);
    return firestoreClientEnableNetwork(client);
}
/**
* Disables network usage for this instance. It can be re-enabled via {@link
* enableNetwork}. While the network is disabled, any snapshot listeners,
* `getDoc()` or `getDocs()` calls will return results from cache, and any write
* operations will be queued until the network is restored.
*
* @returns A `Promise` that is resolved once the network has been disabled.
*/
function disableNetwork(firestore) {
    const db = cast(firestore, Firestore);
    const client = ensureFirestoreConfigured(db);
    return firestoreClientDisableNetwork(client);
}
/**
* Terminates the provided {@link Firestore} instance.
*
* After calling `terminate()` only the `clearIndexedDbPersistence()` function
* may be used. Any other function will throw a `FirestoreError`.
*
* To restart after termination, create a new instance of FirebaseFirestore with
* {@link (getFirestore:1)}.
*
* Termination does not cancel any pending writes, and any promises that are
* awaiting a response from the server will not be resolved. If you have
* persistence enabled, the next time you start this instance, it will resume
* sending these writes to the server.
*
* Note: Under normal circumstances, calling `terminate()` is not required. This
* function is useful only when you want to force this instance to release all
* of its resources or in combination with `clearIndexedDbPersistence()` to
* ensure that all local state is destroyed between test runs.
*
* @returns A `Promise` that is resolved when the instance has been successfully
* terminated.
*/
function terminate(firestore) {
    // Unregister the service instance first, then release client resources.
    const databaseName = firestore._databaseId.database;
    _removeServiceInstance(firestore.app, 'firestore', databaseName);
    return firestore._delete();
}
/**
* Loads a Firestore bundle into the local cache.
*
* @param firestore - The {@link Firestore} instance to load bundles for.
* @param bundleData - An object representing the bundle to be loaded. Valid
* objects are `ArrayBuffer`, `ReadableStream<Uint8Array>` or `string`.
*
* @returns A `LoadBundleTask` object, which notifies callers with progress
* updates, and completion or error events. It can be used as a
* `Promise<LoadBundleTaskProgress>`.
*/
function loadBundle(firestore, bundleData) {
    const db = cast(firestore, Firestore);
    const client = ensureFirestoreConfigured(db);
    // Kick off the load and return immediately; progress is reported via the task.
    const task = new LoadBundleTask();
    firestoreClientLoadBundle(client, db._databaseId, bundleData, task);
    return task;
}
/**
* Reads a Firestore {@link Query} from local cache, identified by the given
* name.
*
* The named queries are packaged into bundles on the server side (along
* with resulting documents), and loaded to local cache using `loadBundle`. Once
* in local cache, use this method to extract a {@link Query} by name.
*
* @param firestore - The {@link Firestore} instance to read the query from.
* @param name - The name of the query.
* @returns A `Promise` that is resolved with the Query or `null`.
*/
async function namedQuery(firestore, name) {
    const db = cast(firestore, Firestore);
    const client = ensureFirestoreConfigured(db);
    const found = await firestoreClientGetNamedQuery(client, name);
    // Resolve with null when no query of that name exists in the local cache.
    return found ? new Query(db, null, found.query) : null;
}
/** Throws if the instance has started or been terminated (persistence can no longer change). */
function verifyNotInitialized(firestore) {
    const started = firestore._initialized || firestore._terminated;
    if (started) {
        throw new FirestoreError(Code.FAILED_PRECONDITION, 'Firestore has already been started and persistence can no longer be ' +
            'enabled. You can only enable persistence before calling any other ' +
            'methods on a Firestore object.');
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Registers the 'firestore' component and SDK version with the Firebase app container. */
function registerFirestore(variant, useFetchStreams = true) {
    setSDKVersion(SDK_VERSION$1);
    // Factory invoked by the component framework per (app, databaseId) pair.
    const factory = (container, { instanceIdentifier: databaseId, options: settings }) => {
        const app = container.getProvider('app').getImmediate();
        const firestoreInstance = new Firestore(new FirebaseAuthCredentialsProvider(container.getProvider('auth-internal')), new FirebaseAppCheckTokenProvider(container.getProvider('app-check-internal')), databaseIdFromApp(app, databaseId), app);
        firestoreInstance._setSettings(Object.assign({ useFetchStreams }, settings));
        return firestoreInstance;
    };
    _registerComponent(new Component('firestore', factory, 'PUBLIC').setMultipleInstances(true));
    registerVersion(name, version$1, variant);
    // BUILD_TARGET will be replaced by values like esm5, esm2017, cjs5, etc during the compilation
    registerVersion(name, version$1, 'esm2017');
}
/**
* @license
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Concrete implementation of the Aggregate type: stores the aggregation's
 * output alias, its type, and the (optional) field it aggregates over.
 */
class AggregateImpl {
    constructor(alias, aggregateType, fieldPath) {
        Object.assign(this, { alias, aggregateType, fieldPath });
    }
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Represents an aggregation that can be performed by Firestore.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
class AggregateField {
    /**
     * Create a new AggregateField<T>
     * @param aggregateType Specifies the type of aggregation operation to perform.
     * @param _internalFieldPath Optionally specifies the field that is aggregated.
     * @internal
     */
    constructor(aggregateType = 'count', _internalFieldPath) {
        Object.assign(this, {
            _internalFieldPath,
            // A type string to uniquely identify instances of this class.
            type: 'AggregateField',
            aggregateType
        });
    }
}
/**
 * The results of executing an aggregation query.
 */
class AggregateQuerySnapshot {
    /** @hideconstructor */
    constructor(query, _userDataWriter, _data) {
        Object.assign(this, {
            _userDataWriter,
            _data,
            // A type string to uniquely identify instances of this class.
            type: 'AggregateQuerySnapshot',
            query
        });
    }
    /**
     * Returns the results of the aggregations performed over the underlying
     * query.
     *
     * The keys of the returned object will be the same as those of the
     * `AggregateSpec` object specified to the aggregation method, and the values
     * will be the corresponding aggregation result.
     *
     * @returns The results of the aggregations performed over the underlying
     * query.
     */
    data() {
        return this._userDataWriter.convertObjectMap(this._data);
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * An immutable object representing an array of bytes. Thin wrapper that
 * delegates to an internal ByteString.
 */
class Bytes {
    /** @hideconstructor */
    constructor(byteString) {
        this._byteString = byteString;
    }
    /**
     * Creates a new `Bytes` object from the given Base64 string, converting it to
     * bytes.
     *
     * @param base64 - The Base64 string used to create the `Bytes` object.
     */
    static fromBase64String(base64) {
        try {
            return new Bytes(ByteString.fromBase64String(base64));
        }
        catch (e) {
            // Surface decode failures as invalid-argument FirestoreErrors.
            throw new FirestoreError(Code.INVALID_ARGUMENT, 'Failed to construct data from Base64 string: ' + e);
        }
    }
    /**
     * Creates a new `Bytes` object from the given Uint8Array.
     *
     * @param array - The Uint8Array used to create the `Bytes` object.
     */
    static fromUint8Array(array) {
        return new Bytes(ByteString.fromUint8Array(array));
    }
    /**
     * Returns the underlying bytes as a Base64-encoded string.
     *
     * @returns The Base64-encoded string created from the `Bytes` object.
     */
    toBase64() {
        return this._byteString.toBase64();
    }
    /**
     * Returns the underlying bytes in a new `Uint8Array`.
     *
     * @returns The Uint8Array created from the `Bytes` object.
     */
    toUint8Array() {
        return this._byteString.toUint8Array();
    }
    /**
     * Returns a string representation of the `Bytes` object.
     *
     * @returns A string representation of the `Bytes` object.
     */
    toString() {
        return `Bytes(base64: ${this.toBase64()})`;
    }
    /**
     * Returns true if this `Bytes` object is equal to the provided one.
     *
     * @param other - The `Bytes` object to compare against.
     * @returns true if this `Bytes` object is equal to the provided one.
     */
    isEqual(other) {
        return this._byteString.isEqual(other._byteString);
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * A `FieldPath` refers to a field in a document. The path may consist of a
 * single field name (referring to a top-level field in the document), or a
 * list of field names (referring to a nested field in the document).
 *
 * Create a `FieldPath` by providing field names. If more than one field
 * name is provided, the path will point to a nested field in a document.
 */
class FieldPath {
    /**
     * Creates a `FieldPath` from the provided field names. If more than one field
     * name is provided, the path will point to a nested field in a document.
     *
     * @param fieldNames - A list of field names.
     * @throws FirestoreError if any provided field name is empty.
     */
    constructor(...fieldNames) {
        for (let i = 0; i < fieldNames.length; ++i) {
            if (fieldNames[i].length === 0) {
                // Bug fix: the original message was a plain string containing
                // the literal text "$(i + 1)" — `$()` is not interpolation
                // syntax, so the argument position was never rendered. Use a
                // template literal with `${i + 1}` so the 1-based position
                // appears in the error.
                throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid field name at argument ${i + 1}. ` +
                    'Field names must not be empty.');
            }
        }
        this._internalPath = new FieldPath$1(fieldNames);
    }
    /**
     * Returns true if this `FieldPath` is equal to the provided one.
     *
     * @param other - The `FieldPath` to compare against.
     * @returns true if this `FieldPath` is equal to the provided one.
     */
    isEqual(other) {
        return this._internalPath.isEqual(other._internalPath);
    }
}
/**
 * Returns a special sentinel `FieldPath` to refer to the ID of a document.
 * It can be used in queries to sort or filter by the document ID.
 */
function documentId() {
    // DOCUMENT_KEY_NAME is the reserved document-key sentinel defined elsewhere
    // in this file.
    return new FieldPath(DOCUMENT_KEY_NAME);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Sentinel values that can be used when writing document fields with `set()`
 * or `update()`.
 */
class FieldValue {
    /**
     * @param _methodName - The public API endpoint that returns this class.
     * @hideconstructor
     */
    constructor(_methodName) {
        // Name of the public factory method that produced this sentinel.
        this._methodName = _methodName;
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * An immutable object representing a geographic location in Firestore. The
 * location is represented as latitude/longitude pair.
 *
 * Latitude values are in the range of [-90, 90].
 * Longitude values are in the range of [-180, 180].
 */
class GeoPoint {
    /**
     * Creates a new immutable `GeoPoint` object with the provided latitude and
     * longitude values.
     * @param latitude - The latitude as number between -90 and 90.
     * @param longitude - The longitude as number between -180 and 180.
     */
    constructor(latitude, longitude) {
        // Validation rejects NaN/Infinity (isFinite) and out-of-range values.
        const latValid = isFinite(latitude) && latitude >= -90 && latitude <= 90;
        if (!latValid) {
            throw new FirestoreError(Code.INVALID_ARGUMENT, 'Latitude must be a number between -90 and 90, but was: ' + latitude);
        }
        const longValid = isFinite(longitude) && longitude >= -180 && longitude <= 180;
        if (!longValid) {
            throw new FirestoreError(Code.INVALID_ARGUMENT, 'Longitude must be a number between -180 and 180, but was: ' + longitude);
        }
        this._lat = latitude;
        this._long = longitude;
    }
    /**
     * The latitude of this `GeoPoint` instance.
     */
    get latitude() {
        return this._lat;
    }
    /**
     * The longitude of this `GeoPoint` instance.
     */
    get longitude() {
        return this._long;
    }
    /**
     * Returns true if this `GeoPoint` is equal to the provided one.
     *
     * @param other - The `GeoPoint` to compare against.
     * @returns true if this `GeoPoint` is equal to the provided one.
     */
    isEqual(other) {
        return this._lat === other._lat && this._long === other._long;
    }
    /** Returns a JSON-serializable representation of this GeoPoint. */
    toJSON() {
        return { latitude: this._lat, longitude: this._long };
    }
    /**
     * Actually private to JS consumers of our API, so this function is prefixed
     * with an underscore.
     */
    _compareTo(other) {
        return (primitiveComparator(this._lat, other._lat) ||
            primitiveComparator(this._long, other._long));
    }
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Matches reserved field names: two leading and two trailing underscores.
const RESERVED_FIELD_REGEX = /^__.*__$/;
/** The result of parsing document data (e.g. for a setData call). */
class ParsedSetData {
    constructor(data, fieldMask, fieldTransforms) {
        Object.assign(this, { data, fieldMask, fieldTransforms });
    }
    /**
     * Converts this parsed data into a mutation for the given document key:
     * a PatchMutation when a field mask is present (merge semantics),
     * otherwise a full SetMutation.
     */
    toMutation(key, precondition) {
        return this.fieldMask !== null
            ? new PatchMutation(key, this.data, this.fieldMask, precondition, this.fieldTransforms)
            : new SetMutation(key, this.data, precondition, this.fieldTransforms);
    }
}
/** The result of parsing "update" data (i.e. for an updateData call). */
class ParsedUpdateData {
    // Note: the fieldMask does not include document transforms.
    constructor(data, fieldMask, fieldTransforms) {
        Object.assign(this, { data, fieldMask, fieldTransforms });
    }
    /** Converts this parsed update into a PatchMutation for the given key. */
    toMutation(key, precondition) {
        return new PatchMutation(key, this.data, this.fieldMask, precondition, this.fieldTransforms);
    }
}
/**
 * Returns whether `dataSource` represents write data (set/merge-set/update)
 * as opposed to argument data (query values / array elements).
 * Throws via fail() on an unrecognized enum value.
 */
function isWrite(dataSource) {
    if (dataSource === 0 /* UserDataSource.Set */ ||
        dataSource === 2 /* UserDataSource.MergeSet */ ||
        dataSource === 1 /* UserDataSource.Update */) {
        return true;
    }
    if (dataSource === 3 /* UserDataSource.Argument */ ||
        dataSource === 4 /* UserDataSource.ArrayArgument */) {
        return false;
    }
    throw fail();
}
/** A "context" object passed around while parsing user data. */
class ParseContextImpl {
/**
* Initializes a ParseContext with the given source and path.
*
* @param settings - The settings for the parser.
* @param databaseId - The database ID of the Firestore instance.
* @param serializer - The serializer to use to generate the Value proto.
* @param ignoreUndefinedProperties - Whether to ignore undefined properties
* rather than throw.
* @param fieldTransforms - A mutable list of field transforms encountered
* while parsing the data.
* @param fieldMask - A mutable list of field paths encountered while parsing
* the data.
*
* TODO(b/34871131): We don't support array paths right now, so path can be
* null to indicate the context represents any location within an array (in
* which case certain features will not work and errors will be somewhat
* compromised).
*/
constructor(settings, databaseId, serializer, ignoreUndefinedProperties, fieldTransforms, fieldMask) {
this.settings = settings;
this.databaseId = databaseId;
this.serializer = serializer;
this.ignoreUndefinedProperties = ignoreUndefinedProperties;
// Minor hack: If fieldTransforms is undefined, we assume this is an
// external call and we need to validate the entire path.
if (fieldTransforms === undefined) {
this.validatePath();
}
this.fieldTransforms = fieldTransforms || [];
this.fieldMask = fieldMask || [];
}
get path() {
return this.settings.path;
}
get dataSource() {
return this.settings.dataSource;
}
/** Returns a new context with the specified settings overwritten. */
contextWith(configuration) {
return new ParseContextImpl(Object.assign(Object.assign({}, this.settings), configuration), this.databaseId, this.serializer, this.ignoreUndefinedProperties, this.fieldTransforms, this.fieldMask);
}
childContextForField(field) {
var _a;
const childPath = (_a = this.path) === null || _a === void 0 ? void 0 : _a.child(field);
const context = this.contextWith({ path: childPath, arrayElement: false });
context.validatePathSegment(field);
return context;
}
childContextForFieldPath(field) {
var _a;
const childPath = (_a = this.path) === null || _a === void 0 ? void 0 : _a.child(field);
const context = this.contextWith({ path: childPath, arrayElement: false });
context.validatePath();
return context;
}
childContextForArray(index) {
// TODO(b/34871131): We don't support array paths right now; so make path
// undefined.
return this.contextWith({ path: undefined, arrayElement: true });
}
createError(reason) {
return createError(reason, this.settings.methodName, this.settings.hasConverter || false, this.path, this.settings.targetDoc);
}
/** Returns 'true' if 'fieldPath' was traversed when creating this context. */
contains(fieldPath) {
return (this.fieldMask.find(field => fieldPath.isPrefixOf(field)) !== undefined ||
this.fieldTransforms.find(transform => fieldPath.isPrefixOf(transform.field)) !== undefined);
}
validatePath() {
// TODO(b/34871131): Remove null check once we have proper paths for fields
// within arrays.
if (!this.path) {
return;
}
for (let i = 0; i < this.path.length; i++) {
this.validatePathSegment(this.path.get(i));
}
}
validatePathSegment(segment) {
if (segment.length === 0) {
throw this.createError('Document fields must not be empty');
}
if (isWrite(this.dataSource) && RESERVED_FIELD_REGEX.test(segment)) {
throw this.createError('Document fields cannot begin and end with "__"');
}
}
}
/**
* Helper for parsing raw user input (provided via the API) into internal model
* classes.
*/
class UserDataReader {
    /**
     * @param databaseId - Database against which references are validated.
     * @param ignoreUndefinedProperties - Whether `undefined` values are
     * silently skipped rather than rejected.
     * @param serializer - Optional proto serializer; defaults to one built for
     * `databaseId`.
     */
    constructor(databaseId, ignoreUndefinedProperties, serializer) {
        this.databaseId = databaseId;
        this.ignoreUndefinedProperties = ignoreUndefinedProperties;
        this.serializer = serializer || newSerializer(databaseId);
    }
    /** Creates a new top-level parse context. */
    createContext(dataSource, methodName, targetDoc, hasConverter = false) {
        const settings = {
            dataSource,
            methodName,
            targetDoc,
            path: FieldPath$1.emptyPath(),
            arrayElement: false,
            hasConverter
        };
        return new ParseContextImpl(settings, this.databaseId, this.serializer, this.ignoreUndefinedProperties);
    }
}
/** Builds a UserDataReader for `firestore`, freezing its settings first. */
function newUserDataReader(firestore) {
    const { ignoreUndefinedProperties } = firestore._freezeSettings();
    const serializer = newSerializer(firestore._databaseId);
    return new UserDataReader(firestore._databaseId, !!ignoreUndefinedProperties, serializer);
}
/** Parse document data from a set() call. */
function parseSetData(userDataReader, methodName, targetDoc, input, hasConverter, options = {}) {
    // MergeSet parsing permits sentinels like deleteField(); plain Set does not.
    const context = userDataReader.createContext(options.merge || options.mergeFields
        ? 2 /* UserDataSource.MergeSet */
        : 0 /* UserDataSource.Set */, methodName, targetDoc, hasConverter);
    validatePlainObject('Data must be an object, but it was:', context, input);
    const updateData = parseObject(input, context);
    let fieldMask;
    let fieldTransforms;
    if (options.merge) {
        // merge:true — the mask is every field path present in `input`.
        fieldMask = new FieldMask(context.fieldMask);
        fieldTransforms = context.fieldTransforms;
    }
    else if (options.mergeFields) {
        // mergeFields — the caller-supplied mask must be a subset of `input`.
        const validatedFieldPaths = [];
        for (const stringOrFieldPath of options.mergeFields) {
            const fieldPath = fieldPathFromArgument$1(methodName, stringOrFieldPath, targetDoc);
            if (!context.contains(fieldPath)) {
                throw new FirestoreError(Code.INVALID_ARGUMENT, `Field '${fieldPath}' is specified in your field mask but missing from your input data.`);
            }
            // De-duplicate repeated mask entries.
            if (!fieldMaskContains(validatedFieldPaths, fieldPath)) {
                validatedFieldPaths.push(fieldPath);
            }
        }
        fieldMask = new FieldMask(validatedFieldPaths);
        // Transforms on fields outside the mask would never apply; drop them.
        fieldTransforms = context.fieldTransforms.filter(transform => fieldMask.covers(transform.field));
    }
    else {
        // Plain set(): a null mask means "replace the whole document".
        fieldMask = null;
        fieldTransforms = context.fieldTransforms;
    }
    return new ParsedSetData(new ObjectValue(updateData), fieldMask, fieldTransforms);
}
class DeleteFieldValueImpl extends FieldValue {
    /** Handles the deleteField() sentinel; always returns null (no transform). */
    _toFieldTransform(context) {
        switch (context.dataSource) {
            case 2 /* UserDataSource.MergeSet */:
                // No transform to add for a delete, but we need to add it to our
                // fieldMask so it gets deleted.
                context.fieldMask.push(context.path);
                break;
            case 1 /* UserDataSource.Update */:
                throw context.createError(`${this._methodName}() can only appear at the top level ` +
                    'of your update data');
            default:
                // We shouldn't encounter delete sentinels for queries or non-merge set() calls.
                throw context.createError(`${this._methodName}() cannot be used with set() unless you pass ` +
                    '{merge:true}');
        }
        return null;
    }
    isEqual(other) {
        return other instanceof DeleteFieldValueImpl;
    }
}
/**
* Creates a child context for parsing SerializableFieldValues.
*
* This is different than calling `ParseContext.contextWith` because it keeps
* the fieldTransforms and fieldMask separate.
*
* The created context has its `dataSource` set to `UserDataSource.Argument`.
* Although these values are used with writes, any elements in these FieldValues
* are not considered writes since they cannot contain any FieldValue sentinels,
* etc.
*
* @param fieldValue - The sentinel FieldValue for which to create a child
* context.
* @param context - The parent context.
* @param arrayElement - Whether or not the FieldValue has an array.
*/
function createSentinelChildContext(fieldValue, context, arrayElement) {
    // Fresh settings (and fresh transform/mask lists, since none are passed):
    // sentinel arguments parse independently of the parent write.
    const settings = {
        dataSource: 3 /* UserDataSource.Argument */,
        targetDoc: context.settings.targetDoc,
        methodName: fieldValue._methodName,
        arrayElement
    };
    return new ParseContextImpl(settings, context.databaseId, context.serializer, context.ignoreUndefinedProperties);
}
class ServerTimestampFieldValueImpl extends FieldValue {
    /** Emits a server-timestamp transform at the current parse path. */
    _toFieldTransform(context) {
        return new FieldTransform(context.path, new ServerTimestampTransform());
    }
    /** All serverTimestamp() sentinels are interchangeable. */
    isEqual(other) {
        return other instanceof ServerTimestampFieldValueImpl;
    }
}
class ArrayUnionFieldValueImpl extends FieldValue {
    constructor(methodName, _elements) {
        super(methodName);
        this._elements = _elements;
    }
    /** Parses the union elements and wraps them in an arrayUnion transform. */
    _toFieldTransform(context) {
        // Elements are parsed in an "argument" context: they may not themselves
        // contain FieldValue sentinels.
        const elementContext = createSentinelChildContext(this, context,
        /*array=*/ true);
        const parsedElements = this._elements.map(element => parseData(element, elementContext));
        return new FieldTransform(context.path, new ArrayUnionTransformOperation(parsedElements));
    }
    isEqual(other) {
        return (other instanceof ArrayUnionFieldValueImpl &&
            deepEqual(this._elements, other._elements));
    }
}
class ArrayRemoveFieldValueImpl extends FieldValue {
    constructor(methodName, _elements) {
        super(methodName);
        this._elements = _elements;
    }
    /** Parses the removal elements and wraps them in an arrayRemove transform. */
    _toFieldTransform(context) {
        const elementContext = createSentinelChildContext(this, context,
        /*array=*/ true);
        const parsedElements = this._elements.map(element => parseData(element, elementContext));
        // (Local renamed from the original's misleading `arrayUnion`.)
        const arrayRemove = new ArrayRemoveTransformOperation(parsedElements);
        return new FieldTransform(context.path, arrayRemove);
    }
    isEqual(other) {
        return (other instanceof ArrayRemoveFieldValueImpl &&
            deepEqual(this._elements, other._elements));
    }
}
class NumericIncrementFieldValueImpl extends FieldValue {
    constructor(methodName, _operand) {
        super(methodName);
        this._operand = _operand;
    }
    /** Serializes the operand and emits a numeric-increment transform. */
    _toFieldTransform(context) {
        const operandProto = toNumber(context.serializer, this._operand);
        const increment = new NumericIncrementTransformOperation(context.serializer, operandProto);
        return new FieldTransform(context.path, increment);
    }
    isEqual(other) {
        // Operands are compared with strict equality on the raw number.
        return (other instanceof NumericIncrementFieldValueImpl &&
            this._operand === other._operand);
    }
}
/** Parse update data from an update() call. */
function parseUpdateData(userDataReader, methodName, targetDoc, input) {
    const context = userDataReader.createContext(1 /* UserDataSource.Update */, methodName, targetDoc);
    validatePlainObject('Data must be an object, but it was:', context, input);
    const fieldMaskPaths = [];
    const updateData = ObjectValue.empty();
    // Each top-level key is interpreted as a dot-separated field path; only the
    // top-level path is added to the mask.
    forEach(input, (key, value) => {
        const path = fieldPathFromDotSeparatedString(methodName, key, targetDoc);
        // For Compat types, we have to "extract" the underlying types before
        // performing validation.
        value = getModularInstance(value);
        const childContext = context.childContextForFieldPath(path);
        if (value instanceof DeleteFieldValueImpl) {
            // Add it to the field mask, but don't add anything to updateData.
            fieldMaskPaths.push(path);
        }
        else {
            // parseData returns null for sentinel values (e.g. serverTimestamp),
            // which become transforms on the context instead of masked data.
            const parsedValue = parseData(value, childContext);
            if (parsedValue != null) {
                fieldMaskPaths.push(path);
                updateData.set(path, parsedValue);
            }
        }
    });
    const mask = new FieldMask(fieldMaskPaths);
    return new ParsedUpdateData(updateData, mask, context.fieldTransforms);
}
/** Parse update data from a list of field/value arguments. */
function parseUpdateVarargs(userDataReader, methodName, targetDoc, field, value, moreFieldsAndValues) {
    const context = userDataReader.createContext(1 /* UserDataSource.Update */, methodName, targetDoc);
    const keys = [fieldPathFromArgument$1(methodName, field, targetDoc)];
    const values = [value];
    if (moreFieldsAndValues.length % 2 !== 0) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, `Function ${methodName}() needs to be called with an even number ` +
            'of arguments that alternate between field names and values.');
    }
    for (let i = 0; i < moreFieldsAndValues.length; i += 2) {
        // Pass `targetDoc` (as done for the first field above) so path
        // validation errors for later fields also name the target document.
        keys.push(fieldPathFromArgument$1(methodName, moreFieldsAndValues[i], targetDoc));
        values.push(moreFieldsAndValues[i + 1]);
    }
    const fieldMaskPaths = [];
    const updateData = ObjectValue.empty();
    // We iterate in reverse order to pick the last value for a field if the
    // user specified the field multiple times.
    for (let i = keys.length - 1; i >= 0; --i) {
        if (!fieldMaskContains(fieldMaskPaths, keys[i])) {
            const path = keys[i];
            let value = values[i];
            // For Compat types, we have to "extract" the underlying types before
            // performing validation.
            value = getModularInstance(value);
            const childContext = context.childContextForFieldPath(path);
            if (value instanceof DeleteFieldValueImpl) {
                // Add it to the field mask, but don't add anything to updateData.
                fieldMaskPaths.push(path);
            }
            else {
                // null result means a sentinel (e.g. serverTimestamp) that was
                // recorded as a transform on the context.
                const parsedValue = parseData(value, childContext);
                if (parsedValue != null) {
                    fieldMaskPaths.push(path);
                    updateData.set(path, parsedValue);
                }
            }
        }
    }
    const mask = new FieldMask(fieldMaskPaths);
    return new ParsedUpdateData(updateData, mask, context.fieldTransforms);
}
/**
* Parse a "query value" (e.g. value in a where filter or a value in a cursor
* bound).
*
* @param allowArrays - Whether the query value is an array that may directly
* contain additional arrays (e.g. the operand of an `in` query).
*/
function parseQueryValue(userDataReader, methodName, input, allowArrays = false) {
    // ArrayArgument relaxes the nested-array check for `in`-style operands.
    const dataSource = allowArrays
        ? 4 /* UserDataSource.ArrayArgument */
        : 3 /* UserDataSource.Argument */;
    const context = userDataReader.createContext(dataSource, methodName);
    return parseData(input, context);
}
/**
* Parses user data to Protobuf Values.
*
* @param input - Data to be parsed.
* @param context - A context object representing the current path being parsed,
* the source of the data being parsed, etc.
* @returns The parsed value, or null if the value was a FieldValue sentinel
* that should not be included in the resulting parsed data.
*/
function parseData(input, context) {
    // Unwrap the API type from the Compat SDK. This will return the API type
    // from firestore-exp.
    input = getModularInstance(input);
    if (looksLikeJsonObject(input)) {
        // Plain objects become mapValue structs.
        validatePlainObject('Unsupported field value:', context, input);
        return parseObject(input, context);
    }
    else if (input instanceof FieldValue) {
        // FieldValues usually parse into transforms (except deleteField())
        // in which case we do not want to include this field in our parsed data
        // (as doing so will overwrite the field directly prior to the transform
        // trying to transform it). So we don't add this location to
        // context.fieldMask and we return null as our parsing result.
        parseSentinelFieldValue(input, context);
        return null;
    }
    else if (input === undefined && context.ignoreUndefinedProperties) {
        // If the input is undefined it can never participate in the fieldMask, so
        // don't handle this below. If `ignoreUndefinedProperties` is false,
        // `parseScalarValue` will reject an undefined value.
        return null;
    }
    else {
        // If context.path is null we are inside an array and we don't support
        // field mask paths more granular than the top-level array.
        if (context.path) {
            context.fieldMask.push(context.path);
        }
        if (input instanceof Array) {
            // TODO(b/34871131): Include the path containing the array in the error
            // message.
            // In the case of IN queries, the parsed data is an array (representing
            // the set of values to be included for the IN query) that may directly
            // contain additional arrays (each representing an individual field
            // value), so we disable this validation.
            if (context.settings.arrayElement &&
                context.dataSource !== 4 /* UserDataSource.ArrayArgument */) {
                throw context.createError('Nested arrays are not supported');
            }
            return parseArray(input, context);
        }
        else {
            // Everything else (null, numbers, strings, Dates, Timestamps,
            // GeoPoints, Bytes, DocumentReferences) is a scalar leaf value.
            return parseScalarValue(input, context);
        }
    }
}
function parseObject(obj, context) {
    const fields = {};
    if (isEmpty(obj)) {
        // An empty map still needs an entry in the update mask so the server
        // creates the map entry.
        if (context.path && context.path.length > 0) {
            context.fieldMask.push(context.path);
        }
        return { mapValue: { fields } };
    }
    forEach(obj, (key, val) => {
        // Sentinel values parse to null and are omitted from the struct.
        const parsedValue = parseData(val, context.childContextForField(key));
        if (parsedValue != null) {
            fields[key] = parsedValue;
        }
    });
    return { mapValue: { fields } };
}
function parseArray(array, context) {
    const values = [];
    let index = 0;
    for (const entry of array) {
        const parsedEntry = parseData(entry, context.childContextForArray(index));
        // Sentinel values parse to null; store an explicit null so that array
        // positions are preserved.
        values.push(parsedEntry == null ? { nullValue: 'NULL_VALUE' } : parsedEntry);
        index += 1;
    }
    return { arrayValue: { values } };
}
/**
* "Parses" the provided FieldValueImpl, adding any necessary transforms to
* context.fieldTransforms.
*/
function parseSentinelFieldValue(value, context) {
    if (!isWrite(context.dataSource)) {
        // Sentinels are only supported in update()/set() data, not in queries.
        throw context.createError(`${value._methodName}() can only be used with update() and set()`);
    }
    if (!context.path) {
        // An undefined path means we are inside an array, where sentinels are
        // disallowed.
        throw context.createError(`${value._methodName}() is not currently supported inside arrays`);
    }
    // deleteField() yields no transform (returns null); everything else is
    // queued on the context.
    const transform = value._toFieldTransform(context);
    if (transform) {
        context.fieldTransforms.push(transform);
    }
}
/**
* Helper to parse a scalar value (i.e. not an Object, Array, or FieldValue)
*
* @returns The parsed value
*/
function parseScalarValue(value, context) {
    // Unwrap Compat wrappers before type-dispatching.
    value = getModularInstance(value);
    if (value === null) {
        return { nullValue: 'NULL_VALUE' };
    }
    else if (typeof value === 'number') {
        return toNumber(context.serializer, value);
    }
    else if (typeof value === 'boolean') {
        return { booleanValue: value };
    }
    else if (typeof value === 'string') {
        return { stringValue: value };
    }
    else if (value instanceof Date) {
        // JS Dates are stored as Firestore Timestamps.
        const timestamp = Timestamp.fromDate(value);
        return {
            timestampValue: toTimestamp(context.serializer, timestamp)
        };
    }
    else if (value instanceof Timestamp) {
        // Firestore backend truncates precision down to microseconds. To ensure
        // offline mode works the same with regards to truncation, perform the
        // truncation immediately without waiting for the backend to do that.
        const timestamp = new Timestamp(value.seconds, Math.floor(value.nanoseconds / 1000) * 1000);
        return {
            timestampValue: toTimestamp(context.serializer, timestamp)
        };
    }
    else if (value instanceof GeoPoint) {
        return {
            geoPointValue: {
                latitude: value.latitude,
                longitude: value.longitude
            }
        };
    }
    else if (value instanceof Bytes) {
        return { bytesValue: toBytes(context.serializer, value._byteString) };
    }
    else if (value instanceof DocumentReference) {
        // References must point into the same database as this write/query.
        const thisDb = context.databaseId;
        const otherDb = value.firestore._databaseId;
        if (!otherDb.isEqual(thisDb)) {
            throw context.createError('Document reference is for database ' +
                `${otherDb.projectId}/${otherDb.database} but should be ` +
                `for database ${thisDb.projectId}/${thisDb.database}`);
        }
        return {
            referenceValue: toResourceName(value.firestore._databaseId || context.databaseId, value._key.path)
        };
    }
    else {
        // Anything else (including `undefined` when ignoreUndefinedProperties
        // is off — see parseData) is rejected with a descriptive error.
        throw context.createError(`Unsupported field value: ${valueDescription(value)}`);
    }
}
/**
* Checks whether an object looks like a JSON object that should be converted
* into a struct. Normal class/prototype instances are considered to look like
* JSON objects since they should be converted to a struct value. Arrays, Dates,
* GeoPoints, etc. are not considered to look like JSON objects since they map
* to specific FieldValue types other than ObjectValue.
*/
function looksLikeJsonObject(input) {
    if (typeof input !== 'object' || input === null) {
        return false;
    }
    // These types map to dedicated Value encodings rather than structs.
    const leafTypes = [
        Array,
        Date,
        Timestamp,
        GeoPoint,
        Bytes,
        DocumentReference,
        FieldValue
    ];
    return !leafTypes.some(ctor => input instanceof ctor);
}
function validatePlainObject(message, context, input) {
    if (looksLikeJsonObject(input) && isPlainObject(input)) {
        return;
    }
    const description = valueDescription(input);
    // Massage the generic "an object" description into "a custom object".
    const detail = description === 'an object' ? ' a custom object' : ' ' + description;
    throw context.createError(message + detail);
}
/**
* Helper that calls fromDotSeparatedString() but wraps any error thrown.
*/
function fieldPathFromArgument$1(methodName, path, targetDoc) {
    // If required, replace the FieldPath Compat class with the firestore-exp
    // FieldPath.
    path = getModularInstance(path);
    if (path instanceof FieldPath) {
        return path._internalPath;
    }
    else if (typeof path === 'string') {
        return fieldPathFromDotSeparatedString(methodName, path);
    }
    else {
        // Fix: the original message was truncated ("...of type string or "),
        // leaving a dangling "or" in the user-facing error.
        const message = 'Field path arguments must be of type string or FieldPath';
        throw createError(message, methodName,
        /* hasConverter= */ false,
        /* path= */ undefined, targetDoc);
    }
}
/**
* Matches any characters in a field path string that are reserved.
*/
// Literal form of new RegExp('[~\\*/\\[\\]]'): matches ~, *, /, [ or ].
const FIELD_PATH_RESERVED = /[~*/[\]]/;
/**
* Wraps fromDotSeparatedString with an error message about the method that
* was thrown.
* @param methodName - The publicly visible method name
* @param path - The dot-separated string form of a field path which will be
* split on dots.
* @param targetDoc - The document against which the field path will be
* evaluated.
*/
function fieldPathFromDotSeparatedString(methodName, path, targetDoc) {
    // Reject reserved characters up front for a more specific error message.
    if (path.search(FIELD_PATH_RESERVED) >= 0) {
        throw createError(`Invalid field path (${path}). Paths must not contain ` +
            `'~', '*', '/', '[', or ']'`, methodName,
        /* hasConverter= */ false,
        /* path= */ undefined, targetDoc);
    }
    try {
        // FieldPath itself rejects empty segments produced by leading/trailing
        // or doubled dots.
        const segments = path.split('.');
        return new FieldPath(...segments)._internalPath;
    }
    catch (e) {
        throw createError(`Invalid field path (${path}). Paths must not be empty, ` +
            `begin with '.', end with '.', or contain '..'`, methodName,
        /* hasConverter= */ false,
        /* path= */ undefined, targetDoc);
    }
}
/**
 * Builds the INVALID_ARGUMENT error used by the user-data parsers:
 * "Function X() called with invalid data[ (via `toFirestore()`)]. <reason>
 *  [(found[ in field F][ in document D])]".
 */
function createError(reason, methodName, hasConverter, path, targetDoc) {
    const hasPath = path && !path.isEmpty();
    const hasDocument = targetDoc !== undefined;
    const converterNote = hasConverter ? ' (via `toFirestore()`)' : '';
    const preamble = `Function ${methodName}() called with invalid data${converterNote}. `;
    let locator = '';
    if (hasPath || hasDocument) {
        const fieldPart = hasPath ? ` in field ${path}` : '';
        const docPart = hasDocument ? ` in document ${targetDoc}` : '';
        locator = ` (found${fieldPart}${docPart})`;
    }
    return new FirestoreError(Code.INVALID_ARGUMENT, preamble + reason + locator);
}
/** Checks `haystack` if FieldPath `needle` is present. Runs in O(n). */
/** Checks `haystack` if FieldPath `needle` is present. Runs in O(n). */
function fieldMaskContains(haystack, needle) {
    for (const candidate of haystack) {
        if (candidate.isEqual(needle)) {
            return true;
        }
    }
    return false;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A `DocumentSnapshot` contains data read from a document in your Firestore
* database. The data can be extracted with `.data()` or `.get(<field>)` to
* get a specific field.
*
* For a `DocumentSnapshot` that points to a non-existing document, any data
* access will return 'undefined'. You can use the `exists()` method to
* explicitly verify a document's existence.
*/
class DocumentSnapshot$1 {
    // Note: This class is stripped down version of the DocumentSnapshot in
    // the legacy SDK. The changes are:
    // - No support for SnapshotMetadata.
    // - No support for SnapshotOptions.
    /** @hideconstructor protected */
    constructor(_firestore, _userDataWriter, _key, _document, _converter) {
        this._firestore = _firestore;
        this._userDataWriter = _userDataWriter;
        this._key = _key;
        // `_document` is null when the snapshot points at a missing document.
        this._document = _document;
        this._converter = _converter;
    }
    /** Property of the `DocumentSnapshot` that provides the document's ID. */
    get id() {
        return this._key.path.lastSegment();
    }
    /**
     * The `DocumentReference` for the document included in the `DocumentSnapshot`.
     */
    get ref() {
        return new DocumentReference(this._firestore, this._converter, this._key);
    }
    /**
     * Signals whether or not the document at the snapshot's location exists.
     *
     * @returns true if the document exists.
     */
    exists() {
        return this._document !== null;
    }
    /**
     * Retrieves all fields in the document as an `Object`. Returns `undefined` if
     * the document doesn't exist.
     *
     * @returns An `Object` containing all fields in the document or `undefined`
     * if the document doesn't exist.
     */
    data() {
        if (!this._document) {
            return undefined;
        }
        else if (this._converter) {
            // We only want to use the converter and create a new DocumentSnapshot
            // if a converter has been provided.
            const snapshot = new QueryDocumentSnapshot$1(this._firestore, this._userDataWriter, this._key, this._document,
            /* converter= */ null);
            return this._converter.fromFirestore(snapshot);
        }
        else {
            return this._userDataWriter.convertValue(this._document.data.value);
        }
    }
    /**
     * Retrieves the field specified by `fieldPath`. Returns `undefined` if the
     * document or field doesn't exist.
     *
     * @param fieldPath - The path (for example 'foo' or 'foo.bar') to a specific
     * field.
     * @returns The data at the specified field location or undefined if no such
     * field exists in the document.
     */
    // We are using `any` here to avoid an explicit cast by our users.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    get(fieldPath) {
        if (this._document) {
            const value = this._document.data.field(fieldPathFromArgument('DocumentSnapshot.get', fieldPath));
            if (value !== null) {
                return this._userDataWriter.convertValue(value);
            }
        }
        // Both a missing document and a missing field yield `undefined`.
        return undefined;
    }
}
/**
* A `QueryDocumentSnapshot` contains data read from a document in your
* Firestore database as part of a query. The document is guaranteed to exist
* and its data can be extracted with `.data()` or `.get(<field>)` to get a
* specific field.
*
* A `QueryDocumentSnapshot` offers the same API surface as a
* `DocumentSnapshot`. Since query results contain only existing documents, the
* `exists` property will always be true and `data()` will never return
* 'undefined'.
*/
class QueryDocumentSnapshot$1 extends DocumentSnapshot$1 {
    /**
     * Retrieves all fields in the document as an `Object`.
     *
     * @override
     * @returns An `Object` containing all fields in the document.
     */
    data() {
        // Same as the base class; the override exists so the documented return
        // type excludes `undefined` for query results.
        return super.data();
    }
}
/**
* Helper that calls `fromDotSeparatedString()` but wraps any error thrown.
*/
function fieldPathFromArgument(methodName, arg) {
    if (arg instanceof FieldPath) {
        return arg._internalPath;
    }
    if (typeof arg === 'string') {
        return fieldPathFromDotSeparatedString(methodName, arg);
    }
    // Otherwise a Compat FieldPath: unwrap its delegate.
    return arg._delegate._internalPath;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function validateHasExplicitOrderByForLimitToLast(query) {
    const isLimitToLast = query.limitType === "L" /* LimitType.Last */;
    // limitToLast() must be reversible, which requires an explicit ordering.
    if (isLimitToLast && query.explicitOrderBy.length === 0) {
        throw new FirestoreError(Code.UNIMPLEMENTED, 'limitToLast() queries require specifying at least one orderBy() clause');
    }
}
/**
* An `AppliableConstraint` is an abstraction of a constraint that can be applied
* to a Firestore query.
*/
class AppliableConstraint {
    // Marker base class: concrete subclasses implement `_apply(query)` (see
    // QueryFieldFilterConstraint / QueryCompositeFilterConstraint below).
}
/**
* A `QueryConstraint` is used to narrow the set of documents returned by a
* Firestore query. `QueryConstraint`s are created by invoking {@link where},
* {@link orderBy}, {@link (startAt:1)}, {@link (startAfter:1)}, {@link
* (endBefore:1)}, {@link (endAt:1)}, {@link limit}, {@link limitToLast} and
* can then be passed to {@link (query:1)} to create a new query instance that
* also contains this `QueryConstraint`.
*/
class QueryConstraint extends AppliableConstraint {
    // Marker base class for single constraints such as where() and orderBy().
}
function query(query, queryConstraint, ...additionalQueryConstraints) {
    // The second argument participates only when it is a real constraint.
    const constraints = queryConstraint instanceof AppliableConstraint
        ? [queryConstraint, ...additionalQueryConstraints]
        : [...additionalQueryConstraints];
    validateQueryConstraintArray(constraints);
    // Fold each constraint into the query, left to right.
    return constraints.reduce((result, constraint) => constraint._apply(result), query);
}
/**
* A `QueryFieldFilterConstraint` is used to narrow the set of documents returned by
* a Firestore query by filtering on one or more document fields.
* `QueryFieldFilterConstraint`s are created by invoking {@link where} and can then
* be passed to {@link (query:1)} to create a new query instance that also contains
* this `QueryFieldFilterConstraint`.
*/
class QueryFieldFilterConstraint extends QueryConstraint {
    /**
     * @internal
     */
    constructor(_field, _op, _value) {
        super();
        this._field = _field;
        this._op = _op;
        this._value = _value;
        /** The type of this query constraint */
        this.type = 'where';
    }
    static _create(_field, _op, _value) {
        return new QueryFieldFilterConstraint(_field, _op, _value);
    }
    /** Validates the filter against `query` and returns the extended query. */
    _apply(query) {
        const parsedFilter = this._parse(query);
        validateNewFieldFilter(query._query, parsedFilter);
        const extendedQuery = queryWithAddedFilter(query._query, parsedFilter);
        return new Query(query.firestore, query.converter, extendedQuery);
    }
    /** Converts the user-supplied field/op/value into an internal filter. */
    _parse(query) {
        const dataReader = newUserDataReader(query.firestore);
        return newQueryFilter(query._query, 'where', dataReader, query.firestore._databaseId, this._field, this._op, this._value);
    }
}
/**
* Creates a {@link QueryFieldFilterConstraint} that enforces that documents
* must contain the specified field and that the value should satisfy the
* relation constraint provided.
*
* @param fieldPath - The path to compare
* @param opStr - The operation string (e.g "&lt;", "&lt;=", "==", "&lt;",
* "&lt;=", "!=").
* @param value - The value for comparison
* @returns The created {@link QueryFieldFilterConstraint}.
*/
function where(fieldPath, opStr, value) {
    // Normalize string paths / compat FieldPaths into an internal FieldPath.
    const field = fieldPathFromArgument('where', fieldPath);
    return QueryFieldFilterConstraint._create(field, opStr, value);
}
/**
* A `QueryCompositeFilterConstraint` is used to narrow the set of documents
* returned by a Firestore query by performing the logical OR or AND of multiple
* {@link QueryFieldFilterConstraint}s or {@link QueryCompositeFilterConstraint}s.
* `QueryCompositeFilterConstraint`s are created by invoking {@link or} or
* {@link and} and can then be passed to {@link (query:1)} to create a new query
* instance that also contains the `QueryCompositeFilterConstraint`.
*/
class QueryCompositeFilterConstraint extends AppliableConstraint {
/**
* @internal
*/
constructor(
/** The type of this query constraint */
type, _queryConstraints) {
super();
this.type = type;
this._queryConstraints = _queryConstraints;
}
static _create(type, _queryConstraints) {
return new QueryCompositeFilterConstraint(type, _queryConstraints);
}
_parse(query) {
const parsedFilters = this._queryConstraints
.map(queryConstraint => {
return queryConstraint._parse(query);
})
.filter(parsedFilter => parsedFilter.getFilters().length > 0);
if (parsedFilters.length === 1) {
return parsedFilters[0];
}
return CompositeFilter.create(parsedFilters, this._getOperator());
}
_apply(query) {
const parsedFilter = this._parse(query);
if (parsedFilter.getFilters().length === 0) {
// Return the existing query if not adding any more filters (e.g. an empty
// composite filter).
return query;
}
validateNewFilter(query._query, parsedFilter);
return new Query(query.firestore, query.converter, queryWithAddedFilter(query._query, parsedFilter));
}
_getQueryConstraints() {
return this._queryConstraints;
}
_getOperator() {
return this.type === 'and' ? "and" /* CompositeOperator.AND */ : "or" /* CompositeOperator.OR */;
}
}
/**
* Creates a new {@link QueryCompositeFilterConstraint} that is a disjunction of
* the given filter constraints. A disjunction filter includes a document if it
* satisfies any of the given filters.
*
* @param queryConstraints - Optional. The list of
* {@link QueryFilterConstraint}s to perform a disjunction for. These must be
* created with calls to {@link where}, {@link or}, or {@link and}.
* @returns The newly created {@link QueryCompositeFilterConstraint}.
*/
function or(...queryConstraints) {
    // Each argument must be a field or composite filter constraint.
    for (const constraint of queryConstraints) {
        validateQueryFilterConstraint('or', constraint);
    }
    return QueryCompositeFilterConstraint._create("or" /* CompositeOperator.OR */, queryConstraints);
}
/**
* Creates a new {@link QueryCompositeFilterConstraint} that is a conjunction of
* the given filter constraints. A conjunction filter includes a document if it
* satisfies all of the given filters.
*
* @param queryConstraints - Optional. The list of
* {@link QueryFilterConstraint}s to perform a conjunction for. These must be
* created with calls to {@link where}, {@link or}, or {@link and}.
* @returns The newly created {@link QueryCompositeFilterConstraint}.
*/
function and(...queryConstraints) {
    // Each argument must be a field or composite filter constraint.
    for (const constraint of queryConstraints) {
        validateQueryFilterConstraint('and', constraint);
    }
    return QueryCompositeFilterConstraint._create("and" /* CompositeOperator.AND */, queryConstraints);
}
/**
* A `QueryOrderByConstraint` is used to sort the set of documents returned by a
* Firestore query. `QueryOrderByConstraint`s are created by invoking
* {@link orderBy} and can then be passed to {@link (query:1)} to create a new query
* instance that also contains this `QueryOrderByConstraint`.
*
* Note: Documents that do not contain the orderBy field will not be present in
* the query result.
*/
class QueryOrderByConstraint extends QueryConstraint {
    /**
     * Sorts query results by `_field` in `_direction`.
     * @internal
     */
    constructor(_field, _direction) {
        super();
        this._field = _field;
        this._direction = _direction;
        /** The type of this query constraint */
        this.type = 'orderBy';
    }
    static _create(_field, _direction) {
        return new QueryOrderByConstraint(_field, _direction);
    }
    _apply(query) {
        const newOrdering = newQueryOrderBy(query._query, this._field, this._direction);
        const updatedQuery = queryWithAddedOrderBy(query._query, newOrdering);
        return new Query(query.firestore, query.converter, updatedQuery);
    }
}
/**
* Creates a {@link QueryOrderByConstraint} that sorts the query result by the
* specified field, optionally in descending order instead of ascending.
*
* Note: Documents that do not contain the specified field will not be present
* in the query result.
*
* @param fieldPath - The field to sort by.
* @param directionStr - Optional direction to sort by ('asc' or 'desc'). If
* not specified, order will be ascending.
* @returns The created {@link QueryOrderByConstraint}.
*/
function orderBy(fieldPath, directionStr = 'asc') {
    // Normalize the field argument into an internal FieldPath first.
    const path = fieldPathFromArgument('orderBy', fieldPath);
    return QueryOrderByConstraint._create(path, directionStr);
}
/**
* A `QueryLimitConstraint` is used to limit the number of documents returned by
* a Firestore query.
* `QueryLimitConstraint`s are created by invoking {@link limit} or
* {@link limitToLast} and can then be passed to {@link (query:1)} to create a new
* query instance that also contains this `QueryLimitConstraint`.
*/
class QueryLimitConstraint extends QueryConstraint {
    /**
     * Caps the number of results (`_limit`) taken from the first or last
     * matching documents (`_limitType`).
     * @internal
     */
    constructor(
    /** The type of this query constraint */
    type, _limit, _limitType) {
        super();
        this.type = type;
        this._limit = _limit;
        this._limitType = _limitType;
    }
    static _create(type, _limit, _limitType) {
        return new QueryLimitConstraint(type, _limit, _limitType);
    }
    _apply(query) {
        const limitedQuery = queryWithLimit(query._query, this._limit, this._limitType);
        return new Query(query.firestore, query.converter, limitedQuery);
    }
}
/**
* Creates a {@link QueryLimitConstraint} that only returns the first matching
* documents.
*
* @param limit - The maximum number of items to return.
* @returns The created {@link QueryLimitConstraint}.
*/
function limit(limit) {
    // Reject zero, negative, and non-integer limits up front.
    validatePositiveNumber('limit', limit);
    const limitType = "F" /* LimitType.First */;
    return QueryLimitConstraint._create('limit', limit, limitType);
}
/**
* Creates a {@link QueryLimitConstraint} that only returns the last matching
* documents.
*
* You must specify at least one `orderBy` clause for `limitToLast` queries,
* otherwise an exception will be thrown during execution.
*
* @param limit - The maximum number of items to return.
* @returns The created {@link QueryLimitConstraint}.
*/
function limitToLast(limit) {
    // Reject zero, negative, and non-integer limits up front.
    validatePositiveNumber('limitToLast', limit);
    const limitType = "L" /* LimitType.Last */;
    return QueryLimitConstraint._create('limitToLast', limit, limitType);
}
/**
* A `QueryStartAtConstraint` is used to exclude documents from the start of a
* result set returned by a Firestore query.
* `QueryStartAtConstraint`s are created by invoking {@link (startAt:1)} or
* {@link (startAfter:1)} and can then be passed to {@link (query:1)} to create a
* new query instance that also contains this `QueryStartAtConstraint`.
*/
class QueryStartAtConstraint extends QueryConstraint {
    /**
     * Defines the lower cursor bound of a query, from either a document
     * snapshot or a list of field values.
     * @internal
     */
    constructor(
    /** The type of this query constraint */
    type, _docOrFields, _inclusive) {
        super();
        this.type = type;
        this._docOrFields = _docOrFields;
        this._inclusive = _inclusive;
    }
    static _create(type, _docOrFields, _inclusive) {
        return new QueryStartAtConstraint(type, _docOrFields, _inclusive);
    }
    _apply(query) {
        const bound = newQueryBoundFromDocOrFields(query, this.type, this._docOrFields, this._inclusive);
        const boundedQuery = queryWithStartAt(query._query, bound);
        return new Query(query.firestore, query.converter, boundedQuery);
    }
}
function startAt(...docOrFields) {
    // An inclusive lower bound: results begin at the given position.
    const inclusive = true;
    return QueryStartAtConstraint._create('startAt', docOrFields, inclusive);
}
function startAfter(...docOrFields) {
    // An exclusive lower bound: results begin just past the given position.
    const inclusive = false;
    return QueryStartAtConstraint._create('startAfter', docOrFields, inclusive);
}
/**
* A `QueryEndAtConstraint` is used to exclude documents from the end of a
* result set returned by a Firestore query.
* `QueryEndAtConstraint`s are created by invoking {@link (endAt:1)} or
* {@link (endBefore:1)} and can then be passed to {@link (query:1)} to create a new
* query instance that also contains this `QueryEndAtConstraint`.
*/
class QueryEndAtConstraint extends QueryConstraint {
    /**
     * Defines the upper cursor bound of a query, from either a document
     * snapshot or a list of field values.
     * @internal
     */
    constructor(
    /** The type of this query constraint */
    type, _docOrFields, _inclusive) {
        super();
        this.type = type;
        this._docOrFields = _docOrFields;
        this._inclusive = _inclusive;
    }
    static _create(type, _docOrFields, _inclusive) {
        return new QueryEndAtConstraint(type, _docOrFields, _inclusive);
    }
    _apply(query) {
        const bound = newQueryBoundFromDocOrFields(query, this.type, this._docOrFields, this._inclusive);
        const boundedQuery = queryWithEndAt(query._query, bound);
        return new Query(query.firestore, query.converter, boundedQuery);
    }
}
function endBefore(...docOrFields) {
    // An exclusive upper bound: results stop just before the given position.
    const inclusive = false;
    return QueryEndAtConstraint._create('endBefore', docOrFields, inclusive);
}
function endAt(...docOrFields) {
    // An inclusive upper bound: results stop at the given position.
    const inclusive = true;
    return QueryEndAtConstraint._create('endAt', docOrFields, inclusive);
}
/**
 * Helper to build a cursor `Bound` from either a `DocumentSnapshot` (first
 * element) or a list of raw field values.
 */
function newQueryBoundFromDocOrFields(query, methodName, docOrFields, inclusive) {
    // Unwrap a possible compat-layer wrapper around the first argument.
    docOrFields[0] = getModularInstance(docOrFields[0]);
    const first = docOrFields[0];
    if (first instanceof DocumentSnapshot$1) {
        return newQueryBoundFromDocument(query._query, query.firestore._databaseId, methodName, first._document, inclusive);
    }
    const reader = newUserDataReader(query.firestore);
    return newQueryBoundFromFields(query._query, query.firestore._databaseId, reader, methodName, docOrFields, inclusive);
}
/**
 * Builds a `FieldFilter` for the given field/operator/value, applying the
 * special validation and parsing rules for documentId() (key field) queries.
 */
function newQueryFilter(query, methodName, dataReader, databaseId, fieldPath, op, value) {
    let fieldValue;
    if (!fieldPath.isKeyField()) {
        // Ordinary field: validate disjunctive operators, then parse.
        if (op === "in" /* Operator.IN */ ||
            op === "not-in" /* Operator.NOT_IN */ ||
            op === "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */) {
            validateDisjunctiveFilterElements(value, op);
        }
        const allowArrays = op === "in" /* Operator.IN */ || op === "not-in" /* Operator.NOT_IN */;
        fieldValue = parseQueryValue(dataReader, methodName, value, allowArrays);
    }
    else {
        // documentId() queries: values must resolve to document references.
        if (op === "array-contains" /* Operator.ARRAY_CONTAINS */ || op === "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */) {
            throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid Query. You can't perform '${op}' queries on documentId().`);
        }
        if (op === "in" /* Operator.IN */ || op === "not-in" /* Operator.NOT_IN */) {
            validateDisjunctiveFilterElements(value, op);
            const referenceList = [];
            for (const arrayValue of value) {
                referenceList.push(parseDocumentIdValue(databaseId, query, arrayValue));
            }
            fieldValue = { arrayValue: { values: referenceList } };
        }
        else {
            fieldValue = parseDocumentIdValue(databaseId, query, value);
        }
    }
    return FieldFilter.create(fieldPath, op, fieldValue);
}
/**
 * Creates an `OrderBy` for the query, rejecting queries that already have a
 * cursor bound (ordering must be fixed before bounds are applied).
 */
function newQueryOrderBy(query, fieldPath, direction) {
    if (query.startAt !== null) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. You must not call startAt() or startAfter() before ' +
            'calling orderBy().');
    }
    if (query.endAt !== null) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. You must not call endAt() or endBefore() before ' +
            'calling orderBy().');
    }
    return new OrderBy(fieldPath, direction);
}
/**
* Create a `Bound` from a query and a document.
*
* Note that the `Bound` will always include the key of the document
* and so only the provided document will compare equal to the returned
* position.
*
* Will throw if the document does not contain all fields of the order by
* of the query or if any of the fields in the order by are an uncommitted
* server timestamp.
*/
function newQueryBoundFromDocument(query, databaseId, methodName, doc, inclusive) {
    if (!doc) {
        throw new FirestoreError(Code.NOT_FOUND, `Can't use a DocumentSnapshot that doesn't exist for ` +
            `${methodName}().`);
    }
    // Because people expect to continue/end a query at the exact document
    // provided, we need to use the implicit sort order rather than the explicit
    // sort order, because it's guaranteed to contain the document key. That way
    // the position becomes unambiguous and the query continues/ends exactly at
    // the provided document. Without the key (by using the explicit sort
    // orders), multiple documents could match the position, yielding duplicate
    // results.
    const components = [];
    for (const orderBy of queryNormalizedOrderBy(query)) {
        if (orderBy.field.isKeyField()) {
            components.push(refValue(databaseId, doc.key));
            continue;
        }
        const value = doc.data.field(orderBy.field);
        if (isServerTimestamp(value)) {
            // A pending server timestamp has no concrete value to bound on.
            throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. You are trying to start or end a query using a document for which the field "${orderBy.field}" is an uncommitted server timestamp. (Since the value of this field is unknown, you cannot start/end a query with it.)`);
        }
        if (value === null) {
            const field = orderBy.field.canonicalString();
            throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. You are trying to start or end a query using a document for which the field '${field}' (used as the orderBy) does not exist.`);
        }
        components.push(value);
    }
    return new Bound(components, inclusive);
}
/**
* Converts a list of field values to a `Bound` for the given query.
*/
function newQueryBoundFromFields(query, databaseId, dataReader, methodName, values, inclusive) {
    // The cursor values have to line up with the user's explicit orderBy()
    // clauses, so use those rather than the normalized order by.
    const orderBy = query.explicitOrderBy;
    if (values.length > orderBy.length) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, `Too many arguments provided to ${methodName}(). ` +
            `The number of arguments must be less than or equal to the ` +
            `number of orderBy() clauses`);
    }
    const components = [];
    for (let i = 0; i < values.length; i++) {
        const rawValue = values[i];
        const orderByComponent = orderBy[i];
        if (!orderByComponent.field.isKeyField()) {
            // Regular field: parse the raw value through the user data reader.
            components.push(parseQueryValue(dataReader, methodName, rawValue));
            continue;
        }
        // documentId() component: the value must be a plain string ID that
        // resolves to a valid document path.
        if (typeof rawValue !== 'string') {
            throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. Expected a string for document ID in ` +
                `${methodName}(), but got a ${typeof rawValue}`);
        }
        if (!isCollectionGroupQuery(query) && rawValue.indexOf('/') !== -1) {
            throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. When querying a collection and ordering by documentId(), ` +
                `the value passed to ${methodName}() must be a plain document ID, but ` +
                `'${rawValue}' contains a slash.`);
        }
        const path = query.path.child(ResourcePath.fromString(rawValue));
        if (!DocumentKey.isDocumentKey(path)) {
            throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. When querying a collection group and ordering by ` +
                `documentId(), the value passed to ${methodName}() must result in a ` +
                `valid document path, but '${path}' is not because it contains an odd number ` +
                `of segments.`);
        }
        components.push(refValue(databaseId, new DocumentKey(path)));
    }
    return new Bound(components, inclusive);
}
/**
* Parses the given `documentIdValue` into a `ReferenceValue`, throwing
* appropriate errors if the value is anything other than a `DocumentReference`
* or `string`, or if the string is malformed.
*/
function parseDocumentIdValue(databaseId, query, documentIdValue) {
    // Unwrap a possible compat-layer wrapper first.
    documentIdValue = getModularInstance(documentIdValue);
    if (documentIdValue instanceof DocumentReference) {
        return refValue(databaseId, documentIdValue._key);
    }
    if (typeof documentIdValue !== 'string') {
        throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. When querying with documentId(), you must provide a valid ` +
            `string or a DocumentReference, but it was: ` +
            `${valueDescription(documentIdValue)}.`);
    }
    // String form: must be a non-empty ID that resolves to a document path.
    if (documentIdValue === '') {
        throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. When querying with documentId(), you ' +
            'must provide a valid document ID, but it was an empty string.');
    }
    if (!isCollectionGroupQuery(query) && documentIdValue.indexOf('/') !== -1) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. When querying a collection by ` +
            `documentId(), you must provide a plain document ID, but ` +
            `'${documentIdValue}' contains a '/' character.`);
    }
    const path = query.path.child(ResourcePath.fromString(documentIdValue));
    if (!DocumentKey.isDocumentKey(path)) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. When querying a collection group by ` +
            `documentId(), the value provided must result in a valid document path, ` +
            `but '${path}' is not because it has an odd number of segments (${path.length}).`);
    }
    return refValue(databaseId, new DocumentKey(path));
}
/**
* Validates that the value passed into a disjunctive filter satisfies all
* array requirements.
*/
function validateDisjunctiveFilterElements(value, operator) {
    // Disjunctive operators ('in', 'not-in', 'array-contains-any') require a
    // non-empty array of candidate values.
    const isNonEmptyArray = Array.isArray(value) && value.length > 0;
    if (!isNonEmptyArray) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid Query. A non-empty array is required for ' +
            `'${operator.toString()}' filters.`);
    }
}
/**
* Given an operator, returns the set of operators that cannot be used with it.
*
* This is not a comprehensive check, and this function should be removed in the
* long term. Validations should occur in the Firestore backend.
*
* Operators in a query must adhere to the following set of rules:
* 1. Only one inequality per query.
* 2. `NOT_IN` cannot be used with array, disjunctive, or `NOT_EQUAL` operators.
*/
function conflictingOps(op) {
    const NOT_EQUAL = "!=" /* Operator.NOT_EQUAL */;
    const NOT_IN = "not-in" /* Operator.NOT_IN */;
    const IN = "in" /* Operator.IN */;
    const ARRAY_CONTAINS_ANY = "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */;
    // '!=' conflicts with itself and with 'not-in'.
    if (op === NOT_EQUAL) {
        return [NOT_EQUAL, NOT_IN];
    }
    // Disjunctive operators conflict with 'not-in'.
    if (op === ARRAY_CONTAINS_ANY || op === IN) {
        return [NOT_IN];
    }
    // 'not-in' conflicts with every disjunctive operator, itself, and '!='.
    if (op === NOT_IN) {
        return [ARRAY_CONTAINS_ANY, IN, NOT_IN, NOT_EQUAL];
    }
    // All remaining operators are unrestricted.
    return [];
}
function validateNewFieldFilter(query, fieldFilter) {
    const conflictingOp = findOpInsideFilters(query.filters, conflictingOps(fieldFilter.op));
    if (conflictingOp === null) {
        return;
    }
    if (conflictingOp === fieldFilter.op) {
        // Duplicate operator gets a clearer, dedicated message.
        throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. You cannot use more than one ' +
            `'${fieldFilter.op.toString()}' filter.`);
    }
    throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. You cannot use '${fieldFilter.op.toString()}' filters ` +
        `with '${conflictingOp.toString()}' filters.`);
}
function validateNewFilter(query, filter) {
    // Validate each flattened field filter against a query that already
    // includes the filters validated before it, mimicking incremental adds.
    let testQuery = query;
    for (const subFilter of filter.getFlattenedFilters()) {
        validateNewFieldFilter(testQuery, subFilter);
        testQuery = queryWithAddedFilter(testQuery, subFilter);
    }
}
// Checks if any of the provided filter operators are included in the given list of filters and
// returns the first one that is, or null if none are.
// Scans the given filters for the first flattened field-filter operator that
// appears in `operators`; returns it, or null if none match.
function findOpInsideFilters(filters, operators) {
    for (const filter of filters) {
        for (const fieldFilter of filter.getFlattenedFilters()) {
            if (operators.includes(fieldFilter.op)) {
                return fieldFilter.op;
            }
        }
    }
    return null;
}
function validateQueryFilterConstraint(functionName, queryConstraint) {
    const isFilterConstraint = queryConstraint instanceof QueryFieldFilterConstraint ||
        queryConstraint instanceof QueryCompositeFilterConstraint;
    if (!isFilterConstraint) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, `Function ${functionName}() requires AppliableConstraints created with a call to 'where(...)', 'or(...)', or 'and(...)'.`);
    }
}
/**
 * Validates that a top-level list of query constraints contains at most one
 * filter when composite filters are used: either a single composite filter,
 * or any number of field filters, but never a mix at the top level.
 *
 * @throws FirestoreError (invalid-argument) if more than one composite
 * filter, or both composite and field filters, appear at the top level.
 */
function validateQueryConstraintArray(queryConstraint) {
    const compositeFilterCount = queryConstraint.filter(filter => filter instanceof QueryCompositeFilterConstraint).length;
    const fieldFilterCount = queryConstraint.filter(filter => filter instanceof QueryFieldFilterConstraint).length;
    if (compositeFilterCount > 1 ||
        (compositeFilterCount > 0 && fieldFilterCount > 0)) {
        // Fixed message typo: was 'InvalidQuery.' (missing space); now matches
        // the 'Invalid query.' prefix used by the other validation errors.
        throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. When using composite filters, you cannot use ' +
            'more than one filter at the top level. Consider nesting the multiple ' +
            'filters within an `and(...)` statement. For example: ' +
            'change `query(query, where(...), or(...))` to ' +
            '`query(query, and(where(...), or(...)))`.');
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Converts Firestore's internal types to the JavaScript types that we expose
* to the user.
*
* @internal
*/
class AbstractUserDataWriter {
    /**
     * Converts a Firestore proto `Value` into the corresponding user-facing
     * JavaScript value, dispatching on the value's internal type order.
     *
     * @param value - The proto value to convert.
     * @param serverTimestampBehavior - How pending server timestamps are
     * rendered: 'estimate', 'previous', or 'none' (default, which yields
     * null).
     */
    convertValue(value, serverTimestampBehavior = 'none') {
        switch (typeOrder(value)) {
            case 0 /* TypeOrder.NullValue */:
                return null;
            case 1 /* TypeOrder.BooleanValue */:
                return value.booleanValue;
            case 2 /* TypeOrder.NumberValue */:
                // Exactly one of integerValue/doubleValue is expected to be
                // set on the proto — TODO confirm against the serializer.
                return normalizeNumber(value.integerValue || value.doubleValue);
            case 3 /* TypeOrder.TimestampValue */:
                return this.convertTimestamp(value.timestampValue);
            case 4 /* TypeOrder.ServerTimestampValue */:
                return this.convertServerTimestamp(value, serverTimestampBehavior);
            case 5 /* TypeOrder.StringValue */:
                return value.stringValue;
            case 6 /* TypeOrder.BlobValue */:
                // convertBytes is provided by SDK-specific subclasses.
                return this.convertBytes(normalizeByteString(value.bytesValue));
            case 7 /* TypeOrder.RefValue */:
                // convertReference is provided by SDK-specific subclasses.
                return this.convertReference(value.referenceValue);
            case 8 /* TypeOrder.GeoPointValue */:
                return this.convertGeoPoint(value.geoPointValue);
            case 9 /* TypeOrder.ArrayValue */:
                return this.convertArray(value.arrayValue, serverTimestampBehavior);
            case 10 /* TypeOrder.ObjectValue */:
                return this.convertObject(value.mapValue, serverTimestampBehavior);
            default:
                // Unknown type order: internal invariant violation.
                throw fail();
        }
    }
    // Converts a proto mapValue into a plain JavaScript object.
    convertObject(mapValue, serverTimestampBehavior) {
        return this.convertObjectMap(mapValue.fields, serverTimestampBehavior);
    }
    /**
     * Converts a map of field name -> proto value into a plain object,
     * recursively converting each value.
     * @internal
     */
    convertObjectMap(fields, serverTimestampBehavior = 'none') {
        const result = {};
        forEach(fields, (key, value) => {
            result[key] = this.convertValue(value, serverTimestampBehavior);
        });
        return result;
    }
    // Converts a proto geoPointValue into a user-facing GeoPoint.
    convertGeoPoint(value) {
        return new GeoPoint(normalizeNumber(value.latitude), normalizeNumber(value.longitude));
    }
    // Converts a proto arrayValue into a JavaScript array; a missing `values`
    // list is treated as an empty array.
    convertArray(arrayValue, serverTimestampBehavior) {
        return (arrayValue.values || []).map(value => this.convertValue(value, serverTimestampBehavior));
    }
    // Renders a pending (uncommitted) server timestamp according to the
    // requested behavior: the previous committed value, a local-write-time
    // estimate, or null.
    convertServerTimestamp(value, serverTimestampBehavior) {
        switch (serverTimestampBehavior) {
            case 'previous':
                // Note: if the previous value is itself a nested server
                // timestamp, getPreviousValue appears to resolve through it.
                const previousValue = getPreviousValue(value);
                if (previousValue == null) {
                    return null;
                }
                return this.convertValue(previousValue, serverTimestampBehavior);
            case 'estimate':
                return this.convertTimestamp(getLocalWriteTime(value));
            default:
                // 'none' (and any unrecognized behavior) yields null.
                return null;
        }
    }
    // Converts a proto timestamp into a user-facing Timestamp.
    convertTimestamp(value) {
        const normalizedValue = normalizeTimestamp(value);
        return new Timestamp(normalizedValue.seconds, normalizedValue.nanos);
    }
    // Converts a fully-qualified resource name into a DocumentKey. The
    // segment indices (1, 3) and popFirst(5) suggest the name has the form
    // `projects/<project>/databases/<db>/documents/<path...>` — consistent
    // with isValidResourceName being asserted above.
    convertDocumentKey(name, expectedDatabaseId) {
        const resourcePath = ResourcePath.fromString(name);
        hardAssert(isValidResourceName(resourcePath));
        const databaseId = new DatabaseId(resourcePath.get(1), resourcePath.get(3));
        const key = new DocumentKey(resourcePath.popFirst(5));
        if (!databaseId.isEqual(expectedDatabaseId)) {
            // TODO(b/64130202): Somehow support foreign references.
            // Foreign-database references are not supported: log and fall
            // back to treating the key as local to the expected database.
            logError(`Document ${key} contains a document ` +
                `reference within a different database (` +
                `${databaseId.projectId}/${databaseId.database}) which is not ` +
                `supported. It will be treated as a reference in the current ` +
                `database (${expectedDatabaseId.projectId}/${expectedDatabaseId.database}) ` +
                `instead.`);
        }
        return key;
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Converts custom model object of type T into `DocumentData` by applying the
* converter if it exists.
*
* This function is used when converting user objects to `DocumentData`
* because we want to provide the user with a more specific error message if
* their `set()` or fails due to invalid data originating from a `toFirestore()`
* call.
*/
function applyFirestoreDataConverter(converter, value, options) {
    // Without a converter the value passes through untouched.
    if (!converter) {
        return value;
    }
    if (options && (options.merge || options.mergeFields)) {
        // Merge writes pass the options along so toFirestore() can honor the
        // partial-data (`Partial<T>`) overload.
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        return converter.toFirestore(value, options);
    }
    return converter.toFirestore(value);
}
class LiteUserDataWriter extends AbstractUserDataWriter {
    /** Writes Firestore wire values out as lite-SDK user-facing types. */
    constructor(firestore) {
        super();
        this.firestore = firestore;
    }
    convertBytes(bytes) {
        return new Bytes(bytes);
    }
    convertReference(name) {
        const databaseId = this.firestore._databaseId;
        const key = this.convertDocumentKey(name, databaseId);
        return new DocumentReference(this.firestore, /* converter= */ null, key);
    }
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Create an AggregateField object that can be used to compute the sum of
* a specified field over a range of documents in the result set of a query.
* @param field Specifies the field to sum across the result set.
*/
function sum(field) {
    // Normalize the field argument into an internal FieldPath first.
    const path = fieldPathFromArgument$1('sum', field);
    return new AggregateField('sum', path);
}
/**
* Create an AggregateField object that can be used to compute the average of
* a specified field over a range of documents in the result set of a query.
* @param field Specifies the field to average across the result set.
*/
function average(field) {
    // Normalize the field argument into an internal FieldPath first.
    const path = fieldPathFromArgument$1('average', field);
    return new AggregateField('avg', path);
}
/**
* Create an AggregateField object that can be used to compute the count of
* documents in the result set of a query.
*/
function count() {
    // A count aggregation has no associated field path.
    return new AggregateField('count');
}
/**
 * Compares two `AggregateField` instances for equality.
*
* @param left Compare this AggregateField to the `right`.
* @param right Compare this AggregateField to the `left`.
*/
function aggregateFieldEqual(left, right) {
    if (!(left instanceof AggregateField) || !(right instanceof AggregateField)) {
        return false;
    }
    if (left.aggregateType !== right.aggregateType) {
        return false;
    }
    // Fields without an internal path (e.g. count()) compare equal when both
    // paths are absent.
    const leftPath = left._internalFieldPath ? left._internalFieldPath.canonicalString() : undefined;
    const rightPath = right._internalFieldPath ? right._internalFieldPath.canonicalString() : undefined;
    return leftPath === rightPath;
}
/**
* Compares two `AggregateQuerySnapshot` instances for equality.
*
* Two `AggregateQuerySnapshot` instances are considered "equal" if they have
* underlying queries that compare equal, and the same data.
*
* @param left - The first `AggregateQuerySnapshot` to compare.
* @param right - The second `AggregateQuerySnapshot` to compare.
*
* @returns `true` if the objects are "equal", as defined above, or `false`
* otherwise.
*/
function aggregateQuerySnapshotEqual(left, right) {
    // Short-circuit on the query comparison so data() is only evaluated when
    // the underlying queries already match.
    if (!queryEqual(left.query, right.query)) {
        return false;
    }
    return deepEqual(left.data(), right.data());
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function isPartialObserver(obj) {
    // An observer needs at least one of the standard callback methods.
    const observerMethods = ['next', 'error', 'complete'];
    return implementsAnyMethods(obj, observerMethods);
}
/**
* Returns true if obj is an object and contains at least one of the specified
* methods.
*/
/**
 * Returns true if `obj` is a non-null object exposing at least one of the
 * named members as a function (own or inherited).
 */
function implementsAnyMethods(obj, methods) {
    if (obj === null || typeof obj !== 'object') {
        return false;
    }
    return methods.some(method => method in obj && typeof obj[method] === 'function');
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Metadata about a snapshot, describing the state of the snapshot.
*/
class SnapshotMetadata {
    /** @hideconstructor */
    constructor(hasPendingWrites, fromCache) {
        this.hasPendingWrites = hasPendingWrites;
        this.fromCache = fromCache;
    }
    /**
     * Returns true if this `SnapshotMetadata` is equal to the provided one.
     *
     * @param other - The `SnapshotMetadata` to compare against.
     * @returns true if both flags match the provided metadata's flags.
     */
    isEqual(other) {
        const sameWrites = this.hasPendingWrites === other.hasPendingWrites;
        const sameCache = this.fromCache === other.fromCache;
        return sameWrites && sameCache;
    }
}
/**
* A `DocumentSnapshot` contains data read from a document in your Firestore
* database. The data can be extracted with `.data()` or `.get(<field>)` to
* get a specific field.
*
* For a `DocumentSnapshot` that points to a non-existing document, any data
* access will return 'undefined'. You can use the `exists()` method to
* explicitly verify a document's existence.
*/
class DocumentSnapshot extends DocumentSnapshot$1 {
    /** @hideconstructor protected */
    constructor(_firestore, userDataWriter, key, document, metadata, converter) {
        super(_firestore, userDataWriter, key, document, converter);
        this._firestore = _firestore;
        // Same instance as _firestore; kept as a separate (presumably
        // full-SDK-typed) alias — TODO confirm intended distinction.
        this._firestoreImpl = _firestore;
        this.metadata = metadata;
    }
    /**
     * Returns whether or not the data exists. True if the document exists.
     */
    exists() {
        // Delegates to the lite-SDK base class implementation.
        return super.exists();
    }
    /**
     * Retrieves all fields in the document as an `Object`. Returns `undefined` if
     * the document doesn't exist.
     *
     * By default, `serverTimestamp()` values that have not yet been
     * set to their final value will be returned as `null`. You can override
     * this by passing an options object.
     *
     * @param options - An options object to configure how data is retrieved from
     * the snapshot (for example the desired behavior for server timestamps that
     * have not yet been set to their final value).
     * @returns An `Object` containing all fields in the document or `undefined` if
     * the document doesn't exist.
     */
    data(options = {}) {
        if (!this._document) {
            // Non-existent document: no data to return.
            return undefined;
        }
        else if (this._converter) {
            // We only want to use the converter and create a new DocumentSnapshot
            // if a converter has been provided.
            // The converter receives an un-converted snapshot so its
            // fromFirestore() sees raw data.
            const snapshot = new QueryDocumentSnapshot(this._firestore, this._userDataWriter, this._key, this._document, this.metadata,
            /* converter= */ null);
            return this._converter.fromFirestore(snapshot, options);
        }
        else {
            // No converter: convert the raw proto value directly, honoring
            // the requested server-timestamp behavior.
            return this._userDataWriter.convertValue(this._document.data.value, options.serverTimestamps);
        }
    }
    /**
     * Retrieves the field specified by `fieldPath`. Returns `undefined` if the
     * document or field doesn't exist.
     *
     * By default, a `serverTimestamp()` that has not yet been set to
     * its final value will be returned as `null`. You can override this by
     * passing an options object.
     *
     * @param fieldPath - The path (for example 'foo' or 'foo.bar') to a specific
     * field.
     * @param options - An options object to configure how the field is retrieved
     * from the snapshot (for example the desired behavior for server timestamps
     * that have not yet been set to their final value).
     * @returns The data at the specified field location or undefined if no such
     * field exists in the document.
     */
    // We are using `any` here to avoid an explicit cast by our users.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    get(fieldPath, options = {}) {
        if (this._document) {
            const value = this._document.data.field(fieldPathFromArgument('DocumentSnapshot.get', fieldPath));
            if (value !== null) {
                return this._userDataWriter.convertValue(value, options.serverTimestamps);
            }
        }
        // Missing document or missing field both yield undefined.
        return undefined;
    }
}
/**
 * A `QueryDocumentSnapshot` contains data read from a document in your
 * Firestore database as part of a query. The document is guaranteed to exist
 * and its data can be extracted with `.data()` or `.get(<field>)` to get a
 * specific field.
 *
 * A `QueryDocumentSnapshot` offers the same API surface as a
 * `DocumentSnapshot`. Since query results contain only existing documents, the
 * `exists` property will always be true and `data()` will never return
 * 'undefined'.
 */
class QueryDocumentSnapshot extends DocumentSnapshot {
    /**
     * Retrieves all fields in the document as an `Object`.
     *
     * By default, `serverTimestamp()` values that have not yet been set to
     * their final value are returned as `null`; pass an options object to
     * override that.
     *
     * @override
     * @param options - An options object to configure how data is retrieved
     * from the snapshot (for example the desired behavior for server
     * timestamps that have not yet been set to their final value).
     * @returns An `Object` containing all fields in the document.
     */
    data(options = {}) {
        // Delegates to the base class; query results only contain existing
        // documents, so the result is never `undefined`.
        return super.data(options);
    }
}
/**
 * A `QuerySnapshot` contains zero or more `DocumentSnapshot` objects
 * representing the results of a query. The documents can be accessed as an
 * array via the `docs` property or enumerated using the `forEach` method. The
 * number of documents can be determined via the `empty` and `size`
 * properties.
 */
class QuerySnapshot {
    /** @hideconstructor */
    constructor(_firestore, _userDataWriter, query, _snapshot) {
        this._firestore = _firestore;
        this._userDataWriter = _userDataWriter;
        this._snapshot = _snapshot;
        this.metadata = new SnapshotMetadata(_snapshot.hasPendingWrites, _snapshot.fromCache);
        this.query = query;
    }
    /** An array of all the documents in the `QuerySnapshot`. */
    get docs() {
        const snapshots = [];
        this.forEach(docSnapshot => snapshots.push(docSnapshot));
        return snapshots;
    }
    /** The number of documents in the `QuerySnapshot`. */
    get size() {
        return this._snapshot.docs.size;
    }
    /** True if there are no documents in the `QuerySnapshot`. */
    get empty() {
        return this.size === 0;
    }
    /**
     * Enumerates all of the documents in the `QuerySnapshot`.
     *
     * @param callback - A callback to be called with a `QueryDocumentSnapshot` for
     * each document in the snapshot.
     * @param thisArg - The `this` binding for the callback.
     */
    forEach(callback, thisArg) {
        this._snapshot.docs.forEach(doc => {
            const metadata = new SnapshotMetadata(this._snapshot.mutatedKeys.has(doc.key), this._snapshot.fromCache);
            const docSnapshot = new QueryDocumentSnapshot(this._firestore, this._userDataWriter, doc.key, doc, metadata, this.query.converter);
            callback.call(thisArg, docSnapshot);
        });
    }
    /**
     * Returns an array of the documents changes since the last snapshot. If this
     * is the first snapshot, all documents will be in the list as 'added'
     * changes.
     *
     * @param options - `SnapshotListenOptions` that control whether metadata-only
     * changes (i.e. only `DocumentSnapshot.metadata` changed) should trigger
     * snapshot events.
     */
    docChanges(options = {}) {
        const includeMetadataChanges = !!options.includeMetadataChanges;
        if (includeMetadataChanges && this._snapshot.excludesMetadataChanges) {
            throw new FirestoreError(Code.INVALID_ARGUMENT, 'To include metadata changes with your document changes, you must ' +
                'also pass { includeMetadataChanges:true } to onSnapshot().');
        }
        // Recompute only when there is no cached result or it was built with a
        // different `includeMetadataChanges` setting.
        const cacheIsStale = !this._cachedChanges ||
            this._cachedChangesIncludeMetadataChanges !== includeMetadataChanges;
        if (cacheIsStale) {
            this._cachedChanges = changesFromSnapshot(this, includeMetadataChanges);
            this._cachedChangesIncludeMetadataChanges = includeMetadataChanges;
        }
        return this._cachedChanges;
    }
}
/** Calculates the array of `DocumentChange`s for a given `ViewSnapshot`. */
function changesFromSnapshot(querySnapshot, includeMetadataChanges) {
    if (querySnapshot._snapshot.oldDocs.isEmpty()) {
        // First snapshot: every document is an 'added' change at consecutive
        // indices, with no previous position.
        let index = 0;
        return querySnapshot._snapshot.docChanges.map(change => {
            const doc = new QueryDocumentSnapshot(querySnapshot._firestore, querySnapshot._userDataWriter, change.doc.key, change.doc, new SnapshotMetadata(querySnapshot._snapshot.mutatedKeys.has(change.doc.key), querySnapshot._snapshot.fromCache), querySnapshot.query.converter);
            return {
                type: 'added',
                doc,
                oldIndex: -1,
                newIndex: index++
            };
        });
    }
    else {
        // A `DocumentSet` that is updated incrementally as changes are
        // applied, used to look up the index of a document.
        let indexTracker = querySnapshot._snapshot.oldDocs;
        return querySnapshot._snapshot.docChanges
            .filter(change => includeMetadataChanges || change.type !== 3 /* ChangeType.Metadata */)
            .map(change => {
            const doc = new QueryDocumentSnapshot(querySnapshot._firestore, querySnapshot._userDataWriter, change.doc.key, change.doc, new SnapshotMetadata(querySnapshot._snapshot.mutatedKeys.has(change.doc.key), querySnapshot._snapshot.fromCache), querySnapshot.query.converter);
            let oldIndex = -1;
            let newIndex = -1;
            // For anything but a brand-new document, its old position is where
            // the tracker currently holds it; delete it so subsequent lookups
            // reflect the shifted positions.
            if (change.type !== 0 /* ChangeType.Added */) {
                oldIndex = indexTracker.indexOf(change.doc.key);
                indexTracker = indexTracker.delete(change.doc.key);
            }
            // For anything but a removal, re-insert the document and read back
            // its new position.
            if (change.type !== 1 /* ChangeType.Removed */) {
                indexTracker = indexTracker.add(change.doc);
                newIndex = indexTracker.indexOf(change.doc.key);
            }
            return {
                type: resultChangeType(change.type),
                doc,
                oldIndex,
                newIndex
            };
        });
    }
}
/**
 * Maps an internal `ChangeType` enum value to the public
 * `DocumentChangeType` string.
 */
function resultChangeType(type) {
    if (type === 0 /* ChangeType.Added */) {
        return 'added';
    }
    if (type === 1 /* ChangeType.Removed */) {
        return 'removed';
    }
    // Metadata-only changes surface to users as 'modified'.
    if (type === 2 /* ChangeType.Modified */ || type === 3 /* ChangeType.Metadata */) {
        return 'modified';
    }
    // Unknown enum values indicate an internal invariant violation.
    return fail();
}
// TODO(firestoreexp): Add tests for snapshotEqual with different snapshot
// metadata
/**
 * Returns true if the provided snapshots are equal.
 *
 * @param left - A snapshot to compare.
 * @param right - A snapshot to compare.
 * @returns true if the snapshots are equal.
 */
function snapshotEqual(left, right) {
    if (left instanceof DocumentSnapshot && right instanceof DocumentSnapshot) {
        const documentsMatch = left._document === null
            ? right._document === null
            : left._document.isEqual(right._document);
        return (left._firestore === right._firestore &&
            left._key.isEqual(right._key) &&
            documentsMatch &&
            left._converter === right._converter);
    }
    if (left instanceof QuerySnapshot && right instanceof QuerySnapshot) {
        return (left._firestore === right._firestore &&
            queryEqual(left.query, right.query) &&
            left.metadata.isEqual(right.metadata) &&
            left._snapshot.isEqual(right._snapshot));
    }
    // Mismatched or unrecognized snapshot types are never equal.
    return false;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Reads the document referred to by this `DocumentReference`.
 *
 * Note: `getDoc()` attempts to provide up-to-date data when possible by waiting
 * for data from the server, but it may return cached data or fail if you are
 * offline and the server cannot be reached. To specify this behavior, invoke
 * {@link getDocFromCache} or {@link getDocFromServer}.
 *
 * @param reference - The reference of the document to fetch.
 * @returns A Promise resolved with a `DocumentSnapshot` containing the
 * current document contents.
 */
function getDoc(reference) {
    const docRef = cast(reference, DocumentReference);
    const db = cast(docRef.firestore, Firestore);
    const client = ensureFirestoreConfigured(db);
    // Resolve via a one-shot snapshot listener so default cache/server
    // fallback semantics apply.
    return firestoreClientGetDocumentViaSnapshotListener(client, docRef._key)
        .then(viewSnapshot => convertToDocSnapshot(db, docRef, viewSnapshot));
}
/**
 * User data writer for the main (non-lite) entry point: converts raw wire
 * values into the public `Bytes` and `DocumentReference` types.
 */
class ExpUserDataWriter extends AbstractUserDataWriter {
    constructor(firestore) {
        super();
        this.firestore = firestore;
    }
    /** Wraps raw bytes in the public `Bytes` type. */
    convertBytes(bytes) {
        return new Bytes(bytes);
    }
    /** Converts a fully-qualified resource name into a `DocumentReference`. */
    convertReference(name) {
        const documentKey = this.convertDocumentKey(name, this.firestore._databaseId);
        return new DocumentReference(this.firestore, /* converter= */ null, documentKey);
    }
}
/**
 * Reads the document referred to by this `DocumentReference` from cache.
 * Returns an error if the document is not currently cached.
 *
 * @returns A `Promise` resolved with a `DocumentSnapshot` containing the
 * current document contents.
 */
function getDocFromCache(reference) {
    const docRef = cast(reference, DocumentReference);
    const db = cast(docRef.firestore, Firestore);
    const client = ensureFirestoreConfigured(db);
    const userDataWriter = new ExpUserDataWriter(db);
    return firestoreClientGetDocumentFromLocalCache(client, docRef._key).then(doc => {
        // A cache read is always `fromCache: true`; pending local mutations
        // are surfaced through `hasPendingWrites`.
        const metadata = new SnapshotMetadata(doc !== null && doc.hasLocalMutations,
        /* fromCache= */ true);
        return new DocumentSnapshot(db, userDataWriter, docRef._key, doc, metadata, docRef.converter);
    });
}
/**
 * Reads the document referred to by this `DocumentReference` from the server.
 * Returns an error if the network is not available.
 *
 * @returns A `Promise` resolved with a `DocumentSnapshot` containing the
 * current document contents.
 */
function getDocFromServer(reference) {
    const docRef = cast(reference, DocumentReference);
    const db = cast(docRef.firestore, Firestore);
    const client = ensureFirestoreConfigured(db);
    // Force the listener to only resolve with server-sourced data.
    const listenOptions = { source: 'server' };
    return firestoreClientGetDocumentViaSnapshotListener(client, docRef._key, listenOptions)
        .then(viewSnapshot => convertToDocSnapshot(db, docRef, viewSnapshot));
}
/**
 * Executes the query and returns the results as a `QuerySnapshot`.
 *
 * Note: `getDocs()` attempts to provide up-to-date data when possible by
 * waiting for data from the server, but it may return cached data or fail if
 * you are offline and the server cannot be reached. To specify this behavior,
 * invoke {@link getDocsFromCache} or {@link getDocsFromServer}.
 *
 * @returns A `Promise` that will be resolved with the results of the query.
 */
function getDocs(query) {
    const internalQuery = cast(query, Query);
    const db = cast(internalQuery.firestore, Firestore);
    const client = ensureFirestoreConfigured(db);
    const userDataWriter = new ExpUserDataWriter(db);
    // `limitToLast` queries require an explicit orderBy to be reversible.
    validateHasExplicitOrderByForLimitToLast(internalQuery._query);
    return firestoreClientGetDocumentsViaSnapshotListener(client, internalQuery._query)
        .then(viewSnapshot => new QuerySnapshot(db, userDataWriter, internalQuery, viewSnapshot));
}
/**
 * Executes the query and returns the results as a `QuerySnapshot` from cache.
 * Returns an empty result set if no documents matching the query are currently
 * cached.
 *
 * @returns A `Promise` that will be resolved with the results of the query.
 */
function getDocsFromCache(query) {
    const internalQuery = cast(query, Query);
    const db = cast(internalQuery.firestore, Firestore);
    const client = ensureFirestoreConfigured(db);
    const userDataWriter = new ExpUserDataWriter(db);
    return firestoreClientGetDocumentsFromLocalCache(client, internalQuery._query)
        .then(viewSnapshot => new QuerySnapshot(db, userDataWriter, internalQuery, viewSnapshot));
}
/**
 * Executes the query and returns the results as a `QuerySnapshot` from the
 * server. Returns an error if the network is not available.
 *
 * @returns A `Promise` that will be resolved with the results of the query.
 */
function getDocsFromServer(query) {
    const internalQuery = cast(query, Query);
    const db = cast(internalQuery.firestore, Firestore);
    const client = ensureFirestoreConfigured(db);
    const userDataWriter = new ExpUserDataWriter(db);
    // Force the listener to only resolve with server-sourced data.
    const listenOptions = { source: 'server' };
    return firestoreClientGetDocumentsViaSnapshotListener(client, internalQuery._query, listenOptions)
        .then(viewSnapshot => new QuerySnapshot(db, userDataWriter, internalQuery, viewSnapshot));
}
/**
 * Writes to the document referred to by the specified `DocumentReference`,
 * applying the reference's converter (if any) and the given set options.
 *
 * @param reference - A reference to the document to write.
 * @param data - The document data to write.
 * @param options - An object to configure the set behavior.
 * @returns The `Promise` produced by the underlying write.
 */
function setDoc(reference, data, options) {
    const docRef = cast(reference, DocumentReference);
    const db = cast(docRef.firestore, Firestore);
    const convertedValue = applyFirestoreDataConverter(docRef.converter, data, options);
    const dataReader = newUserDataReader(db);
    const parsed = parseSetData(dataReader, 'setDoc', docRef._key, convertedValue, docRef.converter !== null, options);
    // A set carries no precondition on the current document state.
    const mutation = parsed.toMutation(docRef._key, Precondition.none());
    return executeWrite(db, [mutation]);
}
/**
 * Updates fields in the document referred to by the specified
 * `DocumentReference`. Accepts either a single update object or a
 * field-path/value vararg list.
 *
 * @param reference - A reference to the document to update.
 * @param fieldOrUpdateData - Either an update object or the first field path.
 * @param value - The first field value when using the vararg form.
 * @param moreFieldsAndValues - Additional field/value pairs (vararg form).
 * @returns The `Promise` produced by the underlying write.
 */
function updateDoc(reference, fieldOrUpdateData, value, ...moreFieldsAndValues) {
    const docRef = cast(reference, DocumentReference);
    const db = cast(docRef.firestore, Firestore);
    const dataReader = newUserDataReader(db);
    // For Compat types, we have to "extract" the underlying types before
    // performing validation.
    const target = getModularInstance(fieldOrUpdateData);
    const isVarargForm = typeof target === 'string' || target instanceof FieldPath;
    const parsed = isVarargForm
        ? parseUpdateVarargs(dataReader, 'updateDoc', docRef._key, target, value, moreFieldsAndValues)
        : parseUpdateData(dataReader, 'updateDoc', docRef._key, target);
    // Updates require the target document to already exist.
    const mutation = parsed.toMutation(docRef._key, Precondition.exists(true));
    return executeWrite(db, [mutation]);
}
/**
 * Deletes the document referred to by the specified `DocumentReference`.
 *
 * @param reference - A reference to the document to delete.
 * @returns A Promise resolved once the document has been successfully
 * deleted from the backend (note that it won't resolve while you're offline).
 */
function deleteDoc(reference) {
    const db = cast(reference.firestore, Firestore);
    // Deletes carry no precondition on the current document state.
    const deleteMutation = new DeleteMutation(reference._key, Precondition.none());
    return executeWrite(db, [deleteMutation]);
}
/**
 * Add a new document to specified `CollectionReference` with the given data,
 * assigning it a document ID automatically.
 *
 * @param reference - A reference to the collection to add this document to.
 * @param data - An Object containing the data for the new document.
 * @returns A `Promise` resolved with a `DocumentReference` pointing to the
 * newly created document after it has been written to the backend (Note that it
 * won't resolve while you're offline).
 */
function addDoc(reference, data) {
    const db = cast(reference.firestore, Firestore);
    // `doc()` with no path generates an automatic document ID in the
    // target collection.
    const docRef = doc(reference);
    const convertedValue = applyFirestoreDataConverter(reference.converter, data);
    const dataReader = newUserDataReader(reference.firestore);
    const parsed = parseSetData(dataReader, 'addDoc', docRef._key, convertedValue, reference.converter !== null, {});
    // The write must fail if the auto-generated ID somehow already exists.
    const mutation = parsed.toMutation(docRef._key, Precondition.exists(false));
    return executeWrite(db, [mutation]).then(() => docRef);
}
/**
 * Attaches a listener for snapshot events on either a `DocumentReference` or
 * a `Query`. The trailing arguments are either an observer object
 * (`{ next, error, complete }`) or positional callbacks, optionally preceded
 * by a listen-options object.
 *
 * @param reference - The document reference or query to listen to.
 * @param args - Optional options object followed by an observer or positional
 * next/error/complete callbacks.
 * @returns The value produced by `firestoreClientListen` (the unsubscribe
 * handle).
 */
function onSnapshot(reference, ...args) {
    var _a, _b, _c;
    reference = getModularInstance(reference);
    let options = {
        includeMetadataChanges: false
    };
    // `currArg` tracks the position of the next/error/complete callbacks
    // within `args`, which shifts by one if an options object leads.
    let currArg = 0;
    // A leading plain object that is not an observer is treated as options.
    if (typeof args[currArg] === 'object' && !isPartialObserver(args[currArg])) {
        options = args[currArg];
        currArg++;
    }
    const internalOptions = {
        includeMetadataChanges: options.includeMetadataChanges
    };
    // Normalize an observer object into positional next/error/complete
    // callbacks, each bound to the observer.
    if (isPartialObserver(args[currArg])) {
        const userObserver = args[currArg];
        args[currArg] = (_a = userObserver.next) === null || _a === void 0 ? void 0 : _a.bind(userObserver);
        args[currArg + 1] = (_b = userObserver.error) === null || _b === void 0 ? void 0 : _b.bind(userObserver);
        args[currArg + 2] = (_c = userObserver.complete) === null || _c === void 0 ? void 0 : _c.bind(userObserver);
    }
    let observer;
    let firestore;
    let internalQuery;
    if (reference instanceof DocumentReference) {
        firestore = cast(reference.firestore, Firestore);
        // A document listener is implemented as a query over the single
        // document's path.
        internalQuery = newQueryForPath(reference._key.path);
        observer = {
            next: snapshot => {
                if (args[currArg]) {
                    args[currArg](convertToDocSnapshot(firestore, reference, snapshot));
                }
            },
            error: args[currArg + 1],
            complete: args[currArg + 2]
        };
    }
    else {
        const query = cast(reference, Query);
        firestore = cast(query.firestore, Firestore);
        internalQuery = query._query;
        const userDataWriter = new ExpUserDataWriter(firestore);
        observer = {
            next: snapshot => {
                if (args[currArg]) {
                    args[currArg](new QuerySnapshot(firestore, userDataWriter, query, snapshot));
                }
            },
            error: args[currArg + 1],
            complete: args[currArg + 2]
        };
        validateHasExplicitOrderByForLimitToLast(reference._query);
    }
    const client = ensureFirestoreConfigured(firestore);
    return firestoreClientListen(client, internalQuery, internalOptions, observer);
}
/**
 * Registers a snapshots-in-sync listener on the underlying client. `arg` may
 * be a bare callback or a partial observer object.
 */
function onSnapshotsInSync(firestore, arg) {
    const db = cast(firestore, Firestore);
    const client = ensureFirestoreConfigured(db);
    // Wrap a bare callback into the observer shape the client expects.
    const observer = isPartialObserver(arg) ? arg : { next: arg };
    return firestoreClientAddSnapshotsInSyncListener(client, observer);
}
/**
 * Locally writes `mutations` on the async queue.
 * @internal
 */
function executeWrite(firestore, mutations) {
    const client = ensureFirestoreConfigured(firestore);
    return firestoreClientWrite(client, mutations);
}
/**
 * Converts a {@link ViewSnapshot} that contains the single document specified by `ref`
 * to a {@link DocumentSnapshot}.
 */
function convertToDocSnapshot(firestore, ref, snapshot) {
    const document = snapshot.docs.get(ref._key);
    const userDataWriter = new ExpUserDataWriter(firestore);
    const metadata = new SnapshotMetadata(snapshot.hasPendingWrites, snapshot.fromCache);
    return new DocumentSnapshot(firestore, userDataWriter, ref._key, document, metadata, ref.converter);
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Calculates the number of documents in the result set of the given query
 * without actually downloading the documents.
 *
 * Using this function to count the documents is efficient because only the
 * final count, not the documents' data, is downloaded. This function can
 * count the documents in cases where the result set is prohibitively large to
 * download entirely (thousands of documents).
 *
 * The result received from the server is presented, unaltered, without
 * considering any local state. That is, documents in the local cache are not
 * taken into consideration, neither are local modifications not yet
 * synchronized with the server. Previously-downloaded results, if any, are not
 * used. Every invocation of this function necessarily involves a round trip to
 * the server.
 *
 * @param query The query whose result set size is calculated.
 * @returns A Promise that will be resolved with the count; the count can be
 * retrieved from `snapshot.data().count`, where `snapshot` is the
 * `AggregateQuerySnapshot` to which the returned Promise resolves.
 */
function getCountFromServer(query) {
    // A count is just an aggregation with a single `count()` aggregate under
    // the well-known alias `count`.
    return getAggregateFromServer(query, { count: count() });
}
/**
 * Calculates the specified aggregations over the documents in the result
 * set of the given query without actually downloading the documents.
 *
 * Using this function to perform aggregations is efficient because only the
 * final aggregation values, not the documents' data, are downloaded. This
 * function can perform aggregations of the documents in cases where the result
 * set is prohibitively large to download entirely (thousands of documents).
 *
 * The result received from the server is presented, unaltered, without
 * considering any local state. That is, documents in the local cache are not
 * taken into consideration, neither are local modifications not yet
 * synchronized with the server. Previously-downloaded results, if any, are not
 * used. Every invocation of this function necessarily involves a round trip to
 * the server.
 *
 * @param query The query whose result set is aggregated over.
 * @param aggregateSpec An `AggregateSpec` object that specifies the aggregates
 * to perform over the result set. The AggregateSpec specifies aliases for each
 * aggregate, which can be used to retrieve the aggregate result.
 * @example
 * ```typescript
 * const aggregateSnapshot = await getAggregateFromServer(query, {
 *   countOfDocs: count(),
 *   totalHours: sum('hours'),
 *   averageScore: average('score')
 * });
 *
 * const countOfDocs: number = aggregateSnapshot.data().countOfDocs;
 * const totalHours: number = aggregateSnapshot.data().totalHours;
 * const averageScore: number | null = aggregateSnapshot.data().averageScore;
 * ```
 */
function getAggregateFromServer(query, aggregateSpec) {
    const db = cast(query.firestore, Firestore);
    const client = ensureFirestoreConfigured(db);
    // Translate the user-facing spec into internal aggregate descriptors,
    // keyed by the user's aliases.
    const internalAggregates = mapToArray(aggregateSpec, (aggregate, alias) => new AggregateImpl(alias, aggregate.aggregateType, aggregate._internalFieldPath));
    // Run the aggregation and convert the results
    return firestoreClientRunAggregateQuery(client, query._query, internalAggregates)
        .then(aggregateResult => convertToAggregateQuerySnapshot(db, query, aggregateResult));
}
/**
 * Converts the core aggregation result to an `AggregateQuerySnapshot`
 * that can be returned to the consumer.
 * @param query
 * @param aggregateResult Core aggregation result
 * @internal
 */
function convertToAggregateQuerySnapshot(firestore, query, aggregateResult) {
    const userDataWriter = new ExpUserDataWriter(firestore);
    return new AggregateQuerySnapshot(query, userDataWriter, aggregateResult);
}
/**
* @license
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Memory-backed local cache configuration; see `memoryLocalCache()`. */
class MemoryLocalCacheImpl {
    constructor(settings) {
        this.kind = 'memory';
        this._onlineComponentProvider = new OnlineComponentProvider();
        // An explicitly configured garbage collector supplies its own offline
        // component provider; otherwise use the default memory provider.
        const garbageCollector = settings === null || settings === void 0 ? void 0 : settings.garbageCollector;
        this._offlineComponentProvider = garbageCollector
            ? garbageCollector._offlineComponentProvider
            : new MemoryOfflineComponentProvider();
    }
    toJSON() {
        return { kind: this.kind };
    }
}
/** Persistent (IndexedDB-backed) local cache configuration. */
class PersistentLocalCacheImpl {
    constructor(settings) {
        this.kind = 'persistent';
        const suppliedManager = settings === null || settings === void 0 ? void 0 : settings.tabManager;
        let tabManager;
        if (suppliedManager) {
            // Honor a caller-supplied tab manager, initializing it with the
            // full settings object.
            suppliedManager._initialize(settings);
            tabManager = suppliedManager;
        }
        else {
            // Default to a single-tab manager when none is supplied.
            tabManager = persistentSingleTabManager(undefined);
            tabManager._initialize(settings);
        }
        this._onlineComponentProvider = tabManager._onlineComponentProvider;
        this._offlineComponentProvider = tabManager._offlineComponentProvider;
    }
    toJSON() {
        return { kind: this.kind };
    }
}
// NOTE(review): the class name preserves the original (misspelled "Gabage")
// identifier because it is referenced elsewhere in this bundle.
class MemoryEagerGabageCollectorImpl {
    constructor() {
        this.kind = 'memoryEager';
        this._offlineComponentProvider = new MemoryOfflineComponentProvider();
    }
    toJSON() {
        return { kind: this.kind };
    }
}
// NOTE(review): the class name preserves the original (misspelled "Gabage")
// identifier because it is referenced elsewhere in this bundle.
class MemoryLruGabageCollectorImpl {
    constructor(cacheSize) {
        this.kind = 'memoryLru';
        // `cacheSize` (bytes) is forwarded to the LRU offline provider; it may
        // be undefined, in which case the provider's default applies.
        this._offlineComponentProvider = new LruGcMemoryOfflineComponentProvider(cacheSize);
    }
    toJSON() {
        return { kind: this.kind };
    }
}
/**
 * Creates an instance of `MemoryEagerGarbageCollector`. This is also the
 * default garbage collector unless it is explicitly specified otherwise.
 */
function memoryEagerGarbageCollector() {
    const collector = new MemoryEagerGabageCollectorImpl();
    return collector;
}
/**
 * Creates an instance of `MemoryLruGarbageCollector`.
 *
 * A target size can be specified as part of the setting parameter. The
 * collector will start deleting documents once the cache size exceeds
 * the given size. The default cache size is 40MB (40 * 1024 * 1024 bytes).
 */
function memoryLruGarbageCollector(settings) {
    const cacheSizeBytes = settings === null || settings === void 0 ? void 0 : settings.cacheSizeBytes;
    return new MemoryLruGabageCollectorImpl(cacheSizeBytes);
}
/**
 * Creates an instance of `MemoryLocalCache`. The instance can be set to
 * `FirestoreSettings.cache` to tell the SDK which cache layer to use.
 */
function memoryLocalCache(settings) {
    const cache = new MemoryLocalCacheImpl(settings);
    return cache;
}
/**
 * Creates an instance of `PersistentLocalCache`. The instance can be set to
 * `FirestoreSettings.cache` to tell the SDK which cache layer to use.
 *
 * Persistent cache cannot be used in a Node.js environment.
 */
function persistentLocalCache(settings) {
    const cache = new PersistentLocalCacheImpl(settings);
    return cache;
}
/** Tab manager backing `persistentSingleTabManager()`. */
class SingleTabManagerImpl {
    constructor(forceOwnership) {
        this.forceOwnership = forceOwnership;
        this.kind = 'persistentSingleTab';
    }
    toJSON() {
        return { kind: this.kind };
    }
    /**
     * @internal
     */
    _initialize(settings) {
        this._onlineComponentProvider = new OnlineComponentProvider();
        const cacheSizeBytes = settings === null || settings === void 0 ? void 0 : settings.cacheSizeBytes;
        this._offlineComponentProvider = new IndexedDbOfflineComponentProvider(this._onlineComponentProvider, cacheSizeBytes, this.forceOwnership);
    }
}
/** Tab manager backing `persistentMultipleTabManager()`. */
class MultiTabManagerImpl {
    constructor() {
        this.kind = 'PersistentMultipleTab';
    }
    toJSON() {
        return { kind: this.kind };
    }
    /**
     * @internal
     */
    _initialize(settings) {
        this._onlineComponentProvider = new OnlineComponentProvider();
        const cacheSizeBytes = settings === null || settings === void 0 ? void 0 : settings.cacheSizeBytes;
        this._offlineComponentProvider = new MultiTabOfflineComponentProvider(this._onlineComponentProvider, cacheSizeBytes);
    }
}
/**
 * Creates an instance of `PersistentSingleTabManager`.
 *
 * @param settings Configures the created tab manager.
 */
function persistentSingleTabManager(settings) {
    const forceOwnership = settings === null || settings === void 0 ? void 0 : settings.forceOwnership;
    return new SingleTabManagerImpl(forceOwnership);
}
/**
 * Creates an instance of `PersistentMultipleTabManager`.
 */
function persistentMultipleTabManager() {
    const manager = new MultiTabManagerImpl();
    return manager;
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Default options applied to transactions when none are provided. */
const DEFAULT_TRANSACTION_OPTIONS = {
    maxAttempts: 5
};
/**
 * Validates user-provided transaction options, rejecting a `maxAttempts`
 * below 1 with an `invalid-argument` error.
 */
function validateTransactionOptions(options) {
    const { maxAttempts } = options;
    if (maxAttempts < 1) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, 'Max attempts must be at least 1');
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * A write batch, used to perform multiple writes as a single atomic unit.
 *
 * A `WriteBatch` object can be acquired by calling {@link writeBatch}. It
 * provides methods for adding writes to the write batch. None of the writes
 * will be committed (or visible locally) until {@link WriteBatch.commit} is
 * called.
 */
class WriteBatch {
    /** @hideconstructor */
    constructor(_firestore, _commitHandler) {
        this._firestore = _firestore;
        this._commitHandler = _commitHandler;
        // Mutations accumulated by set/update/delete until commit().
        this._mutations = [];
        this._committed = false;
        this._dataReader = newUserDataReader(_firestore);
    }
    /**
     * Writes to the document referred to by `documentRef`, applying the
     * reference's converter (if any) and the given set options.
     *
     * @returns This `WriteBatch` instance. Used for chaining method calls.
     */
    set(documentRef, data, options) {
        this._verifyNotCommitted();
        const ref = validateReference(documentRef, this._firestore);
        const convertedValue = applyFirestoreDataConverter(ref.converter, data, options);
        const parsed = parseSetData(this._dataReader, 'WriteBatch.set', ref._key, convertedValue, ref.converter !== null, options);
        // Sets carry no precondition on the current document state.
        const mutation = parsed.toMutation(ref._key, Precondition.none());
        this._mutations.push(mutation);
        return this;
    }
    /**
     * Updates fields in the document referred to by `documentRef`. Accepts
     * either a single update object or a field-path/value vararg list.
     *
     * @returns This `WriteBatch` instance. Used for chaining method calls.
     */
    update(documentRef, fieldOrUpdateData, value, ...moreFieldsAndValues) {
        this._verifyNotCommitted();
        const ref = validateReference(documentRef, this._firestore);
        // For Compat types, we have to "extract" the underlying types before
        // performing validation.
        const target = getModularInstance(fieldOrUpdateData);
        const isVarargForm = typeof target === 'string' || target instanceof FieldPath;
        const parsed = isVarargForm
            ? parseUpdateVarargs(this._dataReader, 'WriteBatch.update', ref._key, target, value, moreFieldsAndValues)
            : parseUpdateData(this._dataReader, 'WriteBatch.update', ref._key, target);
        // Updates require the target document to already exist.
        this._mutations.push(parsed.toMutation(ref._key, Precondition.exists(true)));
        return this;
    }
    /**
     * Deletes the document referred to by the provided {@link DocumentReference}.
     *
     * @param documentRef - A reference to the document to be deleted.
     * @returns This `WriteBatch` instance. Used for chaining method calls.
     */
    delete(documentRef) {
        this._verifyNotCommitted();
        const ref = validateReference(documentRef, this._firestore);
        this._mutations = this._mutations.concat(new DeleteMutation(ref._key, Precondition.none()));
        return this;
    }
    /**
     * Commits all of the writes in this write batch as a single atomic unit.
     *
     * The result of these writes will only be reflected in document reads that
     * occur after the returned promise resolves. If the client is offline, the
     * write fails. If you would like to see local modifications or buffer writes
     * until the client is online, use the full Firestore SDK.
     *
     * @returns A `Promise` resolved once all of the writes in the batch have been
     * successfully written to the backend as an atomic unit (note that it won't
     * resolve while you're offline).
     */
    commit() {
        this._verifyNotCommitted();
        this._committed = true;
        // An empty batch is a no-op; resolve without invoking the handler.
        if (this._mutations.length === 0) {
            return Promise.resolve();
        }
        return this._commitHandler(this._mutations);
    }
    /** Throws if this batch has already been committed. */
    _verifyNotCommitted() {
        if (this._committed) {
            throw new FirestoreError(Code.FAILED_PRECONDITION, 'A write batch can no longer be used after commit() ' +
                'has been called.');
        }
    }
}
/**
 * Unwraps a possibly-Compat document reference and verifies that it belongs
 * to `firestore`, throwing an `invalid-argument` error otherwise.
 */
function validateReference(documentRef, firestore) {
    const ref = getModularInstance(documentRef);
    if (ref.firestore !== firestore) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, 'Provided document reference is from a different Firestore instance.');
    }
    return ref;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// TODO(mrschmidt) Consider using `BaseTransaction` as the base class in the
// legacy SDK.
/**
 * A reference to a transaction.
 *
 * The `Transaction` object passed to a transaction's `updateFunction` provides
 * the methods to read and write data within the transaction context. See
 * {@link runTransaction}.
 */
class Transaction$1 {
    /** @hideconstructor */
    constructor(_firestore, _transaction) {
        this._firestore = _firestore;
        this._transaction = _transaction;
        // Parses user-provided data for set/update calls.
        this._dataReader = newUserDataReader(_firestore);
    }
    /**
     * Reads the document referenced by the provided {@link DocumentReference}.
     *
     * @param documentRef - A reference to the document to be read.
     * @returns A `DocumentSnapshot` with the read data.
     */
    get(documentRef) {
        const ref = validateReference(documentRef, this._firestore);
        const userDataWriter = new LiteUserDataWriter(this._firestore);
        return this._transaction.lookup([ref._key]).then(docs => {
            // A single-key lookup must yield exactly one result.
            if (!docs || docs.length !== 1) {
                return fail();
            }
            const doc = docs[0];
            if (doc.isFoundDocument()) {
                return new DocumentSnapshot$1(this._firestore, userDataWriter, doc.key, doc, ref.converter);
            }
            else if (doc.isNoDocument()) {
                // A missing document still produces a snapshot (with null data).
                return new DocumentSnapshot$1(this._firestore, userDataWriter, ref._key, null, ref.converter);
            }
            else {
                // Neither found nor missing is an internal invariant violation.
                throw fail();
            }
        });
    }
    /**
     * Writes to the document referred to by `documentRef`, applying the
     * reference's converter (if any) and the given set options.
     *
     * @returns This `Transaction` instance. Used for chaining method calls.
     */
    set(documentRef, value, options) {
        const ref = validateReference(documentRef, this._firestore);
        const convertedValue = applyFirestoreDataConverter(ref.converter, value, options);
        const parsed = parseSetData(this._dataReader, 'Transaction.set', ref._key, convertedValue, ref.converter !== null, options);
        this._transaction.set(ref._key, parsed);
        return this;
    }
    /**
     * Updates fields in the document referred to by `documentRef`. Accepts
     * either a single update object or a field-path/value vararg list.
     *
     * @returns This `Transaction` instance. Used for chaining method calls.
     */
    update(documentRef, fieldOrUpdateData, value, ...moreFieldsAndValues) {
        const ref = validateReference(documentRef, this._firestore);
        // For Compat types, we have to "extract" the underlying types before
        // performing validation.
        fieldOrUpdateData = getModularInstance(fieldOrUpdateData);
        let parsed;
        if (typeof fieldOrUpdateData === 'string' ||
            fieldOrUpdateData instanceof FieldPath) {
            parsed = parseUpdateVarargs(this._dataReader, 'Transaction.update', ref._key, fieldOrUpdateData, value, moreFieldsAndValues);
        }
        else {
            parsed = parseUpdateData(this._dataReader, 'Transaction.update', ref._key, fieldOrUpdateData);
        }
        this._transaction.update(ref._key, parsed);
        return this;
    }
    /**
     * Deletes the document referred to by the provided {@link DocumentReference}.
     *
     * @param documentRef - A reference to the document to be deleted.
     * @returns This `Transaction` instance. Used for chaining method calls.
     */
    delete(documentRef) {
        const ref = validateReference(documentRef, this._firestore);
        this._transaction.delete(ref._key);
        return this;
    }
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A reference to a transaction.
*
* The `Transaction` object passed to a transaction's `updateFunction` provides
* the methods to read and write data within the transaction context. See
* {@link runTransaction}.
*/
class Transaction extends Transaction$1 {
    // Same logic as the Lite SDK Transaction, subclassed so that reads return
    // the full SDK's DocumentSnapshot type (with SnapshotMetadata attached).
    /** @hideconstructor */
    constructor(_firestore, _transaction) {
        super(_firestore, _transaction);
        this._firestore = _firestore;
    }
    /**
     * Reads the document referenced by the provided {@link DocumentReference}.
     *
     * @param documentRef - A reference to the document to be read.
     * @returns A `DocumentSnapshot` with the read data.
     */
    get(documentRef) {
        const reference = validateReference(documentRef, this._firestore);
        const writer = new ExpUserDataWriter(this._firestore);
        return super.get(documentRef).then(liteSnapshot => {
            // Transactional reads always come from the server, never the cache.
            const metadata = new SnapshotMetadata(
            /* hasPendingWrites= */ false, 
            /* fromCache= */ false);
            return new DocumentSnapshot(this._firestore, writer, reference._key, liteSnapshot._document, metadata, reference.converter);
        });
    }
}
/**
* Executes the given `updateFunction` and then attempts to commit the changes
* applied within the transaction. If any document read within the transaction
* has changed, Cloud Firestore retries the `updateFunction`. If it fails to
* commit after 5 attempts, the transaction fails.
*
* The maximum number of writes allowed in a single transaction is 500.
*
* @param firestore - A reference to the Firestore database to run this
* transaction against.
* @param updateFunction - The function to execute within the transaction
* context.
* @param options - An options object to configure maximum number of attempts to
* commit.
* @returns If the transaction completed successfully or was explicitly aborted
* (the `updateFunction` returned a failed promise), the promise returned by the
* `updateFunction `is returned here. Otherwise, if the transaction failed, a
* rejected promise with the corresponding failure error is returned.
*/
/**
 * Runs `updateFunction` inside a Firestore transaction, retrying on contention
 * up to the configured number of attempts.
 *
 * @param firestore - The Firestore database to run the transaction against.
 * @param updateFunction - The function executed within the transaction context.
 * @param options - Optional overrides (e.g. `maxAttempts`) merged over the
 * default transaction options.
 * @returns The promise produced by `updateFunction` on success, or a rejected
 * promise if the transaction failed.
 */
function runTransaction(firestore, updateFunction, options) {
    const db = cast(firestore, Firestore);
    const resolvedOptions = Object.assign(Object.assign({}, DEFAULT_TRANSACTION_OPTIONS), options);
    validateTransactionOptions(resolvedOptions);
    const client = ensureFirestoreConfigured(db);
    return firestoreClientTransaction(client, internalTransaction => updateFunction(new Transaction(db, internalTransaction)), resolvedOptions);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Returns a sentinel for use with {@link @firebase/firestore/lite#(updateDoc:1)} or
* {@link @firebase/firestore/lite#(setDoc:1)} with `{merge: true}` to mark a field for deletion.
*/
/** Returns the field-deletion sentinel; parsed later by setDoc()/updateDoc(). */
function deleteField() {
    const methodName = 'deleteField';
    return new DeleteFieldValueImpl(methodName);
}
/**
* Returns a sentinel used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link @firebase/firestore/lite#(updateDoc:1)} to
* include a server-generated timestamp in the written data.
*/
/** Returns the server-timestamp sentinel; resolved by the backend at write time. */
function serverTimestamp() {
    const methodName = 'serverTimestamp';
    return new ServerTimestampFieldValueImpl(methodName);
}
/**
* Returns a special value that can be used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link
* @firebase/firestore/lite#(updateDoc:1)} that tells the server to union the given elements with any array
* value that already exists on the server. Each specified element that doesn't
* already exist in the array will be added to the end. If the field being
* modified is not already an array it will be overwritten with an array
* containing exactly the specified elements.
*
* @param elements - The elements to union into the array.
* @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
* `updateDoc()`.
*/
/**
 * Returns the array-union sentinel for the given elements.
 *
 * @param elements - The elements to union into the array.
 * @returns The `FieldValue` sentinel for use in `setDoc()`/`updateDoc()`.
 */
function arrayUnion(...elements) {
    // NOTE: We don't actually parse the data until it's used in set() or
    // update() since we'd need the Firestore instance to do this.
    const methodName = 'arrayUnion';
    return new ArrayUnionFieldValueImpl(methodName, elements);
}
/**
* Returns a special value that can be used with {@link (setDoc:1)} or {@link
 * (updateDoc:1)} that tells the server to remove the given elements from any
* array value that already exists on the server. All instances of each element
* specified will be removed from the array. If the field being modified is not
* already an array it will be overwritten with an empty array.
*
* @param elements - The elements to remove from the array.
* @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
* `updateDoc()`
*/
/**
 * Returns the array-remove sentinel for the given elements.
 *
 * @param elements - The elements to remove from the array.
 * @returns The `FieldValue` sentinel for use in `setDoc()`/`updateDoc()`.
 */
function arrayRemove(...elements) {
    // NOTE: We don't actually parse the data until it's used in set() or
    // update() since we'd need the Firestore instance to do this.
    const methodName = 'arrayRemove';
    return new ArrayRemoveFieldValueImpl(methodName, elements);
}
/**
* Returns a special value that can be used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link
* @firebase/firestore/lite#(updateDoc:1)} that tells the server to increment the field's current value by
* the given value.
*
* If either the operand or the current field value uses floating point
* precision, all arithmetic follows IEEE 754 semantics. If both values are
* integers, values outside of JavaScript's safe number range
* (`Number.MIN_SAFE_INTEGER` to `Number.MAX_SAFE_INTEGER`) are also subject to
* precision loss. Furthermore, once processed by the Firestore backend, all
* integer operations are capped between -2^63 and 2^63-1.
*
* If the current field value is not of type `number`, or if the field does not
* yet exist, the transformation sets the field to the given value.
*
* @param n - The value to increment by.
* @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
* `updateDoc()`
*/
/**
 * Returns the numeric-increment sentinel.
 *
 * @param n - The value to increment by.
 * @returns The `FieldValue` sentinel for use in `setDoc()`/`updateDoc()`.
 */
function increment(n) {
    const methodName = 'increment';
    return new NumericIncrementFieldValueImpl(methodName, n);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Creates a write batch, used for performing multiple writes as a single
* atomic operation. The maximum number of writes allowed in a single {@link WriteBatch}
* is 500.
*
* Unlike transactions, write batches are persisted offline and therefore are
* preferable when you don't need to condition your writes on read data.
*
* @returns A {@link WriteBatch} that can be used to atomically execute multiple
* writes.
*/
/**
 * Creates a {@link WriteBatch} for performing up to 500 writes atomically.
 *
 * @param firestore - The Firestore instance the batch writes against.
 * @returns A new, empty {@link WriteBatch}.
 */
function writeBatch(firestore) {
    const db = cast(firestore, Firestore);
    ensureFirestoreConfigured(db);
    // Committing the batch funnels its mutations through executeWrite().
    return new WriteBatch(db, mutations => executeWrite(db, mutations));
}
/**
* @license
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Configures persistent-cache field indexes from a JSON string or a
 * configuration object. No-op (with a warning) when persistence is disabled.
 *
 * @param firestore - The Firestore instance to configure.
 * @param jsonOrConfiguration - Index configuration JSON or object.
 * @returns A promise that resolves once the configuration is applied.
 */
function setIndexConfiguration(firestore, jsonOrConfiguration) {
    const db = cast(firestore, Firestore);
    const client = ensureFirestoreConfigured(db);
    const provider = client._uninitializedComponentsProvider;
    if (!provider || provider._offlineKind === 'memory') {
        // PORTING NOTE: We don't return an error if the user has not enabled
        // persistence since `enableIndexeddbPersistence()` can fail on the Web.
        logWarn('Cannot enable indexes when persistence is disabled');
        return Promise.resolve();
    }
    return firestoreClientSetIndexConfiguration(client, parseIndexes(jsonOrConfiguration));
}
/**
 * Converts a user-supplied index configuration (JSON string or object) into
 * `FieldIndex` instances.
 *
 * @param jsonOrConfiguration - Raw index configuration.
 * @returns The parsed field indexes (empty when no `indexes` array is given).
 * @throws FirestoreError for malformed JSON or missing string properties.
 */
function parseIndexes(jsonOrConfiguration) {
    const configuration = typeof jsonOrConfiguration === 'string'
        ? tryParseJson(jsonOrConfiguration)
        : jsonOrConfiguration;
    const parsedIndexes = [];
    if (!Array.isArray(configuration.indexes)) {
        return parsedIndexes;
    }
    for (const index of configuration.indexes) {
        const collectionGroup = tryGetString(index, 'collectionGroup');
        const segments = [];
        const fields = Array.isArray(index.fields) ? index.fields : [];
        for (const field of fields) {
            const fieldPathString = tryGetString(field, 'fieldPath');
            const fieldPath = fieldPathFromDotSeparatedString('setIndexConfiguration', fieldPathString);
            // arrayConfig takes precedence; otherwise the sort order decides.
            if (field.arrayConfig === 'CONTAINS') {
                segments.push(new IndexSegment(fieldPath, 2 /* IndexKind.CONTAINS */));
            }
            else if (field.order === 'ASCENDING') {
                segments.push(new IndexSegment(fieldPath, 0 /* IndexKind.ASCENDING */));
            }
            else if (field.order === 'DESCENDING') {
                segments.push(new IndexSegment(fieldPath, 1 /* IndexKind.DESCENDING */));
            }
        }
        parsedIndexes.push(new FieldIndex(FieldIndex.UNKNOWN_ID, collectionGroup, segments, IndexState.empty()));
    }
    return parsedIndexes;
}
/**
 * Parses a JSON string, rethrowing parse failures as a FirestoreError with
 * Code.INVALID_ARGUMENT.
 *
 * @param json - The JSON text to parse.
 * @returns The parsed value.
 */
function tryParseJson(json) {
    try {
        return JSON.parse(json);
    }
    catch (e) {
        const detail = e === null || e === void 0 ? void 0 : e.message;
        throw new FirestoreError(Code.INVALID_ARGUMENT, 'Failed to parse JSON: ' + detail);
    }
}
/**
 * Reads a required string property from `data`.
 *
 * @param data - The object to read from.
 * @param property - The property name expected to hold a string.
 * @returns The string value.
 * @throws FirestoreError (INVALID_ARGUMENT) when the property is not a string.
 */
function tryGetString(data, property) {
    const value = data[property];
    if (typeof value !== 'string') {
        throw new FirestoreError(Code.INVALID_ARGUMENT, 'Missing string value for: ' + property);
    }
    return value;
}
/**
* @license
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A `PersistentCacheIndexManager` for configuring persistent cache indexes used
* for local query execution.
*
* To use, call `getPersistentCacheIndexManager()` to get an instance.
*/
class PersistentCacheIndexManager {
    /** @hideconstructor */
    constructor(_client) {
        // Firestore client this manager issues index operations against.
        this._client = _client;
        /** A type string to uniquely identify instances of this class. */
        this.type = 'PersistentCacheIndexManager';
    }
}
/**
* Returns the PersistentCache Index Manager used by the given `Firestore`
* object.
*
* @return The `PersistentCacheIndexManager` instance, or `null` if local
* persistent storage is not in use.
*/
/**
 * Returns the `PersistentCacheIndexManager` for the given `Firestore`
 * instance, or `null` when local persistent storage is not in use. Instances
 * are cached per Firestore object in a WeakMap.
 */
function getPersistentCacheIndexManager(firestore) {
    const db = cast(firestore, Firestore);
    const cached = persistentCacheIndexManagerByFirestore.get(db);
    if (cached) {
        return cached;
    }
    const client = ensureFirestoreConfigured(db);
    const provider = client._uninitializedComponentsProvider;
    if ((provider === null || provider === void 0 ? void 0 : provider._offlineKind) !== 'persistent') {
        return null;
    }
    const manager = new PersistentCacheIndexManager(client);
    persistentCacheIndexManagerByFirestore.set(db, manager);
    return manager;
}
/**
* Enables the SDK to create persistent cache indexes automatically for local
* query execution when the SDK believes cache indexes can help improve
* performance.
*
* This feature is disabled by default.
*/
/** Turns automatic persistent-cache index creation on (disabled by default). */
function enablePersistentCacheIndexAutoCreation(indexManager) {
    const enable = true;
    setPersistentCacheIndexAutoCreationEnabled(indexManager, enable);
}
/**
* Stops creating persistent cache indexes automatically for local query
* execution. The indexes which have been created by calling
* `enablePersistentCacheIndexAutoCreation()` still take effect.
*/
/** Turns automatic persistent-cache index creation off; existing indexes remain. */
function disablePersistentCacheIndexAutoCreation(indexManager) {
    const enable = false;
    setPersistentCacheIndexAutoCreationEnabled(indexManager, enable);
}
/**
* Removes all persistent cache indexes.
*
 * Please note this function will also delete indexes generated by
* `setIndexConfiguration()`, which is deprecated.
*/
/**
 * Removes all persistent cache indexes (including those created via the
 * deprecated `setIndexConfiguration()`). Fire-and-forget: the outcome is only
 * logged, never surfaced to the caller.
 */
function deleteAllPersistentCacheIndexes(indexManager) {
    indexManager._client.verifyNotTerminated();
    firestoreClientDeleteAllFieldIndexes(indexManager._client)
        .then(_ => logDebug('deleting all persistent cache indexes succeeded'))
        .catch(error => logWarn('deleting all persistent cache indexes failed', error));
}
/**
 * Toggles persistent-cache index auto-creation. Fire-and-forget: the result
 * of the underlying client call is only logged.
 */
function setPersistentCacheIndexAutoCreationEnabled(indexManager, isEnabled) {
    indexManager._client.verifyNotTerminated();
    firestoreClientSetPersistentCacheIndexAutoCreationEnabled(indexManager._client, isEnabled)
        .then(_ => logDebug(`setting persistent cache index auto creation isEnabled=${isEnabled} succeeded`))
        .catch(error => logWarn(`setting persistent cache index auto creation isEnabled=${isEnabled} failed`, error));
}
/**
* Maps `Firestore` instances to their corresponding
* `PersistentCacheIndexManager` instances.
*
* Use a `WeakMap` so that the mapping will be automatically dropped when the
* `Firestore` instance is garbage collected. This emulates a private member
* as described in https://goo.gle/454yvug.
*/
const persistentCacheIndexManagerByFirestore = new WeakMap();
/**
* @license
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Testing hooks for use by Firestore's integration test suite to reach into the
* SDK internals to validate logic and behavior that is not visible from the
* public API surface.
*
* @internal
*/
class TestingHooks {
    // Static-only facade; instantiating it is always a programming error.
    constructor() {
        throw new Error('instances of this class should not be created');
    }
    /**
     * Registers a callback to be notified when an existence filter mismatch
     * occurs in the Watch listen stream.
     *
     * The relative order in which callbacks are notified is unspecified; do not
     * rely on any particular ordering. If a given callback is registered multiple
     * times then it will be notified multiple times, once per registration.
     *
     * @param callback the callback to invoke upon existence filter mismatch.
     *
     * @return a function that, when called, unregisters the given callback; only
     * the first invocation of the returned function does anything; all subsequent
     * invocations do nothing.
     */
    static onExistenceFilterMismatch(callback) {
        const spi = TestingHooksSpiImpl.instance;
        return spi.onExistenceFilterMismatch(callback);
    }
}
/**
* The implementation of `TestingHooksSpi`.
*/
class TestingHooksSpiImpl {
    constructor() {
        // Registered callbacks, keyed by a unique Symbol per registration so
        // the same callback may be registered (and notified) multiple times.
        this.existenceFilterMismatchCallbacksById = new Map();
    }
    /** Lazily creates and registers the process-wide singleton. */
    static get instance() {
        if (!testingHooksSpiImplInstance) {
            testingHooksSpiImplInstance = new TestingHooksSpiImpl();
            setTestingHooksSpi(testingHooksSpiImplInstance);
        }
        return testingHooksSpiImplInstance;
    }
    /** Invokes every registered callback with the given mismatch info. */
    notifyOnExistenceFilterMismatch(info) {
        for (const callback of this.existenceFilterMismatchCallbacksById.values()) {
            callback(info);
        }
    }
    /**
     * Registers `callback` and returns an unregister function; calling the
     * returned function more than once has no further effect.
     */
    onExistenceFilterMismatch(callback) {
        const registrationId = Symbol();
        this.existenceFilterMismatchCallbacksById.set(registrationId, callback);
        return () => this.existenceFilterMismatchCallbacksById.delete(registrationId);
    }
}
let testingHooksSpiImplInstance = null;
/**
* @license
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
registerFirestore('node');
export { AbstractUserDataWriter, AggregateField, AggregateQuerySnapshot, Bytes, CACHE_SIZE_UNLIMITED, CollectionReference, DocumentReference, DocumentSnapshot, FieldPath, FieldValue, Firestore, FirestoreError, GeoPoint, LoadBundleTask, PersistentCacheIndexManager, Query, QueryCompositeFilterConstraint, QueryConstraint, QueryDocumentSnapshot, QueryEndAtConstraint, QueryFieldFilterConstraint, QueryLimitConstraint, QueryOrderByConstraint, QuerySnapshot, QueryStartAtConstraint, SnapshotMetadata, Timestamp, Transaction, WriteBatch, AutoId as _AutoId, ByteString as _ByteString, DatabaseId as _DatabaseId, DocumentKey as _DocumentKey, EmptyAppCheckTokenProvider as _EmptyAppCheckTokenProvider, EmptyAuthCredentialsProvider as _EmptyAuthCredentialsProvider, FieldPath$1 as _FieldPath, TestingHooks as _TestingHooks, cast as _cast, debugAssert as _debugAssert, isBase64Available as _isBase64Available, logWarn as _logWarn, validateIsNotUsedTogether as _validateIsNotUsedTogether, addDoc, aggregateFieldEqual, aggregateQuerySnapshotEqual, and, arrayRemove, arrayUnion, average, clearIndexedDbPersistence, collection, collectionGroup, connectFirestoreEmulator, count, deleteAllPersistentCacheIndexes, deleteDoc, deleteField, disableNetwork, disablePersistentCacheIndexAutoCreation, doc, documentId, enableIndexedDbPersistence, enableMultiTabIndexedDbPersistence, enableNetwork, enablePersistentCacheIndexAutoCreation, endAt, endBefore, ensureFirestoreConfigured, executeWrite, getAggregateFromServer, getCountFromServer, getDoc, getDocFromCache, getDocFromServer, getDocs, getDocsFromCache, getDocsFromServer, getFirestore, getPersistentCacheIndexManager, increment, initializeFirestore, limit, limitToLast, loadBundle, memoryEagerGarbageCollector, memoryLocalCache, memoryLruGarbageCollector, namedQuery, onSnapshot, onSnapshotsInSync, or, orderBy, persistentLocalCache, persistentMultipleTabManager, persistentSingleTabManager, query, queryEqual, refEqual, runTransaction, serverTimestamp, setDoc, 
setIndexConfiguration, setLogLevel, snapshotEqual, startAfter, startAt, sum, terminate, updateDoc, waitForPendingWrites, where, writeBatch };
//# sourceMappingURL=index.node.mjs.map