Diffstat (limited to 'Firestore/core/src/firebase/firestore/remote/serializer.cc')
-rw-r--r--  Firestore/core/src/firebase/firestore/remote/serializer.cc  769
1 file changed, 331 insertions, 438 deletions
diff --git a/Firestore/core/src/firebase/firestore/remote/serializer.cc b/Firestore/core/src/firebase/firestore/remote/serializer.cc
index b5a0720..6240c21 100644
--- a/Firestore/core/src/firebase/firestore/remote/serializer.cc
+++ b/Firestore/core/src/firebase/firestore/remote/serializer.cc
@@ -25,364 +25,84 @@
#include <utility>
#include "Firestore/Protos/nanopb/google/firestore/v1beta1/document.pb.h"
+#include "Firestore/Protos/nanopb/google/firestore/v1beta1/firestore.pb.h"
+#include "Firestore/core/include/firebase/firestore/firestore_errors.h"
+#include "Firestore/core/include/firebase/firestore/timestamp.h"
+#include "Firestore/core/src/firebase/firestore/model/document.h"
+#include "Firestore/core/src/firebase/firestore/model/no_document.h"
+#include "Firestore/core/src/firebase/firestore/model/resource_path.h"
+#include "Firestore/core/src/firebase/firestore/model/snapshot_version.h"
+#include "Firestore/core/src/firebase/firestore/nanopb/reader.h"
+#include "Firestore/core/src/firebase/firestore/nanopb/tag.h"
+#include "Firestore/core/src/firebase/firestore/nanopb/writer.h"
+#include "Firestore/core/src/firebase/firestore/timestamp_internal.h"
#include "Firestore/core/src/firebase/firestore/util/firebase_assert.h"
+#include "absl/memory/memory.h"
namespace firebase {
namespace firestore {
namespace remote {
+using firebase::Timestamp;
+using firebase::TimestampInternal;
+using firebase::firestore::model::DatabaseId;
+using firebase::firestore::model::Document;
+using firebase::firestore::model::DocumentKey;
using firebase::firestore::model::FieldValue;
+using firebase::firestore::model::MaybeDocument;
+using firebase::firestore::model::NoDocument;
using firebase::firestore::model::ObjectValue;
+using firebase::firestore::model::ResourcePath;
+using firebase::firestore::model::SnapshotVersion;
+using firebase::firestore::nanopb::Reader;
+using firebase::firestore::nanopb::Tag;
+using firebase::firestore::nanopb::Writer;
using firebase::firestore::util::Status;
+using firebase::firestore::util::StatusOr;
namespace {
-class Writer;
-
-class Reader;
-
void EncodeObject(Writer* writer, const ObjectValue& object_value);
-ObjectValue DecodeObject(Reader* reader);
-
-/**
- * Represents a nanopb tag.
- *
- * field_number is one of the field tags that nanopb generates based off of
- * the proto messages. They're typically named in the format:
- * <parentNameSpace>_<childNameSpace>_<message>_<field>_tag, e.g.
- * google_firestore_v1beta1_Document_name_tag.
- */
-struct Tag {
- pb_wire_type_t wire_type;
- uint32_t field_number;
-};
+ObjectValue::Map DecodeObject(Reader* reader);
-/**
- * Docs TODO(rsgowman). But currently, this just wraps the underlying nanopb
- * pb_ostream_t. Also doc how to check status.
- */
-class Writer {
- public:
- /**
- * Creates an output stream that writes to the specified vector. Note that
- * this vector pointer must remain valid for the lifetime of this Writer.
- *
- * (This is roughly equivalent to the nanopb function
- * pb_ostream_from_buffer())
- *
- * @param out_bytes where the output should be serialized to.
- */
- static Writer Wrap(std::vector<uint8_t>* out_bytes);
-
- /**
- * Creates a non-writing output stream used to calculate the size of
- * the serialized output.
- */
- static Writer Sizing() {
- return Writer(PB_OSTREAM_SIZING);
- }
-
- /**
- * Writes a message type to the output stream.
- *
- * This essentially wraps calls to nanopb's pb_encode_tag() method.
- */
- void WriteTag(Tag tag);
-
- void WriteSize(size_t size);
- void WriteNull();
- void WriteBool(bool bool_value);
- void WriteInteger(int64_t integer_value);
-
- void WriteString(const std::string& string_value);
-
- /**
- * Writes a message and its length.
- *
- * When writing a top level message, protobuf doesn't include the length
- * (since you can get that already from the length of the binary output.) But
- * when writing a sub/nested message, you must include the length in the
- * serialization.
- *
- * Call this method when writing a nested message. Provide a function to
- * write the message itself. This method will calculate the size of the
- * written message (using the provided function with a non-writing sizing
- * stream), write out the size (and perform sanity checks), and then serialize
- * the message by calling the provided function a second time.
- */
- void WriteNestedMessage(const std::function<void(Writer*)>& write_message_fn);
-
- size_t bytes_written() const {
- return stream_.bytes_written;
- }
-
- Status status() const {
- return status_;
- }
-
- private:
- Status status_ = Status::OK();
-
- /**
- * Creates a new Writer, based on the given nanopb pb_ostream_t. Note that
- * a shallow copy will be taken. (Non-null pointers within this struct must
- * remain valid for the lifetime of this Writer.)
- */
- explicit Writer(const pb_ostream_t& stream) : stream_(stream) {
- }
-
- /**
- * Writes a "varint" to the output stream.
- *
- * This essentially wraps calls to nanopb's pb_encode_varint() method.
- *
- * Note that (despite the value parameter type) this works for bool, enum,
- * int32, int64, uint32 and uint64 proto field types.
- *
- * Note: This is not expected to be called directly, but rather only
- * via the other Write* methods (i.e. WriteBool, WriteLong, etc)
- *
- * @param value The value to write, represented as a uint64_t.
- */
- void WriteVarint(uint64_t value);
-
- pb_ostream_t stream_;
-};
-
-/**
- * Docs TODO(rsgowman). But currently, this just wraps the underlying nanopb
- * pb_istream_t.
- */
-class Reader {
- public:
- /**
- * Creates an input stream that reads from the specified bytes. Note that
- * this reference must remain valid for the lifetime of this Reader.
- *
- * (This is roughly equivalent to the nanopb function
- * pb_istream_from_buffer())
- *
- * @param bytes where the input should be deserialized from.
- */
- static Reader Wrap(const uint8_t* bytes, size_t length);
-
- /**
- * Reads a message type from the input stream.
- *
- * This essentially wraps calls to nanopb's pb_decode_tag() method.
- */
- Tag ReadTag();
-
- void ReadNull();
- bool ReadBool();
- int64_t ReadInteger();
-
- std::string ReadString();
-
- /**
- * Reads a message and its length.
- *
- * Analog to Writer::WriteNestedMessage(). See that methods docs for further
- * details.
- *
- * Call this method when reading a nested message. Provide a function to read
- * the message itself.
- */
- template <typename T>
- T ReadNestedMessage(const std::function<T(Reader*)>& read_message_fn);
-
- size_t bytes_left() const {
- return stream_.bytes_left;
- }
-
- private:
- /**
- * Creates a new Reader, based on the given nanopb pb_istream_t. Note that
- * a shallow copy will be taken. (Non-null pointers within this struct must
- * remain valid for the lifetime of this Reader.)
- */
- explicit Reader(pb_istream_t stream) : stream_(stream) {
- }
-
- /**
- * Reads a "varint" from the input stream.
- *
- * This essentially wraps calls to nanopb's pb_decode_varint() method.
- *
- * Note that (despite the return type) this works for bool, enum, int32,
- * int64, uint32 and uint64 proto field types.
- *
- * Note: This is not expected to be called direclty, but rather only via the
- * other Decode* methods (i.e. DecodeBool, DecodeLong, etc)
- *
- * @return The decoded varint as a uint64_t.
- */
- uint64_t ReadVarint();
-
- pb_istream_t stream_;
-};
-
-Writer Writer::Wrap(std::vector<uint8_t>* out_bytes) {
- // TODO(rsgowman): find a better home for this constant.
- // A document is defined to have a max size of 1MiB - 4 bytes.
- static const size_t kMaxDocumentSize = 1 * 1024 * 1024 - 4;
-
- // Construct a nanopb output stream.
- //
- // Set the max_size to be the max document size (as an upper bound; one would
- // expect individual FieldValue's to be smaller than this).
- //
- // bytes_written is (always) initialized to 0. (NB: nanopb does not know or
- // care about the underlying output vector, so where we are in the vector
- // itself is irrelevant. i.e. don't use out_bytes->size())
- pb_ostream_t raw_stream = {
- /*callback=*/[](pb_ostream_t* stream, const pb_byte_t* buf,
- size_t count) -> bool {
- auto* out_bytes = static_cast<std::vector<uint8_t>*>(stream->state);
- out_bytes->insert(out_bytes->end(), buf, buf + count);
- return true;
- },
- /*state=*/out_bytes,
- /*max_size=*/kMaxDocumentSize,
- /*bytes_written=*/0,
- /*errmsg=*/nullptr};
- return Writer(raw_stream);
-}
-
-Reader Reader::Wrap(const uint8_t* bytes, size_t length) {
- return Reader{pb_istream_from_buffer(bytes, length)};
-}
-
-// TODO(rsgowman): I've left the methods as near as possible to where they were
-// before, which implies that the Writer methods are interspersed with the
-// Reader methods. This should make it a bit easier to review. Refactor these to
-// group the related methods together (probably within their own file rather
-// than here).
-
-void Writer::WriteTag(Tag tag) {
- if (!status_.ok()) return;
-
- if (!pb_encode_tag(&stream_, tag.wire_type, tag.field_number)) {
- FIREBASE_ASSERT_MESSAGE(false, PB_GET_ERROR(&stream_));
- }
+void EncodeTimestamp(Writer* writer, const Timestamp& timestamp_value) {
+ google_protobuf_Timestamp timestamp_proto =
+ google_protobuf_Timestamp_init_zero;
+ timestamp_proto.seconds = timestamp_value.seconds();
+ timestamp_proto.nanos = timestamp_value.nanoseconds();
+ writer->WriteNanopbMessage(google_protobuf_Timestamp_fields,
+ &timestamp_proto);
}
-Tag Reader::ReadTag() {
- Tag tag;
- bool eof;
- bool ok = pb_decode_tag(&stream_, &tag.wire_type, &tag.field_number, &eof);
- if (!ok || eof) {
- // TODO(rsgowman): figure out error handling
- abort();
+Timestamp DecodeTimestamp(Reader* reader) {
+ if (!reader->status().ok()) return {};
+
+ google_protobuf_Timestamp timestamp_proto =
+ google_protobuf_Timestamp_init_zero;
+ reader->ReadNanopbMessage(google_protobuf_Timestamp_fields, &timestamp_proto);
+
+  // The Timestamp ctor will assert if we provide values outside the valid
+  // range. However, since we're decoding, a single corrupt byte could cause
+  // this to occur, so we verify the ranges before passing them in, since
+  // we'd rather not abort in these situations.
+ if (timestamp_proto.seconds < TimestampInternal::Min().seconds()) {
+ reader->set_status(Status(
+ FirestoreErrorCode::DataLoss,
+ "Invalid message: timestamp beyond the earliest supported date"));
+ return {};
+ } else if (TimestampInternal::Max().seconds() < timestamp_proto.seconds) {
+    reader->set_status(
+        Status(FirestoreErrorCode::DataLoss,
+               "Invalid message: timestamp beyond the latest supported date"));
+ return {};
+ } else if (timestamp_proto.nanos < 0 || timestamp_proto.nanos > 999999999) {
+ reader->set_status(Status(
+ FirestoreErrorCode::DataLoss,
+ "Invalid message: timestamp nanos must be between 0 and 999999999"));
+ return {};
}
- return tag;
-}
-
-void Writer::WriteSize(size_t size) {
- return WriteVarint(size);
-}
-
-void Writer::WriteVarint(uint64_t value) {
- if (!status_.ok()) return;
-
- if (!pb_encode_varint(&stream_, value)) {
- FIREBASE_ASSERT_MESSAGE(false, PB_GET_ERROR(&stream_));
- }
-}
-
-/**
- * Note that (despite the return type) this works for bool, enum, int32, int64,
- * uint32 and uint64 proto field types.
- *
- * Note: This is not expected to be called directly, but rather only via the
- * other Decode* methods (i.e. DecodeBool, DecodeLong, etc)
- *
- * @return The decoded varint as a uint64_t.
- */
-uint64_t Reader::ReadVarint() {
- uint64_t varint_value;
- if (!pb_decode_varint(&stream_, &varint_value)) {
- // TODO(rsgowman): figure out error handling
- abort();
- }
- return varint_value;
-}
-
-void Writer::WriteNull() {
- return WriteVarint(google_protobuf_NullValue_NULL_VALUE);
-}
-
-void Reader::ReadNull() {
- uint64_t varint = ReadVarint();
- if (varint != google_protobuf_NullValue_NULL_VALUE) {
- // TODO(rsgowman): figure out error handling
- abort();
- }
-}
-
-void Writer::WriteBool(bool bool_value) {
- return WriteVarint(bool_value);
-}
-
-bool Reader::ReadBool() {
- uint64_t varint = ReadVarint();
- switch (varint) {
- case 0:
- return false;
- case 1:
- return true;
- default:
- // TODO(rsgowman): figure out error handling
- abort();
- }
-}
-
-void Writer::WriteInteger(int64_t integer_value) {
- return WriteVarint(integer_value);
-}
-
-int64_t Reader::ReadInteger() {
- return ReadVarint();
-}
-
-void Writer::WriteString(const std::string& string_value) {
- if (!status_.ok()) return;
-
- if (!pb_encode_string(
- &stream_, reinterpret_cast<const pb_byte_t*>(string_value.c_str()),
- string_value.length())) {
- FIREBASE_ASSERT_MESSAGE(false, PB_GET_ERROR(&stream_));
- }
-}
-
-std::string Reader::ReadString() {
- pb_istream_t substream;
- if (!pb_make_string_substream(&stream_, &substream)) {
- // TODO(rsgowman): figure out error handling
- abort();
- }
-
- std::string result(substream.bytes_left, '\0');
- if (!pb_read(&substream, reinterpret_cast<pb_byte_t*>(&result[0]),
- substream.bytes_left)) {
- // TODO(rsgowman): figure out error handling
- abort();
- }
-
- // NB: future versions of nanopb read the remaining characters out of the
- // substream (and return false if that fails) as an additional safety
- // check within pb_close_string_substream. Unfortunately, that's not present
- // in the current version (0.38). We'll make a stronger assertion and check
- // to make sure there *are* no remaining characters in the substream.
- if (substream.bytes_left != 0) {
- // TODO(rsgowman): figure out error handling
- abort();
- }
-
- pb_close_string_substream(&stream_, &substream);
-
- return result;
+ return Timestamp{timestamp_proto.seconds, timestamp_proto.nanos};
}
// Named '..Impl' so as to not conflict with Serializer::EncodeFieldValue.
@@ -417,6 +137,14 @@ void EncodeFieldValueImpl(Writer* writer, const FieldValue& field_value) {
writer->WriteString(field_value.string_value());
break;
+ case FieldValue::Type::Timestamp:
+ writer->WriteTag(
+ {PB_WT_STRING, google_firestore_v1beta1_Value_timestamp_value_tag});
+ writer->WriteNestedMessage([&field_value](Writer* writer) {
+ EncodeTimestamp(writer, field_value.timestamp_value());
+ });
+ break;
+
case FieldValue::Type::Object:
writer->WriteTag(
{PB_WT_STRING, google_firestore_v1beta1_Value_map_value_tag});
@@ -430,30 +158,56 @@ void EncodeFieldValueImpl(Writer* writer, const FieldValue& field_value) {
}
FieldValue DecodeFieldValueImpl(Reader* reader) {
+ if (!reader->status().ok()) return FieldValue::NullValue();
+
Tag tag = reader->ReadTag();
+ if (!reader->status().ok()) return FieldValue::NullValue();
// Ensure the tag matches the wire type
- // TODO(rsgowman): figure out error handling
switch (tag.field_number) {
case google_firestore_v1beta1_Value_null_value_tag:
case google_firestore_v1beta1_Value_boolean_value_tag:
case google_firestore_v1beta1_Value_integer_value_tag:
if (tag.wire_type != PB_WT_VARINT) {
- abort();
+ reader->set_status(
+ Status(FirestoreErrorCode::DataLoss,
+ "Input proto bytes cannot be parsed (mismatch between "
+ "the wiretype and the field number (tag))"));
}
break;
case google_firestore_v1beta1_Value_string_value_tag:
+ case google_firestore_v1beta1_Value_timestamp_value_tag:
case google_firestore_v1beta1_Value_map_value_tag:
if (tag.wire_type != PB_WT_STRING) {
- abort();
+ reader->set_status(
+ Status(FirestoreErrorCode::DataLoss,
+ "Input proto bytes cannot be parsed (mismatch between "
+ "the wiretype and the field number (tag))"));
}
break;
default:
- abort();
+      // We could get here for one of two reasons: either because the input
+      // bytes are corrupt, or because we're attempting to parse a tag that we
+      // haven't implemented yet. Long term, the latter reason should become
+      // less likely (especially in production), so we'll assume the former.
+
+ // TODO(rsgowman): While still in development, we'll contradict the above
+ // and assume the latter. Remove the following assertion when we're
+ // confident that we're handling all the tags in the protos.
+ FIREBASE_ASSERT_MESSAGE(
+ false,
+ "Unhandled message field number (tag): %i. (Or possibly "
+ "corrupt input bytes)",
+ tag.field_number);
+ reader->set_status(Status(
+ FirestoreErrorCode::DataLoss,
+ "Input proto bytes cannot be parsed (invalid field number (tag))"));
}
+ if (!reader->status().ok()) return FieldValue::NullValue();
+
switch (tag.field_number) {
case google_firestore_v1beta1_Value_null_value_tag:
reader->ReadNull();
@@ -464,98 +218,22 @@ FieldValue DecodeFieldValueImpl(Reader* reader) {
return FieldValue::IntegerValue(reader->ReadInteger());
case google_firestore_v1beta1_Value_string_value_tag:
return FieldValue::StringValue(reader->ReadString());
+ case google_firestore_v1beta1_Value_timestamp_value_tag:
+ return FieldValue::TimestampValue(
+ reader->ReadNestedMessage<Timestamp>(DecodeTimestamp));
case google_firestore_v1beta1_Value_map_value_tag:
- return FieldValue::ObjectValueFromMap(
- DecodeObject(reader).internal_value);
+ return FieldValue::ObjectValueFromMap(DecodeObject(reader));
default:
- // TODO(rsgowman): figure out error handling
- abort();
+ // This indicates an internal error as we've already ensured that this is
+ // a valid field_number.
+ FIREBASE_ASSERT_MESSAGE(
+ false,
+ "Somehow got an unexpected field number (tag) after verifying that "
+ "the field number was expected.");
}
}
-void Writer::WriteNestedMessage(
- const std::function<void(Writer*)>& write_message_fn) {
- if (!status_.ok()) return;
-
- // First calculate the message size using a non-writing substream.
- Writer sizer = Writer::Sizing();
- write_message_fn(&sizer);
- status_ = sizer.status();
- if (!status_.ok()) return;
- size_t size = sizer.bytes_written();
-
- // Write out the size to the output writer.
- WriteSize(size);
- if (!status_.ok()) return;
-
- // If this stream is itself a sizing stream, then we don't need to actually
- // parse field_value a second time; just update the bytes_written via a call
- // to pb_write. (If we try to write the contents into a sizing stream, it'll
- // fail since sizing streams don't actually have any buffer space.)
- if (stream_.callback == nullptr) {
- if (!pb_write(&stream_, nullptr, size)) {
- FIREBASE_ASSERT_MESSAGE(false, PB_GET_ERROR(&stream_));
- }
- return;
- }
-
- // Ensure the output stream has enough space
- if (stream_.bytes_written + size > stream_.max_size) {
- FIREBASE_ASSERT_MESSAGE(
- false,
- "Insufficient space in the output stream to write the given message");
- }
-
- // Use a substream to verify that a callback doesn't write more than what it
- // did the first time. (Use an initializer rather than setting fields
- // individually like nanopb does. This gives us a *chance* of noticing if
- // nanopb adds new fields.)
- Writer writer({stream_.callback, stream_.state,
- /*max_size=*/size, /*bytes_written=*/0,
- /*errmsg=*/nullptr});
- write_message_fn(&writer);
- status_ = writer.status();
- if (!status_.ok()) return;
-
- stream_.bytes_written += writer.stream_.bytes_written;
- stream_.state = writer.stream_.state;
- stream_.errmsg = writer.stream_.errmsg;
-
- if (writer.bytes_written() != size) {
- // submsg size changed
- FIREBASE_ASSERT_MESSAGE(
- false, "Parsing the nested message twice yielded different sizes");
- }
-}
-
-template <typename T>
-T Reader::ReadNestedMessage(const std::function<T(Reader*)>& read_message_fn) {
- // Implementation note: This is roughly modeled on pb_decode_delimited,
- // adjusted to account for the oneof in FieldValue.
- pb_istream_t raw_substream;
- if (!pb_make_string_substream(&stream_, &raw_substream)) {
- // TODO(rsgowman): figure out error handling
- abort();
- }
- Reader substream(raw_substream);
-
- T message = read_message_fn(&substream);
-
- // NB: future versions of nanopb read the remaining characters out of the
- // substream (and return false if that fails) as an additional safety
- // check within pb_close_string_substream. Unfortunately, that's not present
- // in the current version (0.38). We'll make a stronger assertion and check
- // to make sure there *are* no remaining characters in the substream.
- if (substream.bytes_left() != 0) {
- // TODO(rsgowman): figure out error handling
- abort();
- }
- pb_close_string_substream(&stream_, &substream.stream_);
-
- return message;
-}
-
/**
* Encodes a 'FieldsEntry' object, within a FieldValue's map_value type.
*
@@ -590,7 +268,10 @@ void EncodeFieldsEntry(Writer* writer, const ObjectValue::Map::value_type& kv) {
}
ObjectValue::Map::value_type DecodeFieldsEntry(Reader* reader) {
+ if (!reader->status().ok()) return {};
+
Tag tag = reader->ReadTag();
+ if (!reader->status().ok()) return {};
// TODO(rsgowman): figure out error handling: We can do better than a failed
// assertion.
@@ -600,6 +281,7 @@ ObjectValue::Map::value_type DecodeFieldsEntry(Reader* reader) {
std::string key = reader->ReadString();
tag = reader->ReadTag();
+ if (!reader->status().ok()) return {};
FIREBASE_ASSERT(tag.field_number ==
google_firestore_v1beta1_MapValue_FieldsEntry_value_tag);
FIREBASE_ASSERT(tag.wire_type == PB_WT_STRING);
@@ -607,27 +289,32 @@ ObjectValue::Map::value_type DecodeFieldsEntry(Reader* reader) {
FieldValue value =
reader->ReadNestedMessage<FieldValue>(DecodeFieldValueImpl);
- return {key, value};
+ return ObjectValue::Map::value_type{key, value};
}
void EncodeObject(Writer* writer, const ObjectValue& object_value) {
return writer->WriteNestedMessage([&object_value](Writer* writer) {
// Write each FieldsEntry (i.e. key-value pair.)
for (const auto& kv : object_value.internal_value) {
- writer->WriteTag({PB_WT_STRING,
- google_firestore_v1beta1_MapValue_FieldsEntry_key_tag});
+ writer->WriteTag(
+ {PB_WT_STRING, google_firestore_v1beta1_MapValue_fields_tag});
writer->WriteNestedMessage(
[&kv](Writer* writer) { return EncodeFieldsEntry(writer, kv); });
}
});
}
-ObjectValue DecodeObject(Reader* reader) {
- ObjectValue::Map internal_value = reader->ReadNestedMessage<ObjectValue::Map>(
+ObjectValue::Map DecodeObject(Reader* reader) {
+ if (!reader->status().ok()) return ObjectValue::Map();
+
+ return reader->ReadNestedMessage<ObjectValue::Map>(
[](Reader* reader) -> ObjectValue::Map {
ObjectValue::Map result;
+ if (!reader->status().ok()) return result;
+
while (reader->bytes_left()) {
Tag tag = reader->ReadTag();
+ if (!reader->status().ok()) return result;
FIREBASE_ASSERT(tag.field_number ==
google_firestore_v1beta1_MapValue_fields_tag);
FIREBASE_ASSERT(tag.wire_type == PB_WT_STRING);
@@ -640,6 +327,7 @@ ObjectValue DecodeObject(Reader* reader) {
// map.
// TODO(rsgowman): figure out error handling: We can do better than a
// failed assertion.
+ if (!reader->status().ok()) return result;
FIREBASE_ASSERT(result.find(fv.first) == result.end());
// Add this key,fieldvalue to the results map.
@@ -647,7 +335,64 @@ ObjectValue DecodeObject(Reader* reader) {
}
return result;
});
- return ObjectValue{internal_value};
+}
+
+/**
+ * Creates the prefix for a fully qualified resource path, without a local path
+ * on the end.
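+ * (i.e. the "projects/$projectId/databases/$databaseId" portion of a
+ * resource name)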
+ */
+ResourcePath EncodeDatabaseId(const DatabaseId& database_id) {
+ return ResourcePath{"projects", database_id.project_id(), "databases",
+ database_id.database_id()};
+}
+
+/**
+ * Encodes a databaseId and resource path into the following form:
+ * /projects/$projectId/databases/$databaseId/documents/$path
+ */
+std::string EncodeResourceName(const DatabaseId& database_id,
+ const ResourcePath& path) {
+ return EncodeDatabaseId(database_id)
+ .Append("documents")
+ .Append(path)
+ .CanonicalString();
+}
+
+/**
+ * Validates that a path has a prefix that looks like a valid encoded
+ * databaseId.
+ */
+bool IsValidResourceName(const ResourcePath& path) {
+  // Resource names have at least 4 components ("projects", project ID,
+  // "databases", database ID) and commonly the (root) resource type, e.g.
+  // "documents".
+ return path.size() >= 4 && path[0] == "projects" && path[2] == "databases";
+}
+
+/**
+ * Decodes a fully qualified resource name into a resource path and validates
+ * that there is a project and database encoded in the path. There are no
+ * guarantees that a local path is also encoded in this resource name.
+ */
+ResourcePath DecodeResourceName(absl::string_view encoded) {
+ ResourcePath resource = ResourcePath::FromString(encoded);
+ FIREBASE_ASSERT_MESSAGE(IsValidResourceName(resource),
+ "Tried to deserialize invalid key %s",
+ resource.CanonicalString().c_str());
+ return resource;
+}
+
+/**
+ * Decodes a fully qualified resource name into a resource path and validates
+ * that there is a project and database encoded in the path along with a local
+ * path.
+ */
+ResourcePath ExtractLocalPathFromResourceName(
+ const ResourcePath& resource_name) {
+ FIREBASE_ASSERT_MESSAGE(
+ resource_name.size() > 4 && resource_name[4] == "documents",
+ "Tried to deserialize invalid key %s",
+ resource_name.CanonicalString().c_str());
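+  // Drop the 5-segment prefix ("projects", $projectId, "databases",
+  // $databaseId, "documents"), leaving just the document's local path.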
+ return resource_name.PopFirst(5);
}
} // namespace
@@ -659,9 +404,157 @@ Status Serializer::EncodeFieldValue(const FieldValue& field_value,
return writer.status();
}
-FieldValue Serializer::DecodeFieldValue(const uint8_t* bytes, size_t length) {
+StatusOr<FieldValue> Serializer::DecodeFieldValue(const uint8_t* bytes,
+ size_t length) {
Reader reader = Reader::Wrap(bytes, length);
- return DecodeFieldValueImpl(&reader);
+ FieldValue fv = DecodeFieldValueImpl(&reader);
+ if (reader.status().ok()) {
+ return fv;
+ } else {
+ return reader.status();
+ }
+}
+
+std::string Serializer::EncodeKey(const DocumentKey& key) const {
+ return EncodeResourceName(database_id_, key.path());
+}
+
+DocumentKey Serializer::DecodeKey(absl::string_view name) const {
+ ResourcePath resource = DecodeResourceName(name);
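+  // A resource name has the form
+  // projects/$projectId/databases/$databaseId/documents/$path, so segments 1
+  // and 3 hold the project ID and database ID respectively.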
+ FIREBASE_ASSERT_MESSAGE(resource[1] == database_id_.project_id(),
+ "Tried to deserialize key from different project.");
+ FIREBASE_ASSERT_MESSAGE(resource[3] == database_id_.database_id(),
+ "Tried to deserialize key from different database.");
+ return DocumentKey{ExtractLocalPathFromResourceName(resource)};
+}
+
+util::Status Serializer::EncodeDocument(const DocumentKey& key,
+ const ObjectValue& value,
+ std::vector<uint8_t>* out_bytes) const {
+ Writer writer = Writer::Wrap(out_bytes);
+ EncodeDocument(&writer, key, value);
+ return writer.status();
+}
+
+void Serializer::EncodeDocument(Writer* writer,
+ const DocumentKey& key,
+ const ObjectValue& object_value) const {
+ // Encode Document.name
+ writer->WriteTag({PB_WT_STRING, google_firestore_v1beta1_Document_name_tag});
+ writer->WriteString(EncodeKey(key));
+
+ // Encode Document.fields (unless it's empty)
+ if (!object_value.internal_value.empty()) {
+ writer->WriteTag(
+ {PB_WT_STRING, google_firestore_v1beta1_Document_fields_tag});
+ EncodeObject(writer, object_value);
+ }
+
+ // Skip Document.create_time and Document.update_time, since they're
+ // output-only fields.
+}
+
+util::StatusOr<std::unique_ptr<model::MaybeDocument>>
+Serializer::DecodeMaybeDocument(const uint8_t* bytes, size_t length) const {
+ Reader reader = Reader::Wrap(bytes, length);
+ std::unique_ptr<MaybeDocument> maybeDoc =
+ DecodeBatchGetDocumentsResponse(&reader);
+
+ if (reader.status().ok()) {
+ return maybeDoc;
+ } else {
+ return reader.status();
+ }
+}
+
+std::unique_ptr<MaybeDocument> Serializer::DecodeBatchGetDocumentsResponse(
+ Reader* reader) const {
+ Tag tag = reader->ReadTag();
+ if (!reader->status().ok()) return nullptr;
+
+ // Ensure the tag matches the wire type
+ switch (tag.field_number) {
+ case google_firestore_v1beta1_BatchGetDocumentsResponse_found_tag:
+ case google_firestore_v1beta1_BatchGetDocumentsResponse_missing_tag:
+ if (tag.wire_type != PB_WT_STRING) {
+ reader->set_status(
+ Status(FirestoreErrorCode::DataLoss,
+ "Input proto bytes cannot be parsed (mismatch between "
+ "the wiretype and the field number (tag))"));
+ }
+ break;
+
+ default:
+ reader->set_status(Status(
+ FirestoreErrorCode::DataLoss,
+ "Input proto bytes cannot be parsed (invalid field number (tag))"));
+ }
+
+ if (!reader->status().ok()) return nullptr;
+
+ switch (tag.field_number) {
+ case google_firestore_v1beta1_BatchGetDocumentsResponse_found_tag:
+ return reader->ReadNestedMessage<std::unique_ptr<MaybeDocument>>(
+ [this](Reader* reader) -> std::unique_ptr<MaybeDocument> {
+ return DecodeDocument(reader);
+ });
+ case google_firestore_v1beta1_BatchGetDocumentsResponse_missing_tag:
+ // TODO(rsgowman): Right now, we only support Document (and don't support
+ // NoDocument). That should change in the next PR or so.
+ abort();
+ default:
+ // This indicates an internal error as we've already ensured that this is
+ // a valid field_number.
+ FIREBASE_ASSERT_MESSAGE(
+ false,
+ "Somehow got an unexpected field number (tag) after verifying that "
+ "the field number was expected.");
+ }
+}
+
+std::unique_ptr<Document> Serializer::DecodeDocument(Reader* reader) const {
+ if (!reader->status().ok()) return nullptr;
+
+ std::string name;
+ FieldValue fields = FieldValue::ObjectValueFromMap({});
+ SnapshotVersion version = SnapshotVersion::None();
+
+ while (reader->bytes_left()) {
+ Tag tag = reader->ReadTag();
+ if (!reader->status().ok()) return nullptr;
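+    // All of the Document fields handled here (name, fields, create_time,
+    // update_time) are length-delimited on the wire.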
+ FIREBASE_ASSERT(tag.wire_type == PB_WT_STRING);
+ switch (tag.field_number) {
+ case google_firestore_v1beta1_Document_name_tag:
+ name = reader->ReadString();
+ break;
+ case google_firestore_v1beta1_Document_fields_tag:
+ // TODO(rsgowman): Rather than overwriting, we should instead merge with
+ // the existing FieldValue (if any).
+ fields = DecodeFieldValueImpl(reader);
+ break;
+ case google_firestore_v1beta1_Document_create_time_tag:
+ // This field is ignored by the client sdk, but we still need to extract
+ // it.
+ reader->ReadNestedMessage<Timestamp>(DecodeTimestamp);
+ break;
+ case google_firestore_v1beta1_Document_update_time_tag:
+ // TODO(rsgowman): Rather than overwriting, we should instead merge with
+ // the existing SnapshotVersion (if any). Less relevant here, since it's
+ // just two numbers which are both expected to be present, but if the
+ // proto evolves that might change.
+ version = SnapshotVersion{
+ reader->ReadNestedMessage<Timestamp>(DecodeTimestamp)};
+ break;
+ default:
+ // TODO(rsgowman): Error handling. (Invalid tags should fail to decode,
+ // but shouldn't cause a crash.)
+ abort();
+ }
+ }
+
+ return absl::make_unique<Document>(std::move(fields), DecodeKey(name),
+ version,
+ /*has_local_modifications=*/false);
}
} // namespace remote