flatbuffers.h 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001
  1. /*
  2. * Copyright 2014 Google Inc. All rights reserved.
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef FLATBUFFERS_H_
  17. #define FLATBUFFERS_H_
  18. #include <assert.h>
  19. #include <cstdint>
  20. #include <cstddef>
  21. #include <cstring>
  22. #include <string>
  23. #include <type_traits>
  24. #include <vector>
  25. #include <algorithm>
  26. #if __cplusplus <= 199711L && \
  27. (!defined(_MSC_VER) || _MSC_VER < 1600) && \
  28. (!defined(__GNUC__) || \
  29. (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40603))
  30. #error A C++11 compatible compiler is required for FlatBuffers.
  31. #error __cplusplus _MSC_VER __GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__
  32. #endif
  33. // The wire format uses a little endian encoding (since that's efficient for
  34. // the common platforms).
  35. #if !defined(FLATBUFFERS_LITTLEENDIAN)
  36. #if defined(__GNUC__) || defined(__clang__)
  37. #ifdef __BIG_ENDIAN__
  38. #define FLATBUFFERS_LITTLEENDIAN 0
  39. #else
  40. #define FLATBUFFERS_LITTLEENDIAN 1
  41. #endif // __BIG_ENDIAN__
  42. #elif defined(_MSC_VER)
  43. #if defined(_M_PPC)
  44. #define FLATBUFFERS_LITTLEENDIAN 0
  45. #else
  46. #define FLATBUFFERS_LITTLEENDIAN 1
  47. #endif
  48. #else
  49. #error Unable to determine endianness, define FLATBUFFERS_LITTLEENDIAN.
  50. #endif
  51. #endif // !defined(FLATBUFFERS_LITTLEENDIAN)
  52. #define FLATBUFFERS_VERSION_MAJOR 1
  53. #define FLATBUFFERS_VERSION_MINOR 0
  54. #define FLATBUFFERS_VERSION_REVISION 0
  55. #define FLATBUFFERS_STRING_EXPAND(X) #X
  56. #define FLATBUFFERS_STRING(X) FLATBUFFERS_STRING_EXPAND(X)
namespace flatbuffers {

// Our default offset / size type, 32bit on purpose on 64bit systems.
// Also, using a consistent offset type maintains compatibility of serialized
// offset values between 32bit and 64bit systems.
typedef uint32_t uoffset_t;

// Signed offsets for references that can go in both directions.
typedef int32_t soffset_t;

// Offset/index used in v-tables, can be changed to uint8_t in
// format forks to save a bit of space if desired.
typedef uint16_t voffset_t;

// Widest scalar the format stores; used for buffer alignment decisions
// (see vector_downward and FlatBufferBuilder below).
typedef uintmax_t largest_scalar_t;
// Wrapper for uoffset_t to allow safe template specialization.
// Carries the type T of the object it points at purely at compile time;
// on the wire it is just a uoffset_t.
template<typename T> struct Offset {
  uoffset_t o;  // Raw offset value; 0 is used to mean "null / not set".

  // Default-constructs a null offset.
  Offset() : o(0) {}
  // Wraps a raw uoffset_t value.
  Offset(uoffset_t _o) : o(_o) {}
  // Erase the pointee type, e.g. for storing in a union field.
  Offset<void> Union() const { return Offset<void>(o); }
};
  75. inline void EndianCheck() {
  76. int endiantest = 1;
  77. // If this fails, see FLATBUFFERS_LITTLEENDIAN above.
  78. assert(*reinterpret_cast<char *>(&endiantest) == FLATBUFFERS_LITTLEENDIAN);
  79. (void)endiantest;
  80. }
  81. template<typename T> T EndianScalar(T t) {
  82. #if FLATBUFFERS_LITTLEENDIAN
  83. return t;
  84. #else
  85. #if defined(_MSC_VER)
  86. #pragma push_macro("__builtin_bswap16")
  87. #pragma push_macro("__builtin_bswap32")
  88. #pragma push_macro("__builtin_bswap64")
  89. #define __builtin_bswap16 _byteswap_ushort
  90. #define __builtin_bswap32 _byteswap_ulong
  91. #define __builtin_bswap64 _byteswap_uint64
  92. #endif
  93. // If you're on the few remaining big endian platforms, we make the bold
  94. // assumption you're also on gcc/clang, and thus have bswap intrinsics:
  95. if (sizeof(T) == 1) { // Compile-time if-then's.
  96. return t;
  97. } else if (sizeof(T) == 2) {
  98. auto r = __builtin_bswap16(*reinterpret_cast<uint16_t *>(&t));
  99. return *reinterpret_cast<T *>(&r);
  100. } else if (sizeof(T) == 4) {
  101. auto r = __builtin_bswap32(*reinterpret_cast<uint32_t *>(&t));
  102. return *reinterpret_cast<T *>(&r);
  103. } else if (sizeof(T) == 8) {
  104. auto r = __builtin_bswap64(*reinterpret_cast<uint64_t *>(&t));
  105. return *reinterpret_cast<T *>(&r);
  106. } else {
  107. assert(0);
  108. }
  109. #if defined(_MSC_VER)
  110. #pragma pop_macro("__builtin_bswap16")
  111. #pragma pop_macro("__builtin_bswap32")
  112. #pragma pop_macro("__builtin_bswap64")
  113. #endif
  114. #endif
  115. }
  116. template<typename T> T ReadScalar(const void *p) {
  117. return EndianScalar(*reinterpret_cast<const T *>(p));
  118. }
  119. template<typename T> void WriteScalar(void *p, T t) {
  120. *reinterpret_cast<T *>(p) = EndianScalar(t);
  121. }
// Portable alignment query: MSVC (at the time) lacked the C++11 alignof
// keyword and spells it __alignof instead.
template<typename T> size_t AlignOf() {
#ifdef _MSC_VER
  return __alignof(T);
#else
  return alignof(T);
#endif
}
// When we read serialized data from memory, in the case of most scalars,
// we want to just read T, but in the case of Offset, we want to actually
// perform the indirection and return a pointer.
// The template specialization below does just that.
// It is wrapped in a struct since function templates can't overload on the
// return type like this.
// The typedef is for the convenience of callers of this function
// (avoiding the need for a trailing return decltype)
template<typename T> struct IndirectHelper {
  typedef T return_type;
  // Inline scalars: elements sit back to back, sizeof(T) apart.
  static const size_t element_stride = sizeof(T);
  // Read element i, converting from little-endian storage.
  static return_type Read(const uint8_t *p, uoffset_t i) {
    return EndianScalar((reinterpret_cast<const T *>(p))[i]);
  }
};

// Specialization for vectors of offsets: each element is a uoffset_t that
// points forward (relative to its own location) to the actual object, so
// Read() resolves the indirection and returns a pointer.
template<typename T> struct IndirectHelper<Offset<T>> {
  typedef const T *return_type;
  static const size_t element_stride = sizeof(uoffset_t);
  static return_type Read(const uint8_t *p, uoffset_t i) {
    p += i * sizeof(uoffset_t);
    return reinterpret_cast<return_type>(p + ReadScalar<uoffset_t>(p));
  }
};

// Specialization for structs stored inline: return a pointer straight into
// the vector data (no endian conversion happens here).
template<typename T> struct IndirectHelper<const T *> {
  typedef const T *return_type;
  static const size_t element_stride = sizeof(T);
  static return_type Read(const uint8_t *p, uoffset_t i) {
    return reinterpret_cast<const T *>(p + i * sizeof(T));
  }
};
  159. // An STL compatible iterator implementation for Vector below, effectively
  160. // calling Get() for every element.
  161. template<typename T, bool bConst>
  162. struct VectorIterator : public
  163. std::iterator < std::input_iterator_tag,
  164. typename std::conditional < bConst,
  165. const typename IndirectHelper<T>::return_type,
  166. typename IndirectHelper<T>::return_type > ::type, uoffset_t > {
  167. typedef std::iterator<std::input_iterator_tag,
  168. typename std::conditional<bConst,
  169. const typename IndirectHelper<T>::return_type,
  170. typename IndirectHelper<T>::return_type>::type, uoffset_t> super_type;
  171. public:
  172. VectorIterator(const uint8_t *data, uoffset_t i) :
  173. data_(data + IndirectHelper<T>::element_stride * i) {};
  174. VectorIterator(const VectorIterator &other) : data_(other.data_) {}
  175. VectorIterator(VectorIterator &&other) : data_(std::move(other.data_)) {}
  176. VectorIterator &operator=(const VectorIterator &other) {
  177. data_ = other.data_;
  178. return *this;
  179. }
  180. VectorIterator &operator=(VectorIterator &&other) {
  181. data_ = other.data_;
  182. return *this;
  183. }
  184. bool operator==(const VectorIterator& other) const {
  185. return data_ == other.data_;
  186. }
  187. bool operator!=(const VectorIterator& other) const {
  188. return data_ != other.data_;
  189. }
  190. ptrdiff_t operator-(const VectorIterator& other) const {
  191. return (data_ - other.data_) / IndirectHelper<T>::element_stride;
  192. }
  193. typename super_type::value_type operator *() const {
  194. return IndirectHelper<T>::Read(data_, 0);
  195. }
  196. typename super_type::value_type operator->() const {
  197. return IndirectHelper<T>::Read(data_, 0);
  198. }
  199. VectorIterator &operator++() {
  200. data_ += IndirectHelper<T>::element_stride;
  201. return *this;
  202. }
  203. VectorIterator operator++(int) {
  204. VectorIterator temp(data_);
  205. data_ += IndirectHelper<T>::element_stride;
  206. return temp;
  207. }
  208. private:
  209. const uint8_t *data_;
  210. };
  211. // This is used as a helper type for accessing vectors.
  212. // Vector::data() assumes the vector elements start after the length field.
  213. template<typename T> class Vector {
  214. public:
  215. typedef VectorIterator<T, false> iterator;
  216. typedef VectorIterator<T, true> const_iterator;
  217. uoffset_t size() const { return EndianScalar(length_); }
  218. // Deprecated: use size(). Here for backwards compatibility.
  219. uoffset_t Length() const { return size(); }
  220. typedef typename IndirectHelper<T>::return_type return_type;
  221. return_type Get(uoffset_t i) const {
  222. assert(i < size());
  223. return IndirectHelper<T>::Read(Data(), i);
  224. }
  225. // If this is a Vector of enums, T will be its storage type, not the enum
  226. // type. This function makes it convenient to retrieve value with enum
  227. // type E.
  228. template<typename E> E GetEnum(uoffset_t i) const {
  229. return static_cast<E>(Get(i));
  230. }
  231. const void *GetStructFromOffset(size_t o) const {
  232. return reinterpret_cast<const void *>(Data() + o);
  233. }
  234. iterator begin() { return iterator(Data(), 0); }
  235. const_iterator begin() const { return const_iterator(Data(), 0); }
  236. iterator end() { return iterator(Data(), length_); }
  237. const_iterator end() const { return const_iterator(Data(), length_); }
  238. // The raw data in little endian format. Use with care.
  239. const uint8_t *Data() const {
  240. return reinterpret_cast<const uint8_t *>(&length_ + 1);
  241. }
  242. protected:
  243. // This class is only used to access pre-existing data. Don't ever
  244. // try to construct these manually.
  245. Vector();
  246. uoffset_t length_;
  247. };
// A FlatBuffers string is a Vector<char>; CreateString() always writes a
// 0 terminator after the data, so it can be exposed as a plain C string.
struct String : public Vector<char> {
  const char *c_str() const { return reinterpret_cast<const char *>(Data()); }
};
  251. // Simple indirection for buffer allocation, to allow this to be overridden
  252. // with custom allocation (see the FlatBufferBuilder constructor).
  253. class simple_allocator {
  254. public:
  255. virtual ~simple_allocator(){}
  256. virtual uint8_t *allocate(size_t size) const { return new uint8_t[size]; }
  257. virtual void deallocate(uint8_t *p) const { delete[] p; }
  258. };
// This is a minimal replication of std::vector<uint8_t> functionality,
// except growing from higher to lower addresses. i.e push_back() inserts data
// in the lowest address in the vector.
class vector_downward {
 public:
  // initial_size must be a multiple of sizeof(largest_scalar_t) so that
  // data written downward from the end stays aligned.
  explicit vector_downward(size_t initial_size,
                           const simple_allocator &allocator)
    : reserved_(initial_size),
      buf_(allocator.allocate(reserved_)),
      cur_(buf_ + reserved_),
      allocator_(allocator) {
    assert((initial_size & (sizeof(largest_scalar_t) - 1)) == 0);
  }

  ~vector_downward() { allocator_.deallocate(buf_); }

  // Discard all contents; capacity is retained.
  void clear() { cur_ = buf_ + reserved_; }

  // Grow by half the current capacity, rounded down to largest-scalar
  // alignment, when more room is needed.
  size_t growth_policy(size_t bytes) {
    return (bytes / 2) & ~(sizeof(largest_scalar_t) - 1);
  }

  // Reserve len bytes at the low end of the buffer, reallocating (and
  // copying the used region to the top of the new buffer) if necessary.
  // Returns a pointer to the newly reserved space.
  uint8_t *make_space(size_t len) {
    if (buf_ > cur_ - len) {
      auto old_size = size();
      reserved_ += std::max(len, growth_policy(reserved_));
      auto new_buf = allocator_.allocate(reserved_);
      auto new_cur = new_buf + reserved_ - old_size;
      memcpy(new_cur, cur_, old_size);
      cur_ = new_cur;
      allocator_.deallocate(buf_);
      buf_ = new_buf;
    }
    cur_ -= len;
    // Beyond this, signed offsets may not have enough range:
    // (FlatBuffers > 2GB not supported).
    assert(size() < (1UL << (sizeof(soffset_t) * 8 - 1)) - 1);
    return cur_;
  }

  // Number of bytes currently in use (measured from the high end).
  uoffset_t size() const {
    return static_cast<uoffset_t>(reserved_ - (cur_ - buf_));
  }

  // Lowest used address, i.e. the start of the serialized data.
  uint8_t *data() const { return cur_; }

  // Address "offset" bytes below the high end of the buffer.
  uint8_t *data_at(size_t offset) { return buf_ + reserved_ - offset; }

  // push() & fill() are most frequently called with small byte counts (<= 4),
  // which is why we're using loops rather than calling memcpy/memset.
  void push(const uint8_t *bytes, size_t num) {
    auto dest = make_space(num);
    for (size_t i = 0; i < num; i++) dest[i] = bytes[i];
  }

  // Reserve zero_pad_bytes and fill them with zeros (used for padding).
  void fill(size_t zero_pad_bytes) {
    auto dest = make_space(zero_pad_bytes);
    for (size_t i = 0; i < zero_pad_bytes; i++) dest[i] = 0;
  }

  // Undo the most recent bytes_to_remove bytes of writes.
  void pop(size_t bytes_to_remove) { cur_ += bytes_to_remove; }

 private:
  // You shouldn't really be copying instances of this class.
  vector_downward(const vector_downward &);
  vector_downward &operator=(const vector_downward &);

  size_t reserved_;  // Total capacity in bytes.
  uint8_t *buf_;     // Start (lowest address) of the allocation.
  uint8_t *cur_;     // Points at location between empty (below) and used (above).
  const simple_allocator &allocator_;
};
  319. // Converts a Field ID to a virtual table offset.
  320. inline voffset_t FieldIndexToOffset(voffset_t field_id) {
  321. // Should correspond to what EndTable() below builds up.
  322. const int fixed_fields = 2; // Vtable size and Object Size.
  323. return (field_id + fixed_fields) * sizeof(voffset_t);
  324. }
  325. // Computes how many bytes you'd have to pad to be able to write an
  326. // "scalar_size" scalar if the buffer had grown to "buf_size" (downwards in
  327. // memory).
  328. inline size_t PaddingBytes(size_t buf_size, size_t scalar_size) {
  329. return ((~buf_size) + 1) & (scalar_size - 1);
  330. }
// Helper class to hold data needed in creation of a flat buffer.
// To serialize data, you typically call one of the Create*() functions in
// the generated code, which in turn call a sequence of StartTable/PushElement/
// AddElement/EndTable, or the builtin CreateString/CreateVector functions.
// Do this is depth-first order to build up a tree to the root.
// Finish() wraps up the buffer ready for transport.
class FlatBufferBuilder {
 public:
  // initial_size: starting capacity of the internal downward-growing buffer
  // (must be a multiple of sizeof(largest_scalar_t), see vector_downward).
  // allocator: optional custom allocator; defaults to new/delete.
  explicit FlatBufferBuilder(uoffset_t initial_size = 1024,
                             const simple_allocator *allocator = nullptr)
    : buf_(initial_size, allocator ? *allocator : default_allocator),
      minalign_(1), force_defaults_(false) {
    offsetbuf_.reserve(16);  // Avoid first few reallocs.
    vtables_.reserve(16);
    EndianCheck();
  }

  // Reset all the state in this FlatBufferBuilder so it can be reused
  // to construct another buffer.
  void Clear() {
    buf_.clear();
    offsetbuf_.clear();
    vtables_.clear();
  }

  // The current size of the serialized buffer, counting from the end.
  uoffset_t GetSize() const { return buf_.size(); }

  // Get the serialized buffer (after you call Finish()).
  uint8_t *GetBufferPointer() const { return buf_.data(); }

  // When true, AddElement() serializes fields even when equal to their
  // default value (normally such fields are omitted).
  void ForceDefaults(bool fd) { force_defaults_ = fd; }

  // Write num_bytes of zero padding.
  void Pad(size_t num_bytes) { buf_.fill(num_bytes); }

  // Pad so the next write of elem_size bytes lands aligned, and remember
  // the largest alignment seen so far (used by Finish()).
  void Align(size_t elem_size) {
    if (elem_size > minalign_) minalign_ = elem_size;
    buf_.fill(PaddingBytes(buf_.size(), elem_size));
  }

  // Append raw bytes to the front (low end) of the buffer.
  void PushBytes(const uint8_t *bytes, size_t size) {
    buf_.push(bytes, size);
  }

  // Undo the last "amount" bytes written.
  void PopBytes(size_t amount) { buf_.pop(amount); }

  // Compile-time guard: T must be a scalar (or an Offset, which is
  // scalar-sized), since the code assumes power of 2 sizes and
  // endian-swap-ability.
  template<typename T> void AssertScalarT() {
    // The code assumes power of 2 sizes and endian-swap-ability.
    static_assert(std::is_scalar<T>::value
        // The Offset<T> type is essentially a scalar but fails is_scalar.
        || sizeof(T) == sizeof(Offset<void>),
        "T must be a scalar type");
  }

  // Write a single aligned scalar to the buffer
  // Returns the buffer offset (from the end) at which it was written.
  template<typename T> uoffset_t PushElement(T element) {
    AssertScalarT<T>();
    T litle_endian_element = EndianScalar(element);
    Align(sizeof(T));
    PushBytes(reinterpret_cast<uint8_t *>(&litle_endian_element), sizeof(T));
    return GetSize();
  }

  template<typename T> uoffset_t PushElement(Offset<T> off) {
    // Special case for offsets: see ReferTo below.
    return PushElement(ReferTo(off.o));
  }

  // When writing fields, we track where they are, so we can create correct
  // vtables later.
  void TrackField(voffset_t field, uoffset_t off) {
    FieldLoc fl = { off, field };
    offsetbuf_.push_back(fl);
  }

  // Like PushElement, but additionally tracks the field this represents.
  template<typename T> void AddElement(voffset_t field, T e, T def) {
    // We don't serialize values equal to the default.
    if (e == def && !force_defaults_) return;
    auto off = PushElement(e);
    TrackField(field, off);
  }

  // Add an offset-valued field; 0 (null) offsets are never stored.
  template<typename T> void AddOffset(voffset_t field, Offset<T> off) {
    if (!off.o) return;  // An offset of 0 means NULL, don't store.
    AddElement(field, ReferTo(off.o), static_cast<uoffset_t>(0));
  }

  // Add a struct field, copied inline into the table.
  template<typename T> void AddStruct(voffset_t field, const T *structptr) {
    if (!structptr) return;  // Default, don't store.
    Align(AlignOf<T>());
    PushBytes(reinterpret_cast<const uint8_t *>(structptr), sizeof(T));
    TrackField(field, GetSize());
  }

  // Track a struct already written at buffer offset "off".
  void AddStructOffset(voffset_t field, uoffset_t off) {
    TrackField(field, off);
  }

  // Offsets initially are relative to the end of the buffer (downwards).
  // This function converts them to be relative to the current location
  // in the buffer (when stored here), pointing upwards.
  uoffset_t ReferTo(uoffset_t off) {
    Align(sizeof(uoffset_t));  // To ensure GetSize() below is correct.
    assert(off <= GetSize());  // Must refer to something already in buffer.
    return GetSize() - off + sizeof(uoffset_t);
  }

  void NotNested() {
    // If you hit this, you're trying to construct an object when another
    // hasn't finished yet.
    assert(!offsetbuf_.size());
  }

  // From generated code (or from the parser), we call StartTable/EndTable
  // with a sequence of AddElement calls in between.
  uoffset_t StartTable() {
    NotNested();
    return GetSize();
  }

  // This finishes one serialized object by generating the vtable if it's a
  // table, comparing it against existing vtables, and writing the
  // resulting vtable offset.
  uoffset_t EndTable(uoffset_t start, voffset_t numfields) {
    // Write the vtable offset, which is the start of any Table.
    // We fill its value later.
    auto vtableoffsetloc = PushElement<uoffset_t>(0);
    // Write a vtable, which consists entirely of voffset_t elements.
    // It starts with the number of offsets, followed by a type id, followed
    // by the offsets themselves. In reverse:
    buf_.fill(numfields * sizeof(voffset_t));
    auto table_object_size = vtableoffsetloc - start;
    assert(table_object_size < 0x10000);  // Vtable use 16bit offsets.
    PushElement<voffset_t>(static_cast<voffset_t>(table_object_size));
    PushElement<voffset_t>(FieldIndexToOffset(numfields));
    // Write the offsets into the table
    for (auto field_location = offsetbuf_.begin();
         field_location != offsetbuf_.end();
         ++field_location) {
      auto pos = static_cast<voffset_t>(vtableoffsetloc - field_location->off);
      // If this asserts, it means you've set a field twice.
      assert(!ReadScalar<voffset_t>(buf_.data() + field_location->id));
      WriteScalar<voffset_t>(buf_.data() + field_location->id, pos);
    }
    offsetbuf_.clear();
    auto vt1 = reinterpret_cast<voffset_t *>(buf_.data());
    auto vt1_size = ReadScalar<voffset_t>(vt1);
    auto vt_use = GetSize();
    // See if we already have generated a vtable with this exact same
    // layout before. If so, make it point to the old one, remove this one.
    for (auto it = vtables_.begin(); it != vtables_.end(); ++it) {
      if (memcmp(buf_.data_at(*it), vt1, vt1_size)) continue;
      vt_use = *it;
      buf_.pop(GetSize() - vtableoffsetloc);
      break;
    }
    // If this is a new vtable, remember it.
    if (vt_use == GetSize()) {
      vtables_.push_back(vt_use);
    }
    // Fill the vtable offset we created above.
    // The offset points from the beginning of the object to where the
    // vtable is stored.
    // Offsets default direction is downward in memory for future format
    // flexibility (storing all vtables at the start of the file).
    WriteScalar(buf_.data_at(vtableoffsetloc),
                static_cast<soffset_t>(vt_use) -
                static_cast<soffset_t>(vtableoffsetloc));
    return vtableoffsetloc;
  }

  // This checks a required field has been set in a given table that has
  // just been constructed.
  // NOTE(review): EndTable() writes the table->vtable offset as a signed
  // soffset_t; reading it back as uoffset_t here relies on it always being
  // positive for freshly built tables — confirm before changing.
  template<typename T> void Required(Offset<T> table, voffset_t field) {
    auto table_ptr = buf_.data_at(table.o);
    auto vtable_ptr = table_ptr - ReadScalar<uoffset_t>(table_ptr);
    bool ok = ReadScalar<voffset_t>(vtable_ptr + field) != 0;
    // If this fails, the caller will show what field needs to be set.
    assert(ok);
    (void)ok;
  }

  // Begin an inline struct: align, then return its start offset.
  uoffset_t StartStruct(size_t alignment) {
    Align(alignment);
    return GetSize();
  }

  uoffset_t EndStruct() { return GetSize(); }

  void ClearOffsets() { offsetbuf_.clear(); }

  // Aligns such that when "len" bytes are written, an object can be written
  // after it with "alignment" without padding.
  void PreAlign(size_t len, size_t alignment) {
    buf_.fill(PaddingBytes(GetSize() + len, alignment));
  }

  template<typename T> void PreAlign(size_t len) {
    AssertScalarT<T>();
    PreAlign(len, sizeof(T));
  }

  // Functions to store strings, which are allowed to contain any binary data.
  Offset<String> CreateString(const char *str, size_t len) {
    NotNested();
    PreAlign<uoffset_t>(len + 1);  // Always 0-terminated.
    buf_.fill(1);
    PushBytes(reinterpret_cast<const uint8_t *>(str), len);
    PushElement(static_cast<uoffset_t>(len));
    return Offset<String>(GetSize());
  }

  Offset<String> CreateString(const char *str) {
    return CreateString(str, strlen(str));
  }

  Offset<String> CreateString(const std::string &str) {
    return CreateString(str.c_str(), str.length());
  }

  // Write the length prefix that completes a vector started with
  // StartVector(); returns the vector's offset.
  uoffset_t EndVector(size_t len) {
    return PushElement(static_cast<uoffset_t>(len));
  }

  // Align for a vector of len elements of elemsize bytes each.
  void StartVector(size_t len, size_t elemsize) {
    PreAlign<uoffset_t>(len * elemsize);
    PreAlign(len * elemsize, elemsize);  // Just in case elemsize > uoffset_t.
  }

  // Reserve raw space for len * elemsize bytes; caller fills it in.
  uint8_t *ReserveElements(size_t len, size_t elemsize) {
    return buf_.make_space(len * elemsize);
  }

  // Serialize an array of scalars as a vector (elements written in reverse,
  // since the buffer grows downward).
  template<typename T> Offset<Vector<T>> CreateVector(const T *v, size_t len) {
    NotNested();
    StartVector(len, sizeof(T));
    for (auto i = len; i > 0; ) {
      PushElement(v[--i]);
    }
    return Offset<Vector<T>>(EndVector(len));
  }

  // Specialized version for non-copying use cases. Data to be written later.
  // After calling this function, GetBufferPointer() can be cast to the
  // corresponding Vector<> type to write the data (through Data()).
  template<typename T> Offset<Vector<T>> CreateUninitializedVector(size_t len) {
    NotNested();
    StartVector(len, sizeof(T));
    buf_.make_space(len * sizeof(T));
    return Offset<Vector<T>>(EndVector(len));
  }

  template<typename T> Offset<Vector<T>> CreateVector(const std::vector<T> &v) {
    return CreateVector(v.data(), v.size());
  }

  // Serialize an array of structs inline (memcpy'd as-is, no endian
  // conversion — structs are expected to already be in wire layout).
  template<typename T> Offset<Vector<const T *>> CreateVectorOfStructs(
      const T *v, size_t len) {
    NotNested();
    StartVector(len * sizeof(T) / AlignOf<T>(), AlignOf<T>());
    PushBytes(reinterpret_cast<const uint8_t *>(v), sizeof(T) * len);
    return Offset<Vector<const T *>>(EndVector(len));
  }

  template<typename T> Offset<Vector<const T *>> CreateVectorOfStructs(
      const std::vector<T> &v) {
    return CreateVectorOfStructs(v.data(), v.size());
  }

  static const size_t kFileIdentifierLength = 4;

  // Finish serializing a buffer by writing the root offset.
  // If a file_identifier is given, the buffer will be prefix with a standard
  // FlatBuffers file header.
  template<typename T> void Finish(Offset<T> root,
                                   const char *file_identifier = nullptr) {
    // This will cause the whole buffer to be aligned.
    PreAlign(sizeof(uoffset_t) + (file_identifier ? kFileIdentifierLength : 0),
             minalign_);
    if (file_identifier) {
      assert(strlen(file_identifier) == kFileIdentifierLength);
      buf_.push(reinterpret_cast<const uint8_t *>(file_identifier),
                kFileIdentifierLength);
    }
    PushElement(ReferTo(root.o));  // Location of root.
  }

 private:
  // You shouldn't really be copying instances of this class.
  FlatBufferBuilder(const FlatBufferBuilder &);
  FlatBufferBuilder &operator=(const FlatBufferBuilder &);

  // Records where a field value was written, for vtable construction.
  struct FieldLoc {
    uoffset_t off;  // Buffer offset (from end) of the value.
    voffset_t id;   // Vtable slot (byte offset) for this field.
  };

  simple_allocator default_allocator;
  vector_downward buf_;
  // Accumulating offsets of table members while it is being built.
  std::vector<FieldLoc> offsetbuf_;
  std::vector<uoffset_t> vtables_;  // todo: Could make this into a map?
  size_t minalign_;       // Largest alignment requested so far.
  bool force_defaults_;   // Serialize values equal to their defaults anyway.
};
  595. // Helper to get a typed pointer to the root object contained in the buffer.
  596. template<typename T> const T *GetRoot(const void *buf) {
  597. EndianCheck();
  598. return reinterpret_cast<const T *>(reinterpret_cast<const uint8_t *>(buf) +
  599. EndianScalar(*reinterpret_cast<const uoffset_t *>(buf)));
  600. }
  601. // Helper to see if the identifier in a buffer has the expected value.
  602. inline bool BufferHasIdentifier(const void *buf, const char *identifier) {
  603. return strncmp(reinterpret_cast<const char *>(buf) + sizeof(uoffset_t),
  604. identifier, FlatBufferBuilder::kFileIdentifierLength) == 0;
  605. }
  606. // Helper class to verify the integrity of a FlatBuffer
  607. class Verifier {
  608. public:
  609. Verifier(const uint8_t *buf, size_t buf_len, size_t _max_depth = 64,
  610. size_t _max_tables = 1000000)
  611. : buf_(buf), end_(buf + buf_len), depth_(0), max_depth_(_max_depth),
  612. num_tables_(0), max_tables_(_max_tables)
  613. {}
  614. // Central location where any verification failures register.
  615. bool Check(bool ok) const {
  616. #ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE
  617. assert(ok);
  618. #endif
  619. return ok;
  620. }
  621. // Verify any range within the buffer.
  622. bool Verify(const void *elem, size_t elem_len) const {
  623. return Check(elem >= buf_ && elem <= end_ - elem_len);
  624. }
  625. // Verify a range indicated by sizeof(T).
  626. template<typename T> bool Verify(const void *elem) const {
  627. return Verify(elem, sizeof(T));
  628. }
  629. // Verify a pointer (may be NULL) of a table type.
  630. template<typename T> bool VerifyTable(const T *table) {
  631. return !table || table->Verify(*this);
  632. }
  633. // Verify a pointer (may be NULL) of any vector type.
  634. template<typename T> bool Verify(const Vector<T> *vec) const {
  635. const uint8_t *end;
  636. return !vec ||
  637. VerifyVector(reinterpret_cast<const uint8_t *>(vec), sizeof(T),
  638. &end);
  639. }
  640. // Verify a pointer (may be NULL) to string.
  641. bool Verify(const String *str) const {
  642. const uint8_t *end;
  643. return !str ||
  644. (VerifyVector(reinterpret_cast<const uint8_t *>(str), 1, &end) &&
  645. Verify(end, 1) && // Must have terminator
  646. Check(*end == '\0')); // Terminating byte must be 0.
  647. }
  648. // Common code between vectors and strings.
  649. bool VerifyVector(const uint8_t *vec, size_t elem_size,
  650. const uint8_t **end) const {
  651. // Check we can read the size field.
  652. if (!Verify<uoffset_t>(vec)) return false;
  653. // Check the whole array. If this is a string, the byte past the array
  654. // must be 0.
  655. auto size = ReadScalar<uoffset_t>(vec);
  656. auto byte_size = sizeof(size) + elem_size * size;
  657. *end = vec + byte_size;
  658. return Verify(vec, byte_size);
  659. }
  660. // Special case for string contents, after the above has been called.
  661. bool VerifyVectorOfStrings(const Vector<Offset<String>> *vec) const {
  662. if (vec) {
  663. for (uoffset_t i = 0; i < vec->size(); i++) {
  664. if (!Verify(vec->Get(i))) return false;
  665. }
  666. }
  667. return true;
  668. }
  669. // Special case for table contents, after the above has been called.
  670. template<typename T> bool VerifyVectorOfTables(const Vector<Offset<T>> *vec) {
  671. if (vec) {
  672. for (uoffset_t i = 0; i < vec->size(); i++) {
  673. if (!vec->Get(i)->Verify(*this)) return false;
  674. }
  675. }
  676. return true;
  677. }
  678. // Verify this whole buffer, starting with root type T.
  679. template<typename T> bool VerifyBuffer() {
  680. // Call T::Verify, which must be in the generated code for this type.
  681. return Verify<uoffset_t>(buf_) &&
  682. reinterpret_cast<const T *>(buf_ + ReadScalar<uoffset_t>(buf_))->
  683. Verify(*this);
  684. }
  685. // Called at the start of a table to increase counters measuring data
  686. // structure depth and amount, and possibly bails out with false if
  687. // limits set by the constructor have been hit. Needs to be balanced
  688. // with EndTable().
  689. bool VerifyComplexity() {
  690. depth_++;
  691. num_tables_++;
  692. return Check(depth_ <= max_depth_ && num_tables_ <= max_tables_);
  693. }
  694. // Called at the end of a table to pop the depth count.
  695. bool EndTable() {
  696. depth_--;
  697. return true;
  698. }
 private:
  const uint8_t *buf_;   // First byte of the buffer being verified.
  const uint8_t *end_;   // One past the last byte of the buffer.
  size_t depth_;         // Current table nesting depth.
  size_t max_depth_;     // Nesting depth limit (from constructor).
  size_t num_tables_;    // Tables visited so far.
  size_t max_tables_;    // Table count limit (from constructor).
};
  707. // "structs" are flat structures that do not have an offset table, thus
  708. // always have all members present and do not support forwards/backwards
  709. // compatible extensions.
  710. class Struct {
  711. public:
  712. template<typename T> T GetField(uoffset_t o) const {
  713. return ReadScalar<T>(&data_[o]);
  714. }
  715. template<typename T> T GetPointer(uoffset_t o) const {
  716. auto p = &data_[o];
  717. return reinterpret_cast<T>(p + ReadScalar<uoffset_t>(p));
  718. }
  719. template<typename T> T GetStruct(uoffset_t o) const {
  720. return reinterpret_cast<T>(&data_[o]);
  721. }
  722. private:
  723. uint8_t data_[1];
  724. };
// "tables" use an offset table (possibly shared) that allows fields to be
// omitted and added at will, but uses an extra indirection to read.
class Table {
 public:
  // This gets the field offset for any of the functions below it, or 0
  // if the field was not present.
  voffset_t GetOptionalFieldOffset(voffset_t field) const {
    // The vtable offset is always at the start.
    // Note it is subtracted: the vtable lives at data_ minus this value.
    auto vtable = data_ - ReadScalar<soffset_t>(data_);
    // The first element is the size of the vtable (fields + type id + itself).
    auto vtsize = ReadScalar<voffset_t>(vtable);
    // If the field we're accessing is outside the vtable, we're reading older
    // data, so it's the same as if the offset was 0 (not present).
    return field < vtsize ? ReadScalar<voffset_t>(vtable + field) : 0;
  }
  // Read a scalar field, or defaultval if the field is absent from the
  // vtable (omitted or written by an older schema).
  template<typename T> T GetField(voffset_t field, T defaultval) const {
    auto field_offset = GetOptionalFieldOffset(field);
    return field_offset ? ReadScalar<T>(data_ + field_offset) : defaultval;
  }
  // Resolve an offset field: follow the relative uoffset_t stored at the
  // field's position. Returns nullptr when the field is absent.
  template<typename P> P GetPointer(voffset_t field) const {
    auto field_offset = GetOptionalFieldOffset(field);
    auto p = data_ + field_offset;
    return field_offset
      ? reinterpret_cast<P>(p + ReadScalar<uoffset_t>(p))
      : nullptr;
  }
  // Return a pointer to a struct stored inline in this table, or nullptr
  // when the field is absent.
  template<typename P> P GetStruct(voffset_t field) const {
    auto field_offset = GetOptionalFieldOffset(field);
    return field_offset ? reinterpret_cast<P>(data_ + field_offset) : nullptr;
  }
  // Overwrite an existing scalar field in place.
  template<typename T> void SetField(voffset_t field, T val) {
    auto field_offset = GetOptionalFieldOffset(field);
    // If this asserts, you're trying to set a field that's not there
    // (or should we return a bool instead?).
    // check if it exists first using CheckField()
    assert(field_offset);
    WriteScalar(data_ + field_offset, val);
  }
  // True if the field is present in this table's vtable.
  bool CheckField(voffset_t field) const {
    return GetOptionalFieldOffset(field) != 0;
  }
  // Verify the vtable of this table.
  // Call this once per table, followed by VerifyField once per field.
  bool VerifyTableStart(Verifier &verifier) const {
    // Check the vtable offset.
    if (!verifier.Verify<soffset_t>(data_)) return false;
    auto vtable = data_ - ReadScalar<soffset_t>(data_);
    // Check the vtable size field, then check vtable fits in its entirety.
    // Order matters: VerifyComplexity() bumps the depth/table counters
    // (balanced by EndTable() in the generated code), and the size field
    // must be readable before it is used as a length.
    return verifier.VerifyComplexity() &&
           verifier.Verify<voffset_t>(vtable) &&
           verifier.Verify(vtable, ReadScalar<voffset_t>(vtable));
  }
  // Verify a particular field. Absent fields are valid.
  template<typename T> bool VerifyField(const Verifier &verifier,
                                        voffset_t field) const {
    // Calling GetOptionalFieldOffset should be safe now thanks to
    // VerifyTable().
    auto field_offset = GetOptionalFieldOffset(field);
    // Check the actual field.
    return !field_offset || verifier.Verify<T>(data_ + field_offset);
  }
  // VerifyField for required fields: absence is a verification failure.
  template<typename T> bool VerifyFieldRequired(const Verifier &verifier,
                                                voffset_t field) const {
    auto field_offset = GetOptionalFieldOffset(field);
    return verifier.Check(field_offset != 0) &&
           verifier.Verify<T>(data_ + field_offset);
  }
 private:
  // private constructor & copy constructor: you obtain instances of this
  // class by pointing to existing data only
  Table();
  Table(const Table &other);
  uint8_t data_[1];  // Placeholder; real table data extends past this.
};
  800. // Utility function for reverse lookups on the EnumNames*() functions
  801. // (in the generated C++ code)
  802. // names must be NULL terminated.
  803. inline int LookupEnum(const char **names, const char *name) {
  804. for (const char **p = names; *p; p++)
  805. if (!strcmp(*p, name))
  806. return static_cast<int>(p - names);
  807. return -1;
  808. }
// These macros allow us to layout a struct with a guarantee that they'll end
// up looking the same on different compilers and platforms.
// It does this by disallowing the compiler to do any padding, and then
// does padding itself by inserting extra padding fields that make every
// element aligned to its own size.
// Additionally, it manually sets the alignment of the struct as a whole,
// which is typically its largest element, or a custom size set in the schema
// by the force_align attribute.
// These are used in the generated code only.
#if defined(_MSC_VER)
// MSVC: packing via __pragma, whole-struct alignment via __declspec(align).
#define MANUALLY_ALIGNED_STRUCT(alignment) \
__pragma(pack(1)); \
struct __declspec(align(alignment))
// STRUCT_END also static_asserts that the packed size matches what the
// schema compiler computed, catching compilers that break packing rules.
#define STRUCT_END(name, size) \
__pragma(pack()); \
static_assert(sizeof(name) == size, "compiler breaks packing rules")
#elif defined(__GNUC__) || defined(__clang__)
// GCC/Clang: same scheme using _Pragma and __attribute__((aligned)).
#define MANUALLY_ALIGNED_STRUCT(alignment) \
_Pragma("pack(1)") \
struct __attribute__((aligned(alignment)))
#define STRUCT_END(name, size) \
_Pragma("pack()") \
static_assert(sizeof(name) == size, "compiler breaks packing rules")
#else
#error Unknown compiler, please define structure alignment macros
#endif
// String which identifies the current version of FlatBuffers.
// flatbuffer_version_string is used by Google developers to identify which
// applications uploaded to Google Play are using this library. This allows
// the development team at Google to determine the popularity of the library.
// How it works: Applications that are uploaded to the Google Play Store are
// scanned for this version string. We track which applications are using it
// to measure popularity. You are free to remove it (of course) but we would
// appreciate if you left it in.
// Weak linkage is culled by VS & doesn't work on cygwin.
// NOTE: defining a variable in a header is only safe here because the weak
// attribute lets the linker merge the copies from multiple translation units.
#if !defined(_WIN32) && !defined(__CYGWIN__)
extern volatile __attribute__((weak)) const char *flatbuffer_version_string;
volatile __attribute__((weak)) const char *flatbuffer_version_string =
  "FlatBuffers "
  FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MAJOR) "."
  FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MINOR) "."
  FLATBUFFERS_STRING(FLATBUFFERS_VERSION_REVISION);
#endif  // !defined(_WIN32) && !defined(__CYGWIN__)
}  // namespace flatbuffers
#endif  // FLATBUFFERS_H_