1 /*************************************************************************
3 * Copyright 2016 Realm Inc.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 **************************************************************************/
19 #ifndef REALM_ALLOC_HPP
20 #define REALM_ALLOC_HPP
26 #include <realm/util/features.h>
27 #include <realm/util/terminate.hpp>
28 #include <realm/util/assert.hpp>
// A 'ref' is a byte offset identifying an allocated object inside the memory
// managed by an Allocator. Per the Allocator contract below, a ref is always
// divisible by 8 and 0 denotes a null reference.
using ref_type = size_t;

/// Convert a ref to a signed 64-bit value, preserving the bit pattern
/// (no sign extension) — see definition below.
int_fast64_t from_ref(ref_type) noexcept;
/// Convert a signed 64-bit value back to a ref (modular conversion).
ref_type to_ref(int_fast64_t) noexcept;
/// Convert a size_t to int64_t via static_cast — see definition below.
int64_t to_int64(size_t value) noexcept;
    /// Construct from a known address/ref pair. The allocator argument is
    /// only retained in memdebug builds (see m_alloc below).
    MemRef(char* addr, ref_type ref, Allocator& alloc) noexcept;

    /// Construct from a ref alone; the address is resolved through
    /// Allocator::translate() (see the definition below).
    MemRef(ref_type ref, Allocator& alloc) noexcept;

    void set_ref(ref_type ref);
    void set_addr(char* addr);

#if REALM_ENABLE_MEMDEBUG
    // Allocator that created m_ref. Used to verify that the ref is valid whenever you call
    // get_ref()/get_addr() and that it e.g. has not been free'd.
    const Allocator* m_alloc = nullptr;
/// The common interface for Realm allocators.
///
/// A Realm allocator must associate a 'ref' to each allocated
/// object and be able to efficiently map any 'ref' to the
/// corresponding memory address. The 'ref' is an integer and it must
/// always be divisible by 8. Also, a value of zero is used to
/// indicate a null-reference, and must therefore never be returned by
/// Allocator::alloc().
///
/// The purpose of the 'refs' is to decouple the memory reference from
/// the actual address and thereby allowing objects to be relocated in
/// memory without having to modify stored references.

    /// Calls do_alloc().
    ///
    /// The specified size must be divisible by 8, and must not be
    /// zero.
    ///
    /// \throw std::bad_alloc If insufficient memory was available.
    MemRef alloc(size_t size);

    /// Calls do_realloc().
    ///
    /// Note: The underscore has been added because the name `realloc`
    /// would conflict with a macro on the Windows platform.
    MemRef realloc_(ref_type, const char* addr, size_t old_size, size_t new_size);

    /// Calls do_free().
    ///
    /// Note: The underscore has been added because the name `free`
    /// would conflict with a macro on the Windows platform.
    void free_(ref_type, const char* addr) noexcept;

    /// Shorthand for free_(mem.get_ref(), mem.get_addr()).
    void free_(MemRef mem) noexcept;

    /// Calls do_translate().
    char* translate(ref_type ref) const noexcept;
    /// Returns true if, and only if the object at the specified 'ref'
    /// is in the immutable part of the memory managed by this
    /// allocator. The method by which some objects become part of the
    /// immutable part is entirely up to the class that implements
    /// this interface.
    bool is_read_only(ref_type) const noexcept;

    /// Returns a simple allocator that can be used with free-standing
    /// Realm objects (such as a free-standing table). A
    /// free-standing object is one that is not part of a Group, and
    /// therefore, is not part of an actual database.
    static Allocator& get_default() noexcept;

    virtual ~Allocator() noexcept;

    // Disable copying. Copying an allocator can produce double frees.
    Allocator(const Allocator&) = delete;
    Allocator& operator=(const Allocator&) = delete;

    /// Hook for subclasses to validate their internal state.
    virtual void verify() const = 0;
128 /// Terminate the program precisely when the specified 'ref' is
129 /// freed (or reallocated). You can use this to detect whether the
130 /// ref is freed (or reallocated), and even to get a stacktrace at
131 /// the point where it happens. Call watch(0) to stop watching
133 void watch(ref_type ref)
    /// Returns the replication instance associated with this allocator,
    /// or null when there is none (see the definition below).
    Replication* get_replication() noexcept;

    size_t m_baseline = 0; // Separation line between immutable and mutable refs.

    Replication* m_replication = nullptr;

    // Ref currently being watched (see watch()); 0 means "not watching".
    ref_type m_debug_watch = 0;
    /// The specified size must be divisible by 8, and must not be
    /// zero.
    ///
    /// \throw std::bad_alloc If insufficient memory was available.
    virtual MemRef do_alloc(const size_t size) = 0;

    /// The specified size must be divisible by 8, and must not be
    /// zero.
    ///
    /// The default version of this function simply allocates a new
    /// chunk of memory, copies over the old contents, and then frees
    /// the old chunk.
    ///
    /// \throw std::bad_alloc If insufficient memory was available.
    virtual MemRef do_realloc(ref_type, const char* addr, size_t old_size, size_t new_size) = 0;

    /// Release the specified chunk of memory.
    virtual void do_free(ref_type, const char* addr) noexcept = 0;

    /// Map the specified \a ref to the corresponding memory
    /// address. Note that if is_read_only(ref) returns true, then the
    /// referenced object is to be considered immutable, and it is
    /// then entirely the responsibility of the caller that the memory
    /// is not modified by way of the returned memory pointer.
    virtual char* do_translate(ref_type ref) const noexcept = 0;

    Allocator() noexcept;

    // FIXME: This really doesn't belong in an allocator, but it is the best
    // place for now, because every table has a pointer leading here. It would
    // be more obvious to place it in Group, but that would add a runtime overhead,
    // and access is time critical.
    //
    // This means that multiple threads that allocate Realm objects through the
    // default allocator will share this variable, which is a logical design flaw
    // that can make sync_if_needed() re-run queries even though it is not required.
    // It must be atomic because it's shared.
    std::atomic<uint_fast64_t> m_table_versioning_counter;
    std::atomic<uint_fast64_t> m_latest_observed_counter;

    /// Bump the global version counter. This method should be called when
    /// version bumping is initiated. Then following calls to should_propagate_version()
    /// can be used to prune the version bumping.
    void bump_global_version() noexcept;

    /// Determine if the "local_version" is out of sync, so that it should
    /// be updated. In that case: also update it. Called from Table::bump_version
    /// to control propagation of version updates on tables within the group.
    bool should_propagate_version(uint_fast64_t& local_version) noexcept;

    /// Note the current global version has been observed.
    void observe_version() noexcept;
205 inline void Allocator::bump_global_version() noexcept
207 if (m_latest_observed_counter == m_table_versioning_counter)
208 m_table_versioning_counter += 1;
212 inline void Allocator::observe_version() noexcept
214 if (m_latest_observed_counter != m_table_versioning_counter)
215 m_latest_observed_counter.store(m_table_versioning_counter, std::memory_order_relaxed);
219 inline bool Allocator::should_propagate_version(uint_fast64_t& local_version) noexcept
221 if (local_version != m_table_versioning_counter) {
222 local_version = m_table_versioning_counter;
233 inline int_fast64_t from_ref(ref_type v) noexcept
235 // Check that v is divisible by 8 (64-bit aligned).
236 REALM_ASSERT_DEBUG(v % 8 == 0);
238 static_assert(std::is_same<ref_type, size_t>::value,
239 "If ref_type changes, from_ref and to_ref should probably be updated");
241 // Make sure that we preserve the bit pattern of the ref_type (without sign extension).
242 return util::from_twos_compl<int_fast64_t>(uint_fast64_t(v));
245 inline ref_type to_ref(int_fast64_t v) noexcept
247 // Check that v is divisible by 8 (64-bit aligned).
248 REALM_ASSERT_DEBUG(v % 8 == 0);
250 // C++11 standard, paragraph 4.7.2 [conv.integral]:
251 // If the destination type is unsigned, the resulting value is the least unsigned integer congruent to the source
252 // integer (modulo 2n where n is the number of bits used to represent the unsigned type). [ Note: In a two's
253 // complement representation, this conversion is conceptual and there is no change in the bit pattern (if there is
254 // no truncation). - end note ]
255 static_assert(std::is_unsigned<ref_type>::value,
256 "If ref_type changes, from_ref and to_ref should probably be updated");
/// Convert a size_t to int64_t. The value is assumed to fit in the signed
/// range (the assert below is currently disabled).
inline int64_t to_int64(size_t value) noexcept
{
    // FIXME: Enable once we get clang warning flags correct
    // REALM_ASSERT_DEBUG(value <= std::numeric_limits<int64_t>::max());
    return static_cast<int64_t>(value);
}
268 inline MemRef::MemRef() noexcept
274 inline MemRef::~MemRef() noexcept
278 inline MemRef::MemRef(char* addr, ref_type ref, Allocator& alloc) noexcept
282 static_cast<void>(alloc);
283 #if REALM_ENABLE_MEMDEBUG
288 inline MemRef::MemRef(ref_type ref, Allocator& alloc) noexcept
289 : m_addr(alloc.translate(ref))
292 static_cast<void>(alloc);
293 #if REALM_ENABLE_MEMDEBUG
298 inline char* MemRef::get_addr()
300 #if REALM_ENABLE_MEMDEBUG
301 // Asserts if the ref has been freed
302 m_alloc->translate(m_ref);
307 inline ref_type MemRef::get_ref()
309 #if REALM_ENABLE_MEMDEBUG
310 // Asserts if the ref has been freed
311 m_alloc->translate(m_ref);
316 inline void MemRef::set_ref(ref_type ref)
318 #if REALM_ENABLE_MEMDEBUG
319 // Asserts if the ref has been freed
320 m_alloc->translate(ref);
325 inline void MemRef::set_addr(char* addr)
330 inline MemRef Allocator::alloc(size_t size)
332 return do_alloc(size);
335 inline MemRef Allocator::realloc_(ref_type ref, const char* addr, size_t old_size, size_t new_size)
338 if (ref == m_debug_watch)
339 REALM_TERMINATE("Allocator watch: Ref was reallocated");
341 return do_realloc(ref, addr, old_size, new_size);
344 inline void Allocator::free_(ref_type ref, const char* addr) noexcept
347 if (ref == m_debug_watch)
348 REALM_TERMINATE("Allocator watch: Ref was freed");
350 return do_free(ref, addr);
353 inline void Allocator::free_(MemRef mem) noexcept
355 free_(mem.get_ref(), mem.get_addr());
358 inline char* Allocator::translate(ref_type ref) const noexcept
360 return do_translate(ref);
363 inline bool Allocator::is_read_only(ref_type ref) const noexcept
365 REALM_ASSERT_DEBUG(ref != 0);
366 REALM_ASSERT_DEBUG(m_baseline != 0); // Attached SlabAlloc
367 return ref < m_baseline;
370 inline Allocator::Allocator() noexcept
372 m_table_versioning_counter = 0;
373 m_latest_observed_counter = 0;
376 inline Allocator::~Allocator() noexcept
380 inline Replication* Allocator::get_replication() noexcept
382 return m_replication;
387 #endif // REALM_ALLOC_HPP