1 /*************************************************************************
3 * Copyright 2016 Realm Inc.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 **************************************************************************/
19 #ifndef REALM_ARRAY_DIRECT_HPP
20 #define REALM_ARRAY_DIRECT_HPP
22 #include <realm/utilities.hpp>
23 #include <realm/alloc.hpp>
25 using namespace realm::util;
28 /* wid == 16/32 likely when accessing offsets in B tree */
29 #define REALM_TEMPEX(fun, wid, arg) \
30 if (wid == 16) {fun<16> arg;} \
31 else if (wid == 32) {fun<32> arg;} \
32 else if (wid == 0) {fun<0> arg;} \
33 else if (wid == 1) {fun<1> arg;} \
34 else if (wid == 2) {fun<2> arg;} \
35 else if (wid == 4) {fun<4> arg;} \
36 else if (wid == 8) {fun<8> arg;} \
37 else if (wid == 64) {fun<64> arg;} \
38 else {REALM_ASSERT_DEBUG(false); fun<0> arg;}
// REALM_TEMPEX2: as REALM_TEMPEX, but `fun` takes one leading template
// argument `targ` before the width: expands `fun<targ, W> arg`.
#define REALM_TEMPEX2(fun, targ, wid, arg) \
    if (wid == 16) {fun<targ, 16> arg;} \
    else if (wid == 32) {fun<targ, 32> arg;} \
    else if (wid == 0) {fun<targ, 0> arg;} \
    else if (wid == 1) {fun<targ, 1> arg;} \
    else if (wid == 2) {fun<targ, 2> arg;} \
    else if (wid == 4) {fun<targ, 4> arg;} \
    else if (wid == 8) {fun<targ, 8> arg;} \
    else if (wid == 64) {fun<targ, 64> arg;} \
    else {REALM_ASSERT_DEBUG(false); fun<targ, 0> arg;}
// REALM_TEMPEX3: as REALM_TEMPEX, with two leading template arguments:
// expands `fun<targ1, targ2, W> arg`.
#define REALM_TEMPEX3(fun, targ1, targ2, wid, arg) \
    if (wid == 16) {fun<targ1, targ2, 16> arg;} \
    else if (wid == 32) {fun<targ1, targ2, 32> arg;} \
    else if (wid == 0) {fun<targ1, targ2, 0> arg;} \
    else if (wid == 1) {fun<targ1, targ2, 1> arg;} \
    else if (wid == 2) {fun<targ1, targ2, 2> arg;} \
    else if (wid == 4) {fun<targ1, targ2, 4> arg;} \
    else if (wid == 8) {fun<targ1, targ2, 8> arg;} \
    else if (wid == 64) {fun<targ1, targ2, 64> arg;} \
    else {REALM_ASSERT_DEBUG(false); fun<targ1, targ2, 0> arg;}
// REALM_TEMPEX4: expands `fun<targ1, targ2, W, targ3> arg`. NOTE: unlike the
// other REALM_TEMPEXn macros, the width is the THIRD template argument here
// and `targ3` comes after it — keep call sites consistent with this order.
#define REALM_TEMPEX4(fun, targ1, targ2, wid, targ3, arg) \
    if (wid == 16) {fun<targ1, targ2, 16, targ3> arg;} \
    else if (wid == 32) {fun<targ1, targ2, 32, targ3> arg;} \
    else if (wid == 0) {fun<targ1, targ2, 0, targ3> arg;} \
    else if (wid == 1) {fun<targ1, targ2, 1, targ3> arg;} \
    else if (wid == 2) {fun<targ1, targ2, 2, targ3> arg;} \
    else if (wid == 4) {fun<targ1, targ2, 4, targ3> arg;} \
    else if (wid == 8) {fun<targ1, targ2, 8, targ3> arg;} \
    else if (wid == 64) {fun<targ1, targ2, 64, targ3> arg;} \
    else {REALM_ASSERT_DEBUG(false); fun<targ1, targ2, 0, targ3> arg;}
// REALM_TEMPEX5: as REALM_TEMPEX, with four leading template arguments:
// expands `fun<targ1, targ2, targ3, targ4, W> arg`.
#define REALM_TEMPEX5(fun, targ1, targ2, targ3, targ4, wid, arg) \
    if (wid == 16) {fun<targ1, targ2, targ3, targ4, 16> arg;} \
    else if (wid == 32) {fun<targ1, targ2, targ3, targ4, 32> arg;} \
    else if (wid == 0) {fun<targ1, targ2, targ3, targ4, 0> arg;} \
    else if (wid == 1) {fun<targ1, targ2, targ3, targ4, 1> arg;} \
    else if (wid == 2) {fun<targ1, targ2, targ3, targ4, 2> arg;} \
    else if (wid == 4) {fun<targ1, targ2, targ3, targ4, 4> arg;} \
    else if (wid == 8) {fun<targ1, targ2, targ3, targ4, 8> arg;} \
    else if (wid == 64) {fun<targ1, targ2, targ3, targ4, 64> arg;} \
    else {REALM_ASSERT_DEBUG(false); fun<targ1, targ2, targ3, targ4, 0> arg;}
87 // Direct access methods
89 template <size_t width>
90 void set_direct(char* data, size_t ndx, int_fast64_t value) noexcept
93 REALM_ASSERT_DEBUG(value == 0);
96 else if (width == 1) {
97 REALM_ASSERT_DEBUG(0 <= value && value <= 0x01);
98 size_t byte_ndx = ndx / 8;
99 size_t bit_ndx = ndx % 8;
100 typedef unsigned char uchar;
101 uchar* p = reinterpret_cast<uchar*>(data) + byte_ndx;
102 *p = uchar((*p & ~(0x01 << bit_ndx)) | (int(value) & 0x01) << bit_ndx);
104 else if (width == 2) {
105 REALM_ASSERT_DEBUG(0 <= value && value <= 0x03);
106 size_t byte_ndx = ndx / 4;
107 size_t bit_ndx = ndx % 4 * 2;
108 typedef unsigned char uchar;
109 uchar* p = reinterpret_cast<uchar*>(data) + byte_ndx;
110 *p = uchar((*p & ~(0x03 << bit_ndx)) | (int(value) & 0x03) << bit_ndx);
112 else if (width == 4) {
113 REALM_ASSERT_DEBUG(0 <= value && value <= 0x0F);
114 size_t byte_ndx = ndx / 2;
115 size_t bit_ndx = ndx % 2 * 4;
116 typedef unsigned char uchar;
117 uchar* p = reinterpret_cast<uchar*>(data) + byte_ndx;
118 *p = uchar((*p & ~(0x0F << bit_ndx)) | (int(value) & 0x0F) << bit_ndx);
120 else if (width == 8) {
121 REALM_ASSERT_DEBUG(std::numeric_limits<int8_t>::min() <= value &&
122 value <= std::numeric_limits<int8_t>::max());
123 *(reinterpret_cast<int8_t*>(data) + ndx) = int8_t(value);
125 else if (width == 16) {
126 REALM_ASSERT_DEBUG(std::numeric_limits<int16_t>::min() <= value &&
127 value <= std::numeric_limits<int16_t>::max());
128 *(reinterpret_cast<int16_t*>(data) + ndx) = int16_t(value);
130 else if (width == 32) {
131 REALM_ASSERT_DEBUG(std::numeric_limits<int32_t>::min() <= value &&
132 value <= std::numeric_limits<int32_t>::max());
133 *(reinterpret_cast<int32_t*>(data) + ndx) = int32_t(value);
135 else if (width == 64) {
136 REALM_ASSERT_DEBUG(std::numeric_limits<int64_t>::min() <= value &&
137 value <= std::numeric_limits<int64_t>::max());
138 *(reinterpret_cast<int64_t*>(data) + ndx) = int64_t(value);
141 REALM_ASSERT_DEBUG(false);
145 template <size_t width>
146 void fill_direct(char* data, size_t begin, size_t end, int_fast64_t value) noexcept
148 for (size_t i = begin; i != end; ++i)
149 set_direct<width>(data, i, value);
153 int64_t get_direct(const char* data, size_t ndx) noexcept
159 size_t offset = ndx >> 3;
160 return (data[offset] >> (ndx & 7)) & 0x01;
163 size_t offset = ndx >> 2;
164 return (data[offset] >> ((ndx & 3) << 1)) & 0x03;
167 size_t offset = ndx >> 1;
168 return (data[offset] >> ((ndx & 1) << 2)) & 0x0F;
171 return *reinterpret_cast<const signed char*>(data + ndx);
174 size_t offset = ndx * 2;
175 return *reinterpret_cast<const int16_t*>(data + offset);
178 size_t offset = ndx * 4;
179 return *reinterpret_cast<const int32_t*>(data + offset);
182 size_t offset = ndx * 8;
183 return *reinterpret_cast<const int64_t*>(data + offset);
185 REALM_ASSERT_DEBUG(false);
189 inline int64_t get_direct(const char* data, size_t width, size_t ndx) noexcept
191 REALM_TEMPEX(return get_direct, width, (data, ndx));
196 inline std::pair<int64_t, int64_t> get_two(const char* data, size_t ndx) noexcept
198 return std::make_pair(to_size_t(get_direct<width>(data, ndx + 0)), to_size_t(get_direct<width>(data, ndx + 1)));
201 inline std::pair<int64_t, int64_t> get_two(const char* data, size_t width, size_t ndx) noexcept
203 REALM_TEMPEX(return get_two, width, (data, ndx));
208 inline void get_three(const char* data, size_t ndx, ref_type& v0, ref_type& v1, ref_type& v2) noexcept
210 v0 = to_ref(get_direct<width>(data, ndx + 0));
211 v1 = to_ref(get_direct<width>(data, ndx + 1));
212 v2 = to_ref(get_direct<width>(data, ndx + 2));
215 inline void get_three(const char* data, size_t width, size_t ndx, ref_type& v0, ref_type& v1, ref_type& v2) noexcept
217 REALM_TEMPEX(get_three, width, (data, ndx, v0, v1, v2));
221 // Lower/upper bound in sorted sequence
222 // ------------------------------------
//   3 3 3 4 4 4 5 6 7 9 9 9
//   ^     ^     ^     ^     ^
//   |     |     |     |     |
//   |     |     |     |      -- Lower and upper bound of 15
//   |     |     |     |
//   |     |     |      -- Lower and upper bound of 8
//   |     |     |
//   |     |      -- Upper bound of 4
//   |     |
//   |      -- Lower bound of 4
//   |
//    -- Lower and upper bound of 1
237 // These functions are semantically identical to std::lower_bound() and
238 // std::upper_bound().
240 // We currently use binary search. See for example
241 // http://www.tbray.org/ongoing/When/200x/2003/03/22/Binary.
243 inline size_t lower_bound(const char* data, size_t size, int64_t value) noexcept
245 // The binary search used here is carefully optimized. Key trick is to use a single
246 // loop controlling variable (size) instead of high/low pair, and to keep updates
247 // to size done inside the loop independent of comparisons. Further key to speed
248 // is to avoid branching inside the loop, using conditional moves instead. This
249 // provides robust performance for random searches, though predictable searches
250 // might be slightly faster if we used branches instead. The loop unrolling yields
251 // a final 5-20% speedup depending on circumstances.
256 // The following code (at X, Y and Z) is 3 times manually unrolled instances of (A) below.
257 // These code blocks must be kept in sync. Meassurements indicate 3 times unrolling to give
258 // the best performance. See (A) for comments on the loop body.
260 size_t half = size / 2;
261 size_t other_half = size - half;
262 size_t probe = low + half;
263 size_t other_low = low + other_half;
264 int64_t v = get_direct<width>(data, probe);
266 low = (v < value) ? other_low : low;
270 other_half = size - half;
272 other_low = low + other_half;
273 v = get_direct<width>(data, probe);
275 low = (v < value) ? other_low : low;
279 other_half = size - half;
281 other_low = low + other_half;
282 v = get_direct<width>(data, probe);
284 low = (v < value) ? other_low : low;
288 // To understand the idea in this code, please note that
289 // for performance, computation of size for the next iteration
290 // MUST be INDEPENDENT of the conditional. This allows the
291 // processor to unroll the loop as fast as possible, and it
292 // minimizes the length of dependence chains leading up to branches.
293 // Making the unfolding of the loop independent of the data being
294 // searched, also minimizes the delays incurred by branch
295 // mispredictions, because they can be determined earlier
296 // and the speculation corrected earlier.
299 // To make size independent of data, we cannot always split the
300 // range at the theoretical optimal point. When we determine that
301 // the key is larger than the probe at some index K, and prepare
302 // to search the upper part of the range, you would normally start
303 // the search at the next index, K+1, to get the shortest range.
304 // We can only do this when splitting a range with odd number of entries.
305 // If there is an even number of entries we search from K instead of K+1.
306 // This potentially leads to redundant comparisons, but in practice we
307 // gain more performance by making the changes to size predictable.
309 // if size is even, half and other_half are the same.
310 // if size is odd, half is one less than other_half.
311 size_t half = size / 2;
312 size_t other_half = size - half;
313 size_t probe = low + half;
314 size_t other_low = low + other_half;
315 int64_t v = get_direct<width>(data, probe);
317 // for max performance, the line below should compile into a conditional
318 // move instruction. Not all compilers do this. To maximize chance
319 // of succes, no computation should be done in the branches of the
321 low = (v < value) ? other_low : low;
329 inline size_t upper_bound(const char* data, size_t size, int64_t value) noexcept
333 size_t half = size / 2;
334 size_t other_half = size - half;
335 size_t probe = low + half;
336 size_t other_low = low + other_half;
337 int64_t v = get_direct<width>(data, probe);
339 low = (value >= v) ? other_low : low;
342 other_half = size - half;
344 other_low = low + other_half;
345 v = get_direct<width>(data, probe);
347 low = (value >= v) ? other_low : low;
350 other_half = size - half;
352 other_low = low + other_half;
353 v = get_direct<width>(data, probe);
355 low = (value >= v) ? other_low : low;
359 size_t half = size / 2;
360 size_t other_half = size - half;
361 size_t probe = low + half;
362 size_t other_low = low + other_half;
363 int64_t v = get_direct<width>(data, probe);
365 low = (value >= v) ? other_low : low;
#endif /* REALM_ARRAY_DIRECT_HPP */