Bug Summary

File: deps/jemalloc/include/jemalloc/internal/extent_inlines.h
Warning: line 183, column 35
The left operand of '&' is a garbage value
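
A note on the defect: the path below ends in extent_arena_set(), which
updates the packed e_bits word of a stack-allocated extent_t with a
read-modify-write. Because nothing has stored to e_bits yet, the left
operand of '&' is indeterminate. A minimal standalone C sketch of the
flagged pattern (hypothetical names, not jemalloc's API):

#include <stdint.h>

struct ext { uint64_t bits; };

static void set_field(struct ext *e, uint64_t mask, uint64_t val) {
	/* Reads e->bits before any value has been stored into it. */
	e->bits = (e->bits & ~mask) | val;
}

int main(void) {
	struct ext e;              /* uninitialized, like the stack extent_t */
	set_field(&e, 0xfffu, 42); /* left operand of '&' is a garbage value */
	return 0;
}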

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name extent_dss.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _GNU_SOURCE -D _REENTRANT -I include -I include -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -std=gnu99 -fdebug-compilation-dir /home/netto/Desktop/redis-6.2.1/deps/jemalloc -ferror-limit 19 -fmessage-length 0 -funroll-loops -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -o /tmp/scan-build-2021-03-14-133648-8817-1 -x c src/extent_dss.c

src/extent_dss.c

1#define JEMALLOC_EXTENT_DSS_C_
2#include "jemalloc/internal/jemalloc_preamble.h"
3#include "jemalloc/internal/jemalloc_internal_includes.h"
4
5#include "jemalloc/internal/assert.h"
6#include "jemalloc/internal/extent_dss.h"
7#include "jemalloc/internal/spin.h"
8
9/******************************************************************************/
10/* Data. */
11
12const char *opt_dss = DSS_DEFAULT;
13
14const char *dss_prec_names[] = {
15 "disabled",
16 "primary",
17 "secondary",
18 "N/A"
19};
20
21/*
22 * Current dss precedence default, used when creating new arenas. NB: This is
23 * stored as unsigned rather than dss_prec_t because in principle there's no
24 * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
25 * atomic operations to synchronize the setting.
26 */
27static atomic_u_t dss_prec_default = ATOMIC_INIT(
28 (unsigned)DSS_PREC_DEFAULT);
29
30/* Base address of the DSS. */
31static void *dss_base;
32/* Atomic boolean indicating whether a thread is currently extending DSS. */
33static atomic_b_t dss_extending;
34/* Atomic boolean indicating whether the DSS is exhausted. */
35static atomic_b_t dss_exhausted;
36/* Atomic current upper limit on DSS addresses. */
37static atomic_p_t dss_max;
38
39/******************************************************************************/
40
41static void *
42extent_dss_sbrk(intptr_t increment) {
43#ifdef JEMALLOC_DSS
44 return sbrk(increment);
45#else
46 not_implemented();
47 return NULL;
48#endif
49}
50
51dss_prec_t
52extent_dss_prec_get(void) {
53 dss_prec_t ret;
54
55 if (!have_dss) {
56 return dss_prec_disabled;
57 }
58 ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);
59 return ret;
60}
61
62bool
63extent_dss_prec_set(dss_prec_t dss_prec) {
64 if (!have_dss) {
65 return (dss_prec != dss_prec_disabled);
66 }
67 atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);
68 return false;
69}
70
71static void
72extent_dss_extending_start(void) {
73 spin_t spinner = SPIN_INITIALIZER;
74 while (true) {
75 bool expected = false;
76 if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
77 true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
78 break;
79 }
80 spin_adaptive(&spinner);
81 }
82}
83
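extent_dss_extending_start() above is a spin lock built from a weak
compare-and-swap. A minimal sketch of the same pattern in portable C11
(standalone names; jemalloc's atomic_* wrappers are thin layers over
these stdatomic operations):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool extending;

static void extending_start(void) {
	while (true) {
		bool expected = false;
		/* On failure, 'expected' is refreshed with the current value. */
		if (atomic_compare_exchange_weak_explicit(&extending,
		    &expected, true, memory_order_acq_rel,
		    memory_order_relaxed)) {
			break;
		}
		/* jemalloc backs off here via spin_adaptive(&spinner). */
	}
}
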
84static void
85extent_dss_extending_finish(void) {
86 assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));
87
88 atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
89}
90
91static void *
92extent_dss_max_update(void *new_addr) {
93 /*
94 * Get the current end of the DSS as max_cur and assure that dss_max is
95 * up to date.
96 */
97 void *max_cur = extent_dss_sbrk(0);
98 if (max_cur == (void *)-1) {
99 return NULL;
100 }
101 atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
102 /* Fixed new_addr can only be supported if it is at the edge of DSS. */
103 if (new_addr != NULL && max_cur != new_addr) {
104 return NULL;
105 }
106 return max_cur;
107}
108
109void *
110extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
111 size_t alignment, bool *zero, bool *commit) {
112 extent_t *gap;
113
114 cassert(have_dss);
1
Taking false branch
2
Loop condition is false. Exiting loop
115 assert(size > 0);
3
Taking false branch
4
Loop condition is false. Exiting loop
116 assert(alignment > 0);
5
Taking false branch
6
Loop condition is false. Exiting loop
117
118 /*
119 * sbrk() uses a signed increment argument, so take care not to
120 * interpret a large allocation request as a negative increment.
121 */
122 if ((intptr_t)size < 0) {
6.1
'size' is >= 0
7
Taking false branch
123 return NULL;
124 }
125
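The check above relies on the sign flip when a size_t above INTPTR_MAX is
reinterpreted as intptr_t; a tiny sketch of the guard in isolation
(illustrative helper, not part of jemalloc):

#include <stdint.h>
#include <stddef.h>

/* True when 'size' would appear as a negative sbrk() increment. */
static int request_too_large(size_t size) {
	return (intptr_t)size < 0; /* e.g. true for (size_t)-1 / 2 + 1 */
}
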
126 gap = extent_alloc(tsdn, arena);
127 if (gap == NULL) {
8
Assuming 'gap' is not equal to NULL
9
Taking false branch
128 return NULL;
129 }
130
131 extent_dss_extending_start();
132 if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
10
Assuming the condition is true
11
Taking true branch
133 /*
134 * The loop is necessary to recover from races with other
135 * threads that are using the DSS for something other than
136 * malloc.
137 */
138 while (true) {
12
Loop condition is true. Entering loop body
139 void *max_cur = extent_dss_max_update(new_addr);
140 if (max_cur == NULL) {
13
Assuming 'max_cur' is not equal to NULL
14
Taking false branch
141 goto label_oom;
142 }
143
144 /*
145 * Compute how much page-aligned gap space (if any) is
146 * necessary to satisfy alignment. This space can be
147 * recycled for later use.
148 */
149 void *gap_addr_page = (void *)(PAGE_CEILING(
150 (uintptr_t)max_cur));
151 void *ret = (void *)ALIGNMENT_CEILING(
152 (uintptr_t)gap_addr_page, alignment);
153 size_t gap_size_page = (uintptr_t)ret -
154 (uintptr_t)gap_addr_page;
155 if (gap_size_page != 0) {
15
Assuming 'gap_size_page' is equal to 0
16
Taking false branch
156 extent_init(gap, arena, gap_addr_page,
157 gap_size_page, false, NSIZES,
158 arena_extent_sn_next(arena),
159 extent_state_active, false, true, true);
160 }
161 /*
162 * Compute the address just past the end of the desired
163 * allocation space.
164 */
165 void *dss_next = (void *)((uintptr_t)ret + size);
166 if ((uintptr_t)ret < (uintptr_t)max_cur ||
17
Assuming 'ret' is >= 'max_cur'
19
Taking false branch
167 (uintptr_t)dss_next < (uintptr_t)max_cur) {
18
Assuming 'dss_next' is >= 'max_cur'
168 goto label_oom; /* Wrap-around. */
169 }
170 /* Compute the increment, including subpage bytes. */
171 void *gap_addr_subpage = max_cur;
172 size_t gap_size_subpage = (uintptr_t)ret -
173 (uintptr_t)gap_addr_subpage;
174 intptr_t incr = gap_size_subpage + size;
175
176 assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
177 size);
20
Taking false branch
21
Loop condition is false. Exiting loop
178
179 /* Try to allocate. */
180 void *dss_prev = extent_dss_sbrk(incr);
181 if (dss_prev == max_cur) {
22
Assuming 'dss_prev' is equal to 'max_cur'
23
Taking true branch
182 /* Success. */
183 atomic_store_p(&dss_max, dss_next,
184 ATOMIC_RELEASE);
185 extent_dss_extending_finish();
186
187 if (gap_size_page != 0) {
23.1
'gap_size_page' is equal to 0
24
Taking false branch
188 extent_dalloc_gap(tsdn, arena, gap);
189 } else {
190 extent_dalloc(tsdn, arena, gap);
191 }
192 if (!*commit) {
25
Assuming the condition is false
26
Taking false branch
193 *commit = pages_decommit(ret, size);
194 }
195 if (*zero && *commit) {
27
Assuming the condition is true
28
Taking true branch
196 extent_hooks_t *extent_hooks =
197 EXTENT_HOOKS_INITIALIZER;
198 extent_t extent;
199
200 extent_init(&extent, arena, ret, size,
29
Calling 'extent_init'
201 size, false, NSIZES,
202 extent_state_active, false, true,
203 true);
204 if (extent_purge_forced_wrapper(tsdn,
205 arena, &extent_hooks, &extent, 0,
206 size)) {
207 memset(ret, 0, size);
208 }
209 }
210 return ret;
211 }
212 /*
213 * Failure, whether due to OOM or a race with a raw
214 * sbrk() call from outside the allocator.
215 */
216 if (dss_prev == (void *)-1) {
217 /* OOM. */
218 atomic_store_b(&dss_exhausted, true,
219 ATOMIC_RELEASE);
220 goto label_oom;
221 }
222 }
223 }
224label_oom:
225 extent_dss_extending_finish();
226 extent_dalloc(tsdn, arena, gap);
227 return NULL;
228}
229
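The page/alignment gap arithmetic in extent_alloc_dss() can be
sanity-checked with concrete numbers. A small standalone sketch using the
macro definitions shown in the listing (4 KiB pages; the addresses are
made up):

#include <assert.h>
#include <stdint.h>

#define PAGE ((uintptr_t)4096)
#define PAGE_CEILING(a) (((a) + (PAGE - 1)) & ~(PAGE - 1))
#define ALIGNMENT_CEILING(a, align) \
	(((a) + ((align) - 1)) & (~(uintptr_t)(align) + 1))

int main(void) {
	uintptr_t max_cur = 0x100010;	/* current break, mid-page */
	uintptr_t gap_addr_page = PAGE_CEILING(max_cur);	/* 0x101000 */
	uintptr_t ret = ALIGNMENT_CEILING(gap_addr_page, 8192);	/* 0x102000 */
	uintptr_t gap_size_page = ret - gap_addr_page;	/* one recyclable page */
	assert(gap_addr_page == 0x101000 && ret == 0x102000);
	assert(gap_size_page == 0x1000);
	return 0;
}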
230static bool
231extent_in_dss_helper(void *addr, void *max) {
232 return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
233 (uintptr_t)max);
234}
235
236bool
237extent_in_dss(void *addr) {
238 cassert(have_dss);
239
240 return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
241 ATOMIC_ACQUIRE));
242}
243
244bool
245extent_dss_mergeable(void *addr_a, void *addr_b) {
246 void *max;
247
248 cassert(have_dss);
249
250 if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
251 (uintptr_t)dss_base) {
252 return true;
253 }
254
255 max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
256 return (extent_in_dss_helper(addr_a, max) ==
257 extent_in_dss_helper(addr_b, max));
258}
259
260void
261extent_dss_boot(void) {
262 cassert(have_dss);
263
264 dss_base = extent_dss_sbrk(0);
265 atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
266 atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
267 atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
268}
269
270/******************************************************************************/

include/jemalloc/internal/extent_inlines.h

1#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
2#define JEMALLOC_INTERNAL_EXTENT_INLINES_H
3
4#include "jemalloc/internal/mutex.h"
5#include "jemalloc/internal/mutex_pool.h"
6#include "jemalloc/internal/pages.h"
7#include "jemalloc/internal/prng.h"
8#include "jemalloc/internal/ql.h"
9#include "jemalloc/internal/sz.h"
10
11static inline void
12extent_lock(tsdn_t *tsdn, extent_t *extent) {
13 assert(extent != NULL);
14 mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
15}
16
17static inline void
18extent_unlock(tsdn_t *tsdn, extent_t *extent) {
19 assert(extent != NULL);
20 mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
21}
22
23static inline void
24extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
25 assert(extent1 != NULL && extent2 != NULL);
26 mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
27 (uintptr_t)extent2);
28}
29
30static inline void
31extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
32 assert(extent1 != NULL && extent2 != NULL);
33 mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
34 (uintptr_t)extent2);
35}
36
37static inline arena_t *
38extent_arena_get(const extent_t *extent) {
39 unsigned arena_ind = (unsigned)((extent->e_bits &
40 EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
41 /*
42 * The following check is omitted because we should never actually read
43 * a NULL arena pointer.
44 */
45 if (false && arena_ind >= MALLOCX_ARENA_LIMIT) {
46 return NULL;
47 }
48 assert(arena_ind < MALLOCX_ARENA_LIMIT);
49 return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE);
50}
51
52static inline szind_t
53extent_szind_get_maybe_invalid(const extent_t *extent) {
54 szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
55 EXTENT_BITS_SZIND_SHIFT);
56 assert(szind <= NSIZES);
57 return szind;
58}
59
60static inline szind_t
61extent_szind_get(const extent_t *extent) {
62 szind_t szind = extent_szind_get_maybe_invalid(extent);
63 assert(szind < NSIZES); /* Never call when "invalid". */
64 return szind;
65}
66
67static inline size_t
68extent_usize_get(const extent_t *extent) {
69 return sz_index2size(extent_szind_get(extent));
70}
71
72static inline size_t
73extent_sn_get(const extent_t *extent) {
74 return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
75 EXTENT_BITS_SN_SHIFT);
76}
77
78static inline extent_state_t
79extent_state_get(const extent_t *extent) {
80 return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
81 EXTENT_BITS_STATE_SHIFT);
82}
83
84static inline bool
85extent_zeroed_get(const extent_t *extent) {
86 return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
87 EXTENT_BITS_ZEROED_SHIFT);
88}
89
90static inline bool
91extent_committed_get(const extent_t *extent) {
92 return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
93 EXTENT_BITS_COMMITTED_SHIFT);
94}
95
96static inline bool
97extent_dumpable_get(const extent_t *extent) {
98 return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
99 EXTENT_BITS_DUMPABLE_SHIFT);
100}
101
102static inline bool
103extent_slab_get(const extent_t *extent) {
104 return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
105 EXTENT_BITS_SLAB_SHIFT);
106}
107
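All of these getters decode fields packed into the single 64-bit e_bits
word with a mask and a shift, and the setters below invert the pattern
with a read-modify-write. A generic sketch of the idiom (the 3-bit field
at bit 12 is illustrative, not jemalloc's actual layout):

#include <assert.h>
#include <stdint.h>

#define FIELD_SHIFT 12
#define FIELD_MASK (((uint64_t)0x7) << FIELD_SHIFT)

static uint64_t field_get(uint64_t bits) {
	return (bits & FIELD_MASK) >> FIELD_SHIFT;
}

static uint64_t field_set(uint64_t bits, uint64_t val) {
	/* Clear the field, then merge the new value into place. */
	return (bits & ~FIELD_MASK) | (val << FIELD_SHIFT);
}

int main(void) {
	uint64_t bits = 0;	/* a defined starting value matters; see the report */
	bits = field_set(bits, 5);
	assert(field_get(bits) == 5);
	return 0;
}
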
108static inline unsigned
109extent_nfree_get(const extent_t *extent) {
110 assert(extent_slab_get(extent));
111 return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
112 EXTENT_BITS_NFREE_SHIFT);
113}
114
115static inline void *
116extent_base_get(const extent_t *extent) {
117 assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
118 !extent_slab_get(extent));
119 return PAGE_ADDR2BASE(extent->e_addr);
120}
121
122static inline void *
123extent_addr_get(const extent_t *extent) {
124 assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
125 !extent_slab_get(extent));
126 return extent->e_addr;
127}
128
129static inline size_t
130extent_size_get(const extent_t *extent) {
131 return (extent->e_size_esn & EXTENT_SIZE_MASK);
132}
133
134static inline size_t
135extent_esn_get(const extent_t *extent) {
136 return (extent->e_size_esn & EXTENT_ESN_MASK);
137}
138
139static inline size_t
140extent_bsize_get(const extent_t *extent) {
141 return extent->e_bsize;
142}
143
144static inline void *
145extent_before_get(const extent_t *extent) {
146 return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
147}
148
149static inline void *
150extent_last_get(const extent_t *extent) {
151 return (void *)((uintptr_t)extent_base_get(extent) +
152 extent_size_get(extent) - PAGE);
153}
154
155static inline void *
156extent_past_get(const extent_t *extent) {
157 return (void *)((uintptr_t)extent_base_get(extent) +
158 extent_size_get(extent));
159}
160
161static inline arena_slab_data_t *
162extent_slab_data_get(extent_t *extent) {
163 assert(extent_slab_get(extent));
164 return &extent->e_slab_data;
165}
166
167static inline const arena_slab_data_t *
168extent_slab_data_get_const(const extent_t *extent) {
169 assert(extent_slab_get(extent));
170 return &extent->e_slab_data;
171}
172
173static inline prof_tctx_t *
174extent_prof_tctx_get(const extent_t *extent) {
175 return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx,
176 ATOMIC_ACQUIRE);
177}
178
179static inline void
180extent_arena_set(extent_t *extent, arena_t *arena) {
181 unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
182 MALLOCX_ARENA_BITS) - 1);
33
Assuming 'arena' is equal to NULL
34
'?' condition is false
183 extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
35
The left operand of '&' is a garbage value
184 ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
185}
186
187static inline void
188extent_addr_set(extent_t *extent, void *addr) {
189 extent->e_addr = addr;
190}
191
192static inline void
193extent_addr_randomize(UNUSED tsdn_t *tsdn, extent_t *extent, size_t alignment) {
194 assert(extent_base_get(extent) == extent_addr_get(extent));
195
196 if (alignment < PAGE) {
197 unsigned lg_range = LG_PAGE -
198 lg_floor(CACHELINE_CEILING(alignment));
199 size_t r;
200 if (!tsdn_null(tsdn)) {
201 tsd_t *tsd = tsdn_tsd(tsdn);
202 r = (size_t)prng_lg_range_u64(
203 tsd_offset_statep_get(tsd), lg_range);
204 } else {
205 r = prng_lg_range_zu(
206 &extent_arena_get(extent)->offset_state,
207 lg_range, true);
208 }
209 uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
210 lg_range);
211 extent->e_addr = (void *)((uintptr_t)extent->e_addr +
212 random_offset);
213 assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
214 extent->e_addr);
215 }
216}
217
218static inline void
219extent_size_set(extent_t *extent, size_t size) {
220 assert((size & ~EXTENT_SIZE_MASK) == 0);
221 extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
222}
223
224static inline void
225extent_esn_set(extent_t *extent, size_t esn) {
226 extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
227 EXTENT_ESN_MASK);
228}
229
230static inline void
231extent_bsize_set(extent_t *extent, size_t bsize) {
232 extent->e_bsize = bsize;
233}
234
235static inline void
236extent_szind_set(extent_t *extent, szind_t szind) {
237 assert(szind <= NSIZES); /* NSIZES means "invalid". */
238 extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
239 ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
240}
241
242static inline void
243extent_nfree_set(extent_t *extent, unsigned nfree) {
244 assert(extent_slab_get(extent));
245 extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
246 ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
247}
248
249static inline void
250extent_nfree_inc(extent_t *extent) {
251 assert(extent_slab_get(extent));
252 extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
253}
254
255static inline void
256extent_nfree_dec(extent_t *extent) {
257 assert(extent_slab_get(extent));
258 extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
259}
260
261static inline void
262extent_sn_set(extent_t *extent, size_t sn) {
263 extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
264 ((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
265}
266
267static inline void
268extent_state_set(extent_t *extent, extent_state_t state) {
269 extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
270 ((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
271}
272
273static inline void
274extent_zeroed_set(extent_t *extent, bool zeroed) {
275 extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
276 ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
277}
278
279static inline void
280extent_committed_set(extent_t *extent, bool committed) {
281 extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
282 ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
283}
284
285static inline void
286extent_dumpable_set(extent_t *extent, bool dumpable) {
287 extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
288 ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
289}
290
291static inline void
292extent_slab_set(extent_t *extent, bool slab) {
293 extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
294 ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
295}
296
297static inline void
298extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
299 atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
300}
301
302static inline void
303extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
304 bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
305 bool committed, bool dumpable) {
306 assert(addr == PAGE_ADDR2BASE(addr) || !slab);
30
Taking false branch
31
Loop condition is false. Exiting loop
307
308 extent_arena_set(extent, arena);
32
Calling 'extent_arena_set'
309 extent_addr_set(extent, addr);
310 extent_size_set(extent, size);
311 extent_slab_set(extent, slab);
312 extent_szind_set(extent, szind);
313 extent_sn_set(extent, sn);
314 extent_state_set(extent, state);
315 extent_zeroed_set(extent, zeroed);
316 extent_committed_set(extent, committed);
317 extent_dumpable_set(extent, dumpable);
318 ql_elm_new(extent, ql_link);
319 if (config_prof) {
320 extent_prof_tctx_set(extent, NULL);
321 }
322}
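
Note on the report: extent_arena_set() is the first setter extent_init()
calls on the stack extent_t declared in extent_alloc_dss(), so its
read-modify-write is the first access to e_bits, and the word is still
indeterminate at that point. A sketch of one possible fix (an assumption
for illustration, not jemalloc's actual patch): give e_bits a defined
value before the setters run.

static inline void
extent_init_fixed(extent_t *extent, arena_t *arena, void *addr, size_t size,
    bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
    bool committed, bool dumpable) {
	extent->e_bits = 0;	/* defined value before the first '&' on e_bits */
	extent_arena_set(extent, arena);
	extent_addr_set(extent, addr);
	/* ... remaining setters exactly as in extent_init() above ... */
}

Every field that extent_init() touches is subsequently overwritten, so the
zeroing only removes the read of uninitialized memory (and leaves the
otherwise-unset nfree bits at a defined zero).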
323
324static inline void
325extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
326 extent_arena_set(extent, NULL);
327 extent_addr_set(extent, addr);
328 extent_bsize_set(extent, bsize);
329 extent_slab_set(extent, false);
330 extent_szind_set(extent, NSIZES);
331 extent_sn_set(extent, sn);
332 extent_state_set(extent, extent_state_active);
333 extent_zeroed_set(extent, true);
334 extent_committed_set(extent, true);
335 extent_dumpable_set(extent, true);
336}
337
338static inline void
339extent_list_init(extent_list_t *list) {
340 ql_new(list);
341}
342
343static inline extent_t *
344extent_list_first(const extent_list_t *list) {
345 return ql_first(list);
346}
347
348static inline extent_t *
349extent_list_last(const extent_list_t *list) {
350 return ql_last(list, ql_link);
351}
352
353static inline void
354extent_list_append(extent_list_t *list, extent_t *extent) {
355 ql_tail_insert(list, extent, ql_link);
356}
357
358static inline void
359extent_list_prepend(extent_list_t *list, extent_t *extent) {
360 ql_head_insert(list, extent, ql_link);
361}
362
363static inline void
364extent_list_replace(extent_list_t *list, extent_t *to_remove,
365 extent_t *to_insert) {
366 ql_after_insert(to_remove, to_insert, ql_link);
367 ql_remove(list, to_remove, ql_link);
368}
369
370static inline void
371extent_list_remove(extent_list_t *list, extent_t *extent) {
372 ql_remove(list, extent, ql_link);
373}
374
375static inline int
376extent_sn_comp(const extent_t *a, const extent_t *b) {
377 size_t a_sn = extent_sn_get(a);
378 size_t b_sn = extent_sn_get(b);
379
380 return (a_sn > b_sn) - (a_sn < b_sn);
381}
382
383static inline int
384extent_esn_comp(const extent_t *a, const extent_t *b) {
385 size_t a_esn = extent_esn_get(a);
386 size_t b_esn = extent_esn_get(b);
387
388 return (a_esn > b_esn) - (a_esn < b_esn);
389}
390
391static inline int
392extent_ad_comp(const extent_t *a, const extent_t *b) {
393 uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
394 uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
395
396 return (a_addr > b_addr) - (a_addr < b_addr);
397}
398
399static inline int
400extent_ead_comp(const extent_t *a, const extent_t *b) {
401 uintptr_t a_eaddr = (uintptr_t)a;
402 uintptr_t b_eaddr = (uintptr_t)b;
403
404 return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
405}
406
407static inline int
408extent_snad_comp(const extent_t *a, const extent_t *b) {
409 int ret;
410
411 ret = extent_sn_comp(a, b);
412 if (ret != 0) {
413 return ret;
414 }
415
416 ret = extent_ad_comp(a, b);
417 return ret;
418}
419
420static inline int
421extent_esnead_comp(const extent_t *a, const extent_t *b) {
422 int ret;
423
424 ret = extent_esn_comp(a, b);
425 if (ret != 0) {
426 return ret;
427 }
428
429 ret = extent_ead_comp(a, b);
430 return ret;
431}
432
433#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */