Bug Summary

File: deps/jemalloc/src/tcache.c
Warning: line 217, column 10
Value stored to 'ptr' during its initialization is never read
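
This is the analyzer's dead-store diagnostic (checker deadcode.DeadStores, enabled via -analyzer-checker=deadcode in the invocation recorded below): it fires when the value assigned to a local at its declaration is never read on any path before the variable is overwritten or goes out of scope. A minimal sketch that reproduces the identical message — file and function names are invented for illustration, this is not jemalloc code:

    /* dead_store.c — check with: clang --analyze dead_store.c */
    int sink(int);

    int f(const int *a) {
        int v = a[0];   /* Value stored to 'v' during its
                           initialization is never read */
        v = a[1];       /* the store that is actually consumed */
        return sink(v);
    }

The usual cleanups are to delay the declaration to the first real use (int v = a[1];) or to drop the unused initializer. In tcache.c the store at line 217 is only "read" by an assert() that this build configuration compiles away; see the note at the flagged line below.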

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name tcache.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _GNU_SOURCE -D _REENTRANT -I include -I include -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -std=gnu99 -fdebug-compilation-dir /home/netto/Desktop/redis-6.2.1/deps/jemalloc -ferror-limit 19 -fmessage-length 0 -funroll-loops -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -o /tmp/scan-build-2021-03-14-133648-8817-1 -x c src/tcache.c
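
(The line above is the clang -cc1 invocation the report was generated from. Judging by the -o /tmp/scan-build-2021-03-14-... output path, the analysis was driven by scan-build; assuming an unmodified redis-6.2.1 tree, a command along the lines of "scan-build -o /tmp make" run from the source root should regenerate a comparable report.)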
1 #define JEMALLOC_TCACHE_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/mutex.h"
7 #include "jemalloc/internal/size_classes.h"
8
9 /******************************************************************************/
10 /* Data. */
11
12 bool opt_tcache = true;
13 ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
14
15 cache_bin_info_t *tcache_bin_info;
16 static unsigned stack_nelms; /* Total stack elms per tcache. */
17
18 unsigned nhbins;
19 size_t tcache_maxclass;
20
21 tcaches_t *tcaches;
22
23 /* Index of first element within tcaches that has never been used. */
24 static unsigned tcaches_past;
25
26 /* Head of singly linked list tracking available tcaches elements. */
27 static tcaches_t *tcaches_avail;
28
29 /* Protects tcaches{,_past,_avail}. */
30 static malloc_mutex_t tcaches_mtx;
31
32 /******************************************************************************/
33
34 size_t
35 tcache_salloc(tsdn_t *tsdn, const void *ptr) {
36     return arena_salloc(tsdn, ptr);
37 }
38
39 void
40 tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
41     szind_t binind = tcache->next_gc_bin;
42
43     cache_bin_t *tbin;
44     if (binind < NBINS) {
45         tbin = tcache_small_bin_get(tcache, binind);
46     } else {
47         tbin = tcache_large_bin_get(tcache, binind);
48     }
49     if (tbin->low_water > 0) {
50         /*
51          * Flush (ceiling) 3/4 of the objects below the low water mark.
52          */
53         if (binind < NBINS) {
54             tcache_bin_flush_small(tsd, tcache, tbin, binind,
55                 tbin->ncached - tbin->low_water + (tbin->low_water
56                 >> 2));
57             /*
58              * Reduce fill count by 2X. Limit lg_fill_div such that
59              * the fill count is always at least 1.
60              */
61             cache_bin_info_t *tbin_info = &tcache_bin_info[binind];
62             if ((tbin_info->ncached_max >>
63                 (tcache->lg_fill_div[binind] + 1)) >= 1) {
64                 tcache->lg_fill_div[binind]++;
65             }
66         } else {
67             tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
68                 - tbin->low_water + (tbin->low_water >> 2), tcache);
69         }
70     } else if (tbin->low_water < 0) {
71         /*
72          * Increase fill count by 2X for small bins. Make sure
73          * lg_fill_div stays greater than 0.
74          */
75         if (binind < NBINS && tcache->lg_fill_div[binind] > 1) {
76             tcache->lg_fill_div[binind]--;
77         }
78     }
79     tbin->low_water = tbin->ncached;
80
81     tcache->next_gc_bin++;
82     if (tcache->next_gc_bin == nhbins) {
83         tcache->next_gc_bin = 0;
84     }
85 }
86
87 void *
88 tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
89     cache_bin_t *tbin, szind_t binind, bool *tcache_success) {
90     void *ret;
91
92     assert(tcache->arena != NULL);
93     arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
94         config_prof ? tcache->prof_accumbytes : 0);
95     if (config_prof) {
96         tcache->prof_accumbytes = 0;
97     }
98     ret = cache_bin_alloc_easy(tbin, tcache_success);
99
100     return ret;
101 }
102
103 void
104 tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
105     szind_t binind, unsigned rem) {
106     bool merged_stats = false;
107
108     assert(binind < NBINS);
109     assert((cache_bin_sz_t)rem <= tbin->ncached);
110
111     arena_t *arena = tcache->arena;
112     assert(arena != NULL);
113     unsigned nflush = tbin->ncached - rem;
114     VARIABLE_ARRAY(extent_t *, item_extent, nflush);
115     /* Look up extent once per item. */
116     for (unsigned i = 0 ; i < nflush; i++) {
117         item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
118     }
119
120     while (nflush > 0) {
121         /* Lock the arena bin associated with the first object. */
122         extent_t *extent = item_extent[0];
123         arena_t *bin_arena = extent_arena_get(extent);
124         bin_t *bin = &bin_arena->bins[binind];
125
126         if (config_prof && bin_arena == arena) {
127             if (arena_prof_accum(tsd_tsdn(tsd), arena,
128                 tcache->prof_accumbytes)) {
129                 prof_idump(tsd_tsdn(tsd));
130             }
131             tcache->prof_accumbytes = 0;
132         }
133
134         malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
135         if (config_stats && bin_arena == arena) {
136             assert(!merged_stats);
137             merged_stats = true;
138             bin->stats.nflushes++;
139             bin->stats.nrequests += tbin->tstats.nrequests;
140             tbin->tstats.nrequests = 0;
141         }
142         unsigned ndeferred = 0;
143         for (unsigned i = 0; i < nflush; i++) {
144             void *ptr = *(tbin->avail - 1 - i);
145             extent = item_extent[i];
146             assert(ptr != NULL && extent != NULL);
147
148             if (extent_arena_get(extent) == bin_arena) {
149                 arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
150                     bin_arena, extent, ptr);
151             } else {
152                 /*
153                  * This object was allocated via a different
154                  * arena bin than the one that is currently
155                  * locked. Stash the object, so that it can be
156                  * handled in a future pass.
157                  */
158                 *(tbin->avail - 1 - ndeferred) = ptr;
159                 item_extent[ndeferred] = extent;
160                 ndeferred++;
161             }
162         }
163         malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
164         arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
165         nflush = ndeferred;
166     }
167     if (config_stats && !merged_stats) {
168         /*
169          * The flush loop didn't happen to flush to this thread's
170          * arena, so the stats didn't get merged. Manually do so now.
171          */
172         bin_t *bin = &arena->bins[binind];
173         malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
174         bin->stats.nflushes++;
175         bin->stats.nrequests += tbin->tstats.nrequests;
176         tbin->tstats.nrequests = 0;
177         malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
178     }
179
180     memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
181         sizeof(void *));
182     tbin->ncached = rem;
183     if (tbin->ncached < tbin->low_water) {
184         tbin->low_water = tbin->ncached;
185     }
186 }
187
188 void
189 tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
190     unsigned rem, tcache_t *tcache) {
191     bool merged_stats = false;
192
193     assert(binind < nhbins);
194     assert((cache_bin_sz_t)rem <= tbin->ncached);
195
196     arena_t *arena = tcache->arena;
197     assert(arena != NULL);
198     unsigned nflush = tbin->ncached - rem;
199     VARIABLE_ARRAY(extent_t *, item_extent, nflush);
200     /* Look up extent once per item. */
201     for (unsigned i = 0 ; i < nflush; i++) {
202         item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
203     }
204
205     while (nflush > 0) {
206         /* Lock the arena associated with the first object. */
207         extent_t *extent = item_extent[0];
208         arena_t *locked_arena = extent_arena_get(extent);
209         UNUSED bool idump;
210
211         if (config_prof) {
212             idump = false;
213         }
214
215         malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
216         for (unsigned i = 0; i < nflush; i++) {
217             void *ptr = *(tbin->avail - 1 - i);

Value stored to 'ptr' during its initialization is never read
218             assert(ptr != NULL);
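            /*
             * Note: 'ptr' is read only by the assert() on line 218. In
             * this non-debug build, jemalloc's assert() reduces to a
             * branch on config_debug, a compile-time constant false, so
             * the branch is pruned and no live read of 'ptr' remains in
             * this pass -- the loop body otherwise consults only
             * item_extent[i]. One plausible cleanup (a sketch, not
             * necessarily the upstream fix) is to drop the local and
             * assert on the expression directly:
             *
             *     assert(*(tbin->avail - 1 - i) != NULL);
             *
             * The second pass (line 243) declares its own 'ptr' and does
             * consume it, which is why only line 217 is flagged.
             */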
219             extent = item_extent[i];
220             if (extent_arena_get(extent) == locked_arena) {
221                 large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
222                     extent);
223             }
224         }
225         if ((config_prof || config_stats) && locked_arena == arena) {
226             if (config_prof) {
227                 idump = arena_prof_accum(tsd_tsdn(tsd), arena,
228                     tcache->prof_accumbytes);
229                 tcache->prof_accumbytes = 0;
230             }
231             if (config_stats) {
232                 merged_stats = true;
233                 arena_stats_large_nrequests_add(tsd_tsdn(tsd),
234                     &arena->stats, binind,
235                     tbin->tstats.nrequests);
236                 tbin->tstats.nrequests = 0;
237             }
238         }
239         malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
240
241         unsigned ndeferred = 0;
242         for (unsigned i = 0; i < nflush; i++) {
243             void *ptr = *(tbin->avail - 1 - i);
244             extent = item_extent[i];
245             assert(ptr != NULL && extent != NULL);
246
247             if (extent_arena_get(extent) == locked_arena) {
248                 large_dalloc_finish(tsd_tsdn(tsd), extent);
249             } else {
250                 /*
251                  * This object was allocated via a different
252                  * arena than the one that is currently locked.
253                  * Stash the object, so that it can be handled
254                  * in a future pass.
255                  */
256                 *(tbin->avail - 1 - ndeferred) = ptr;
257                 item_extent[ndeferred] = extent;
258                 ndeferred++;
259             }
260         }
261         if (config_prof && idump) {
262             prof_idump(tsd_tsdn(tsd));
263         }
264         arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
265             ndeferred);
266         nflush = ndeferred;
267     }
268     if (config_stats && !merged_stats) {
269         /*
270          * The flush loop didn't happen to flush to this thread's
271          * arena, so the stats didn't get merged. Manually do so now.
272          */
273         arena_stats_large_nrequests_add(tsd_tsdn(tsd), &arena->stats,
274             binind, tbin->tstats.nrequests);
275         tbin->tstats.nrequests = 0;
276     }
277
278     memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
279         sizeof(void *));
280     tbin->ncached = rem;
281     if (tbin->ncached < tbin->low_water) {
282         tbin->low_water = tbin->ncached;
283     }
284 }
285
286 void
287 tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
288     assert(tcache->arena == NULL);
289     tcache->arena = arena;
290
291     if (config_stats) {
292         /* Link into list of extant tcaches. */
293         malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
294
295         ql_elm_new(tcache, link);
296         ql_tail_insert(&arena->tcache_ql, tcache, link);
297         cache_bin_array_descriptor_init(
298             &tcache->cache_bin_array_descriptor, tcache->bins_small,
299             tcache->bins_large);
300         ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
301             &tcache->cache_bin_array_descriptor, link);
302
303         malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
304     }
305 }
306
307 static void
308 tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
309     arena_t *arena = tcache->arena;
310     assert(arena != NULL);
311     if (config_stats) {
312         /* Unlink from list of extant tcaches. */
313         malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
314         if (config_debug) {
315             bool in_ql = false;
316             tcache_t *iter;
317             ql_foreach(iter, &arena->tcache_ql, link) {
318                 if (iter == tcache) {
319                     in_ql = true;
320                     break;
321                 }
322             }
323             assert(in_ql);
324         }
325         ql_remove(&arena->tcache_ql, tcache, link);
326         ql_remove(&arena->cache_bin_array_descriptor_ql,
327             &tcache->cache_bin_array_descriptor, link);
328         tcache_stats_merge(tsdn, tcache, arena);
329         malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
330     }
331     tcache->arena = NULL;
332 }
333
334 void
335 tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
336     tcache_arena_dissociate(tsdn, tcache);
337     tcache_arena_associate(tsdn, tcache, arena);
338 }
339
340 bool
341 tsd_tcache_enabled_data_init(tsd_t *tsd) {
342     /* Called upon tsd initialization. */
343     tsd_tcache_enabled_set(tsd, opt_tcache);
344     tsd_slow_update(tsd);
345
346     if (opt_tcache) {
347         /* Trigger tcache init. */
348         tsd_tcache_data_init(tsd);
349     }
350
351     return false;
352 }
353
354 /* Initialize auto tcache (embedded in TSD). */
355 static void
356 tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
357     memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
358     tcache->prof_accumbytes = 0;
359     tcache->next_gc_bin = 0;
360     tcache->arena = NULL;
361
362     ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
363
364     size_t stack_offset = 0;
365     assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
366     memset(tcache->bins_small, 0, sizeof(cache_bin_t) * NBINS);
367     memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - NBINS));
368     unsigned i = 0;
369     for (; i < NBINS; i++) {
370         tcache->lg_fill_div[i] = 1;
371         stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
372         /*
373          * avail points past the available space. Allocations will
374          * access the slots toward higher addresses (for the benefit of
375          * prefetch).
376          */
377         tcache_small_bin_get(tcache, i)->avail =
378             (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
379     }
380     for (; i < nhbins; i++) {
381         stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
382         tcache_large_bin_get(tcache, i)->avail =
383             (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
384     }
385     assert(stack_offset == stack_nelms * sizeof(void *));
386 }
387
388 /* Initialize auto tcache (embedded in TSD). */
389 bool
390 tsd_tcache_data_init(tsd_t *tsd) {
391     tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
392     assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
393     size_t size = stack_nelms * sizeof(void *);
394     /* Avoid false cacheline sharing. */
395     size = sz_sa2u(size, CACHELINE);
396
397     void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
398         NULL, true, arena_get(TSDN_NULL, 0, true));
399     if (avail_array == NULL) {
400         return true;
401     }
402
403     tcache_init(tsd, tcache, avail_array);
404     /*
405      * Initialization is a bit tricky here. After malloc init is done, all
406      * threads can rely on arena_choose and associate tcache accordingly.
407      * However, the thread that does actual malloc bootstrapping relies on
408      * functional tsd, and it can only rely on a0. In that case, we
409      * associate its tcache to a0 temporarily, and later on
410      * arena_choose_hard() will re-associate properly.
411      */
412     tcache->arena = NULL;
413     arena_t *arena;
414     if (!malloc_initialized()) {
415         /* If in initialization, assign to a0. */
416         arena = arena_get(tsd_tsdn(tsd), 0, false);
417         tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
418     } else {
419         arena = arena_choose(tsd, NULL);
420         /* This may happen if thread.tcache.enabled is used. */
421         if (tcache->arena == NULL) {
422             tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
423         }
424     }
425     assert(arena == tcache->arena);
426
427     return false;
428 }
429
430 /* Created manual tcache for tcache.create mallctl. */
431 tcache_t *
432 tcache_create_explicit(tsd_t *tsd) {
433     tcache_t *tcache;
434     size_t size, stack_offset;
435
436     size = sizeof(tcache_t);
437     /* Naturally align the pointer stacks. */
438     size = PTR_CEILING(size);
439     stack_offset = size;
440     size += stack_nelms * sizeof(void *);
441     /* Avoid false cacheline sharing. */
442     size = sz_sa2u(size, CACHELINE);
443
444     tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
445         arena_get(TSDN_NULL, 0, true));
446     if (tcache == NULL) {
447         return NULL;
448     }
449
450     tcache_init(tsd, tcache,
451         (void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
452     tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));
453
454     return tcache;
455 }
456
457 static void
458 tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
459     assert(tcache->arena != NULL);
460
461     for (unsigned i = 0; i < NBINS; i++) {
462         cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
463         tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
464
465         if (config_stats) {
466             assert(tbin->tstats.nrequests == 0);
467         }
468     }
469     for (unsigned i = NBINS; i < nhbins; i++) {
470         cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
471         tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
472
473         if (config_stats) {
474             assert(tbin->tstats.nrequests == 0);
475         }
476     }
477
478     if (config_prof && tcache->prof_accumbytes > 0 &&
479         arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
480         tcache->prof_accumbytes)) {
481         prof_idump(tsd_tsdn(tsd));
482     }
483 }
484
485 void
486 tcache_flush(tsd_t *tsd) {
487     assert(tcache_available(tsd));
488     tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
489 }
490
491 static void
492 tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
493     tcache_flush_cache(tsd, tcache);
494     tcache_arena_dissociate(tsd_tsdn(tsd), tcache);
495
496     if (tsd_tcache) {
497         /* Release the avail array for the TSD embedded auto tcache. */
498         void *avail_array =
499             (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
500             (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
501         idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
502     } else {
503         /* Release both the tcache struct and avail array. */
504         idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
505     }
506 }
507
508 /* For auto tcache (embedded in TSD) only. */
509 void
510 tcache_cleanup(tsd_t *tsd) {
511     tcache_t *tcache = tsd_tcachep_get(tsd);
512     if (!tcache_available(tsd)) {
513         assert(tsd_tcache_enabled_get(tsd) == false);
514         if (config_debug) {
515             assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
516         }
517         return;
518     }
519     assert(tsd_tcache_enabled_get(tsd));
520     assert(tcache_small_bin_get(tcache, 0)->avail != NULL);
521
522     tcache_destroy(tsd, tcache, true);
523     if (config_debug) {
524         tcache_small_bin_get(tcache, 0)->avail = NULL;
525     }
526 }
527
528 void
529 tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
530     unsigned i;
531
532     cassert(config_stats);
533
534     /* Merge and reset tcache stats. */
535     for (i = 0; i < NBINS; i++) {
536         bin_t *bin = &arena->bins[i];
537         cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
538         malloc_mutex_lock(tsdn, &bin->lock);
539         bin->stats.nrequests += tbin->tstats.nrequests;
540         malloc_mutex_unlock(tsdn, &bin->lock);
541         tbin->tstats.nrequests = 0;
542     }
543
544     for (; i < nhbins; i++) {
545         cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
546         arena_stats_large_nrequests_add(tsdn, &arena->stats, i,
547             tbin->tstats.nrequests);
548         tbin->tstats.nrequests = 0;
549     }
550 }
551
552 static bool
553 tcaches_create_prep(tsd_t *tsd) {
554     bool err;
555
556     malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
557
558     if (tcaches == NULL) {
559         tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
560             * (MALLOCX_TCACHE_MAX+1), CACHELINE);
561         if (tcaches == NULL) {
562             err = true;
563             goto label_return;
564         }
565     }
566
567     if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) {
568         err = true;
569         goto label_return;
570     }
571
572     err = false;
573 label_return:
574     malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
575     return err;
576 }
577
578 bool
579 tcaches_create(tsd_t *tsd, unsigned *r_ind) {
580     witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
581
582     bool err;
583
584     if (tcaches_create_prep(tsd)) {
585         err = true;
586         goto label_return;
587     }
588
589     tcache_t *tcache = tcache_create_explicit(tsd);
590     if (tcache == NULL) {
591         err = true;
592         goto label_return;
593     }
594
595     tcaches_t *elm;
596     malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
597     if (tcaches_avail != NULL) {
598         elm = tcaches_avail;
599         tcaches_avail = tcaches_avail->next;
600         elm->tcache = tcache;
601         *r_ind = (unsigned)(elm - tcaches);
602     } else {
603         elm = &tcaches[tcaches_past];
604         elm->tcache = tcache;
605         *r_ind = tcaches_past;
606         tcaches_past++;
607     }
608     malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
609
610     err = false;
611 label_return:
612     witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
613     return err;
614 }
615
616 static tcache_t *
617 tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm) {
618     malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);
619
620     if (elm->tcache == NULL) {
621         return NULL;
622     }
623     tcache_t *tcache = elm->tcache;
624     elm->tcache = NULL;
625     return tcache;
626 }
627
628 void
629 tcaches_flush(tsd_t *tsd, unsigned ind) {
630     malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
631     tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind]);
632     malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
633     if (tcache != NULL) {
634         tcache_destroy(tsd, tcache, false);
635     }
636 }
637
638 void
639 tcaches_destroy(tsd_t *tsd, unsigned ind) {
640     malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
641     tcaches_t *elm = &tcaches[ind];
642     tcache_t *tcache = tcaches_elm_remove(tsd, elm);
643     elm->next = tcaches_avail;
644     tcaches_avail = elm;
645     malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
646     if (tcache != NULL) {
647         tcache_destroy(tsd, tcache, false);
648     }
649 }
650
651 bool
652 tcache_boot(tsdn_t *tsdn) {
653     /* If necessary, clamp opt_lg_tcache_max. */
654     if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
655         SMALL_MAXCLASS) {
656         tcache_maxclass = SMALL_MAXCLASS;
657     } else {
658         tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
659     }
660
661     if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
662         malloc_mutex_rank_exclusive)) {
663         return true;
664     }
665
666     nhbins = sz_size2index(tcache_maxclass) + 1;
667
668     /* Initialize tcache_bin_info. */
669     tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
670         * sizeof(cache_bin_info_t), CACHELINE);
671     if (tcache_bin_info == NULL) {
672         return true;
673     }
674     stack_nelms = 0;
675     unsigned i;
676     for (i = 0; i < NBINS; i++) {
677         if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
678             tcache_bin_info[i].ncached_max =
679                 TCACHE_NSLOTS_SMALL_MIN;
680         } else if ((bin_infos[i].nregs << 1) <=
681             TCACHE_NSLOTS_SMALL_MAX) {
682             tcache_bin_info[i].ncached_max =
683                 (bin_infos[i].nregs << 1);
684         } else {
685             tcache_bin_info[i].ncached_max =
686                 TCACHE_NSLOTS_SMALL_MAX;
687         }
688         stack_nelms += tcache_bin_info[i].ncached_max;
689     }
690     for (; i < nhbins; i++) {
691         tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
692         stack_nelms += tcache_bin_info[i].ncached_max;
693     }
694
695     return false;
696 }
697
698 void
699 tcache_prefork(tsdn_t *tsdn) {
700     if (!config_prof && opt_tcache) {
701         malloc_mutex_prefork(tsdn, &tcaches_mtx);
702     }
703 }
704
705 void
706 tcache_postfork_parent(tsdn_t *tsdn) {
707     if (!config_prof && opt_tcache) {
708         malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
709     }
710 }
711
712 void
713 tcache_postfork_child(tsdn_t *tsdn) {
714     if (!config_prof && opt_tcache) {
715         malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
716     }
717 }
717}