Bug Summary

File: deps/jemalloc/src/arena.c
Warning: line 989, column 4
Value stored to 'usize' is never read

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name arena.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _GNU_SOURCE -D _REENTRANT -I include -I include -D JEMALLOC_NO_PRIVATE_NAMESPACE -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -std=gnu99 -fdebug-compilation-dir /home/netto/Desktop/redis-6.2.1/deps/jemalloc -ferror-limit 19 -fmessage-length 0 -funroll-loops -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -o /tmp/scan-build-2021-03-14-133648-8817-1 -x c src/arena.c
1#define JEMALLOC_ARENA_C_
2#include "jemalloc/internal/jemalloc_preamble.h"
3#include "jemalloc/internal/jemalloc_internal_includes.h"
4
5#include "jemalloc/internal/assert.h"
6#include "jemalloc/internal/div.h"
7#include "jemalloc/internal/extent_dss.h"
8#include "jemalloc/internal/extent_mmap.h"
9#include "jemalloc/internal/mutex.h"
10#include "jemalloc/internal/rtree.h"
11#include "jemalloc/internal/size_classes.h"
12#include "jemalloc/internal/util.h"
13
14/******************************************************************************/
15/* Data. */
16
17/*
18 * Define names for both uninitialized and initialized phases, so that
19 * options and mallctl processing are straightforward.
20 */
21const char *percpu_arena_mode_names[] = {
22 "percpu",
23 "phycpu",
24 "disabled",
25 "percpu",
26 "phycpu"
27};
28percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;
29
30ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
31ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
32
33static atomic_zd_t dirty_decay_ms_default;
34static atomic_zd_t muzzy_decay_ms_default;
35
36const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
37#define STEP(step, h, x, y) \
38 h,
39 SMOOTHSTEP
40#undef STEP
41};
42
43static div_info_t arena_binind_div_info[NBINS];
44
45/******************************************************************************/
46/*
47 * Function prototypes for static functions that are referenced prior to
48 * definition.
49 */
50
51static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
52 arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
53 size_t npages_decay_max, bool is_background_thread);
54static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
55 bool is_background_thread, bool all);
56static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
57 bin_t *bin);
58static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
59 bin_t *bin);
60
61/******************************************************************************/
62
63void
64arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
65 const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
66 size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
67 *nthreads += arena_nthreads_get(arena, false);
68 *dss = dss_prec_names[arena_dss_prec_get(arena)];
69 *dirty_decay_ms = arena_dirty_decay_ms_get(arena);
70 *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
71 *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
72 *ndirty += extents_npages_get(&arena->extents_dirty);
73 *nmuzzy += extents_npages_get(&arena->extents_muzzy);
74}
75
76void
77arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
78 const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
79 size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
80 bin_stats_t *bstats, arena_stats_large_t *lstats) {
81 cassert(config_stats);
82
83 arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
84 muzzy_decay_ms, nactive, ndirty, nmuzzy);
85
86 size_t base_allocated, base_resident, base_mapped, metadata_thp;
87 base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
88 &base_mapped, &metadata_thp);
89
90 arena_stats_lock(tsdn, &arena->stats);
91
92 arena_stats_accum_zu(&astats->mapped, base_mapped
93 + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
94 arena_stats_accum_zu(&astats->retained,
95 extents_npages_get(&arena->extents_retained) << LG_PAGE);
96
97 arena_stats_accum_u64(&astats->decay_dirty.npurge,
98 arena_stats_read_u64(tsdn, &arena->stats,
99 &arena->stats.decay_dirty.npurge));
100 arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
101 arena_stats_read_u64(tsdn, &arena->stats,
102 &arena->stats.decay_dirty.nmadvise));
103 arena_stats_accum_u64(&astats->decay_dirty.purged,
104 arena_stats_read_u64(tsdn, &arena->stats,
105 &arena->stats.decay_dirty.purged));
106
107 arena_stats_accum_u64(&astats->decay_muzzy.npurge,
108 arena_stats_read_u64(tsdn, &arena->stats,
109 &arena->stats.decay_muzzy.npurge));
110 arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
111 arena_stats_read_u64(tsdn, &arena->stats,
112 &arena->stats.decay_muzzy.nmadvise));
113 arena_stats_accum_u64(&astats->decay_muzzy.purged,
114 arena_stats_read_u64(tsdn, &arena->stats,
115 &arena->stats.decay_muzzy.purged));
116
117 arena_stats_accum_zu(&astats->base, base_allocated);
118 arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
119 arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
120 arena_stats_accum_zu(&astats->resident, base_resident +
121 (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
122 extents_npages_get(&arena->extents_dirty) +
123 extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
124
125 for (szind_t i = 0; i < NSIZES - NBINS; i++) {
126 uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
127 &arena->stats.lstats[i].nmalloc);
128 arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
129 arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
130
131 uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
132 &arena->stats.lstats[i].ndalloc);
133 arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
134 arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
135
136 uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
137 &arena->stats.lstats[i].nrequests);
138 arena_stats_accum_u64(&lstats[i].nrequests,
139 nmalloc + nrequests);
140 arena_stats_accum_u64(&astats->nrequests_large,
141 nmalloc + nrequests);
142
143 assert(nmalloc >= ndalloc);
144 assert(nmalloc - ndalloc <= SIZE_T_MAX);
145 size_t curlextents = (size_t)(nmalloc - ndalloc);
146 lstats[i].curlextents += curlextents;
147 arena_stats_accum_zu(&astats->allocated_large,
148 curlextents * sz_index2size(NBINS + i));
149 }
150
151 arena_stats_unlock(tsdn, &arena->stats);
152
153 /* tcache_bytes counts currently cached bytes. */
154 atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
155 malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
156 cache_bin_array_descriptor_t *descriptor;
157 ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
158 szind_t i = 0;
159 for (; i < NBINS; i++) {
160 cache_bin_t *tbin = &descriptor->bins_small[i];
161 arena_stats_accum_zu(&astats->tcache_bytes,
162 tbin->ncached * sz_index2size(i));
163 }
164 for (; i < nhbins; i++) {
165 cache_bin_t *tbin = &descriptor->bins_large[i];
166 arena_stats_accum_zu(&astats->tcache_bytes,
167 tbin->ncached * sz_index2size(i));
168 }
169 }
170 malloc_mutex_prof_read(tsdn,
171 &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
172 &arena->tcache_ql_mtx);
173 malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
174
175#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \
176 malloc_mutex_lock(tsdn, &arena->mtx); \
177 malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \
178 &arena->mtx); \
179 malloc_mutex_unlock(tsdn, &arena->mtx);
180
181 /* Gather per arena mutex profiling data. */
182 READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
183 READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
184 arena_prof_mutex_extent_avail)
185 READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
186 arena_prof_mutex_extents_dirty)
187 READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
188 arena_prof_mutex_extents_muzzy)
189 READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
190 arena_prof_mutex_extents_retained)
191 READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
192 arena_prof_mutex_decay_dirty)
193 READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
194 arena_prof_mutex_decay_muzzy)
195 READ_ARENA_MUTEX_PROF_DATA(base->mtx,
196 arena_prof_mutex_base)
197#undef READ_ARENA_MUTEX_PROF_DATA
198
199 nstime_copy(&astats->uptime, &arena->create_time);
200 nstime_update(&astats->uptime);
201 nstime_subtract(&astats->uptime, &arena->create_time);
202
203 for (szind_t i = 0; i < NBINS; i++) {
204 bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]);
205 }
206}
207
208void
209arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
210 extent_hooks_t **r_extent_hooks, extent_t *extent) {
211 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
212 WITNESS_RANK_CORE, 0);
213
214 extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
215 extent);
216 if (arena_dirty_decay_ms_get(arena) == 0) {
217 arena_decay_dirty(tsdn, arena, false, true);
218 } else {
219 arena_background_thread_inactivity_check(tsdn, arena, false);
220 }
221}
222
223static void *
224arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
225 void *ret;
226 arena_slab_data_t *slab_data = extent_slab_data_get(slab);
227 size_t regind;
228
229 assert(extent_nfree_get(slab) > 0);
230 assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
231
232 regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
233 ret = (void *)((uintptr_t)extent_addr_get(slab) +
234 (uintptr_t)(bin_info->reg_size * regind));
235 extent_nfree_dec(slab);
236 return ret;
237}
238
239#ifndef JEMALLOC_JET
240static
241#endif
242size_t
243arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
244 size_t diff, regind;
245
246 /* Freeing a pointer outside the slab can cause assertion failure. */
247 assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
248 assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
249 /* Freeing an interior pointer can cause assertion failure. */
250 assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
251 (uintptr_t)bin_infos[binind].reg_size == 0);
252
253 diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
254
255 /* Avoid doing division with a variable divisor. */
256 regind = div_compute(&arena_binind_div_info[binind], diff);
257
258 assert(regind < bin_infos[binind].nregs);
259
260 return regind;
261}
262
263static void
264arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
265 szind_t binind = extent_szind_get(slab);
266 const bin_info_t *bin_info = &bin_infos[binind];
267 size_t regind = arena_slab_regind(slab, binind, ptr);
268
269 assert(extent_nfree_get(slab) < bin_info->nregs);
270 /* Freeing an unallocated pointer can cause assertion failure. */
271 assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
272
273 bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
274 extent_nfree_inc(slab);
275}
276
277static void
278arena_nactive_add(arena_t *arena, size_t add_pages) {
279 atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
280}
281
282static void
283arena_nactive_sub(arena_t *arena, size_t sub_pages) {
284 assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
285 atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
286}
287
288static void
289arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
290 szind_t index, hindex;
291
292 cassert(config_stats);
293
294 if (usize < LARGE_MINCLASS) {
295 usize = LARGE_MINCLASS;
296 }
297 index = sz_size2index(usize);
298 hindex = (index >= NBINS) ? index - NBINS : 0;
299
300 arena_stats_add_u64(tsdn, &arena->stats,
301 &arena->stats.lstats[hindex].nmalloc, 1);
302}
303
304static void
305arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
306 szind_t index, hindex;
307
308 cassert(config_stats);
309
310 if (usize < LARGE_MINCLASS) {
311 usize = LARGE_MINCLASS;
312 }
313 index = sz_size2index(usize);
314 hindex = (index >= NBINS) ? index - NBINS : 0;
315
316 arena_stats_add_u64(tsdn, &arena->stats,
317 &arena->stats.lstats[hindex].ndalloc, 1);
318}
319
320static void
321arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
322 size_t usize) {
323 arena_large_dalloc_stats_update(tsdn, arena, oldusize);
324 arena_large_malloc_stats_update(tsdn, arena, usize);
325}
326
327extent_t *
328arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
329 size_t alignment, bool *zero) {
330 extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
331
332 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
333 WITNESS_RANK_CORE, 0);
334
335 szind_t szind = sz_size2index(usize);
336 size_t mapped_add;
337 bool commit = true;
338 extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
339 &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
340 szind, zero, &commit);
341 if (extent == NULL) {
342 extent = extents_alloc(tsdn, arena, &extent_hooks,
343 &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
344 false, szind, zero, &commit);
345 }
346 size_t size = usize + sz_large_pad;
347 if (extent == NULL) {
348 extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
349 usize, sz_large_pad, alignment, false, szind, zero,
350 &commit);
351 if (config_stats) {
352 /*
353 * extent may be NULL on OOM, but in that case
354 * mapped_add isn't used below, so there's no need to
355 * conditionally set it to 0 here.
356 */
357 mapped_add = size;
358 }
359 } else if (config_stats) {
360 mapped_add = 0;
361 }
362
363 if (extent != NULL) {
364 if (config_stats) {
365 arena_stats_lock(tsdn, &arena->stats);
366 arena_large_malloc_stats_update(tsdn, arena, usize);
367 if (mapped_add != 0) {
368 arena_stats_add_zu(tsdn, &arena->stats,
369 &arena->stats.mapped, mapped_add);
370 }
371 arena_stats_unlock(tsdn, &arena->stats);
372 }
373 arena_nactive_add(arena, size >> LG_PAGE);
374 }
375
376 return extent;
377}
378
379void
380arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
381 if (config_stats) {
382 arena_stats_lock(tsdn, &arena->stats);
383 arena_large_dalloc_stats_update(tsdn, arena,
384 extent_usize_get(extent));
385 arena_stats_unlock(tsdn, &arena->stats);
386 }
387 arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
388}
389
390void
391arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
392 size_t oldusize) {
393 size_t usize = extent_usize_get(extent);
394 size_t udiff = oldusize - usize;
395
396 if (config_stats) {
397 arena_stats_lock(tsdn, &arena->stats);
398 arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
399 arena_stats_unlock(tsdn, &arena->stats);
400 }
401 arena_nactive_sub(arena, udiff >> LG_PAGE);
402}
403
404void
405arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
406 size_t oldusize) {
407 size_t usize = extent_usize_get(extent);
408 size_t udiff = usize - oldusize;
409
410 if (config_stats) {
411 arena_stats_lock(tsdn, &arena->stats);
412 arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
413 arena_stats_unlock(tsdn, &arena->stats);
414 }
415 arena_nactive_add(arena, udiff >> LG_PAGE);
416}
417
418static ssize_t
419arena_decay_ms_read(arena_decay_t *decay) {
420 return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
421}
422
423static void
424arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
425 atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
426}
427
428static void
429arena_decay_deadline_init(arena_decay_t *decay) {
430 /*
431 * Generate a new deadline that is uniformly random within the next
432 * epoch after the current one.
433 */
434 nstime_copy(&decay->deadline, &decay->epoch);
435 nstime_add(&decay->deadline, &decay->interval);
436 if (arena_decay_ms_read(decay) > 0) {
437 nstime_t jitter;
438
439 nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
440 nstime_ns(&decay->interval)));
441 nstime_add(&decay->deadline, &jitter);
442 }
443}
444
445static bool
446arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
447 return (nstime_compare(&decay->deadline, time) <= 0);
448}
449
450static size_t
451arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
452 uint64_t sum;
453 size_t npages_limit_backlog;
454 unsigned i;
455
456 /*
457 * For each element of decay_backlog, multiply by the corresponding
458 * fixed-point smoothstep decay factor. Sum the products, then divide
459 * to round down to the nearest whole number of pages.
460 */
461 sum = 0;
462 for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
463 sum += decay->backlog[i] * h_steps[i];
464 }
465 npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
466
467 return npages_limit_backlog;
468}
469
470static void
471arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
472 size_t npages_delta = (current_npages > decay->nunpurged) ?
473 current_npages - decay->nunpurged : 0;
474 decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
475
476 if (config_debug) {
477 if (current_npages > decay->ceil_npages) {
478 decay->ceil_npages = current_npages;
479 }
480 size_t npages_limit = arena_decay_backlog_npages_limit(decay);
481 assert(decay->ceil_npages >= npages_limit);
482 if (decay->ceil_npages > npages_limit) {
483 decay->ceil_npages = npages_limit;
484 }
485 }
486}
487
488static void
489arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
490 size_t current_npages) {
491 if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
492 memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
493 sizeof(size_t));
494 } else {
495 size_t nadvance_z = (size_t)nadvance_u64;
496
497 assert((uint64_t)nadvance_z == nadvance_u64);
498
499 memmove(decay->backlog, &decay->backlog[nadvance_z],
500 (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
501 if (nadvance_z > 1) {
502 memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
503 nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
504 }
505 }
506
507 arena_decay_backlog_update_last(decay, current_npages);
508}
509
510static void
511arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
512 extents_t *extents, size_t current_npages, size_t npages_limit,
513 bool is_background_thread) {
514 if (current_npages > npages_limit) {
515 arena_decay_to_limit(tsdn, arena, decay, extents, false,
516 npages_limit, current_npages - npages_limit,
517 is_background_thread);
518 }
519}
520
521static void
522arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
523 size_t current_npages) {
524 assert(arena_decay_deadline_reached(decay, time));
525
526 nstime_t delta;
527 nstime_copy(&delta, time);
528 nstime_subtract(&delta, &decay->epoch);
529
530 uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
531 assert(nadvance_u64 > 0);
532
533 /* Add nadvance_u64 decay intervals to epoch. */
534 nstime_copy(&delta, &decay->interval);
535 nstime_imultiply(&delta, nadvance_u64);
536 nstime_add(&decay->epoch, &delta);
537
538 /* Set a new deadline. */
539 arena_decay_deadline_init(decay);
540
541 /* Update the backlog. */
542 arena_decay_backlog_update(decay, nadvance_u64, current_npages);
543}
544
545static void
546arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
547 extents_t *extents, const nstime_t *time, bool is_background_thread) {
548 size_t current_npages = extents_npages_get(extents);
549 arena_decay_epoch_advance_helper(decay, time, current_npages);
550
551 size_t npages_limit = arena_decay_backlog_npages_limit(decay);
552 /* We may unlock decay->mtx when try_purge(). Finish logging first. */
553 decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
554 current_npages;
555
556 if (!background_thread_enabled() || is_background_thread) {
557 arena_decay_try_purge(tsdn, arena, decay, extents,
558 current_npages, npages_limit, is_background_thread);
559 }
560}
561
562static void
563arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
564 arena_decay_ms_write(decay, decay_ms);
565 if (decay_ms > 0) {
566 nstime_init(&decay->interval, (uint64_t)decay_ms *
567 KQU(1000000));
568 nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
569 }
570
571 nstime_init(&decay->epoch, 0);
572 nstime_update(&decay->epoch);
573 decay->jitter_state = (uint64_t)(uintptr_t)decay;
574 arena_decay_deadline_init(decay);
575 decay->nunpurged = 0;
576 memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
577}
578
579static bool
580arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
581 arena_stats_decay_t *stats) {
582 if (config_debug) {
583 for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
584 assert(((char *)decay)[i] == 0);
585 }
586 decay->ceil_npages = 0;
587 }
588 if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
589 malloc_mutex_rank_exclusive)) {
590 return true;
591 }
592 decay->purging = false;
593 arena_decay_reinit(decay, decay_ms);
594 /* Memory is zeroed, so there is no need to clear stats. */
595 if (config_stats) {
596 decay->stats = stats;
597 }
598 return false;
599}
600
601static bool
602arena_decay_ms_valid(ssize_t decay_ms) {
603 if (decay_ms < -1) {
604 return false;
605 }
606 if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
607 KQU(1000)) {
608 return true;
609 }
610 return false;
611}
612
613static bool
614arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
615 extents_t *extents, bool is_background_thread) {
616 malloc_mutex_assert_owner(tsdn, &decay->mtx);
617
618 /* Purge all or nothing if the option is disabled. */
619 ssize_t decay_ms = arena_decay_ms_read(decay);
620 if (decay_ms <= 0) {
621 if (decay_ms == 0) {
622 arena_decay_to_limit(tsdn, arena, decay, extents, false,
623 0, extents_npages_get(extents),
624 is_background_thread);
625 }
626 return false;
627 }
628
629 nstime_t time;
630 nstime_init(&time, 0);
631 nstime_update(&time);
632 if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
633 > 0)) {
634 /*
635 * Time went backwards. Move the epoch back in time and
636 * generate a new deadline, with the expectation that time
637 * typically flows forward for long enough periods of time that
638 * epochs complete. Unfortunately, this strategy is susceptible
639 * to clock jitter triggering premature epoch advances, but
640 * clock jitter estimation and compensation isn't feasible here
641 * because calls into this code are event-driven.
642 */
643 nstime_copy(&decay->epoch, &time);
644 arena_decay_deadline_init(decay);
645 } else {
646 /* Verify that time does not go backwards. */
647 assert(nstime_compare(&decay->epoch, &time) <= 0);
648 }
649
650 /*
651 * If the deadline has been reached, advance to the current epoch and
652 * purge to the new limit if necessary. Note that dirty pages created
653 * during the current epoch are not subject to purge until a future
654 * epoch, so as a result purging only happens during epoch advances, or
655 * being triggered by background threads (scheduled event).
656 */
657 bool advance_epoch = arena_decay_deadline_reached(decay, &time);
658 if (advance_epoch) {
659 arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
660 is_background_thread);
661 } else if (is_background_thread) {
662 arena_decay_try_purge(tsdn, arena, decay, extents,
663 extents_npages_get(extents),
664 arena_decay_backlog_npages_limit(decay),
665 is_background_thread);
666 }
667
668 return advance_epoch;
669}
670
671static ssize_t
672arena_decay_ms_get(arena_decay_t *decay) {
673 return arena_decay_ms_read(decay);
674}
675
676ssize_t
677arena_dirty_decay_ms_get(arena_t *arena) {
678 return arena_decay_ms_get(&arena->decay_dirty);
679}
680
681ssize_t
682arena_muzzy_decay_ms_get(arena_t *arena) {
683 return arena_decay_ms_get(&arena->decay_muzzy);
684}
685
686static bool
687arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
688 extents_t *extents, ssize_t decay_ms) {
689 if (!arena_decay_ms_valid(decay_ms)) {
690 return true;
691 }
692
693 malloc_mutex_lock(tsdn, &decay->mtx);
694 /*
695 * Restart decay backlog from scratch, which may cause many dirty pages
696 * to be immediately purged. It would conceptually be possible to map
697 * the old backlog onto the new backlog, but there is no justification
698 * for such complexity since decay_ms changes are intended to be
699 * infrequent, either between the {-1, 0, >0} states, or a one-time
700 * arbitrary change during initial arena configuration.
701 */
702 arena_decay_reinit(decay, decay_ms);
703 arena_maybe_decay(tsdn, arena, decay, extents, false);
704 malloc_mutex_unlock(tsdn, &decay->mtx);
705
706 return false;
707}
708
709bool
710arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
711 ssize_t decay_ms) {
712 return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
713 &arena->extents_dirty, decay_ms);
714}
715
716bool
717arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
718 ssize_t decay_ms) {
719 return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
720 &arena->extents_muzzy, decay_ms);
721}
722
723static size_t
724arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
725 extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
726 size_t npages_decay_max, extent_list_t *decay_extents) {
727 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
728 WITNESS_RANK_CORE, 0);
729
730 /* Stash extents according to npages_limit. */
731 size_t nstashed = 0;
732 extent_t *extent;
733 while (nstashed < npages_decay_max &&
734 (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
735 npages_limit)) != NULL) {
736 extent_list_append(decay_extents, extent);
737 nstashed += extent_size_get(extent) >> LG_PAGE;
738 }
739 return nstashed;
740}
741
742static size_t
743arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
744 extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
745 bool all, extent_list_t *decay_extents, bool is_background_thread) {
746 UNUSED size_t nmadvise, nunmapped;
747 size_t npurged;
748
749 if (config_stats) {
750 nmadvise = 0;
751 nunmapped = 0;
752 }
753 npurged = 0;
754
755 ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
756 for (extent_t *extent = extent_list_first(decay_extents); extent !=
757 NULL; extent = extent_list_first(decay_extents)) {
758 if (config_stats) {
759 nmadvise++;
760 }
761 size_t npages = extent_size_get(extent) >> LG_PAGE;
762 npurged += npages;
763 extent_list_remove(decay_extents, extent);
764 switch (extents_state_get(extents)) {
765 case extent_state_active:
766 not_reached();
767 case extent_state_dirty:
768 if (!all && muzzy_decay_ms != 0 &&
769 !extent_purge_lazy_wrapper(tsdn, arena,
770 r_extent_hooks, extent, 0,
771 extent_size_get(extent))) {
772 extents_dalloc(tsdn, arena, r_extent_hooks,
773 &arena->extents_muzzy, extent);
774 arena_background_thread_inactivity_check(tsdn,
775 arena, is_background_thread);
776 break;
777 }
778 /* Fall through. */
779 case extent_state_muzzy:
780 extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
781 extent);
782 if (config_stats) {
783 nunmapped += npages;
784 }
785 break;
786 case extent_state_retained:
787 default:
788 not_reached();
789 }
790 }
791
792 if (config_stats) {
793 arena_stats_lock(tsdn, &arena->stats);
794 arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
795 1);
796 arena_stats_add_u64(tsdn, &arena->stats,
797 &decay->stats->nmadvise, nmadvise);
798 arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
799 npurged);
800 arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
801 nunmapped << LG_PAGE);
802 arena_stats_unlock(tsdn, &arena->stats);
803 }
804
805 return npurged;
806}
807
808/*
809 * npages_limit: Decay at most npages_decay_max pages without violating the
810 * invariant: (extents_npages_get(extents) >= npages_limit). We need an upper
811 * bound on number of pages in order to prevent unbounded growth (namely in
812 * stashed), otherwise unbounded new pages could be added to extents during the
813 * current decay run, so that the purging thread never finishes.
814 */
815static void
816arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
817 extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
818 bool is_background_thread) {
819 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
820 WITNESS_RANK_CORE, 1);
821 malloc_mutex_assert_owner(tsdn, &decay->mtx);
822
823 if (decay->purging) {
824 return;
825 }
826 decay->purging = true;
827 malloc_mutex_unlock(tsdn, &decay->mtx);
828
829 extent_hooks_t *extent_hooks = extent_hooks_get(arena);
830
831 extent_list_t decay_extents;
832 extent_list_init(&decay_extents);
833
834 size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
835 npages_limit, npages_decay_max, &decay_extents);
836 if (npurge != 0) {
837 UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
838 &extent_hooks, decay, extents, all, &decay_extents,
839 is_background_thread);
840 assert(npurged == npurge);
841 }
842
843 malloc_mutex_lock(tsdn, &decay->mtx);
844 decay->purging = false;
845}
846
847static bool
848arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
849 extents_t *extents, bool is_background_thread, bool all) {
850 if (all) {
851 malloc_mutex_lock(tsdn, &decay->mtx);
852 arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
853 extents_npages_get(extents), is_background_thread);
854 malloc_mutex_unlock(tsdn, &decay->mtx);
855
856 return false;
857 }
858
859 if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
860 /* No need to wait if another thread is in progress. */
861 return true;
862 }
863
864 bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
865 is_background_thread);
866 UNUSED size_t npages_new;
867 if (epoch_advanced) {
868 /* Backlog is updated on epoch advance. */
869 npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
870 }
871 malloc_mutex_unlock(tsdn, &decay->mtx);
872
873 if (have_background_thread && background_thread_enabled() &&
874 epoch_advanced && !is_background_thread) {
875 background_thread_interval_check(tsdn, arena, decay,
876 npages_new);
877 }
878
879 return false;
880}
881
882static bool
883arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
884 bool all) {
885 return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
886 &arena->extents_dirty, is_background_thread, all);
887}
888
889static bool
890arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
891 bool all) {
892 return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
893 &arena->extents_muzzy, is_background_thread, all);
894}
895
896void
897arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
898 if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
899 return;
900 }
901 arena_decay_muzzy(tsdn, arena, is_background_thread, all);
902}
903
904static void
905arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
906 arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
907
908 extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
909 arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
910}
911
912static void
913arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
914 assert(extent_nfree_get(slab) > 0);
915 extent_heap_insert(&bin->slabs_nonfull, slab);
916}
917
918static void
919arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
920 extent_heap_remove(&bin->slabs_nonfull, slab);
921}
922
923static extent_t *
924arena_bin_slabs_nonfull_tryget(bin_t *bin) {
925 extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
926 if (slab == NULL) {
927 return NULL;
928 }
929 if (config_stats) {
930 bin->stats.reslabs++;
931 }
932 return slab;
933}
934
935static void
936arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
937 assert(extent_nfree_get(slab) == 0);
938 /*
939 * Tracking extents is required by arena_reset, which is not allowed
940 * for auto arenas. Bypass this step to avoid touching the extent
941 * linkage (often results in cache misses) for auto arenas.
942 */
943 if (arena_is_auto(arena)) {
944 return;
945 }
946 extent_list_append(&bin->slabs_full, slab);
947}
948
949static void
950arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
951 if (arena_is_auto(arena)) {
952 return;
953 }
954 extent_list_remove(&bin->slabs_full, slab);
955}
956
957void
958arena_reset(tsd_t *tsd, arena_t *arena) {
959 /*
960 * Locking in this function is unintuitive. The caller guarantees that
961 * no concurrent operations are happening in this arena, but there are
962 * still reasons that some locking is necessary:
963 *
964 * - Some of the functions in the transitive closure of calls assume
965 * appropriate locks are held, and in some cases these locks are
966 * temporarily dropped to avoid lock order reversal or deadlock due to
967 * reentry.
968 * - mallctl("epoch", ...) may concurrently refresh stats. While
969 * strictly speaking this is a "concurrent operation", disallowing
970 * stats refreshes would impose an inconvenient burden.
971 */
972
973 /* Large allocations. */
974 malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
975
976 for (extent_t *extent = extent_list_first(&arena->large); extent !=
977 NULL; extent = extent_list_first(&arena->large)) {
978 void *ptr = extent_base_get(extent);
979 size_t usize;
980
981 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
982 alloc_ctx_t alloc_ctx;
983 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
984 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
985 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
986 assert(alloc_ctx.szind != NSIZES);
987
988 if (config_stats || (config_prof && opt_prof)) {
989 usize = sz_index2size(alloc_ctx.szind);
Value stored to 'usize' is never read
990 assert(usize == isalloc(tsd_tsdn(tsd), ptr));
991 }
992 /* Remove large allocation from prof sample set. */
993 if (config_prof && opt_prof) {
994 prof_free(tsd, ptr, usize, &alloc_ctx);
995 }
996 large_dalloc(tsd_tsdn(tsd), extent);
997 malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
998 }
999 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
1000
1001 /* Bins. */
1002 for (unsigned i = 0; i < NBINS; i++) {
1003 extent_t *slab;
1004 bin_t *bin = &arena->bins[i];
1005 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1006 if (bin->slabcur != NULL) {
1007 slab = bin->slabcur;
1008 bin->slabcur = NULL;
1009 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1010 arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
1011 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1012 }
1013 while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
1014 NULL) {
1015 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1016 arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
1017 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1018 }
1019 for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
1020 slab = extent_list_first(&bin->slabs_full)) {
1021 arena_bin_slabs_full_remove(arena, bin, slab);
1022 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1023 arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
1024 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1025 }
1026 if (config_stats) {
1027 bin->stats.curregs = 0;
1028 bin->stats.curslabs = 0;
1029 }
1030 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1031 }
1032
1033 atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
1034}
1035
1036static void
1037arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
1038 /*
1039 * Iterate over the retained extents and destroy them. This gives the
1040 * extent allocator underlying the extent hooks an opportunity to unmap
1041 * all retained memory without having to keep its own metadata
1042 * structures. In practice, virtual memory for dss-allocated extents is
1043 * leaked here, so best practice is to avoid dss for arenas to be
1044 * destroyed, or provide custom extent hooks that track retained
1045 * dss-based extents for later reuse.
1046 */
1047 extent_hooks_t *extent_hooks = extent_hooks_get(arena);
1048 extent_t *extent;
1049 while ((extent = extents_evict(tsdn, arena, &extent_hooks,
1050 &arena->extents_retained, 0)) != NULL((void*)0)) {
1051 extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
1052 }
1053}
1054
1055void
1056arena_destroy(tsd_t *tsd, arena_t *arena) {
1057 assert(base_ind_get(arena->base) >= narenas_auto)do { if (__builtin_expect(!!(config_debug && !(base_ind_get
(arena->base) >= narenas_auto)), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1057, "base_ind_get(arena->base) >= narenas_auto"
); abort(); } } while (0)
;
1058 assert(arena_nthreads_get(arena, false) == 0)do { if (__builtin_expect(!!(config_debug && !(arena_nthreads_get
(arena, 0) == 0)), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1058, "arena_nthreads_get(arena, false) == 0"
); abort(); } } while (0)
;
1059 assert(arena_nthreads_get(arena, true) == 0)do { if (__builtin_expect(!!(config_debug && !(arena_nthreads_get
(arena, 1) == 0)), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1059, "arena_nthreads_get(arena, true) == 0"
); abort(); } } while (0)
;
1060
1061 /*
1062 * No allocations have occurred since arena_reset() was called.
1063 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
1064 * extents, so only retained extents may remain.
1065 */
1066 assert(extents_npages_get(&arena->extents_dirty) == 0)do { if (__builtin_expect(!!(config_debug && !(extents_npages_get
(&arena->extents_dirty) == 0)), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1066, "extents_npages_get(&arena->extents_dirty) == 0"
); abort(); } } while (0)
;
1067 assert(extents_npages_get(&arena->extents_muzzy) == 0)do { if (__builtin_expect(!!(config_debug && !(extents_npages_get
(&arena->extents_muzzy) == 0)), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1067, "extents_npages_get(&arena->extents_muzzy) == 0"
); abort(); } } while (0)
;
1068
1069 /* Deallocate retained memory. */
1070 arena_destroy_retained(tsd_tsdn(tsd), arena);
1071
1072 /*
1073 * Remove the arena pointer from the arenas array. We rely on the fact
1074 * that there is no way for the application to get a dirty read from the
1075 * arenas array unless there is an inherent race in the application
1076 * involving access of an arena being concurrently destroyed. The
1077 * application must synchronize knowledge of the arena's validity, so as
1078 * long as we use an atomic write to update the arenas array, the
1079 * application will get a clean read any time after it synchronizes
1080 * knowledge that the arena is no longer valid.
1081 */
1082 arena_set(base_ind_get(arena->base), NULL((void*)0));
1083
1084 /*
1085 * Destroy the base allocator, which manages all metadata ever mapped by
1086 * this arena.
1087 */
1088 base_delete(tsd_tsdn(tsd), arena->base);
1089}
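/*
 * Editor's note: arena_destroy() is reached from the application through the
 * "arena.<i>.destroy" mallctl (arena_i_destroy_ctl(), per the comment above),
 * and the dss caveat in arena_destroy_retained() suggests steering a
 * short-lived arena away from dss before use. The following application-side
 * sketch uses the documented mallctl namespace; it assumes an unprefixed
 * mallctl() (Redis's vendored build exposes je_mallctl()) and is not part of
 * arena.c.
 */
#if 0	/* illustrative sketch only */
#include <jemalloc/jemalloc.h>
#include <stdio.h>

static int
use_and_destroy_arena(void) {
	unsigned ind;
	size_t sz = sizeof(ind);
	if (mallctl("arenas.create", &ind, &sz, NULL, 0) != 0) {
		return -1;
	}
	char name[64];
	const char *dss = "disabled";	/* avoid leaking dss-backed extents */
	snprintf(name, sizeof(name), "arena.%u.dss", ind);
	(void)mallctl(name, NULL, NULL, (void *)&dss, sizeof(dss));

	void *p = mallocx(1024, MALLOCX_ARENA(ind));
	if (p != NULL) {
		dallocx(p, 0);
	}

	/* Purges cached extents, then reaches arena_destroy(). */
	snprintf(name, sizeof(name), "arena.%u.destroy", ind);
	return mallctl(name, NULL, NULL, NULL, 0);
}
#endif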
1090
1091static extent_t *
1092arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
1093 extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
1094 szind_t szind) {
1095 extent_t *slab;
1096 bool_Bool zero, commit;
1097
1098 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1099 WITNESS_RANK_CORE11U, 0);
1100
1101 zero = false0;
1102 commit = true1;
1103 slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL((void*)0),
1104 bin_info->slab_size, 0, PAGE((size_t)(1U << 12)), true1, szind, &zero, &commit);
1105
1106 if (config_stats && slab != NULL((void*)0)) {
1107 arena_stats_mapped_add(tsdn, &arena->stats,
1108 bin_info->slab_size);
1109 }
1110
1111 return slab;
1112}
1113
1114static extent_t *
1115arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
1116 const bin_info_t *bin_info) {
1117 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1118 WITNESS_RANK_CORE11U, 0);
1119
1120 extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER((void*)0);
1121 szind_t szind = sz_size2index(bin_info->reg_size);
1122 bool_Bool zero = false0;
1123 bool_Bool commit = true1;
1124 extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
1125 &arena->extents_dirty, NULL((void*)0), bin_info->slab_size, 0, PAGE((size_t)(1U << 12)), true1,
1126 binind, &zero, &commit);
1127 if (slab == NULL((void*)0)) {
1128 slab = extents_alloc(tsdn, arena, &extent_hooks,
1129 &arena->extents_muzzy, NULL((void*)0), bin_info->slab_size, 0, PAGE((size_t)(1U << 12)),
1130 true1, binind, &zero, &commit);
1131 }
1132 if (slab == NULL((void*)0)) {
1133 slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
1134 bin_info, szind);
1135 if (slab == NULL((void*)0)) {
1136 return NULL((void*)0);
1137 }
1138 }
1139 assert(extent_slab_get(slab))do { if (__builtin_expect(!!(config_debug && !(extent_slab_get
(slab))), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1139, "extent_slab_get(slab)"); abort(); } }
while (0)
;
1140
1141 /* Initialize slab internals. */
1142 arena_slab_data_t *slab_data = extent_slab_data_get(slab);
1143 extent_nfree_set(slab, bin_info->nregs);
1144 bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false0);
1145
1146 arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE12);
1147
1148 return slab;
1149}
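/*
 * Editor's summary: arena_slab_alloc() prefers recycling a dirty extent, then
 * a muzzy one, and only falls back to arena_slab_alloc_hard(), which maps
 * fresh memory through the extent hooks and is the only path that grows
 * stats.mapped. The new slab's free-region count and bitmap are then
 * initialized from bin_info before the slab is handed to the bin.
 */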
1150
1151static extent_t *
1152arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
1153 szind_t binind) {
1154 extent_t *slab;
1155 const bin_info_t *bin_info;
1156
1157 /* Look for a usable slab. */
1158 slab = arena_bin_slabs_nonfull_tryget(bin);
1159 if (slab != NULL((void*)0)) {
1160 return slab;
1161 }
1162 /* No existing slabs have any space available. */
1163
1164 bin_info = &bin_infos[binind];
1165
1166 /* Allocate a new slab. */
1167 malloc_mutex_unlock(tsdn, &bin->lock);
1168 /******************************/
1169 slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
1170 /********************************/
1171 malloc_mutex_lock(tsdn, &bin->lock);
1172 if (slab != NULL((void*)0)) {
1173 if (config_stats) {
1174 bin->stats.nslabs++;
1175 bin->stats.curslabs++;
1176 }
1177 return slab;
1178 }
1179
1180 /*
1181 * arena_slab_alloc() failed, but another thread may have made
1182 * sufficient memory available while this one dropped bin->lock above,
1183 * so search one more time.
1184 */
1185 slab = arena_bin_slabs_nonfull_tryget(bin);
1186 if (slab != NULL((void*)0)) {
1187 return slab;
1188 }
1189
1190 return NULL((void*)0);
1191}
1192
1193/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
1194static void *
1195arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
1196 szind_t binind) {
1197 const bin_info_t *bin_info;
1198 extent_t *slab;
1199
1200 bin_info = &bin_infos[binind];
1201 if (!arena_is_auto(arena) && bin->slabcur != NULL((void*)0)) {
1202 arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1203 bin->slabcur = NULL((void*)0);
1204 }
1205 slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
1206 if (bin->slabcur != NULL((void*)0)) {
1207 /*
1208 * Another thread updated slabcur while this one ran without the
1209 * bin lock in arena_bin_nonfull_slab_get().
1210 */
1211 if (extent_nfree_get(bin->slabcur) > 0) {
1212 void *ret = arena_slab_reg_alloc(bin->slabcur,
1213 bin_info);
1214 if (slab != NULL((void*)0)) {
1215 /*
1216 * arena_slab_alloc() may have allocated slab,
1217 * or it may have been pulled from
1218 * slabs_nonfull. Therefore it is unsafe to
1219 * make any assumptions about how slab has
1220 * previously been used, and
1221 * arena_bin_lower_slab() must be called, as if
1222 * a region were just deallocated from the slab.
1223 */
1224 if (extent_nfree_get(slab) == bin_info->nregs) {
1225 arena_dalloc_bin_slab(tsdn, arena, slab,
1226 bin);
1227 } else {
1228 arena_bin_lower_slab(tsdn, arena, slab,
1229 bin);
1230 }
1231 }
1232 return ret;
1233 }
1234
1235 arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1236 bin->slabcur = NULL((void*)0);
1237 }
1238
1239 if (slab == NULL((void*)0)) {
1240 return NULL((void*)0);
1241 }
1242 bin->slabcur = slab;
1243
1244 assert(extent_nfree_get(bin->slabcur) > 0)do { if (__builtin_expect(!!(config_debug && !(extent_nfree_get
(bin->slabcur) > 0)), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1244, "extent_nfree_get(bin->slabcur) > 0"
); abort(); } } while (0)
;
1245
1246 return arena_slab_reg_alloc(slab, bin_info);
1247}
1248
1249void
1250arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
1251 cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
1252 unsigned i, nfill;
1253 bin_t *bin;
1254
1255 assert(tbin->ncached == 0)do { if (__builtin_expect(!!(config_debug && !(tbin->
ncached == 0)), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1255, "tbin->ncached == 0"); abort(); } }
while (0)
;
1256
1257 if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
1258 prof_idump(tsdn);
1259 }
1260 bin = &arena->bins[binind];
1261 malloc_mutex_lock(tsdn, &bin->lock);
1262 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1263 tcache->lg_fill_div[binind]); i < nfill; i++) {
1264 extent_t *slab;
1265 void *ptr;
1266 if ((slab = bin->slabcur) != NULL((void*)0) && extent_nfree_get(slab) >
1267 0) {
1268 ptr = arena_slab_reg_alloc(slab, &bin_infos[binind]);
1269 } else {
1270 ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
1271 }
1272 if (ptr == NULL((void*)0)) {
1273 /*
1274 * OOM. tbin->avail isn't yet filled down to its first
1275 * element, so the successful allocations (if any) must
1276 * be moved just before tbin->avail before bailing out.
1277 */
1278 if (i > 0) {
1279 memmove(tbin->avail - i, tbin->avail - nfill,
1280 i * sizeof(void *));
1281 }
1282 break;
1283 }
1284 if (config_fill && unlikely(opt_junk_alloc)__builtin_expect(!!(opt_junk_alloc), 0)) {
1285 arena_alloc_junk_small(ptr, &bin_infos[binind], true1);
1286 }
1287 /* Insert such that low regions get used first. */
1288 *(tbin->avail - nfill + i) = ptr;
1289 }
1290 if (config_stats) {
1291 bin->stats.nmalloc += i;
1292 bin->stats.nrequests += tbin->tstats.nrequests;
1293 bin->stats.curregs += i;
1294 bin->stats.nfills++;
1295 tbin->tstats.nrequests = 0;
1296 }
1297 malloc_mutex_unlock(tsdn, &bin->lock);
1298 tbin->ncached = i;
1299 arena_decay_tick(tsdn, arena);
1300}
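/*
 * Editor's note: the fill count is ncached_max >> lg_fill_div[binind], so a
 * per-bin lg_fill_div of 0 refills the cache bin completely and each increment
 * halves the batch (for illustration, ncached_max == 200 with lg_fill_div == 2
 * would attempt 50 regions). On a partial OOM the memmove above repacks the i
 * regions obtained so far into the slots immediately below tbin->avail, so
 * tbin->ncached = i still describes a contiguous run.
 */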
1301
1302void
1303arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool_Bool zero) {
1304 if (!zero) {
1305 memset(ptr, JEMALLOC_ALLOC_JUNK((uint8_t)0xa5), bin_info->reg_size);
1306 }
1307}
1308
1309static void
1310arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
1311 memset(ptr, JEMALLOC_FREE_JUNK((uint8_t)0x5a), bin_info->reg_size);
1312}
1313arena_dalloc_junk_small_t *JET_MUTABLEconst arena_dalloc_junk_small =
1314 arena_dalloc_junk_small_impl;
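/*
 * Editor's note: with the documented junk option enabled, small regions are
 * filled with 0xa5 on allocation and 0x5a on deallocation, as the two helpers
 * above show, so a use-after-free of a small region tends to surface as a
 * 0x5a5a... pattern. For illustration only, MALLOC_CONF="junk:true" enables
 * both fills in builds compiled with config_fill.
 */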
1315
1316static void *
1317arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool_Bool zero) {
1318 void *ret;
1319 bin_t *bin;
1320 size_t usize;
1321 extent_t *slab;
1322
1323 assert(binind < NBINS)do { if (__builtin_expect(!!(config_debug && !(binind
< 39)), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1323, "binind < NBINS"); abort(); } } while
(0)
;
1324 bin = &arena->bins[binind];
1325 usize = sz_index2size(binind);
1326
1327 malloc_mutex_lock(tsdn, &bin->lock);
1328 if ((slab = bin->slabcur) != NULL((void*)0) && extent_nfree_get(slab) > 0) {
1329 ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
1330 } else {
1331 ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
1332 }
1333
1334 if (ret == NULL((void*)0)) {
1335 malloc_mutex_unlock(tsdn, &bin->lock);
1336 return NULL((void*)0);
1337 }
1338
1339 if (config_stats) {
1340 bin->stats.nmalloc++;
1341 bin->stats.nrequests++;
1342 bin->stats.curregs++;
1343 }
1344 malloc_mutex_unlock(tsdn, &bin->lock);
1345 if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
1346 prof_idump(tsdn);
1347 }
1348
1349 if (!zero) {
1350 if (config_fill) {
1351 if (unlikely(opt_junk_alloc)__builtin_expect(!!(opt_junk_alloc), 0)) {
1352 arena_alloc_junk_small(ret,
1353 &bin_infos[binind], false0);
1354 } else if (unlikely(opt_zero)__builtin_expect(!!(opt_zero), 0)) {
1355 memset(ret, 0, usize);
1356 }
1357 }
1358 } else {
1359 if (config_fill && unlikely(opt_junk_alloc)__builtin_expect(!!(opt_junk_alloc), 0)) {
1360 arena_alloc_junk_small(ret, &bin_infos[binind],
1361 true1);
1362 }
1363 memset(ret, 0, usize);
1364 }
1365
1366 arena_decay_tick(tsdn, arena);
1367 return ret;
1368}
1369
1370void *
1371arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
1372 bool_Bool zero) {
1373 assert(!tsdn_null(tsdn) || arena != NULL)do { if (__builtin_expect(!!(config_debug && !(!tsdn_null
(tsdn) || arena != ((void*)0))), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1373, "!tsdn_null(tsdn) || arena != NULL"); abort
(); } } while (0)
;
1374
1375 if (likely(!tsdn_null(tsdn))__builtin_expect(!!(!tsdn_null(tsdn)), 1)) {
1376 arena = arena_choose(tsdn_tsd(tsdn), arena);
1377 }
1378 if (unlikely(arena == NULL)__builtin_expect(!!(arena == ((void*)0)), 0)) {
1379 return NULL((void*)0);
1380 }
1381
1382 if (likely(size <= SMALL_MAXCLASS)__builtin_expect(!!(size <= ((((size_t)1) << 13) + (
((size_t)3) << 11))), 1)
) {
1383 return arena_malloc_small(tsdn, arena, ind, zero);
1384 }
1385 return large_malloc(tsdn, arena, sz_index2size(ind), zero);
1386}
1387
1388void *
1389arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
1390 bool_Bool zero, tcache_t *tcache) {
1391 void *ret;
1392
1393 if (usize <= SMALL_MAXCLASS((((size_t)1) << 13) + (((size_t)3) << 11)) && (alignment < PAGE((size_t)(1U << 12)) || (alignment == PAGE((size_t)(1U << 12))
1394 && (usize & PAGE_MASK((size_t)(((size_t)(1U << 12)) - 1))) == 0))) {
1395 /* Small; alignment doesn't require special slab placement. */
1396 ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
1397 zero, tcache, true1);
1398 } else {
1399 if (likely(alignment <= CACHELINE)__builtin_expect(!!(alignment <= 64), 1)) {
1400 ret = large_malloc(tsdn, arena, usize, zero);
1401 } else {
1402 ret = large_palloc(tsdn, arena, usize, alignment, zero);
1403 }
1404 }
1405 return ret;
1406}
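/*
 * Editor's note: with the constants visible in this build (PAGE = 4 KiB,
 * CACHELINE = 64, SMALL_MAXCLASS = 14336), the dispatch works out as follows.
 * A small usize whose alignment is sub-page (or exactly one page when usize is
 * a page multiple) stays on the ordinary slab path; callers derive usize via
 * sz_sa2u() (as in arena_ralloc_move_helper() below), so the class's natural
 * placement already satisfies the alignment. For example, usize 8192 with
 * 4 KiB alignment is served as a normal small allocation, whereas usize 10240
 * with the same alignment is not a page multiple and falls through to
 * large_palloc(); requests with alignment <= CACHELINE that leave the small
 * range use large_malloc().
 */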
1407
1408void
1409arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
1410 cassert(config_prof)do { if (__builtin_expect(!!(!(config_prof)), 0)) { do { if (
config_debug) { malloc_printf( "<jemalloc>: %s:%d: Unreachable code reached\n"
, "src/arena.c", 1410); abort(); } __builtin_unreachable(); }
while (0); } } while (0)
;
1411 assert(ptr != NULL)do { if (__builtin_expect(!!(config_debug && !(ptr !=
((void*)0))), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1411, "ptr != NULL"); abort(); } } while (0)
;
1412 assert(isalloc(tsdn, ptr) == LARGE_MINCLASS)do { if (__builtin_expect(!!(config_debug && !(isalloc
(tsdn, ptr) == (((size_t)1) << 14))), 0)) { malloc_printf
( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", "src/arena.c"
, 1412, "isalloc(tsdn, ptr) == LARGE_MINCLASS"); abort(); } }
while (0)
;
1413 assert(usize <= SMALL_MAXCLASS)do { if (__builtin_expect(!!(config_debug && !(usize <=
((((size_t)1) << 13) + (((size_t)3) << 11)))), 0
)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1413, "usize <= SMALL_MAXCLASS"); abort()
; } } while (0)
;
1414
1415 rtree_ctx_t rtree_ctx_fallback;
1416 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1417
1418 extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
1419 (uintptr_t)ptr, true1);
1420 arena_t *arena = extent_arena_get(extent);
1421
1422 szind_t szind = sz_size2index(usize);
1423 extent_szind_set(extent, szind);
1424 rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
1425 szind, false0);
1426
1427 prof_accum_cancel(tsdn, &arena->prof_accum, usize);
1428
1429 assert(isalloc(tsdn, ptr) == usize)do { if (__builtin_expect(!!(config_debug && !(isalloc
(tsdn, ptr) == usize)), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1429, "isalloc(tsdn, ptr) == usize"); abort(
); } } while (0)
;
1430}
1431
1432static size_t
1433arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
1434 cassert(config_prof)do { if (__builtin_expect(!!(!(config_prof)), 0)) { do { if (
config_debug) { malloc_printf( "<jemalloc>: %s:%d: Unreachable code reached\n"
, "src/arena.c", 1434); abort(); } __builtin_unreachable(); }
while (0); } } while (0)
;
1435 assert(ptr != NULL)do { if (__builtin_expect(!!(config_debug && !(ptr !=
((void*)0))), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1435, "ptr != NULL"); abort(); } } while (0)
;
1436
1437 extent_szind_set(extent, NBINS39);
1438 rtree_ctx_t rtree_ctx_fallback;
1439 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1440 rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
1441 NBINS39, false0);
1442
1443 assert(isalloc(tsdn, ptr) == LARGE_MINCLASS)do { if (__builtin_expect(!!(config_debug && !(isalloc
(tsdn, ptr) == (((size_t)1) << 14))), 0)) { malloc_printf
( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", "src/arena.c"
, 1443, "isalloc(tsdn, ptr) == LARGE_MINCLASS"); abort(); } }
while (0)
;
1444
1445 return LARGE_MINCLASS(((size_t)1) << 14);
1446}
1447
1448void
1449arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
1450 bool_Bool slow_path) {
1451 cassert(config_prof)do { if (__builtin_expect(!!(!(config_prof)), 0)) { do { if (
config_debug) { malloc_printf( "<jemalloc>: %s:%d: Unreachable code reached\n"
, "src/arena.c", 1451); abort(); } __builtin_unreachable(); }
while (0); } } while (0)
;
1452 assert(opt_prof)do { if (__builtin_expect(!!(config_debug && !(opt_prof
)), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1452, "opt_prof"); abort(); } } while (0)
;
1453
1454 extent_t *extent = iealloc(tsdn, ptr);
1455 size_t usize = arena_prof_demote(tsdn, extent, ptr);
1456 if (usize <= tcache_maxclass) {
1457 tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
1458 sz_size2index(usize), slow_path);
1459 } else {
1460 large_dalloc(tsdn, extent);
1461 }
1462}
1463
1464static void
1465arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
1466 /* Dissociate slab from bin. */
1467 if (slab == bin->slabcur) {
1468 bin->slabcur = NULL((void*)0);
1469 } else {
1470 szind_t binind = extent_szind_get(slab);
1471 const bin_info_t *bin_info = &bin_infos[binind];
1472
1473 /*
1474 * The following block's conditional is necessary because if the
1475 * slab only contains one region, then it never gets inserted
1476 * into the non-full slabs heap.
1477 */
1478 if (bin_info->nregs == 1) {
1479 arena_bin_slabs_full_remove(arena, bin, slab);
1480 } else {
1481 arena_bin_slabs_nonfull_remove(bin, slab);
1482 }
1483 }
1484}
1485
1486static void
1487arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
1488 bin_t *bin) {
1489 assert(slab != bin->slabcur)do { if (__builtin_expect(!!(config_debug && !(slab !=
bin->slabcur)), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1489, "slab != bin->slabcur"); abort(); }
} while (0)
;
1490
1491 malloc_mutex_unlock(tsdn, &bin->lock);
1492 /******************************/
1493 arena_slab_dalloc(tsdn, arena, slab);
1494 /****************************/
1495 malloc_mutex_lock(tsdn, &bin->lock);
1496 if (config_stats) {
1497 bin->stats.curslabs--;
1498 }
1499}
1500
1501static void
1502arena_bin_lower_slab(UNUSED__attribute__((unused)) tsdn_t *tsdn, arena_t *arena, extent_t *slab,
1503 bin_t *bin) {
1504 assert(extent_nfree_get(slab) > 0)do { if (__builtin_expect(!!(config_debug && !(extent_nfree_get
(slab) > 0)), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1504, "extent_nfree_get(slab) > 0"); abort
(); } } while (0)
;
1505
1506 /*
1507 * Make sure that if bin->slabcur is non-NULL, it refers to the
1508 * oldest/lowest non-full slab. It is okay to NULL slabcur out rather
1509 * than proactively keeping it pointing at the oldest/lowest non-full
1510 * slab.
1511 */
1512 if (bin->slabcur != NULL((void*)0) && extent_snad_comp(bin->slabcur, slab) > 0) {
1513 /* Switch slabcur. */
1514 if (extent_nfree_get(bin->slabcur) > 0) {
1515 arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
1516 } else {
1517 arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1518 }
1519 bin->slabcur = slab;
1520 if (config_stats) {
1521 bin->stats.reslabs++;
1522 }
1523 } else {
1524 arena_bin_slabs_nonfull_insert(bin, slab);
1525 }
1526}
1527
1528static void
1529arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
1530 void *ptr, bool_Bool junked) {
1531 arena_slab_data_t *slab_data = extent_slab_data_get(slab);
1532 szind_t binind = extent_szind_get(slab);
1533 bin_t *bin = &arena->bins[binind];
1534 const bin_info_t *bin_info = &bin_infos[binind];
1535
1536 if (!junked && config_fill && unlikely(opt_junk_free)__builtin_expect(!!(opt_junk_free), 0)) {
1537 arena_dalloc_junk_small(ptr, bin_info);
1538 }
1539
1540 arena_slab_reg_dalloc(slab, slab_data, ptr);
1541 unsigned nfree = extent_nfree_get(slab);
1542 if (nfree == bin_info->nregs) {
1543 arena_dissociate_bin_slab(arena, slab, bin);
1544 arena_dalloc_bin_slab(tsdn, arena, slab, bin);
1545 } else if (nfree == 1 && slab != bin->slabcur) {
1546 arena_bin_slabs_full_remove(arena, bin, slab);
1547 arena_bin_lower_slab(tsdn, arena, slab, bin);
1548 }
1549
1550 if (config_stats) {
1551 bin->stats.ndalloc++;
1552 bin->stats.curregs--;
1553 }
1554}
1555
1556void
1557arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
1558 void *ptr) {
1559 arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true1);
1560}
1561
1562static void
1563arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
1564 szind_t binind = extent_szind_get(extent);
1565 bin_t *bin = &arena->bins[binind];
1566
1567 malloc_mutex_lock(tsdn, &bin->lock);
1568 arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false0);
1569 malloc_mutex_unlock(tsdn, &bin->lock);
1570}
1571
1572void
1573arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
1574 extent_t *extent = iealloc(tsdn, ptr);
1575 arena_t *arena = extent_arena_get(extent);
1576
1577 arena_dalloc_bin(tsdn, arena, extent, ptr);
1578 arena_decay_tick(tsdn, arena);
1579}
1580
1581bool_Bool
1582arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
1583 size_t extra, bool_Bool zero) {
1584 /* Calls with non-zero extra had to clamp extra. */
1585 assert(extra == 0 || size + extra <= LARGE_MAXCLASS)do { if (__builtin_expect(!!(config_debug && !(extra ==
0 || size + extra <= ((((size_t)1) << 62) + (((size_t
)3) << 60)))), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1585, "extra == 0 || size + extra <= LARGE_MAXCLASS"
); abort(); } } while (0)
;
1586
1587 if (unlikely(size > LARGE_MAXCLASS)__builtin_expect(!!(size > ((((size_t)1) << 62) + ((
(size_t)3) << 60))), 0)
) {
1588 return true1;
1589 }
1590
1591 extent_t *extent = iealloc(tsdn, ptr);
1592 size_t usize_min = sz_s2u(size);
1593 size_t usize_max = sz_s2u(size + extra);
1594 if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)__builtin_expect(!!(oldsize <= ((((size_t)1) << 13) +
(((size_t)3) << 11)) && usize_min <= ((((size_t
)1) << 13) + (((size_t)3) << 11))), 1)
) {
1595 /*
1596 * Avoid moving the allocation if the size class can be left the
1597 * same.
1598 */
1599 assert(bin_infos[sz_size2index(oldsize)].reg_size ==do { if (__builtin_expect(!!(config_debug && !(bin_infos
[sz_size2index(oldsize)].reg_size == oldsize)), 0)) { malloc_printf
( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", "src/arena.c"
, 1600, "bin_infos[sz_size2index(oldsize)].reg_size == oldsize"
); abort(); } } while (0)
1600 oldsize)do { if (__builtin_expect(!!(config_debug && !(bin_infos
[sz_size2index(oldsize)].reg_size == oldsize)), 0)) { malloc_printf
( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", "src/arena.c"
, 1600, "bin_infos[sz_size2index(oldsize)].reg_size == oldsize"
); abort(); } } while (0)
;
1601 if ((usize_max > SMALL_MAXCLASS((((size_t)1) << 13) + (((size_t)3) << 11)) || sz_size2index(usize_max) !=
1602 sz_size2index(oldsize)) && (size > oldsize || usize_max <
1603 oldsize)) {
1604 return true1;
1605 }
1606
1607 arena_decay_tick(tsdn, extent_arena_get(extent));
1608 return false0;
1609 } else if (oldsize >= LARGE_MINCLASS(((size_t)1) << 14) && usize_max >= LARGE_MINCLASS(((size_t)1) << 14)) {
1610 return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
1611 zero);
1612 }
1613
1614 return true1;
1615}
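/*
 * Editor's note, with a worked example (extra == 0): oldsize arrives as a
 * usable size, so for a region in the 112-byte class a new request of 100
 * bytes maps to usize 112, the class is unchanged, and the function ticks
 * decay and returns false (resized in place); a request of 128 bytes maps to a
 * different class with size > oldsize, so it returns true and the caller must
 * allocate-and-copy. Large-to-large resizes are delegated to
 * large_ralloc_no_move(), and any resize that crosses the small/large boundary
 * reports true.
 */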
1616
1617static void *
1618arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
1619 size_t alignment, bool_Bool zero, tcache_t *tcache) {
1620 if (alignment == 0) {
1621 return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
1622 zero, tcache, true1);
1623 }
1624 usize = sz_sa2u(usize, alignment);
1625 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)__builtin_expect(!!(usize == 0 || usize > ((((size_t)1) <<
62) + (((size_t)3) << 60))), 0)
) {
1626 return NULL((void*)0);
1627 }
1628 return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
1629}
1630
1631void *
1632arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
1633 size_t size, size_t alignment, bool_Bool zero, tcache_t *tcache) {
1634 size_t usize = sz_s2u(size);
1635 if (unlikely(usize == 0 || size > LARGE_MAXCLASS)__builtin_expect(!!(usize == 0 || size > ((((size_t)1) <<
62) + (((size_t)3) << 60))), 0)
) {
1636 return NULL((void*)0);
1637 }
1638
1639 if (likely(usize <= SMALL_MAXCLASS)__builtin_expect(!!(usize <= ((((size_t)1) << 13) + (
((size_t)3) << 11))), 1)
) {
1640 /* Try to avoid moving the allocation. */
1641 if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) {
1642 return ptr;
1643 }
1644 }
1645
1646 if (oldsize >= LARGE_MINCLASS(((size_t)1) << 14) && usize >= LARGE_MINCLASS(((size_t)1) << 14)) {
1647 return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize,
1648 alignment, zero, tcache);
1649 }
1650
1651 /*
1652 * size and oldsize are different enough that we need to move the
1653 * object. In that case, fall back to allocating new space and copying.
1654 */
1655 void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
1656 zero, tcache);
1657 if (ret == NULL((void*)0)) {
1658 return NULL((void*)0);
1659 }
1660
1661 /*
1662 * Junk/zero-filling were already done by
1663 * ipalloc()/arena_malloc().
1664 */
1665
1666 size_t copysize = (usize < oldsize) ? usize : oldsize;
1667 memcpy(ret, ptr, copysize);
1668 isdalloct(tsdn, ptr, oldsize, tcache, NULL((void*)0), true1);
1669 return ret;
1670}
1671
1672dss_prec_t
1673arena_dss_prec_get(arena_t *arena) {
1674 return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIREatomic_memory_order_acquire);
1675}
1676
1677bool_Bool
1678arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
1679 if (!have_dss) {
1680 return (dss_prec != dss_prec_disabled);
1681 }
1682 atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASEatomic_memory_order_release);
1683 return false0;
1684}
1685
1686ssize_t
1687arena_dirty_decay_ms_default_get(void) {
1688 return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXEDatomic_memory_order_relaxed);
1689}
1690
1691bool_Bool
1692arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
1693 if (!arena_decay_ms_valid(decay_ms)) {
1694 return true1;
1695 }
1696 atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXEDatomic_memory_order_relaxed);
1697 return false0;
1698}
1699
1700ssize_t
1701arena_muzzy_decay_ms_default_get(void) {
1702 return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXEDatomic_memory_order_relaxed);
1703}
1704
1705bool_Bool
1706arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
1707 if (!arena_decay_ms_valid(decay_ms)) {
1708 return true1;
1709 }
1710 atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXEDatomic_memory_order_relaxed);
1711 return false0;
1712}
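/*
 * Editor's note: these defaults are seeded from opt_dirty_decay_ms and
 * opt_muzzy_decay_ms in arena_boot() below and back the documented
 * dirty_decay_ms/muzzy_decay_ms knobs; -1 disables decay-based purging and 0
 * purges eagerly. For illustration only, an application could start with
 * MALLOC_CONF="dirty_decay_ms:0,muzzy_decay_ms:0" to return unused pages to
 * the OS immediately.
 */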
1713
1714bool_Bool
1715arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
1716 size_t *new_limit) {
1717 assert(opt_retain)do { if (__builtin_expect(!!(config_debug && !(opt_retain
)), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1717, "opt_retain"); abort(); } } while (0)
;
1718
1719 pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0)= 0;
1720 if (new_limit != NULL((void*)0)) {
1721 size_t limit = *new_limit;
1722 /* Grow no more than the new limit. */
1723 if ((new_ind = sz_psz2ind(limit + 1) - 1) >
1724 EXTENT_GROW_MAX_PIND(199 - 1)) {
1725 return true1;
1726 }
1727 }
1728
1729 malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
1730 if (old_limit != NULL((void*)0)) {
1731 *old_limit = sz_pind2sz(arena->retain_grow_limit);
1732 }
1733 if (new_limit != NULL((void*)0)) {
1734 arena->retain_grow_limit = new_ind;
1735 }
1736 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
1737
1738 return false0;
1739}
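/*
 * Editor's note: this is presumably reached via the documented
 * "arena.<i>.retain_grow_limit" mallctl (size_t, read/write), which is only
 * meaningful with opt_retain (hence the assertion above). A requested limit is
 * rounded down via sz_psz2ind(limit + 1) - 1 to the largest page-size class
 * not exceeding it, and rejected if it exceeds EXTENT_GROW_MAX_PIND.
 * Illustrative call, assuming an unprefixed mallctl():
 *
 *	size_t old, limit = (size_t)64 << 20;	/* cap growth steps at 64 MiB */
 *	size_t oldsz = sizeof(old), newsz = sizeof(limit);
 *	mallctl("arena.0.retain_grow_limit", &old, &oldsz, &limit, newsz);
 */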
1740
1741unsigned
1742arena_nthreads_get(arena_t *arena, bool_Bool internal) {
1743 return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXEDatomic_memory_order_relaxed);
1744}
1745
1746void
1747arena_nthreads_inc(arena_t *arena, bool_Bool internal) {
1748 atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXEDatomic_memory_order_relaxed);
1749}
1750
1751void
1752arena_nthreads_dec(arena_t *arena, bool_Bool internal) {
1753 atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXEDatomic_memory_order_relaxed);
1754}
1755
1756size_t
1757arena_extent_sn_next(arena_t *arena) {
1758 return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXEDatomic_memory_order_relaxed);
1759}
1760
1761arena_t *
1762arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
1763 arena_t *arena;
1764 base_t *base;
1765 unsigned i;
1766
1767 if (ind == 0) {
1768 base = b0get();
1769 } else {
1770 base = base_new(tsdn, ind, extent_hooks);
1771 if (base == NULL((void*)0)) {
1772 return NULL((void*)0);
1773 }
1774 }
1775
1776 arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE64);
1777 if (arena == NULL((void*)0)) {
1778 goto label_error;
1779 }
1780
1781 atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXEDatomic_memory_order_relaxed);
1782 atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXEDatomic_memory_order_relaxed);
1783 arena->last_thd = NULL((void*)0);
1784
1785 if (config_stats) {
1786 if (arena_stats_init(tsdn, &arena->stats)) {
1787 goto label_error;
1788 }
1789
1790 ql_new(&arena->tcache_ql)do { (&arena->tcache_ql)->qlh_first = ((void*)0); }
while (0)
;
1791 ql_new(&arena->cache_bin_array_descriptor_ql)do { (&arena->cache_bin_array_descriptor_ql)->qlh_first
= ((void*)0); } while (0)
;
1792 if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
1793 WITNESS_RANK_TCACHE_QL12U, malloc_mutex_rank_exclusive)) {
1794 goto label_error;
1795 }
1796 }
1797
1798 if (config_prof) {
1799 if (prof_accum_init(tsdn, &arena->prof_accum)) {
1800 goto label_error;
1801 }
1802 }
1803
1804 if (config_cache_oblivious) {
1805 /*
1806 * A nondeterministic seed based on the address of arena reduces
1807 * the likelihood of lockstep non-uniform cache index
1808 * utilization among identical concurrent processes, but at the
1809 * cost of test repeatability. For debug builds, instead use a
1810 * deterministic seed.
1811 */
1812 atomic_store_zu(&arena->offset_state, config_debug ? ind :
1813 (size_t)(uintptr_t)arena, ATOMIC_RELAXEDatomic_memory_order_relaxed);
1814 }
1815
1816 atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXEDatomic_memory_order_relaxed);
1817
1818 atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
1819 ATOMIC_RELAXEDatomic_memory_order_relaxed);
1820
1821 atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXEDatomic_memory_order_relaxed);
1822
1823 extent_list_init(&arena->large);
1824 if (malloc_mutex_init(&arena->large_mtx, "arena_large",
1825 WITNESS_RANK_ARENA_LARGE19U, malloc_mutex_rank_exclusive)) {
1826 goto label_error;
1827 }
1828
1829 /*
1830 * Delay coalescing for dirty extents despite the disruptive effect on
1831 * memory layout for best-fit extent allocation, since cached extents
1832 * are likely to be reused soon after deallocation, and the cost of
1833 * merging/splitting extents is non-trivial.
1834 */
1835 if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
1836 true1)) {
1837 goto label_error;
1838 }
1839 /*
1840 * Coalesce muzzy extents immediately, because operations on them are in
1841 * the critical path much less often than for dirty extents.
1842 */
1843 if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
1844 false0)) {
1845 goto label_error;
1846 }
1847 /*
1848 * Coalesce retained extents immediately, in part because they will
1849 * never be evicted (and therefore there's no opportunity for delayed
1850 * coalescing), but also because operations on retained extents are not
1851 * in the critical path.
1852 */
1853 if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
1854 false0)) {
1855 goto label_error;
1856 }
1857
1858 if (arena_decay_init(&arena->decay_dirty,
1859 arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
1860 goto label_error;
1861 }
1862 if (arena_decay_init(&arena->decay_muzzy,
1863 arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
1864 goto label_error;
1865 }
1866
1867 arena->extent_grow_next = sz_psz2ind(HUGEPAGE((size_t)(1U << 21)));
1868 arena->retain_grow_limit = EXTENT_GROW_MAX_PIND(199 - 1);
1869 if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
1870 WITNESS_RANK_EXTENT_GROW13U, malloc_mutex_rank_exclusive)) {
1871 goto label_error;
1872 }
1873
1874 extent_avail_new(&arena->extent_avail);
1875 if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
1876 WITNESS_RANK_EXTENT_AVAIL15U, malloc_mutex_rank_exclusive)) {
1877 goto label_error;
1878 }
1879
1880 /* Initialize bins. */
1881 for (i = 0; i < NBINS39; i++) {
1882 bool_Bool err = bin_init(&arena->bins[i]);
1883 if (err) {
1884 goto label_error;
1885 }
1886 }
1887
1888 arena->base = base;
1889 /* Set arena before creating background threads. */
1890 arena_set(ind, arena);
1891
1892 nstime_init(&arena->create_time, 0);
1893 nstime_update(&arena->create_time);
1894
1895 /* We don't support reentrancy for arena 0 bootstrapping. */
1896 if (ind != 0) {
1897 /*
1898 * If we're here, then arena 0 already exists, so bootstrapping
1899 * is done enough that we should have tsd.
1900 */
1901 assert(!tsdn_null(tsdn))do { if (__builtin_expect(!!(config_debug && !(!tsdn_null
(tsdn))), 0)) { malloc_printf( "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n"
, "src/arena.c", 1901, "!tsdn_null(tsdn)"); abort(); } } while
(0)
;
1902 pre_reentrancy(tsdn_tsd(tsdn), arena);
1903 if (hooks_arena_new_hook) {
1904 hooks_arena_new_hook();
1905 }
1906 post_reentrancy(tsdn_tsd(tsdn));
1907 }
1908
1909 return arena;
1910label_error:
1911 if (ind != 0) {
1912 base_delete(tsdn, base);
1913 }
1914 return NULL((void*)0);
1915}
1916
1917void
1918arena_boot(void) {
1919 arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
1920 arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
1921#define REGIND_bin_yes(index, reg_size) \
1922 div_init(&arena_binind_div_info[(index)], (reg_size));
1923#define REGIND_bin_no(index, reg_size)
1924#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
1925 lg_delta_lookup) \
1926 REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta << lg_delta))
1927 SIZE_CLASSESSC( 0, 3, 3, 0, no, yes, 1, 3) SC( 1, 3, 3, 1, no, yes, 1, 3)
SC( 2, 3, 3, 2, no, yes, 3, 3) SC( 3, 3, 3, 3, no, yes, 1, 3
) SC( 4, 5, 3, 1, no, yes, 5, 3) SC( 5, 5, 3, 2, no, yes, 3, 3
) SC( 6, 5, 3, 3, no, yes, 7, 3) SC( 7, 5, 3, 4, no, yes, 1, 3
) SC( 8, 6, 4, 1, no, yes, 5, 4) SC( 9, 6, 4, 2, no, yes, 3, 4
) SC( 10, 6, 4, 3, no, yes, 7, 4) SC( 11, 6, 4, 4, no, yes, 1
, 4) SC( 12, 7, 5, 1, no, yes, 5, 5) SC( 13, 7, 5, 2, no, yes
, 3, 5) SC( 14, 7, 5, 3, no, yes, 7, 5) SC( 15, 7, 5, 4, no, yes
, 1, 5) SC( 16, 8, 6, 1, no, yes, 5, 6) SC( 17, 8, 6, 2, no, yes
, 3, 6) SC( 18, 8, 6, 3, no, yes, 7, 6) SC( 19, 8, 6, 4, no, yes
, 1, 6) SC( 20, 9, 7, 1, no, yes, 5, 7) SC( 21, 9, 7, 2, no, yes
, 3, 7) SC( 22, 9, 7, 3, no, yes, 7, 7) SC( 23, 9, 7, 4, no, yes
, 1, 7) SC( 24, 10, 8, 1, no, yes, 5, 8) SC( 25, 10, 8, 2, no
, yes, 3, 8) SC( 26, 10, 8, 3, no, yes, 7, 8) SC( 27, 10, 8, 4
, no, yes, 1, 8) SC( 28, 11, 9, 1, no, yes, 5, 9) SC( 29, 11,
9, 2, no, yes, 3, 9) SC( 30, 11, 9, 3, no, yes, 7, 9) SC( 31
, 11, 9, 4, yes, yes, 1, 9) SC( 32, 12, 10, 1, no, yes, 5, no
) SC( 33, 12, 10, 2, no, yes, 3, no) SC( 34, 12, 10, 3, no, yes
, 7, no) SC( 35, 12, 10, 4, yes, yes, 2, no) SC( 36, 13, 11, 1
, no, yes, 5, no) SC( 37, 13, 11, 2, yes, yes, 3, no) SC( 38,
13, 11, 3, no, yes, 7, no) SC( 39, 13, 11, 4, yes, no, 0, no
) SC( 40, 14, 12, 1, yes, no, 0, no) SC( 41, 14, 12, 2, yes, no
, 0, no) SC( 42, 14, 12, 3, yes, no, 0, no) SC( 43, 14, 12, 4
, yes, no, 0, no) SC( 44, 15, 13, 1, yes, no, 0, no) SC( 45, 15
, 13, 2, yes, no, 0, no) SC( 46, 15, 13, 3, yes, no, 0, no) SC
( 47, 15, 13, 4, yes, no, 0, no) SC( 48, 16, 14, 1, yes, no, 0
, no) SC( 49, 16, 14, 2, yes, no, 0, no) SC( 50, 16, 14, 3, yes
, no, 0, no) SC( 51, 16, 14, 4, yes, no, 0, no) SC( 52, 17, 15
, 1, yes, no, 0, no) SC( 53, 17, 15, 2, yes, no, 0, no) SC( 54
, 17, 15, 3, yes, no, 0, no) SC( 55, 17, 15, 4, yes, no, 0, no
) SC( 56, 18, 16, 1, yes, no, 0, no) SC( 57, 18, 16, 2, yes, no
, 0, no) SC( 58, 18, 16, 3, yes, no, 0, no) SC( 59, 18, 16, 4
, yes, no, 0, no) SC( 60, 19, 17, 1, yes, no, 0, no) SC( 61, 19
, 17, 2, yes, no, 0, no) SC( 62, 19, 17, 3, yes, no, 0, no) SC
( 63, 19, 17, 4, yes, no, 0, no) SC( 64, 20, 18, 1, yes, no, 0
, no) SC( 65, 20, 18, 2, yes, no, 0, no) SC( 66, 20, 18, 3, yes
, no, 0, no) SC( 67, 20, 18, 4, yes, no, 0, no) SC( 68, 21, 19
, 1, yes, no, 0, no) SC( 69, 21, 19, 2, yes, no, 0, no) SC( 70
, 21, 19, 3, yes, no, 0, no) SC( 71, 21, 19, 4, yes, no, 0, no
) SC( 72, 22, 20, 1, yes, no, 0, no) SC( 73, 22, 20, 2, yes, no
, 0, no) SC( 74, 22, 20, 3, yes, no, 0, no) SC( 75, 22, 20, 4
, yes, no, 0, no) SC( 76, 23, 21, 1, yes, no, 0, no) SC( 77, 23
, 21, 2, yes, no, 0, no) SC( 78, 23, 21, 3, yes, no, 0, no) SC
( 79, 23, 21, 4, yes, no, 0, no) SC( 80, 24, 22, 1, yes, no, 0
, no) SC( 81, 24, 22, 2, yes, no, 0, no) SC( 82, 24, 22, 3, yes
, no, 0, no) SC( 83, 24, 22, 4, yes, no, 0, no) SC( 84, 25, 23
, 1, yes, no, 0, no) SC( 85, 25, 23, 2, yes, no, 0, no) SC( 86
, 25, 23, 3, yes, no, 0, no) SC( 87, 25, 23, 4, yes, no, 0, no
) SC( 88, 26, 24, 1, yes, no, 0, no) SC( 89, 26, 24, 2, yes, no
, 0, no) SC( 90, 26, 24, 3, yes, no, 0, no) SC( 91, 26, 24, 4
, yes, no, 0, no) SC( 92, 27, 25, 1, yes, no, 0, no) SC( 93, 27
, 25, 2, yes, no, 0, no) SC( 94, 27, 25, 3, yes, no, 0, no) SC
( 95, 27, 25, 4, yes, no, 0, no) SC( 96, 28, 26, 1, yes, no, 0
, no) SC( 97, 28, 26, 2, yes, no, 0, no) SC( 98, 28, 26, 3, yes
, no, 0, no) SC( 99, 28, 26, 4, yes, no, 0, no) SC(100, 29, 27
, 1, yes, no, 0, no) SC(101, 29, 27, 2, yes, no, 0, no) SC(102
, 29, 27, 3, yes, no, 0, no) SC(103, 29, 27, 4, yes, no, 0, no
) SC(104, 30, 28, 1, yes, no, 0, no) SC(105, 30, 28, 2, yes, no
, 0, no) SC(106, 30, 28, 3, yes, no, 0, no) SC(107, 30, 28, 4
, yes, no, 0, no) SC(108, 31, 29, 1, yes, no, 0, no) SC(109, 31
, 29, 2, yes, no, 0, no) SC(110, 31, 29, 3, yes, no, 0, no) SC
(111, 31, 29, 4, yes, no, 0, no) SC(112, 32, 30, 1, yes, no, 0
, no) SC(113, 32, 30, 2, yes, no, 0, no) SC(114, 32, 30, 3, yes
, no, 0, no) SC(115, 32, 30, 4, yes, no, 0, no) SC(116, 33, 31
, 1, yes, no, 0, no) SC(117, 33, 31, 2, yes, no, 0, no) SC(118
, 33, 31, 3, yes, no, 0, no) SC(119, 33, 31, 4, yes, no, 0, no
) SC(120, 34, 32, 1, yes, no, 0, no) SC(121, 34, 32, 2, yes, no
, 0, no) SC(122, 34, 32, 3, yes, no, 0, no) SC(123, 34, 32, 4
, yes, no, 0, no) SC(124, 35, 33, 1, yes, no, 0, no) SC(125, 35
, 33, 2, yes, no, 0, no) SC(126, 35, 33, 3, yes, no, 0, no) SC
(127, 35, 33, 4, yes, no, 0, no) SC(128, 36, 34, 1, yes, no, 0
, no) SC(129, 36, 34, 2, yes, no, 0, no) SC(130, 36, 34, 3, yes
, no, 0, no) SC(131, 36, 34, 4, yes, no, 0, no) SC(132, 37, 35
, 1, yes, no, 0, no) SC(133, 37, 35, 2, yes, no, 0, no) SC(134
, 37, 35, 3, yes, no, 0, no) SC(135, 37, 35, 4, yes, no, 0, no
) SC(136, 38, 36, 1, yes, no, 0, no) SC(137, 38, 36, 2, yes, no
, 0, no) SC(138, 38, 36, 3, yes, no, 0, no) SC(139, 38, 36, 4
, yes, no, 0, no) SC(140, 39, 37, 1, yes, no, 0, no) SC(141, 39
, 37, 2, yes, no, 0, no) SC(142, 39, 37, 3, yes, no, 0, no) SC
(143, 39, 37, 4, yes, no, 0, no) SC(144, 40, 38, 1, yes, no, 0
, no) SC(145, 40, 38, 2, yes, no, 0, no) SC(146, 40, 38, 3, yes
, no, 0, no) SC(147, 40, 38, 4, yes, no, 0, no) SC(148, 41, 39
, 1, yes, no, 0, no) SC(149, 41, 39, 2, yes, no, 0, no) SC(150
, 41, 39, 3, yes, no, 0, no) SC(151, 41, 39, 4, yes, no, 0, no
) SC(152, 42, 40, 1, yes, no, 0, no) SC(153, 42, 40, 2, yes, no
, 0, no) SC(154, 42, 40, 3, yes, no, 0, no) SC(155, 42, 40, 4
, yes, no, 0, no) SC(156, 43, 41, 1, yes, no, 0, no) SC(157, 43
, 41, 2, yes, no, 0, no) SC(158, 43, 41, 3, yes, no, 0, no) SC
(159, 43, 41, 4, yes, no, 0, no) SC(160, 44, 42, 1, yes, no, 0
, no) SC(161, 44, 42, 2, yes, no, 0, no) SC(162, 44, 42, 3, yes
, no, 0, no) SC(163, 44, 42, 4, yes, no, 0, no) SC(164, 45, 43
, 1, yes, no, 0, no) SC(165, 45, 43, 2, yes, no, 0, no) SC(166
, 45, 43, 3, yes, no, 0, no) SC(167, 45, 43, 4, yes, no, 0, no
) SC(168, 46, 44, 1, yes, no, 0, no) SC(169, 46, 44, 2, yes, no
, 0, no) SC(170, 46, 44, 3, yes, no, 0, no) SC(171, 46, 44, 4
, yes, no, 0, no) SC(172, 47, 45, 1, yes, no, 0, no) SC(173, 47
, 45, 2, yes, no, 0, no) SC(174, 47, 45, 3, yes, no, 0, no) SC
(175, 47, 45, 4, yes, no, 0, no) SC(176, 48, 46, 1, yes, no, 0
, no) SC(177, 48, 46, 2, yes, no, 0, no) SC(178, 48, 46, 3, yes
, no, 0, no) SC(179, 48, 46, 4, yes, no, 0, no) SC(180, 49, 47
, 1, yes, no, 0, no) SC(181, 49, 47, 2, yes, no, 0, no) SC(182
, 49, 47, 3, yes, no, 0, no) SC(183, 49, 47, 4, yes, no, 0, no
) SC(184, 50, 48, 1, yes, no, 0, no) SC(185, 50, 48, 2, yes, no
, 0, no) SC(186, 50, 48, 3, yes, no, 0, no) SC(187, 50, 48, 4
, yes, no, 0, no) SC(188, 51, 49, 1, yes, no, 0, no) SC(189, 51
, 49, 2, yes, no, 0, no) SC(190, 51, 49, 3, yes, no, 0, no) SC
(191, 51, 49, 4, yes, no, 0, no) SC(192, 52, 50, 1, yes, no, 0
, no) SC(193, 52, 50, 2, yes, no, 0, no) SC(194, 52, 50, 3, yes
, no, 0, no) SC(195, 52, 50, 4, yes, no, 0, no) SC(196, 53, 51
, 1, yes, no, 0, no) SC(197, 53, 51, 2, yes, no, 0, no) SC(198
, 53, 51, 3, yes, no, 0, no) SC(199, 53, 51, 4, yes, no, 0, no
) SC(200, 54, 52, 1, yes, no, 0, no) SC(201, 54, 52, 2, yes, no
, 0, no) SC(202, 54, 52, 3, yes, no, 0, no) SC(203, 54, 52, 4
, yes, no, 0, no) SC(204, 55, 53, 1, yes, no, 0, no) SC(205, 55
, 53, 2, yes, no, 0, no) SC(206, 55, 53, 3, yes, no, 0, no) SC
(207, 55, 53, 4, yes, no, 0, no) SC(208, 56, 54, 1, yes, no, 0
, no) SC(209, 56, 54, 2, yes, no, 0, no) SC(210, 56, 54, 3, yes
, no, 0, no) SC(211, 56, 54, 4, yes, no, 0, no) SC(212, 57, 55
, 1, yes, no, 0, no) SC(213, 57, 55, 2, yes, no, 0, no) SC(214
, 57, 55, 3, yes, no, 0, no) SC(215, 57, 55, 4, yes, no, 0, no
) SC(216, 58, 56, 1, yes, no, 0, no) SC(217, 58, 56, 2, yes, no
, 0, no) SC(218, 58, 56, 3, yes, no, 0, no) SC(219, 58, 56, 4
, yes, no, 0, no) SC(220, 59, 57, 1, yes, no, 0, no) SC(221, 59
, 57, 2, yes, no, 0, no) SC(222, 59, 57, 3, yes, no, 0, no) SC
(223, 59, 57, 4, yes, no, 0, no) SC(224, 60, 58, 1, yes, no, 0
, no) SC(225, 60, 58, 2, yes, no, 0, no) SC(226, 60, 58, 3, yes
, no, 0, no) SC(227, 60, 58, 4, yes, no, 0, no) SC(228, 61, 59
, 1, yes, no, 0, no) SC(229, 61, 59, 2, yes, no, 0, no) SC(230
, 61, 59, 3, yes, no, 0, no) SC(231, 61, 59, 4, yes, no, 0, no
) SC(232, 62, 60, 1, yes, no, 0, no) SC(233, 62, 60, 2, yes, no
, 0, no) SC(234, 62, 60, 3, yes, no, 0, no)
1928#undef REGIND_bin_yes
1929#undef REGIND_bin_no
1930#undef SC
1931}
1932
1933void
1934arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
1935 malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
1936 malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
1937}
1938
1939void
1940arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
1941 if (config_stats) {
1942 malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
1943 }
1944}
1945
1946void
1947arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
1948 malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
1949}
1950
1951void
1952arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
1953 extents_prefork(tsdn, &arena->extents_dirty);
1954 extents_prefork(tsdn, &arena->extents_muzzy);
1955 extents_prefork(tsdn, &arena->extents_retained);
1956}
1957
1958void
1959arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
1960 malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
1961}
1962
1963void
1964arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
1965 base_prefork(tsdn, arena->base);
1966}
1967
1968void
1969arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
1970 malloc_mutex_prefork(tsdn, &arena->large_mtx);
1971}
1972
1973void
1974arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
1975 for (unsigned i = 0; i < NBINS39; i++) {
1976 bin_prefork(tsdn, &arena->bins[i]);
1977 }
1978}
1979
1980void
1981arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
1982 unsigned i;
1983
1984 for (i = 0; i < NBINS39; i++) {
1985 bin_postfork_parent(tsdn, &arena->bins[i]);
1986 }
1987 malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
1988 base_postfork_parent(tsdn, arena->base);
1989 malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
1990 extents_postfork_parent(tsdn, &arena->extents_dirty);
1991 extents_postfork_parent(tsdn, &arena->extents_muzzy);
1992 extents_postfork_parent(tsdn, &arena->extents_retained);
1993 malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
1994 malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
1995 malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
1996 if (config_stats) {
1997 malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
1998 }
1999}
2000
2001void
2002arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
2003 unsigned i;
2004
2005 atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXEDatomic_memory_order_relaxed);
2006 atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXEDatomic_memory_order_relaxed);
2007 if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
2008 arena_nthreads_inc(arena, false0);
2009 }
2010 if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
2011 arena_nthreads_inc(arena, true1);
2012 }
2013 if (config_stats) {
2014 ql_new(&arena->tcache_ql)do { (&arena->tcache_ql)->qlh_first = ((void*)0); }
while (0)
;
2015 ql_new(&arena->cache_bin_array_descriptor_ql)do { (&arena->cache_bin_array_descriptor_ql)->qlh_first
= ((void*)0); } while (0)
;
2016 tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
2017 if (tcache != NULL((void*)0) && tcache->arena == arena) {
2018 ql_elm_new(tcache, link)do { ((tcache))->link.qre_next = ((tcache)); ((tcache))->
link.qre_prev = ((tcache)); } while (0)
;
2019 ql_tail_insert(&arena->tcache_ql, tcache, link)do { if (((&arena->tcache_ql)->qlh_first) != ((void
*)0)) { do { ((tcache))->link.qre_prev = (((&arena->
tcache_ql)->qlh_first))->link.qre_prev; ((tcache))->
link.qre_next = (((&arena->tcache_ql)->qlh_first));
((tcache))->link.qre_prev->link.qre_next = ((tcache));
(((&arena->tcache_ql)->qlh_first))->link.qre_prev
= ((tcache)); } while (0); } ((&arena->tcache_ql)->
qlh_first) = (((tcache))->link.qre_next); } while (0)
;
2020 cache_bin_array_descriptor_init(
2021 &tcache->cache_bin_array_descriptor,
2022 tcache->bins_small, tcache->bins_large);
2023 ql_tail_insert(&arena->cache_bin_array_descriptor_ql,do { if (((&arena->cache_bin_array_descriptor_ql)->
qlh_first) != ((void*)0)) { do { ((&tcache->cache_bin_array_descriptor
))->link.qre_prev = (((&arena->cache_bin_array_descriptor_ql
)->qlh_first))->link.qre_prev; ((&tcache->cache_bin_array_descriptor
))->link.qre_next = (((&arena->cache_bin_array_descriptor_ql
)->qlh_first)); ((&tcache->cache_bin_array_descriptor
))->link.qre_prev->link.qre_next = ((&tcache->cache_bin_array_descriptor
)); (((&arena->cache_bin_array_descriptor_ql)->qlh_first
))->link.qre_prev = ((&tcache->cache_bin_array_descriptor
)); } while (0); } ((&arena->cache_bin_array_descriptor_ql
)->qlh_first) = (((&tcache->cache_bin_array_descriptor
))->link.qre_next); } while (0)
2024 &tcache->cache_bin_array_descriptor, link)do { if (((&arena->cache_bin_array_descriptor_ql)->
qlh_first) != ((void*)0)) { do { ((&tcache->cache_bin_array_descriptor
))->link.qre_prev = (((&arena->cache_bin_array_descriptor_ql
)->qlh_first))->link.qre_prev; ((&tcache->cache_bin_array_descriptor
))->link.qre_next = (((&arena->cache_bin_array_descriptor_ql
)->qlh_first)); ((&tcache->cache_bin_array_descriptor
))->link.qre_prev->link.qre_next = ((&tcache->cache_bin_array_descriptor
)); (((&arena->cache_bin_array_descriptor_ql)->qlh_first
))->link.qre_prev = ((&tcache->cache_bin_array_descriptor
)); } while (0); } ((&arena->cache_bin_array_descriptor_ql
)->qlh_first) = (((&tcache->cache_bin_array_descriptor
))->link.qre_next); } while (0)
;
2025 }
2026 }
2027
2028 for (i = 0; i < NBINS39; i++) {
2029 bin_postfork_child(tsdn, &arena->bins[i]);
2030 }
2031 malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
2032 base_postfork_child(tsdn, arena->base);
2033 malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
2034 extents_postfork_child(tsdn, &arena->extents_dirty);
2035 extents_postfork_child(tsdn, &arena->extents_muzzy);
2036 extents_postfork_child(tsdn, &arena->extents_retained);
2037 malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
2038 malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
2039 malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
2040 if (config_stats) {
2041 malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
2042 }
2043}
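/*
 * Editor's note on the fork handlers above: arena_prefork0() through
 * arena_prefork7() acquire the arena's mutexes in a fixed order before
 * fork(); the parent merely releases them, while the child also resets both
 * nthreads counters and rebuilds the tcache and cache_bin_array_descriptor
 * lists, since only the forking thread (and therefore at most its own tcache)
 * survives into the child process.
 */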