#ifndef _NDB_ALLOCATOR_H_
#define _NDB_ALLOCATOR_H_

#include <cstdint>
#include <iterator>
#include <mutex>

#include "util.h"
#include "core.h"
#include "macros.h"
#include "spinlock.h"

class allocator {
public:

  // our allocator doesn't let allocations exceed maxpercore on any single core
  //
  // Initialize() can be called many times, but only the first call has any effect.
  //
  // Without calling Initialize(), the behavior of this class is undefined.
  static void Initialize(size_t ncpus, size_t maxpercore);

  static void DumpStats();

  // returns an arena linked-list
  static void *
  AllocateArenas(size_t cpu, size_t sz);

  // allocates nhugepgs * hugepagesize contiguous bytes from the given CPU's
  // region and returns the raw, unmanaged pointer.
  //
  // Note that memory returned from here cannot be released back to the
  // allocator, so this should only be used for data structures which live
  // throughout the duration of the system (e.g., log buffers).
  static void *
  AllocateUnmanaged(size_t cpu, size_t nhugepgs);
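
  // For example (assuming 2 MB hugepages, as reported by GetHugepageSize()):
  // AllocateUnmanaged(cpu, 2) would hand back a contiguous 4 MB chunk of
  // cpu's region that is never returned to the managed pool.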

  static void
  ReleaseArenas(void **arenas);
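
  // Illustrative usage sketch (an assumption about the typical call pattern,
  // not a contract defined by this header; `cpu` and `sz` are made up):
  //
  //   allocator::Initialize(ncpus, maxpercore);            // once, at startup
  //   void *arenas[allocator::MAX_ARENAS] = {nullptr};     // per-size-class arena lists
  //   arenas[allocator::ArenaSize(sz).second] =
  //       allocator::AllocateArenas(cpu, sz);              // grab a list of sz-byte units
  //   // ... carve allocations off the returned linked list ...
  //   allocator::ReleaseArenas(arenas);                    // hand everything back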

  static const size_t LgAllocAlignment = 4; // all allocations aligned to 2^4 = 16
  static const size_t AllocAlignment = 1 << LgAllocAlignment;
  static const size_t MAX_ARENAS = 32;

  // maps a requested size to (rounded-up allocation size, arena index)
  static inline std::pair<size_t, size_t>
  ArenaSize(size_t sz)
  {
    const size_t allocsz = util::round_up<size_t, LgAllocAlignment>(sz);
    const size_t arena = allocsz / AllocAlignment - 1;
    return std::make_pair(allocsz, arena);
  }
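
  // Worked example (assuming util::round_up<size_t, LgAllocAlignment> rounds up
  // to the next multiple of 2^4 = 16): ArenaSize(40) rounds 40 up to 48 and maps
  // it to arena index 48/16 - 1 = 2, i.e. it returns {48, 2}.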

  // slow, but only needs to be called on initialization
  static void
  FaultRegion(size_t cpu);

  // returns true if p is managed by this allocator, false otherwise
  static inline bool
  ManagesPointer(const void *p)
  {
    return p >= g_memstart && p < g_memend;
  }

  // assumes p is managed by this allocator; returns the CPU from which this
  // pointer was allocated
  static inline size_t
  PointerToCpu(const void *p)
  {
    ALWAYS_ASSERT(p >= g_memstart);
    ALWAYS_ASSERT(p < g_memend);
    const size_t ret =
      (reinterpret_cast<const char *>(p) -
       reinterpret_cast<const char *>(g_memstart)) / g_maxpercore;
    ALWAYS_ASSERT(ret < g_ncpus);
    return ret;
  }
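
  // Worked example (the numbers are made up): with g_maxpercore = 1 GB, a
  // pointer 2.5 GB past g_memstart lies in the third per-core region, so
  // PointerToCpu() returns 2.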

#ifdef MEMCHECK_MAGIC
  struct pgmetadata {
    uint32_t unit_; // 0-indexed
  } PACKED;

  // returns nullptr if p is not managed, or has not been allocated yet.
  // p does not have to be properly aligned
  static const pgmetadata *
  PointerToPgMetadata(const void *p);
#endif

  static size_t
  GetPageSize()
  {
    static const size_t sz = GetPageSizeImpl();
    return sz;
  }

  static size_t
  GetHugepageSize()
  {
    static const size_t sz = GetHugepageSizeImpl();
    return sz;
  }
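
  // Note: the function-local statics above cache the result of the (presumably
  // OS-querying) *Impl() calls, so that cost is paid once per process rather
  // than on every call.
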
private:
  static size_t GetPageSizeImpl();
  static size_t GetHugepageSizeImpl();
  static bool UseMAdvWillNeed();

  struct regionctx {
    regionctx()
      : region_begin(nullptr),
        region_end(nullptr),
        region_faulted(false)
    {
      NDB_MEMSET(arenas, 0, sizeof(arenas));
    }
    regionctx(const regionctx &) = delete;
    regionctx(regionctx &&) = delete;
    regionctx &operator=(const regionctx &) = delete;

    // set by Initialize()
    void *region_begin;
    void *region_end;
    bool region_faulted;

    spinlock lock;
    std::mutex fault_lock; // XXX: hacky
    void *arenas[MAX_ARENAS];
  };
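
  // (A guess at the intent behind the "XXX: hacky" note above: faulting a
  //  region in FaultRegion() is slow, so a blocking std::mutex is used for
  //  that path rather than spinning on the regionctx spinlock. Treat this as
  //  a reading of the code, not a documented guarantee.)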

  // assumes caller has the regionctx lock held, and
  // will release the lock.
  static void *
  AllocateUnmanagedWithLock(regionctx &pc, size_t nhugepgs);

  // [g_memstart, g_memstart + ncpus * maxpercore) is the mmap()-ed region of memory
  static void *g_memstart;
  static void *g_memend; // g_memstart + ncpus * maxpercore
  static size_t g_ncpus;
  static size_t g_maxpercore;
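
  // Layout sketch (derived from the comments above; each per-core region is
  // contiguous and maxpercore bytes wide):
  //
  //   g_memstart                                              g_memend
  //   |-- cpu 0 --|-- cpu 1 --| ... |-- cpu (ncpus-1) --|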

  static percore<regionctx> g_regions CACHE_ALIGNED;
};
#endif /* _NDB_ALLOCATOR_H_ */