-
Notifications
You must be signed in to change notification settings - Fork 124
Expand file tree
/
Copy pathbackend.h
More file actions
341 lines (299 loc) · 10.1 KB
/
backend.h
File metadata and controls
341 lines (299 loc) · 10.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
#pragma once
#include "../mem/allocconfig.h"
#include "../pal/pal.h"
#include "commitrange.h"
#include "commonconfig.h"
#include "decayrange.h"
#include "empty_range.h"
#include "globalrange.h"
#include "largebuddyrange.h"
#include "metatypes.h"
#include "pagemap.h"
#include "pagemapregisterrange.h"
#include "palrange.h"
#include "range_helpers.h"
#include "smallbuddyrange.h"
#include "statsrange.h"
#include "subrange.h"
#if defined(SNMALLOC_CHECK_CLIENT) && !defined(OPEN_ENCLAVE)
/**
* Protect meta data blocks by allocating separate from chunks for
* user allocations. This involves leaving gaps in address space.
* This is less efficient, so should only be applied for the checked
* build.
*
* On Open Enclave the address space is limited, so we disable this
* feature.
*/
# define SNMALLOC_META_PROTECTED
#endif
namespace snmalloc
{
  /**
   * This class implements the standard backend for handling allocations.
   * It abstracts page table management and address space management.
   *
   * Template parameters:
   *  - PAL: platform abstraction layer that is the ultimate source of memory.
   *  - fixed_range: if true, the backend manages only a caller-supplied
   *    region (initialised via init(base, length)) instead of acquiring
   *    address space from the PAL.
   *  - PageMapEntry: entry type stored in the pagemap; must be
   *    layout-compatible with MetaEntry (enforced in get_metaentry).
   */
  template<
    SNMALLOC_CONCEPT(ConceptPAL) PAL,
    bool fixed_range,
    typename PageMapEntry = MetaEntry>
  class BackendAllocator : public CommonConfig
  {
  public:
    using Pal = PAL;

    /**
     * Static wrapper over the global flat pagemap that maps each
     * MIN_CHUNK_SIZE-aligned address to the MetaEntry describing that chunk.
     */
    class Pagemap
    {
      friend class BackendAllocator;

      // The single global pagemap instance.  SNMALLOC_REQUIRE_CONSTINIT
      // demands constant initialisation, so it is usable before any dynamic
      // initialisers have run.
      SNMALLOC_REQUIRE_CONSTINIT
      static inline FlatPagemap<MIN_CHUNK_BITS, PageMapEntry, PAL, fixed_range>
        concretePagemap;

    public:
      /**
       * Get the metadata associated with a chunk.
       *
       * Set template parameter to true if it not an error
       * to access a location that is not backed by a chunk.
       *
       * Ret may be a derived view over MetaEntry, but must have the same
       * size so the reference cast below is safe.
       */
      template<typename Ret = MetaEntry, bool potentially_out_of_range = false>
      SNMALLOC_FAST_PATH static const Ret& get_metaentry(address_t p)
      {
        static_assert(
          std::is_base_of_v<MetaEntry, Ret> && sizeof(MetaEntry) == sizeof(Ret),
          "Backend Pagemap get_metaentry return must look like MetaEntry");
        return static_cast<const Ret&>(
          concretePagemap.template get<potentially_out_of_range>(p));
      }

      /**
       * Get a mutable reference to the metadata associated with a chunk.
       *
       * Set template parameter to true if it not an error
       * to access a location that is not backed by a chunk.
       */
      template<bool potentially_out_of_range = false>
      SNMALLOC_FAST_PATH static MetaEntry& get_metaentry_mut(address_t p)
      {
        return concretePagemap.template get_mut<potentially_out_of_range>(p);
      }

      /**
       * Set the metadata associated with a chunk.
       *
       * `size` may cover several chunks: every MIN_CHUNK_SIZE-aligned slot
       * in [p, p + size) receives a copy of `t`.
       */
      SNMALLOC_FAST_PATH
      static void set_metaentry(address_t p, size_t size, const MetaEntry& t)
      {
        for (address_t a = p; a < p + size; a += MIN_CHUNK_SIZE)
        {
          concretePagemap.set(a, t);
        }
      }

      /**
       * Ensure the pagemap has backing for addresses in [p, p + sz), so
       * later get/set calls on that range are valid.
       */
      static void register_range(address_t p, size_t sz)
      {
        concretePagemap.register_range(p, sz);
      }

      /**
       * Return the bounds of the memory this back-end manages as a pair of
       * addresses (start then end).  This is available iff this is a
       * fixed-range Backend.
       */
      template<bool fixed_range_ = fixed_range>
      static SNMALLOC_FAST_PATH
        std::enable_if_t<fixed_range_, std::pair<address_t, address_t>>
        get_bounds()
      {
        // The SFINAE parameter exists only to make enable_if dependent;
        // callers must not override it.
        static_assert(
          fixed_range_ == fixed_range, "Don't set SFINAE parameter!");
        return concretePagemap.get_bounds();
      }

      // True once the underlying pagemap has been initialised (see init()).
      static bool is_initialised()
      {
        return concretePagemap.is_initialised();
      }
    };

    // Whether the large buddy range may consolidate adjacent PAL
    // allocations into bigger blocks.  Disabled on Windows and CHERI —
    // NOTE(review): presumably because separately obtained reservations /
    // capabilities there cannot be merged; confirm against PAL docs.
#if defined(_WIN32) || defined(__CHERI_PURE_CAPABILITY__)
    static constexpr bool CONSOLIDATE_PAL_ALLOCS = false;
#else
    static constexpr bool CONSOLIDATE_PAL_ALLOCS = true;
#endif

#if defined(OPEN_ENCLAVE)
    // Single global buddy allocator is used on open enclave due to
    // the limited address space.
    using StatsR = StatsRange<SmallBuddyRange<
      LargeBuddyRange<EmptyRange, bits::BITS - 1, bits::BITS - 1, Pagemap>>>;
    using GlobalR = GlobalRange<StatsR>;
    using ObjectRange = GlobalR;
    using GlobalMetaRange = ObjectRange;
#else
    // Set up source of memory.  For a fixed range the base is empty (memory
    // is pushed in by init(base, length)); otherwise memory comes from the
    // PAL, with newly acquired ranges registered in the pagemap.
    using P = PalRange<DefaultPal>;
    using Base = std::
      conditional_t<fixed_range, EmptyRange, PagemapRegisterRange<Pagemap, P>>;
    // Global range of memory: a large buddy allocator over Base, with
    // statistics tracking wrapped around it.
    using StatsR = StatsRange<LargeBuddyRange<
      Base,
      24,
      bits::BITS - 1,
      Pagemap,
      CONSOLIDATE_PAL_ALLOCS>>;
    using GlobalR = GlobalRange<StatsR>;

#  ifdef SNMALLOC_META_PROTECTED
    // Checked build: object and metadata allocations are drawn from
    // separate ranges so metadata cannot sit adjacent to user chunks.
    using CommittedRange =
      DecayRange<CommitRange<GlobalR, DefaultPal>, DefaultPal, Pagemap>;
    // Source for object allocations
    using ObjectRange = LargeBuddyRange<CommittedRange, 21, 21, Pagemap>;
    // Set up protected range for metadata: SubRange leaves address-space
    // gaps around metadata allocations.
    using SubR = CommitRange<SubRange<GlobalR, DefaultPal, 6>, DefaultPal>;
    using MetaRange =
      SmallBuddyRange<LargeBuddyRange<SubR, 21 - 6, bits::BITS - 1, Pagemap>>;
    using GlobalMetaRange = GlobalRange<MetaRange>;
#  else
    // Source for object allocations and metadata
    // No separation between the two
    using CommittedRange =
      DecayRange<CommitRange<GlobalR, DefaultPal>, DefaultPal, Pagemap>;
    using ObjectRange =
      SmallBuddyRange<LargeBuddyRange<CommittedRange, 21, 21, Pagemap>>;
    using GlobalMetaRange = GlobalRange<ObjectRange>;
#  endif
#endif

    /**
     * Per-allocator backend state: the local range state for object
     * allocations and (in the meta-protected build) a separate range
     * state for metadata.
     */
    struct LocalState
    {
      typename ObjectRange::State object_range;

#ifdef SNMALLOC_META_PROTECTED
      typename MetaRange::State meta_range;

      typename MetaRange::State& get_meta_range()
      {
        return meta_range;
      }
#else
      // Without metadata protection, metadata shares the object range.
      typename ObjectRange::State& get_meta_range()
      {
        return object_range;
      }
#endif
    };

  public:
    /**
     * Initialise the backend (non-fixed-range variant): sets up the global
     * pagemap; memory is acquired lazily from the PAL.
     */
    template<bool fixed_range_ = fixed_range>
    static std::enable_if_t<!fixed_range_> init()
    {
      static_assert(fixed_range_ == fixed_range, "Don't set SFINAE parameter!");
      Pagemap::concretePagemap.init();
    }

    /**
     * Initialise the backend (fixed-range variant) to manage exactly
     * [base, base + length).  The pagemap carves its own storage out of the
     * supplied region and returns the remaining heap, which is then split
     * into power-of-two blocks and handed to the global range.
     */
    template<bool fixed_range_ = fixed_range>
    static std::enable_if_t<fixed_range_> init(void* base, size_t length)
    {
      static_assert(fixed_range_ == fixed_range, "Don't set SFINAE parameter!");
      auto [heap_base, heap_length] =
        Pagemap::concretePagemap.init(base, length);
      Pagemap::register_range(address_cast(heap_base), heap_length);
      // Push memory into the global range.
      range_to_pow_2_blocks<MIN_CHUNK_BITS>(
        capptr::Chunk<void>(heap_base),
        heap_length,
        [&](capptr::Chunk<void> p, size_t sz, bool) {
          typename GlobalR::State g;
          g->dealloc_range(p, sz);
        });
    }

    /**
     * Provide a block of meta-data with size and align.
     *
     * Backend allocator may use guard pages and separate area of
     * address space to protect this from corruption.
     *
     * The template argument is the type of the metadata being allocated. This
     * allows the backend to allocate different types of metadata in different
     * places or with different policies. The default implementation, here,
     * does not avail itself of this degree of freedom.
     *
     * Returns nullptr (with errno set to ENOMEM) on exhaustion.  When no
     * local state is supplied, falls back to the concurrency-safe global
     * metadata range, rounding the request up to a power of two.
     */
    template<typename T>
    static capptr::Chunk<void>
    alloc_meta_data(LocalState* local_state, size_t size)
    {
      capptr::Chunk<void> p;
      if (local_state != nullptr)
      {
        p = local_state->get_meta_range()->alloc_range_with_leftover(size);
      }
      else
      {
        static_assert(
          GlobalMetaRange::ConcurrencySafe,
          "Global meta data range needs to be concurrency safe.");
        typename GlobalMetaRange::State global_state;
        p = global_state->alloc_range(bits::next_pow2(size));
      }

      if (p == nullptr)
        errno = ENOMEM;

      return p;
    }

    /**
     * Returns a chunk of memory with alignment and size of `size`, and a
     * metaslab block.
     *
     * It additionally set the meta-data for this chunk of memory to
     * be
     *   (remote, sizeclass, metaslab)
     * where metaslab, is the second element of the pair return.
     *
     * On failure returns {nullptr, nullptr} with errno set to ENOMEM;
     * any metaslab allocated before the failure is returned to its range.
     */
    static std::pair<capptr::Chunk<void>, Metaslab*>
    alloc_chunk(LocalState& local_state, size_t size, uintptr_t ras)
    {
      SNMALLOC_ASSERT(bits::is_pow2(size));
      SNMALLOC_ASSERT(size >= MIN_CHUNK_SIZE);

      // The marker bit is reserved for the backend's own use (set in
      // dealloc_chunk); callers must not pass it in.
      SNMALLOC_ASSERT((ras & MetaEntry::REMOTE_BACKEND_MARKER) == 0);
      ras &= ~MetaEntry::REMOTE_BACKEND_MARKER;

      // Allocate the metaslab first, from the (possibly protected)
      // metadata range.
      auto meta_cap =
        local_state.get_meta_range()->alloc_range(PAGEMAP_METADATA_STRUCT_SIZE);

      auto meta = meta_cap.template as_reinterpret<Metaslab>().unsafe_ptr();

      if (meta == nullptr)
      {
        errno = ENOMEM;
        return {nullptr, nullptr};
      }

      auto p = local_state.object_range->alloc_range(size);

#ifdef SNMALLOC_TRACING
      message<1024>("Alloc chunk: {} ({})", p.unsafe_ptr(), size);
#endif
      if (p == nullptr)
      {
        // Object allocation failed: give the metaslab back before
        // reporting exhaustion.
        local_state.get_meta_range()->dealloc_range(
          meta_cap, PAGEMAP_METADATA_STRUCT_SIZE);
        errno = ENOMEM;
#ifdef SNMALLOC_TRACING
        message<1024>("Out of memory");
#endif
        return {p, nullptr};
      }

      meta->meta_common.chunk = p;

      MetaEntry t(&meta->meta_common, ras);
      Pagemap::set_metaentry(address_cast(p), size, t);

      // Restrict the returned capability to exactly this chunk.
      p = Aal::capptr_bound<void, capptr::bounds::Chunk>(p, size);
      return {p, meta};
    }

    /**
     * Return a chunk (and its metadata structure) to the backend.  The
     * pagemap entries are reset before the ranges reclaim the memory.
     */
    static void
    dealloc_chunk(LocalState& local_state, MetaCommon& meta_common, size_t size)
    {
      auto chunk = meta_common.chunk;

      /*
       * The backend takes possession of these chunks now, by disassociating
       * any existing remote allocator and metadata structure. If
       * interrogated, the sizeclass reported by the MetaEntry is 0, which has
       * size 0.
       */
      MetaEntry t(nullptr, MetaEntry::REMOTE_BACKEND_MARKER);
      Pagemap::set_metaentry(address_cast(chunk), size, t);

      local_state.get_meta_range()->dealloc_range(
        capptr::Chunk<void>(&meta_common), PAGEMAP_METADATA_STRUCT_SIZE);

      local_state.object_range->dealloc_range(chunk, size);
    }

    // Current bytes in use, as tracked by the StatsRange layer.
    static size_t get_current_usage()
    {
      typename StatsR::State stats_state;
      return stats_state->get_current_usage();
    }

    // High-water mark of bytes in use, as tracked by the StatsRange layer.
    static size_t get_peak_usage()
    {
      typename StatsR::State stats_state;
      return stats_state->get_peak_usage();
    }
  };
} // namespace snmalloc