-
Notifications
You must be signed in to change notification settings - Fork 14
/
ps_ns.c
114 lines (97 loc) · 3.14 KB
/
ps_ns.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
/***
* Copyright 2015 by Gabriel Parmer. All rights reserved.
* Redistribution of this file is permitted under the BSD 2 clause license.
*
* Authors: Gabriel Parmer, [email protected], 2015
*/
#include <ps_ns.h>
/*
 * The slab allocator for slab heads that are not internal to the slab
 * itself: namespace slabs store their struct ps_slab out-of-band (from
 * this allocator) rather than inside the slab's own memory, keeping the
 * slab memory free for properly-aligned lookup-tree entries.
 */
PS_SLAB_CREATE_DEF(slabhead, sizeof(struct ps_slab))
/*
 * Namespace allocators make sure that the slab head is allocated
 * separately from the memory itself so that all lookups within the
 * lookup tree are properly aligned.
 *
 * FIXME: this is a scalability bottleneck.  A single, shared list is
 * used to rebalance all namespace slabs that are freed (i.e. when a
 * slab is deallocated).  The single list makes rebalancing fast -- a
 * significant benefit -- but serializes all namespaces on one lock.
 */
/*
 * Allocate a slab for a namespace allocator: a slab head (out-of-band),
 * a descriptor range [start, end), and the backing memory, with the
 * memory installed into the namespace's lookup tree at @start.
 *
 * Returns the populated slab, or NULL on allocation/expansion failure.
 * On failure, a slab head taken from the freelist is returned to it so
 * its descriptor range is not leaked; a freshly created head is freed
 * and the descriptor frontier is (best-effort) rolled back.
 */
struct ps_slab *
ps_slab_nsalloc(struct ps_mem *m, size_t sz, coreid_t coreid)
{
	ps_desc_t id = 0, range = m->ns_info.desc_range;
	struct ps_slab *s;
	int newslab = 0;
	void *mem;
	struct ps_ns_info *nsi;

	/* Prefer recycling a previously freed descriptor range. */
	ps_lock_take(&m->ns_info.lock);
	s = m->ns_info.fl.list;
	if (s) __slab_freelist_rem(&m->ns_info.fl, s);
	ps_lock_release(&m->ns_info.lock);

	if (!s) {
		/* No recycled range: carve a fresh one off the frontier. */
		id = ps_faa(&m->ns_info.frontier, range);
		if (unlikely(id >= m->ns_info.desc_max)) goto reset_frontier;
		s = ps_slab_alloc_slabhead();
		if (unlikely(!s)) goto reset_frontier;
		s->start = id;
		s->end   = s->start + range;
		newslab  = 1;
	}

	assert(!s->memory);
	mem = ps_plat_alloc(sz, coreid);
	if (unlikely(!mem)) goto free_slab;
	memset(mem, 0, sz);
	s->memory = mem;

	/* Add the slab's identities to the lookup table */
	nsi = &m->ns_info;
	assert(!nsi->lkupfn(nsi->ert, s->start, nsi->ert_depth, NULL));
	if (nsi->expandfn(nsi->ert, s->start, nsi->ert_depth, NULL, mem, NULL) != 0) goto free_mem;
	assert(nsi->lkupfn(nsi->ert, s->start, nsi->ert_depth, NULL) == mem);

	return s;
free_mem:
	/* Clear the stale pointer before the head is reused or freed. */
	s->memory = NULL;
	ps_plat_free(mem, sz, coreid);
free_slab:
	if (!newslab) {
		/*
		 * The head (and its descriptor range) came from the
		 * freelist; put it back rather than freeing the head,
		 * which would leak the range [start, end) forever.
		 */
		ps_lock_take(&m->ns_info.lock);
		__slab_freelist_add(&m->ns_info.fl, s);
		ps_lock_release(&m->ns_info.lock);
		return NULL;
	}
	ps_slab_free_slabhead(s);
reset_frontier:
	/* possible to leak namespace if many threads race between faa and here */
	if (newslab) ps_cas(&m->ns_info.frontier, id+range, id);
	return NULL;
}
/*
 * Return a namespace slab's resources: free its backing memory, unhook
 * it from the lookup tree, and put the head (with its descriptor range)
 * on the shared freelist for reuse by ps_slab_nsalloc.
 */
void
ps_slab_nsfree(struct ps_mem *m, struct ps_slab *s, size_t sz, coreid_t coreid)
{
	struct ps_ns_info *nsi = &m->ns_info;

	/* Release the slab's backing memory for this core. */
	ps_plat_free(s->memory, sz, coreid);

	/* Remove the reference in the lookup table to the slab. */
	if (nsi->ert_depth > 1) {
		struct ert_intern *leaf;

		leaf = nsi->lkupfn(nsi->ert, s->start, nsi->ert_depth - 1, NULL);
		assert(leaf->next == s->memory);
		leaf->next = NULL;
		assert(!nsi->lkupfn(nsi->ert, s->start, nsi->ert_depth, NULL));
	}
	s->memory = NULL;

	/* Recycle the head so its descriptor range can be reallocated. */
	ps_lock_take(&nsi->lock);
	__slab_freelist_add(&nsi->fl, s);
	ps_lock_release(&nsi->lock);
}
/*
 * Initialize a namespace allocator over the given expandable lookup
 * tree (ert): record the tree, its accessors, the descriptor limits,
 * and reset the freelist/frontier.  The shared slab-head allocator is
 * initialized exactly once across all namespaces.
 */
void
ps_ns_init(struct ps_mem *m, void *ert, ps_lkupan_fn_t lkup, ps_expand_fn_t expand, size_t depth, ps_desc_t maxid, size_t range)
{
	static unsigned long executed = 0;
	struct ps_ns_info *nsi = &m->ns_info;

	/* First caller (decided atomically) sets up the head allocator. */
	if (executed == 0 && ps_faa(&executed, 1) == 0) ps_slab_init_slabhead();

	nsi->ert        = ert;
	nsi->ert_depth  = depth;
	nsi->lkupfn     = lkup;
	nsi->expandfn   = expand;
	nsi->desc_max   = maxid;
	nsi->desc_range = range;
	nsi->frontier   = 0;
	nsi->fl.list    = NULL;
	ps_lock_init(&nsi->lock);
}