Statistics
| Branch: | Revision:

iof-bird-daemon / lib / slab.c @ 5cc1e1f8

History | View | Annotate | Download (6.7 KB)

1
/*
2
 *        BIRD Resource Manager -- A SLAB-like Memory Allocator
3
 *
4
 *        Heavily inspired by the original SLAB paper by Jeff Bonwick.
5
 *
6
 *        (c) 1998--2000 Martin Mares <mj@ucw.cz>
7
 *
8
 *        Can be freely distributed and used under the terms of the GNU GPL.
9
 */
10

    
11
/**
12
 * DOC: Slabs
13
 *
14
 * Slabs are collections of memory blocks of a fixed size.
15
 * They support very fast allocation and freeing of such blocks, prevent memory
16
 * fragmentation and optimize L2 cache usage. Slabs have been invented by Jeff Bonwick
17
 * and published in USENIX proceedings as `The Slab Allocator: An Object-Caching Kernel
18
 * Memory Allocator'. Our implementation follows this article except that we don't use
19
 * constructors and destructors.
20
 *
21
 * When the |DEBUGGING| switch is turned on, we automatically fill all
 * newly allocated and freed blocks with special patterns to make detection
 * of use of uninitialized or already freed memory easier.
24
 *
25
 * Example: Nodes of a FIB are allocated from a Slab.
26
 */
27

    
28
#include <stdlib.h>
29

    
30
#include "nest/bird.h"
31
#include "lib/resource.h"
32
#include "lib/string.h"
33

    
34
#undef FAKE_SLAB        /* Turn on if you want to debug memory allocations */
35

    
36
#ifdef DEBUGGING
37
#define POISON                /* Poison all regions after they are freed */
38
#endif
39

    
40
/* Resource class operations; each is defined below, separately for the
 * fake (debugging) allocator and the real one. */
static void slab_free(resource *r);
static void slab_dump(resource *r);
static resource *slab_lookup(resource *r, unsigned long addr);
43

    
44
#ifdef FAKE_SLAB
45

    
46
/*
47
 *  Fake version used for debugging.
48
 */
49

    
50
/* Fake slab: each object is xmalloc'd individually and tracked in a list,
 * so external memory debuggers see every allocation separately. */
struct slab {
  resource r;                /* Standard resource header */
  unsigned size;                /* Size of objects served by this slab */
  list objs;                /* All currently allocated objects */
};
55

    
56
/* Resource class of the fake slab; no lookup operation is provided. */
static struct resclass sl_class = {
  "FakeSlab",
  sizeof(struct slab),
  slab_free,
  slab_dump
};
62

    
63
/* Per-object header of the fake slab; the user data follows it directly. */
struct sl_obj {
  node n;                /* Node in the slab's object list */
  byte data[0];                /* Start of the user data */
};
67

    
68
slab *
69
sl_new(pool *p, unsigned size)
70
{
71
  slab *s = ralloc(p, &sl_class);
72
  s->size = size;
73
  init_list(&s->objs);
74
  return s;
75
}
76

    
77
void *
78
sl_alloc(slab *s)
79
{
80
  struct sl_obj *o = xmalloc(sizeof(struct sl_obj) + s->size);
81

    
82
  add_tail(&s->objs, &o->n);
83
  return o->data;
84
}
85

    
86
void
87
sl_free(slab *s, void *oo)
88
{
89
  struct sl_obj *o = SKIP_BACK(struct sl_obj, data, oo);
90

    
91
  rem_node(&o->n);
92
  xfree(o);
93
}
94

    
95
/*
 * slab_free - fake version: release every remaining object.
 * The loop fetches the successor before freeing the current node,
 * so it is safe to walk the list while destroying it; it terminates
 * when the successor pointer of the tail sentinel is NULL.
 */
static void
slab_free(resource *r)
{
  slab *s = (slab *) r;
  struct sl_obj *o, *p;

  for(o = HEAD(s->objs); p = (struct sl_obj *) o->n.next; o = p)
    xfree(o);
}
104

    
105
static void
106
slab_dump(resource *r)
107
{
108
  slab *s = (slab *) r;
109
  int cnt = 0;
110
  struct sl_obj *o;
111

    
112
  WALK_LIST(o, s->objs)
113
    cnt++;
114
  debug("(%d objects per %d bytes)\n", cnt, s->size);
115
}
116

    
117
#else
118

    
119
/*
120
 *  Real efficient version.
121
 */
122

    
123
#define SLAB_SIZE 4096
124
#define MAX_EMPTY_HEADS 1
125

    
126
/* Real slab: objects are carved out of SLAB_SIZE pages ("heads"),
 * each head kept on one of three lists according to its fill state. */
struct slab {
  resource r;                /* Standard resource header */
  unsigned obj_size, head_size, objs_per_slab, num_empty_heads, data_size;
  list empty_heads, partial_heads, full_heads;        /* Heads by fill state */
};
131

    
132
/* Resource class of the real slab, including address lookup support. */
static struct resclass sl_class = {
  "Slab",
  sizeof(struct slab),
  slab_free,
  slab_dump,
  slab_lookup
};
139

    
140
/* Header placed at the start of every slab page; the objects follow it. */
struct sl_head {
  node n;                        /* Node in one of the slab's head lists */
  struct sl_obj *first_free;        /* Chain of free objects in this page */
  int num_full;                        /* Number of objects currently allocated */
};
145

    
146
/* Per-object header; the union overlays the free-chain link with the
 * user data, as only one of them is meaningful at a time. */
struct sl_obj {
  struct sl_head *slab;                /* Page this object belongs to */
  union {
    struct sl_obj *next;        /* Next free object (valid while free) */
    byte data[0];                /* User data (valid while allocated) */
  } u;
};
153

    
154
struct sl_alignment {                        /* Magic structure for testing of alignment */
  byte data;
  int x[0];                                /* sizeof(struct) then equals the padding int requires after a byte */
};
158

    
159
/**
160
 * sl_new - create a new Slab
161
 * @p: resource pool
162
 * @size: block size
163
 *
164
 * This function creates a new Slab resource from which
165
 * objects of size @size can be allocated.
166
 */
167
slab *
168
sl_new(pool *p, unsigned size)
169
{
170
  slab *s = ralloc(p, &sl_class);
171
  unsigned int align = sizeof(struct sl_alignment);
172
  if (align < sizeof(int))
173
    align = sizeof(int);
174
  s->data_size = size;
175
  size += OFFSETOF(struct sl_obj, u.data);
176
  if (size < sizeof(struct sl_obj))
177
    size = sizeof(struct sl_obj);
178
  size = (size + align - 1) / align * align;
179
  s->obj_size = size;
180
  s->head_size = (sizeof(struct sl_head) + align - 1) / align * align;
181
  s->objs_per_slab = (SLAB_SIZE - s->head_size) / size;
182
  if (!s->objs_per_slab)
183
    bug("Slab: object too large");
184
  s->num_empty_heads = 0;
185
  init_list(&s->empty_heads);
186
  init_list(&s->partial_heads);
187
  init_list(&s->full_heads);
188
  return s;
189
}
190

    
191
struct sl_head *
192
sl_new_head(slab *s)
193
{
194
  struct sl_head *h = xmalloc(SLAB_SIZE);
195
  struct sl_obj *o = (struct sl_obj *)((byte *)h+s->head_size);
196
  struct sl_obj *no;
197
  unsigned int n = s->objs_per_slab;
198

    
199
  h->first_free = o;
200
  h->num_full = 0;
201
  while (n--)
202
    {
203
      o->slab = h;
204
      no = (struct sl_obj *)((char *) o+s->obj_size);
205
      o->u.next = n ? no : NULL;
206
      o = no;
207
    }
208
  return h;
209
}
210

    
211
/**
212
 * sl_alloc - allocate an object from Slab
213
 * @s: slab
214
 *
215
 * sl_alloc() allocates space for a single object from the
216
 * Slab and returns a pointer to the object.
217
 */
218
void *
219
sl_alloc(slab *s)
220
{
221
  struct sl_head *h;
222
  struct sl_obj *o;
223

    
224
redo:
225
  h = HEAD(s->partial_heads);
226
  if (!h->n.next)
227
    goto no_partial;
228
okay:
229
  o = h->first_free;
230
  if (!o)
231
    goto full_partial;
232
  h->first_free = o->u.next;
233
  h->num_full++;
234
#ifdef POISON
235
  memset(o->u.data, 0xcd, s->data_size);
236
#endif
237
  return o->u.data;
238

    
239
full_partial:
240
  rem_node(&h->n);
241
  add_tail(&s->full_heads, &h->n);
242
  goto redo;
243

    
244
no_partial:
245
  h = HEAD(s->empty_heads);
246
  if (h->n.next)
247
    {
248
      rem_node(&h->n);
249
      add_head(&s->partial_heads, &h->n);
250
      s->num_empty_heads--;
251
      goto okay;
252
    }
253
  h = sl_new_head(s);
254
  add_head(&s->partial_heads, &h->n);
255
  goto okay;
256
}
257

    
258
/**
259
 * sl_free - return a free object back to a Slab
260
 * @s: slab
261
 * @oo: object returned by sl_alloc()
262
 *
263
 * This function frees memory associated with the object @oo
264
 * and returns it back to the Slab @s.
265
 */
266
void
267
sl_free(slab *s, void *oo)
268
{
269
  struct sl_obj *o = SKIP_BACK(struct sl_obj, u.data, oo);
270
  struct sl_head *h = o->slab;
271

    
272
#ifdef POISON
273
  memset(oo, 0xdb, s->data_size);
274
#endif
275
  o->u.next = h->first_free;
276
  h->first_free = o;
277
  if (!--h->num_full)
278
    {
279
      rem_node(&h->n);
280
      if (s->num_empty_heads >= MAX_EMPTY_HEADS)
281
        xfree(h);
282
      else
283
        {
284
          add_head(&s->empty_heads, &h->n);
285
          s->num_empty_heads++;
286
        }
287
    }
288
  else if (!o->u.next)
289
    {
290
      rem_node(&h->n);
291
      add_head(&s->partial_heads, &h->n);
292
    }
293
}
294

    
295
static void
296
slab_free(resource *r)
297
{
298
  slab *s = (slab *) r;
299
  struct sl_head *h, *g;
300

    
301
  WALK_LIST_DELSAFE(h, g, s->empty_heads)
302
    xfree(h);
303
  WALK_LIST_DELSAFE(h, g, s->partial_heads)
304
    xfree(h);
305
  WALK_LIST_DELSAFE(h, g, s->full_heads)
306
    xfree(h);
307
}
308

    
309
static void
310
slab_dump(resource *r)
311
{
312
  slab *s = (slab *) r;
313
  int ec=0, pc=0, fc=0;
314
  struct sl_head *h;
315

    
316
  WALK_LIST(h, s->empty_heads)
317
    ec++;
318
  WALK_LIST(h, s->partial_heads)
319
    pc++;
320
  WALK_LIST(h, s->full_heads)
321
    fc++;
322
  debug("(%de+%dp+%df blocks per %d objs per %d bytes)\n", ec, pc, fc, s->objs_per_slab, s->obj_size);
323
}
324

    
325
/*
 * slab_lookup - test whether address @a lies inside one of the pages
 * owned by this slab. Only partial and full pages can contain live
 * objects, so empty pages are not searched. Returns the slab resource
 * on a hit, NULL otherwise.
 *
 * Fix: the original upper-bound test was `(unsigned long) h + SLAB_SIZE < a`,
 * which is false for every address inside the page, so the lookup could
 * never succeed. The comparison must be `>`.
 */
static resource *
slab_lookup(resource *r, unsigned long a)
{
  slab *s = (slab *) r;
  struct sl_head *h;

  WALK_LIST(h, s->partial_heads)
    if ((unsigned long) h < a && (unsigned long) h + SLAB_SIZE > a)
      return r;
  WALK_LIST(h, s->full_heads)
    if ((unsigned long) h < a && (unsigned long) h + SLAB_SIZE > a)
      return r;
  return NULL;
}
339

    
340
#endif