iof-bird / bird-2.0.1 / lib / slab.c @ 6b3f1a54

/*
 *        BIRD Resource Manager -- A SLAB-like Memory Allocator
 *
 *        Heavily inspired by the original SLAB paper by Jeff Bonwick.
 *
 *        (c) 1998--2000 Martin Mares <mj@ucw.cz>
 *
 *        Can be freely distributed and used under the terms of the GNU GPL.
 */

/**
 * DOC: Slabs
 *
 * Slabs are collections of memory blocks of a fixed size.
 * They support very fast allocation and freeing of such blocks, prevent memory
 * fragmentation and optimize L2 cache usage. Slabs were invented by Jeff Bonwick
 * and published in the USENIX proceedings as `The Slab Allocator: An Object-Caching
 * Kernel Memory Allocator'. Our implementation follows this article except that
 * we don't use constructors and destructors.
 *
 * When the |DEBUGGING| switch is turned on, we automatically fill all
 * newly allocated and freed blocks with a special pattern to make detection
 * of use of uninitialized or already freed memory easier.
 *
 * Example: Nodes of a FIB are allocated from a per-FIB Slab.
 */
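
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * client creates one Slab per object type, then allocates and frees
 * fixed-size blocks from it. `struct my_node' and the pool `p' are
 * hypothetical.
 */
#if 0
static void
slab_usage_example(pool *p)
{
  struct my_node { int key, val; };
  slab *s = sl_new(p, sizeof(struct my_node));        /* one Slab per object type */
  struct my_node *n = sl_alloc(s);                    /* O(1) allocation */
  n->key = 1;
  n->val = 2;
  sl_free(s, n);                                      /* O(1) free, block is recycled */
}
#endif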

#include <stdlib.h>
#include <stdint.h>

#include "nest/bird.h"
#include "lib/resource.h"
#include "lib/string.h"

#undef FAKE_SLAB        /* Turn on if you want to debug memory allocations */

#ifdef DEBUGGING
#define POISON                /* Poison all regions after they are freed */
#endif

static void slab_free(resource *r);
static void slab_dump(resource *r);
static resource *slab_lookup(resource *r, unsigned long addr);
static size_t slab_memsize(resource *r);

#ifdef FAKE_SLAB

/*
 *  Fake version used for debugging: every object is allocated separately
 *  with xmalloc(), so external memory debuggers can track each block.
 */

struct slab {
  resource r;
  uint size;
  list objs;
};

static struct resclass sl_class = {
  "FakeSlab",
  sizeof(struct slab),
  slab_free,
  slab_dump,
  NULL,
  slab_memsize
};

struct sl_obj {
  node n;
  uintptr_t data_align[0];        /* Forces proper alignment of data[] */
  byte data[0];
};

slab *
sl_new(pool *p, uint size)
{
  slab *s = ralloc(p, &sl_class);
  s->size = size;
  init_list(&s->objs);
  return s;
}

void *
sl_alloc(slab *s)
{
  struct sl_obj *o = xmalloc(sizeof(struct sl_obj) + s->size);

  add_tail(&s->objs, &o->n);
  return o->data;
}

void
sl_free(slab *s, void *oo)
{
  struct sl_obj *o = SKIP_BACK(struct sl_obj, data, oo);

  rem_node(&o->n);
  xfree(o);
}

static void
slab_free(resource *r)
{
  slab *s = (slab *) r;
  struct sl_obj *o, *p;

  for(o = HEAD(s->objs); p = (struct sl_obj *) o->n.next; o = p)
    xfree(o);
}

static void
slab_dump(resource *r)
{
  slab *s = (slab *) r;
  int cnt = 0;
  struct sl_obj *o;

  WALK_LIST(o, s->objs)
    cnt++;
  debug("(%d objects per %d bytes)\n", cnt, s->size);
}

static size_t
slab_memsize(resource *r)
{
  slab *s = (slab *) r;
  size_t cnt = 0;
  struct sl_obj *o;

  WALK_LIST(o, s->objs)
    cnt++;

  return ALLOC_OVERHEAD + sizeof(struct slab) + cnt * (ALLOC_OVERHEAD + s->size);
}

#else

/*
 *  The real, efficient version.
 */

#define SLAB_SIZE 4096
#define MAX_EMPTY_HEADS 1

struct slab {
  resource r;
  uint obj_size, head_size, objs_per_slab, num_empty_heads, data_size;
  list empty_heads, partial_heads, full_heads;
};

static struct resclass sl_class = {
  "Slab",
  sizeof(struct slab),
  slab_free,
  slab_dump,
  slab_lookup,
  slab_memsize
};

struct sl_head {
  node n;
  struct sl_obj *first_free;
  int num_full;
};

struct sl_obj {
  struct sl_head *slab;
  union {
    struct sl_obj *next;                /* Valid only while the object is free */
    byte data[0];
  } u;
};

struct sl_alignment {                        /* Magic structure for testing of alignment */
  byte data;
  int x[0];
};

/**
 * sl_new - create a new Slab
 * @p: resource pool
 * @size: block size
 *
 * This function creates a new Slab resource from which
 * objects of size @size can be allocated.
 */
slab *
sl_new(pool *p, uint size)
{
  slab *s = ralloc(p, &sl_class);
  uint align = sizeof(struct sl_alignment);
  if (align < sizeof(int))
    align = sizeof(int);
  s->data_size = size;
  size += OFFSETOF(struct sl_obj, u.data);        /* Add the per-object header */
  if (size < sizeof(struct sl_obj))
    size = sizeof(struct sl_obj);
  size = (size + align - 1) / align * align;        /* Round up to the alignment unit */
  s->obj_size = size;
  s->head_size = (sizeof(struct sl_head) + align - 1) / align * align;
  s->objs_per_slab = (SLAB_SIZE - s->head_size) / size;
  if (!s->objs_per_slab)
    bug("Slab: object too large");
  s->num_empty_heads = 0;
  init_list(&s->empty_heads);
  init_list(&s->partial_heads);
  init_list(&s->full_heads);
  return s;
}
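
/*
 * Worked example of the size computation above (illustrative; the exact
 * numbers depend on the ABI). Assume an LP64 machine where pointers are
 * 8 bytes, int is 4 bytes and sizeof(struct sl_alignment) == 4, so
 * align == 4. For a request of size == 30:
 *
 *   size += OFFSETOF(struct sl_obj, u.data)    ->  30 + 8 = 38
 *   round up to a multiple of align            ->  40 = obj_size
 *
 * With head_size rounded up the same way, objs_per_slab is roughly
 * (4096 - head_size) / 40, i.e. about a hundred objects per 4K page.
 */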

static struct sl_head *
sl_new_head(slab *s)
{
  struct sl_head *h = xmalloc(SLAB_SIZE);
  struct sl_obj *o = (struct sl_obj *)((byte *)h+s->head_size);
  struct sl_obj *no;
  uint n = s->objs_per_slab;

  h->first_free = o;
  h->num_full = 0;
  while (n--)                        /* Chain all objects into the free list */
    {
      o->slab = h;
      no = (struct sl_obj *)((char *) o+s->obj_size);
      o->u.next = n ? no : NULL;
      o = no;
    }
  return h;
}
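
/*
 * Layout of a freshly created slab page (illustrative sketch, assuming
 * SLAB_SIZE == 4096):
 *
 *   +---------+-------+-------+-- ... --+---------+--------+
 *   | sl_head | obj 0 | obj 1 |         | obj N-1 | unused |
 *   +---------+-------+-------+-- ... --+---------+--------+
 *
 * first_free points at obj 0 and the objects are chained through u.next,
 * so sl_alloc() can hand them out in O(1):
 *
 *   first_free -> obj 0 -> obj 1 -> ... -> obj N-1 -> NULL
 */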

/**
 * sl_alloc - allocate an object from Slab
 * @s: slab
 *
 * sl_alloc() allocates space for a single object from the
 * Slab and returns a pointer to the object.
 */
void *
sl_alloc(slab *s)
{
  struct sl_head *h;
  struct sl_obj *o;

redo:
  h = HEAD(s->partial_heads);
  if (!h->n.next)
    goto no_partial;
okay:
  o = h->first_free;
  if (!o)
    goto full_partial;
  h->first_free = o->u.next;
  h->num_full++;
#ifdef POISON
  memset(o->u.data, 0xcd, s->data_size);        /* Mark freshly allocated memory */
#endif
  return o->u.data;

full_partial:                        /* The head is actually full, move it to full_heads */
  rem_node(&h->n);
  add_tail(&s->full_heads, &h->n);
  goto redo;

no_partial:                        /* No partial head, reuse an empty one or allocate a new one */
  h = HEAD(s->empty_heads);
  if (h->n.next)
    {
      rem_node(&h->n);
      add_head(&s->partial_heads, &h->n);
      s->num_empty_heads--;
      goto okay;
    }
  h = sl_new_head(s);
  add_head(&s->partial_heads, &h->n);
  goto okay;
}
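
/*
 * Debugging note (editorial, not from the original source): with POISON
 * defined, freshly allocated objects are filled with 0xcd and freed ones
 * with 0xdb (see sl_free() below). A value like 0xcdcdcdcd read from a
 * field therefore usually means it was never initialized, while
 * 0xdbdbdbdb points at a use-after-free.
 */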

/**
 * sl_free - return a free object back to a Slab
 * @s: slab
 * @oo: object returned by sl_alloc()
 *
 * This function frees memory associated with the object @oo
 * and returns it back to the Slab @s.
 */
void
sl_free(slab *s, void *oo)
{
  struct sl_obj *o = SKIP_BACK(struct sl_obj, u.data, oo);
  struct sl_head *h = o->slab;

#ifdef POISON
  memset(oo, 0xdb, s->data_size);        /* Mark freed memory */
#endif
  o->u.next = h->first_free;
  h->first_free = o;
  if (!--h->num_full)
    {
      rem_node(&h->n);
      if (s->num_empty_heads >= MAX_EMPTY_HEADS)
        xfree(h);
      else
        {
          add_head(&s->empty_heads, &h->n);
          s->num_empty_heads++;
        }
    }
  else if (!o->u.next)                /* The head was full, it is partial now */
    {
      rem_node(&h->n);
      add_head(&s->partial_heads, &h->n);
    }
}
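
/*
 * Life cycle of a slab head (summary of the transitions implemented by
 * sl_alloc() and sl_free() above):
 *
 *   empty ---sl_alloc()---> partial ---last object taken---> full
 *   full  ---sl_free()----> partial ---last object freed---> empty
 *
 * An empty head is kept around only while num_empty_heads does not
 * exceed MAX_EMPTY_HEADS; beyond that limit it is returned to the
 * system with xfree().
 */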

static void
slab_free(resource *r)
{
  slab *s = (slab *) r;
  struct sl_head *h, *g;

  WALK_LIST_DELSAFE(h, g, s->empty_heads)
    xfree(h);
  WALK_LIST_DELSAFE(h, g, s->partial_heads)
    xfree(h);
  WALK_LIST_DELSAFE(h, g, s->full_heads)
    xfree(h);
}

static void
slab_dump(resource *r)
{
  slab *s = (slab *) r;
  int ec=0, pc=0, fc=0;
  struct sl_head *h;

  WALK_LIST(h, s->empty_heads)
    ec++;
  WALK_LIST(h, s->partial_heads)
    pc++;
  WALK_LIST(h, s->full_heads)
    fc++;
  debug("(%de+%dp+%df blocks per %d objs per %d bytes)\n", ec, pc, fc, s->objs_per_slab, s->obj_size);
}

static size_t
slab_memsize(resource *r)
{
  slab *s = (slab *) r;
  size_t heads = 0;
  struct sl_head *h;

  WALK_LIST(h, s->empty_heads)
    heads++;
  WALK_LIST(h, s->partial_heads)
    heads++;
  WALK_LIST(h, s->full_heads)
    heads++;

  return ALLOC_OVERHEAD + sizeof(struct slab) + heads * (ALLOC_OVERHEAD + SLAB_SIZE);
}

static resource *
slab_lookup(resource *r, unsigned long a)
{
  slab *s = (slab *) r;
  struct sl_head *h;

  WALK_LIST(h, s->partial_heads)
    if ((unsigned long) h < a && (unsigned long) h + SLAB_SIZE > a)
      return r;
  WALK_LIST(h, s->full_heads)
    if ((unsigned long) h < a && (unsigned long) h + SLAB_SIZE > a)
      return r;
  return NULL;
}

#endif