grapes / src / Cache / topocache.c @ 43b793e3
/*
 *  Copyright (c) 2010 Luca Abeni
 *
 *  This is free software; see lgpl-2.1.txt
 */
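
/*
 * Overview: a peer_cache is a fixed-capacity array of (nodeID, timestamp)
 * entries, kept sorted by increasing timestamp (lower timestamp = fresher
 * peer), optionally paired with a fixed-size block of metadata per entry.
 *
 * Minimal usage sketch (assuming a nodeID "some_peer" obtained elsewhere
 * through net_helper; error handling omitted):
 *
 *   struct peer_cache *c = cache_init(32, 0, 10); // 32 slots, no metadata,
 *                                                 // entries dropped at age 10
 *   cache_add(c, some_peer, NULL, 0);             // remember a fresh peer
 *   cache_update(c);                              // age every entry by one
 *   struct nodeID *n = rand_peer(c, NULL, 0);     // pick a random neighbour
 *   cache_free(c);
 */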

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <stdio.h>
#undef NDEBUG
#include <assert.h>

#include "net_helper.h"
#include "topocache.h"
#include "int_coding.h"

struct cache_entry {
  struct nodeID *id;
  uint32_t timestamp;
};

struct peer_cache {
  struct cache_entry *entries;
  int cache_size;
  int current_size;
  int metadata_size;
  uint8_t *metadata;
  int max_timestamp;
};
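
/* Insert entry e and its metadata into c, keeping the entries ordered by
 * increasing timestamp.  If the same nodeID is already cached with a larger
 * (older) timestamp, the stale entry is replaced and moved to its new
 * position.  Returns the insertion position, -1 if the node is already
 * cached with a timestamp that is not larger (e->id is not consumed in that
 * case), or -2 if the cache is full. */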
static int cache_insert(struct peer_cache *c, struct cache_entry *e, const void *meta)
{
  int i, position;

  if (c->current_size == c->cache_size) {
    return -2;
  }
  position = 0;
  for (i = 0; i < c->current_size; i++) {
    assert(e->id);
    assert(c->entries[i].id);
    if (c->entries[i].timestamp <= e->timestamp) {
      position = i + 1;
    }
    if (nodeid_equal(e->id, c->entries[i].id)) {
      if (c->entries[i].timestamp > e->timestamp) {
        assert(i >= position);
        /* The cached copy is stale: drop it, shift the entries between
           position and i up by one, and store the new entry at position */
        nodeid_free(c->entries[i].id);
        if (position != i) {
          memmove(c->entries + position + 1, c->entries + position, sizeof(struct cache_entry) * (i - position));
          memmove(c->metadata + (position + 1) * c->metadata_size, c->metadata + position * c->metadata_size, (i - position) * c->metadata_size);
        }
        c->entries[position] = *e;
        memcpy(c->metadata + position * c->metadata_size, meta, c->metadata_size);

        return position;
      }

      return -1;
    }
  }

  if (position != c->current_size) {
    memmove(c->entries + position + 1, c->entries + position, sizeof(struct cache_entry) * (c->current_size - position));
    memmove(c->metadata + (position + 1) * c->metadata_size, c->metadata + position * c->metadata_size, (c->current_size - position) * c->metadata_size);
  }
  c->current_size++;
  c->entries[position] = *e;
  memcpy(c->metadata + position * c->metadata_size, meta, c->metadata_size);

  return position;
}
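
/* Copy entries from src into dst (duplicating their nodeIDs) until dst is
 * full or src is exhausted; returns the resulting number of entries in dst. */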
int cache_add_cache(struct peer_cache *dst, const struct peer_cache *src)
{
  struct cache_entry *e_orig;
  int count, j;
  struct cache_entry e_dup;

  cache_check(dst);
  cache_check(src);

  count = 0;
  j = 0;
  while (dst->current_size < dst->cache_size && src->current_size > count) {
    count++;

    e_orig = src->entries + j;

    e_dup.id = nodeid_dup(e_orig->id);
    e_dup.timestamp = e_orig->timestamp;

    if (cache_insert(dst, &e_dup, src->metadata + src->metadata_size * j) < 0) {
      nodeid_free(e_dup.id);
    }
    j++;
  }
  cache_check(dst);
  cache_check(src);

  return dst->current_size;
}
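
/* Return the nodeID stored at position i in the cache, or NULL if i is out
 * of range. */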
struct nodeID *nodeid(const struct peer_cache *c, int i)
{
  if (i < c->current_size) {
    return c->entries[i].id;
  }

  return NULL;
}

const void *get_metadata(const struct peer_cache *c, int *size)
{
  *size = c->metadata_size;
  return c->metadata;
}
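
/* Overwrite the metadata associated with peer p.  Returns 1 on success, 0 if
 * p is not in the cache, or -3 if meta_size does not match the cache's
 * metadata size. */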
int cache_metadata_update(struct peer_cache *c, const struct nodeID *p, const void *meta, int meta_size)
{
  int i;

  if (!meta_size || meta_size != c->metadata_size) {
    return -3;
  }
  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, p)) {
      memcpy(c->metadata + i * meta_size, meta, meta_size);
      return 1;
    }
  }

  return 0;
}
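
/* Add neighbour to the cache with a fresh timestamp, placed according to the
 * ranking function f when one is given, or at the front of the cache
 * otherwise.  If the node is already cached, it is re-ranked (f != NULL) or
 * only its metadata is refreshed (f == NULL, return -1).  Returns the new
 * cache size, -2 if the cache is full, or -3 on a metadata size mismatch. */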
int cache_add_ranked(struct peer_cache *c, struct nodeID *neighbour, const void *meta, int meta_size, ranking_function f, const void *tmeta)
{
  int i, pos = 0;

  if (meta_size && meta_size != c->metadata_size) {
    return -3;
  }
  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, neighbour)) {
      if (f != NULL) {
        cache_del(c, neighbour);
        if (i == c->current_size) break;
      } else {
        cache_metadata_update(c, neighbour, meta, meta_size);
        return -1;
      }
    }
    if ((f != NULL) && f(tmeta, meta, c->metadata + (c->metadata_size * i)) == 2) {
      pos++;
    }
  }
  if (c->current_size == c->cache_size) {
    return -2;
  }
  if (c->metadata_size) {
    memmove(c->metadata + (pos + 1) * c->metadata_size, c->metadata + pos * c->metadata_size, (c->current_size - pos) * c->metadata_size);
    if (meta_size) {
      memcpy(c->metadata + pos * c->metadata_size, meta, meta_size);
    } else {
      memset(c->metadata + pos * c->metadata_size, 0, c->metadata_size);
    }
  }
  for (i = c->current_size; i > pos; i--) {
    c->entries[i] = c->entries[i - 1];
  }
  c->entries[pos].id = nodeid_dup(neighbour);
  c->entries[pos].timestamp = 1;
  c->current_size++;

  return c->current_size;
}

int cache_add(struct peer_cache *c, struct nodeID *neighbour, const void *meta, int meta_size)
{
  return cache_add_ranked(c, neighbour, meta, meta_size, NULL, NULL);
}
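
/* Remove neighbour from the cache, compacting the entry and metadata arrays;
 * returns the resulting cache size. */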
int cache_del(struct peer_cache *c, const struct nodeID *neighbour)
{
  int i;
  int found = 0;

  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, neighbour)) {
      nodeid_free(c->entries[i].id);
      c->current_size--;
      found = 1;
      if (c->metadata_size && (i < c->current_size)) {
        memmove(c->metadata + c->metadata_size * i,
                c->metadata + c->metadata_size * (i + 1),
                c->metadata_size * (c->current_size - i));
      }
    }
    if (found && (i < c->current_size)) {
      c->entries[i] = c->entries[i + 1];
    }
  }

  return c->current_size;
}
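
/* Age the cache by one step: every timestamp is incremented, and as soon as
 * an entry reaches max_timestamp it is dropped together with all the (older)
 * entries that follow it. */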
void cache_update(struct peer_cache *c)
{
  int i;

  for (i = 0; i < c->current_size; i++) {
    if (c->max_timestamp && (c->entries[i].timestamp == c->max_timestamp)) {
      int j = i;

      while (j < c->current_size && c->entries[j].id) {
        nodeid_free(c->entries[j].id);
        c->entries[j++].id = NULL;
      }
      c->current_size = i;        /* The cache is ordered by timestamp...
                                     all the other entries will be older than
                                     this one, so remove all of them
                                   */
    } else {
      c->entries[i].timestamp++;
    }
  }
}
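
/* Allocate an empty cache with room for n entries and metadata_size bytes of
 * metadata per entry; entries whose timestamp reaches max_timestamp are
 * purged by cache_update().  Returns NULL on allocation failure. */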
struct peer_cache *cache_init(int n, int metadata_size, int max_timestamp)
{
  struct peer_cache *res;

  res = malloc(sizeof(struct peer_cache));
  if (res == NULL) {
    return NULL;
  }
  res->max_timestamp = max_timestamp;
  res->cache_size = n;
  res->current_size = 0;
  res->entries = malloc(sizeof(struct cache_entry) * n);
  if (res->entries == NULL) {
    free(res);

    return NULL;
  }

  memset(res->entries, 0, sizeof(struct cache_entry) * n);
  if (metadata_size) {
    res->metadata = malloc(metadata_size * n);
  } else {
    res->metadata = NULL;
  }

  if (res->metadata) {
    res->metadata_size = metadata_size;
    memset(res->metadata, 0, metadata_size * n);
  } else {
    res->metadata_size = 0;
  }

  return res;
}

void cache_free(struct peer_cache *c)
{
  int i;

  for (i = 0; i < c->current_size; i++) {
    if (c->entries[i].id) {
      nodeid_free(c->entries[i].id);
    }
  }
  free(c->entries);
  free(c->metadata);
  free(c);
}
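
/* Return the index at which elem's nodeID appears in c, or -1 if it is not
 * cached. */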
static int in_cache(const struct peer_cache *c, const struct cache_entry *elem)
{
  int i;

  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, elem->id)) {
      return i;
    }
  }

  return -1;
}
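
/* Return the nodeID of a randomly chosen entry and, through *meta, a pointer
 * to its metadata.  If max is non-zero, the choice is limited to the first
 * (freshest) entries; returns NULL when the cache is empty. */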
struct nodeID *rand_peer(const struct peer_cache *c, void **meta, int max)
{
  int j;

  if (c->current_size == 0) {
    return NULL;
  }
  if (!max || max >= c->current_size)
    max = c->current_size;
  else
    ++max;
  j = ((double)rand() / (double)RAND_MAX) * max;

  if (meta) {
    *meta = c->metadata + (j * c->metadata_size);
  }

  return c->entries[j].id;
}

struct nodeID *last_peer(const struct peer_cache *c)
{
  if (c->current_size == 0) {
    return NULL;
  }

  return c->entries[c->current_size - 1].id;
}
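
/* Move up to n randomly chosen entries from c into a newly allocated cache;
 * the chosen entries are removed from c.  Returns the new cache. */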
struct peer_cache *rand_cache(struct peer_cache *c, int n)
{
  struct peer_cache *res;

  cache_check(c);
  if (c->current_size < n) {
    n = c->current_size;
  }
  res = cache_init(n, c->metadata_size, c->max_timestamp);

  while (res->current_size < n) {
    int j;

    j = ((double)rand() / (double)RAND_MAX) * c->current_size;
    cache_insert(res, c->entries + j, c->metadata + c->metadata_size * j);
    c->current_size--;
    memmove(c->entries + j, c->entries + j + 1, sizeof(struct cache_entry) * (c->current_size - j));
    memmove(c->metadata + c->metadata_size * j, c->metadata + c->metadata_size * (j + 1), c->metadata_size * (c->current_size - j));
    c->entries[c->current_size].id = NULL;
    cache_check(c);
  }

  return res;
}
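
/* Rebuild a peer cache from the serialised form produced by
 * cache_header_dump() and entry_dump(): an 8-byte header (cache size,
 * metadata size) followed by one (timestamp, nodeID, metadata) record per
 * entry. */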
struct peer_cache *entries_undump(const uint8_t *buff, int size)
{
  struct peer_cache *res;
  int i = 0;
  const uint8_t *p = buff;
  uint8_t *meta;
  int cache_size, metadata_size;

  cache_size = int_rcpy(buff);
  metadata_size = int_rcpy(buff + 4);
  p = buff + 8;
  res = cache_init(cache_size, metadata_size, 0);
  meta = res->metadata;
  while (p - buff < size) {
    int len;

    res->entries[i].timestamp = int_rcpy(p);
    p += sizeof(uint32_t);
    res->entries[i++].id = nodeid_undump(p, &len);
    p += len;
    if (metadata_size) {
      memcpy(meta, p, metadata_size);
      p += metadata_size;
      meta += metadata_size;
    }
  }
  res->current_size = i;
  assert(p - buff == size);

  return res;
}
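
/* Write the 8-byte serialisation header (cache size, optionally counting the
 * local node, plus metadata size) into b; returns the number of bytes
 * written. */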
int cache_header_dump(uint8_t *b, const struct peer_cache *c, int include_me)
{
  int_cpy(b, c->cache_size + (include_me ? 1 : 0));
  int_cpy(b + 4, c->metadata_size);

  return 8;
}
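
/* Serialise entry i (timestamp, nodeID, metadata) into b, writing at most
 * max_write_size bytes.  Returns the number of bytes written, 0 if i falls
 * outside the cache, or -1 if the entry does not fit. */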
int entry_dump(uint8_t *b, const struct peer_cache *c, int i, size_t max_write_size)
{
  int res;
  int size = 0;

  if (i && (i >= c->cache_size - 1)) {
    return 0;
  }
  int_cpy(b, c->entries[i].timestamp);
  size += 4;
  res = nodeid_dump(b + size, c->entries[i].id, max_write_size - size);
  if (res < 0) {
    return -1;
  }
  size += res;
  if (c->metadata_size) {
    if (c->metadata_size > max_write_size - size) {
      return -1;
    }
    memcpy(b + size, c->metadata + c->metadata_size * i, c->metadata_size);
    size += c->metadata_size;
  }

  return size;
}
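
/* Build a new cache containing the entries of c (excluding target, if given)
 * sorted by the ranking function, or by increasing timestamp when rank is
 * NULL.  Returns NULL on allocation failure. */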
struct peer_cache *cache_rank(const struct peer_cache *c, ranking_function rank, const struct nodeID *target, const void *target_meta)
{
  struct peer_cache *res;
  int i, j, pos;

  res = cache_init(c->cache_size, c->metadata_size, c->max_timestamp);
  if (res == NULL) {
    return res;
  }

  for (i = 0; i < c->current_size; i++) {
    if (!target || !nodeid_equal(c->entries[i].id, target)) {
      pos = 0;
      for (j = 0; j < res->current_size; j++) {
        if (((rank != NULL) && rank(target_meta, c->metadata + (c->metadata_size * i), res->metadata + (res->metadata_size * j)) == 2) ||
            ((rank == NULL) && res->entries[j].timestamp < c->entries[i].timestamp)) {
          pos++;
        }
      }
      if (c->metadata_size) {
        memmove(res->metadata + (pos + 1) * res->metadata_size, res->metadata + pos * res->metadata_size, (res->current_size - pos) * res->metadata_size);
        memcpy(res->metadata + pos * res->metadata_size, c->metadata + (c->metadata_size * i), res->metadata_size);
      }
      for (j = res->current_size; j > pos; j--) {
        res->entries[j] = res->entries[j - 1];
      }
      res->entries[pos].id = nodeid_dup(c->entries[i].id);
      res->entries[pos].timestamp = c->entries[i].timestamp;
      res->current_size++;
    }
  }

  return res;
}
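
/* Merge c1 and c2 into a newly allocated cache large enough to hold every
 * entry, stealing the nodeIDs from the source caches; duplicated nodes keep
 * the fresher timestamp and metadata.  The resulting size is returned
 * through *size. */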
struct peer_cache *cache_union(const struct peer_cache *c1, const struct peer_cache *c2, int *size)
{
  int n, pos;
  struct peer_cache *new_cache;
  uint8_t *meta;

  if (c1->metadata_size != c2->metadata_size) {
    return NULL;
  }

  new_cache = cache_init(c1->current_size + c2->current_size, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;

  for (n = 0; n < c1->current_size; n++) {
    if (new_cache->metadata_size) {
      memcpy(meta, c1->metadata + n * c1->metadata_size, c1->metadata_size);
      meta += new_cache->metadata_size;
    }
    new_cache->entries[new_cache->current_size++] = c1->entries[n];
    c1->entries[n].id = NULL;
  }

  for (n = 0; n < c2->current_size; n++) {
    pos = in_cache(new_cache, &c2->entries[n]);
    if (pos >= 0 && new_cache->entries[pos].timestamp > c2->entries[n].timestamp) {
      cache_metadata_update(new_cache, c2->entries[n].id, c2->metadata + n * c2->metadata_size, c2->metadata_size);
      new_cache->entries[pos].timestamp = c2->entries[n].timestamp;
    }
    if (pos < 0) {
      if (new_cache->metadata_size) {
        memcpy(meta, c2->metadata + n * c2->metadata_size, c2->metadata_size);
        meta += new_cache->metadata_size;
      }
      new_cache->entries[new_cache->current_size++] = c2->entries[n];
      c2->entries[n].id = NULL;
    }
  }
  *size = new_cache->current_size;

  return new_cache;
}
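
/* Grow or shrink the cache to hold size entries, zeroing any newly added
 * slots; returns the (possibly clamped) number of entries currently stored. */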
int cache_resize(struct peer_cache *c, int size)
{
  int dif = size - c->cache_size;

  if (!dif) {
    return c->current_size;
  }

  c->entries = realloc(c->entries, sizeof(struct cache_entry) * size);
  if (dif > 0) {
    memset(c->entries + c->cache_size, 0, sizeof(struct cache_entry) * dif);
  } else if (c->current_size > size) {
    c->current_size = size;
  }

  if (c->metadata_size) {
    c->metadata = realloc(c->metadata, c->metadata_size * size);
    if (dif > 0) {
      memset(c->metadata + c->metadata_size * c->cache_size, 0, c->metadata_size * dif);
    }
  }

  c->cache_size = size;

  return c->current_size;
}
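
/* Merge c1 and c2 into a new cache of capacity newsize, taking entries in
 * order of increasing timestamp (freshest first) and skipping duplicates;
 * nodeIDs are stolen from the source caches.  *source records whether entries
 * were taken from c1 (bit 0), c2 (bit 1), or both. */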
struct peer_cache *merge_caches(const struct peer_cache *c1, const struct peer_cache *c2, int newsize, int *source)
{
  int n1, n2;
  struct peer_cache *new_cache;
  uint8_t *meta;

  new_cache = cache_init(newsize, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;
  *source = 0;
  for (n1 = 0, n2 = 0; new_cache->current_size < new_cache->cache_size;) {
    if ((n1 == c1->current_size) && (n2 == c2->current_size)) {
      return new_cache;
    }
    if (n1 == c1->current_size) {
      if (in_cache(new_cache, &c2->entries[n2]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c2->entries[n2];
        c2->entries[n2].id = NULL;
        *source |= 0x02;
      }
      n2++;
    } else if (n2 == c2->current_size) {
      if (in_cache(new_cache, &c1->entries[n1]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c1->entries[n1];
        c1->entries[n1].id = NULL;
        *source |= 0x01;
      }
      n1++;
    } else {
      if (c2->entries[n2].timestamp > c1->entries[n1].timestamp) {
        if (in_cache(new_cache, &c1->entries[n1]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c1->entries[n1];
          c1->entries[n1].id = NULL;
          *source |= 0x01;
        }
        n1++;
      } else {
        if (in_cache(new_cache, &c2->entries[n2]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c2->entries[n2];
          c2->entries[n2].id = NULL;
          *source |= 0x02;
        }
        n2++;
      }
    }
  }

  return new_cache;
}

    
589
void cache_check(const struct peer_cache *c)
590
{
591
  int i, j;
592

    
593
  for (i = 0; i < c->current_size; i++) {
594
    for (j = i + 1; j < c->current_size; j++) {
595
      assert(!nodeid_equal(c->entries[i].id, c->entries[j].id));
596
    }
597
  }
598
}