Statistics
| Branch: | Revision:

grapes / src / Cache / topocache.c @ 81320947

History | View | Annotate | Download (19.8 KB)

1
/*
2
 *  Copyright (c) 2010 Luca Abeni
3
 *
4
 *  This is free software; see lgpl-2.1.txt
5
 */
6

    
7
#include <stdint.h>
8
#include <stdlib.h>
9
#include <string.h>
10

    
11
#include <stdio.h>
12
#undef NDEBUG
13
#include <assert.h>
14

    
15
#include "net_helper.h"
16
#include "topocache.h"
17
#include "int_coding.h"
18

    
19
/* One slot of the peer cache. */
struct cache_entry {
  struct nodeID *id;    /* Peer identity; owned by the cache (freed with nodeid_free()) */
  uint32_t timestamp;   /* Age counter: incremented by cache_update(); smaller = fresher */
};
23

    
24
/*
 * A bounded cache of peers kept sorted by increasing timestamp
 * (freshest first), with an optional fixed-size metadata blob per entry.
 */
struct peer_cache {
  struct cache_entry *entries;  /* cache_size slots; the first current_size are in use */
  int cache_size;               /* Allocated capacity (number of slots) */
  int current_size;             /* Number of slots currently occupied */
  int metadata_size;            /* Bytes of metadata per entry (0 = metadata disabled) */
  uint8_t *metadata;            /* Parallel array: entry i's blob at metadata + i * metadata_size */
  int max_timestamp;            /* If non-zero, cache_update() drops entries reaching this age */
};
32

    
33
static int cache_insert(struct peer_cache *c, struct cache_entry *e, const void *meta)
34
{
35
  int i, position;
36

    
37
  if (c->current_size == c->cache_size) {
38
    return -2;
39
  }
40
  position = 0;
41
  for (i = 0; i < c->current_size; i++) {
42
    assert(e->id);
43
    assert(c->entries[i].id);
44
    if (c->entries[i].timestamp <= e->timestamp) {
45
      position = i + 1;
46
    }
47
    if (nodeid_equal(e->id, c->entries[i].id)) {
48
      if (c->entries[i].timestamp > e->timestamp) {
49
        assert(i >= position);
50
        nodeid_free(c->entries[i].id);
51
        c->entries[i] = *e;
52
        memcpy(c->metadata + i * c->metadata_size, meta, c->metadata_size);
53
        if (position != i) {
54
          memmove(c->entries + position + 1, c->entries + position, sizeof(struct cache_entry) * (i - position));
55
          memmove(c->metadata + (position + 1) * c->metadata_size, c->metadata + position * c->metadata_size, (i -position) * c->metadata_size);
56
        }
57

    
58
        return position;
59
      }
60

    
61
      return -1;
62
    }
63
  }
64

    
65
  if (position != c->current_size) {
66
    memmove(c->entries + position + 1, c->entries + position, sizeof(struct cache_entry) * (c->current_size - position));
67
    memmove(c->metadata + (position + 1) * c->metadata_size, c->metadata + position * c->metadata_size, (c->current_size - position) * c->metadata_size);
68
  }
69
  c->current_size++;
70
  c->entries[position] = *e;
71
  memcpy(c->metadata + position * c->metadata_size, meta, c->metadata_size);
72

    
73
  return position;
74
}
75

    
76
int cache_add_cache(struct peer_cache *dst, const struct peer_cache *src)
77
{
78
  struct cache_entry *e_orig;
79
  int count, j;
80
  struct cache_entry e_dup;
81
cache_check(dst);
82
cache_check(src);
83

    
84
  count = 0;
85
  j=0;
86
  while(dst->current_size < dst->cache_size && src->current_size > count) {
87
    count++;
88

    
89
    e_orig = src->entries + j;
90

    
91
    e_dup.id = nodeid_dup(e_orig->id);
92
    e_dup.timestamp = e_orig->timestamp;
93

    
94
    if (cache_insert(dst, &e_dup, src->metadata + src->metadata_size * j) < 0) {
95
      nodeid_free(e_dup.id);
96
    }
97
    j++;
98
  }
99
cache_check(dst);
100
cache_check(src);
101

    
102
  return dst->current_size;
103
}
104

    
105
struct nodeID *nodeid(const struct peer_cache *c, int i)
106
{
107
  if (i < c->current_size) {
108
    return c->entries[i].id;
109
  }
110

    
111
  return NULL;
112
}
113

    
114
const void *get_metadata(const struct peer_cache *c, int *size)
115
{
116
  *size = c->metadata_size;
117
  return c->metadata;
118
}
119

    
120
int cache_metadata_update(struct peer_cache *c, const struct nodeID *p, const void *meta, int meta_size)
121
{
122
  int i;
123

    
124
  if (!meta_size || meta_size != c->metadata_size) {
125
    return -3;
126
  }
127
  for (i = 0; i < c->current_size; i++) {
128
    if (nodeid_equal(c->entries[i].id, p)) {
129
      memcpy(c->metadata + i * meta_size, meta, meta_size);
130
      return 1;
131
    }
132
  }
133

    
134
  return 0;
135
}
136

    
137
/*
 * Insert 'neighbour' into the cache at a position chosen by the
 * ranking function f (if given): the new entry lands after every
 * cached entry that f judges better (f(...) == 2).  If neighbour is
 * already cached it is removed first when f is given; otherwise only
 * its metadata is refreshed and -1 is returned.
 *
 * Returns the resulting cache size on success, -1 when already present
 * (no ranking function), -2 when the cache is full, -3 on metadata
 * size mismatch.  The new entry gets timestamp 1 and its node ID is
 * duplicated.
 *
 * NOTE(review): after cache_del() compacts the array, the loop moves
 * on without re-examining slot i, so the entry shifted into slot i is
 * never ranked against the new one — presumably tolerated; confirm.
 */
int cache_add_ranked(struct peer_cache *c, struct nodeID *neighbour, const void *meta, int meta_size, ranking_function f, const void *tmeta)
{
  int i, pos = 0;

  if (meta_size && meta_size != c->metadata_size) {
    return -3;
  }
  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, neighbour)) {
      if (f != NULL) {
        /* Ranked mode: drop the stale copy, then keep counting */
        cache_del(c,neighbour);
        if (i == c->current_size) break;
      } else {
          /* Unranked mode: just refresh the stored metadata */
          cache_metadata_update(c,neighbour,meta,meta_size);
          return -1;
      }
    }
    /* Count the cached entries that rank better than the new one */
    if ((f != NULL) && f(tmeta, meta, c->metadata+(c->metadata_size * i)) == 2) {
      pos++;
    }
  }
  if (c->current_size == c->cache_size) {
    return -2;
  }
  if (c->metadata_size) {
    /* Open a metadata hole at pos; zero it when no metadata was given */
    memmove(c->metadata + (pos + 1) * c->metadata_size, c->metadata + pos * c->metadata_size, (c->current_size - pos) * c->metadata_size);
    if (meta_size) {
      memcpy(c->metadata + pos * c->metadata_size, meta, meta_size);
    } else {
      memset(c->metadata + pos * c->metadata_size, 0, c->metadata_size);
    }
  }
  /* Shift the entries after pos up by one and store the newcomer */
  for (i = c->current_size; i > pos; i--) {
    c->entries[i] = c->entries[i - 1];
  }
  c->entries[pos].id = nodeid_dup(neighbour);
  c->entries[pos].timestamp = 1;
  c->current_size++;

  return c->current_size;
}
178

    
179
int cache_add(struct peer_cache *c, struct nodeID *neighbour, const void *meta, int meta_size)
180
{
181
  return cache_add_ranked(c, neighbour, meta, meta_size, NULL, NULL);
182
}
183

    
184
/*
 * Remove every cached entry whose ID equals 'neighbour', releasing the
 * stored ID and compacting both the entry and metadata arrays.
 * Returns the resulting cache size.
 */
int cache_del(struct peer_cache *c, const struct nodeID *neighbour)
{
  int i;
  int found = 0;

  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, neighbour)) {
      nodeid_free(c->entries[i].id);
      c->current_size--;
      found = 1;
      /* Close the metadata hole (the entry array is compacted below) */
      if (c->metadata_size && (i < c->current_size)) {
        memmove(c->metadata + c->metadata_size * i,
                c->metadata + c->metadata_size * (i + 1),
                c->metadata_size * (c->current_size - i));
      }
    }
    /* Once a match was found, shift every following entry left by one */
    if (found && (i < c->current_size)) {
      c->entries[i] = c->entries[i + 1];
    }
  }

  return c->current_size;
}
207

    
208
/*
 * Age the cache by one step: every entry's timestamp is incremented.
 * If max_timestamp is set (non-zero) and an entry has reached it, that
 * entry and all the following ones are dropped — the cache is kept
 * sorted by timestamp, so they can only be at least as old.
 */
void cache_update(struct peer_cache *c)
{
  int i;
  
  for (i = 0; i < c->current_size; i++) {
    if (c->max_timestamp && (c->entries[i].timestamp == c->max_timestamp)) {
      int j = i;

      /* Release entry i and everything after it */
      while(j < c->current_size && c->entries[j].id) {
        nodeid_free(c->entries[j].id);
        c->entries[j++].id = NULL;
      }
      c->current_size = i;        /* The cache is ordered by timestamp...
                                   all the other entries will be older than
                                   this one, so remove all of them
                                */
    } else {
      c->entries[i].timestamp++;
    }
  }
}
229

    
230
struct peer_cache *cache_init(int n, int metadata_size, int max_timestamp)
231
{
232
  struct peer_cache *res;
233

    
234
  res = malloc(sizeof(struct peer_cache));
235
  if (res == NULL) {
236
    return NULL;
237
  }
238
  res->max_timestamp = max_timestamp;
239
  res->cache_size = n;
240
  res->current_size = 0;
241
  res->entries = malloc(sizeof(struct cache_entry) * n);
242
  if (res->entries == NULL) {
243
    free(res);
244

    
245
    return NULL;
246
  }
247
  
248
  memset(res->entries, 0, sizeof(struct cache_entry) * n);
249
  if (metadata_size) {
250
    res->metadata = malloc(metadata_size * n);
251
  } else {
252
    res->metadata = NULL;
253
  }
254

    
255
  if (res->metadata) {
256
    res->metadata_size = metadata_size;
257
    memset(res->metadata, 0, metadata_size * n);
258
  } else {
259
    res->metadata_size = 0;
260
  }
261

    
262
  return res;
263
}
264

    
265
struct peer_cache *cache_copy(const struct peer_cache *c1)
266
{
267
  int n;
268
  struct peer_cache *new_cache;
269

    
270
  new_cache = cache_init(c1->current_size, c1->metadata_size, c1->max_timestamp);
271
  if (new_cache == NULL) {
272
    return NULL;
273
  }
274

    
275
  for (n = 0; n < c1->current_size; n++) {
276
    new_cache->entries[new_cache->current_size].id = nodeid_dup(c1->entries[n].id);
277
    new_cache->entries[new_cache->current_size++].timestamp = c1->entries[n].timestamp;
278
  }
279
  if (new_cache->metadata_size) {
280
    memcpy(new_cache->metadata, c1->metadata, c1->metadata_size * c1->current_size);
281
  }
282

    
283
  return new_cache;
284
}
285

    
286
void cache_free(struct peer_cache *c)
287
{
288
  int i;
289

    
290
  for (i = 0; i < c->current_size; i++) {
291
    if(c->entries[i].id) {
292
      nodeid_free(c->entries[i].id);
293
    }
294
  }
295
  free(c->entries);
296
  free(c->metadata);
297
  free(c);
298
}
299

    
300
static int in_cache(const struct peer_cache *c, const struct cache_entry *elem)
301
{
302
  int i;
303

    
304
  for (i = 0; i < c->current_size; i++) {
305
    if (nodeid_equal(c->entries[i].id, elem->id)) {
306
      return i;
307
    }
308
  }
309

    
310
  return -1;
311
}
312

    
313
struct nodeID *rand_peer(const struct peer_cache *c, void **meta, int max)
314
{
315
  int j;
316

    
317
  if (c->current_size == 0) {
318
    return NULL;
319
  }
320
  if (!max || max >= c->current_size)
321
    max = c->current_size;
322
  else
323
    ++max;
324
  j = ((double)rand() / (double)RAND_MAX) * max;
325

    
326
  if (meta) {
327
    *meta = c->metadata + (j * c->metadata_size);
328
  }
329

    
330
  return c->entries[j].id;
331
}
332

    
333
struct nodeID *last_peer(const struct peer_cache *c)
334
{
335
  if (c->current_size == 0) {
336
    return NULL;
337
  }
338

    
339
  return c->entries[c->current_size - 1].id;
340
}
341

    
342

    
343
int cache_fill_ordered(struct peer_cache *dst, const struct peer_cache *src, int target_size)
344
{
345
  struct cache_entry *e_orig, e_dup;
346
  int count, j, err;
347
cache_check(dst);
348
cache_check(src);
349
  if (target_size <= 0 || target_size > dst->cache_size) {
350
    target_size = dst->cache_size;
351
  }
352

    
353
  if (dst->current_size >= target_size) return -1;
354

    
355
  count = 0;
356
  j=0;
357
  while(dst->current_size < target_size && src->current_size > count) {
358
    count++;
359

    
360
    e_orig = src->entries + j;
361
    e_dup.id = nodeid_dup(e_orig->id);
362
    e_dup.timestamp = e_orig->timestamp;
363

    
364
    err = cache_insert(dst, &e_dup, src->metadata + src->metadata_size * j);
365
    if (err == -1) {
366
      /* Cache entry is fresher */
367
      nodeid_free(e_dup.id);
368
    }
369

    
370
    j++;
371
  }
372
cache_check(dst);
373
cache_check(src);
374
 return dst->current_size;
375
}
376

    
377
int cache_fill_rand(struct peer_cache *dst, const struct peer_cache *src, int target_size)
378
{
379
  int added[src->current_size];
380
  struct cache_entry *e_orig, e_dup;
381
  int count, j, err;
382
cache_check(dst);
383
cache_check(src);
384
  if (target_size <= 0 || target_size > dst->cache_size) {
385
    target_size = dst->cache_size;
386
  }
387

    
388
  if (dst->current_size >= target_size) return -1;
389

    
390
  memset(added, 0, sizeof(int) * src->current_size);
391
  count = 0;
392
  while(dst->current_size < target_size && src->current_size > count) {
393
    j = ((double)rand() / (double)RAND_MAX) * src->current_size;
394
    if (added[j] != 0) continue;
395
    added[j] = 1;
396
    count++;
397

    
398
    e_orig = src->entries + j;
399

    
400
    e_dup.id = nodeid_dup(e_orig->id);
401
    e_dup.timestamp = e_orig->timestamp;
402

    
403
    err = cache_insert(dst, &e_dup, src->metadata + src->metadata_size * j);
404
    if (err == -1) {
405
      /* Cache entry is fresher */
406
      nodeid_free(e_dup.id);
407
    }
408
  }
409
cache_check(dst);
410
cache_check(src);
411
 return dst->current_size;
412
}
413

    
414

    
415
/*
 * Extract up to n random entries from c into a newly allocated cache,
 * skipping any node listed in except[] (len entries).  The chosen
 * entries are REMOVED from c: ownership of their node IDs moves to the
 * returned cache.  Each distinct excluded node encountered lowers the
 * extraction target by one, so the loop terminates even when excluded
 * nodes keep being drawn.
 *
 * NOTE(review): cache_init()'s result is not NULL-checked, and the
 * random index can equal c->current_size when rand() == RAND_MAX —
 * presumably never hit in practice, but worth confirming.
 */
struct peer_cache *rand_cache_except(struct peer_cache *c, int n,
                                     struct nodeID *except[], int len)
{
  struct peer_cache *res;
  struct cache_entry *e;
  int present[len];      /* present[i] set once except[i] has been drawn */
  int count = 0;

cache_check(c);

 memset(present, 0, sizeof(int) * len);

  if (c->current_size < n) {
    n = c->current_size;
  }
  res = cache_init(n, c->metadata_size, c->max_timestamp);
  while(res->current_size < (n - count)) {
    int j,i,flag;

    j = ((double)rand() / (double)RAND_MAX) * c->current_size;
    e = c->entries +j;

    /* Is the drawn entry in the exclusion list? */
    flag = 0;
    for (i=0; i<len; i++) {
      if (nodeid_equal(e->id, except[i])) {
        if (present[i] == 0) {
          count++;             /* first hit on this excluded node: shrink target */
          present[i] = 1;
        }
        flag = 1;
        break;
      }
    }
    if (flag) continue;

    /* Move the entry (and its metadata) from c into res */
    cache_insert(res, c->entries + j, c->metadata + c->metadata_size * j);
    c->current_size--;
    memmove(c->entries + j, c->entries + j + 1, sizeof(struct cache_entry) * (c->current_size - j));
    memmove(c->metadata + c->metadata_size * j, c->metadata + c->metadata_size * (j + 1), c->metadata_size * (c->current_size - j));
    c->entries[c->current_size].id = NULL;
cache_check(c);
  }

  return res;
}
460

    
461
/*
 * Extract n random entries from c into a newly allocated cache (fewer
 * when c holds fewer).  The chosen entries are REMOVED from c:
 * ownership of their node IDs moves to the returned cache.
 *
 * NOTE(review): cache_init()'s result is not NULL-checked, and the
 * random index can equal c->current_size when rand() == RAND_MAX —
 * presumably never hit in practice, but worth confirming.
 */
struct peer_cache *rand_cache(struct peer_cache *c, int n)
{
  struct peer_cache *res;

cache_check(c);
  if (c->current_size < n) {
    n = c->current_size;
  }
  res = cache_init(n, c->metadata_size, c->max_timestamp);

  while(res->current_size < n) {
    int j;

    j = ((double)rand() / (double)RAND_MAX) * c->current_size;
    /* Move entry j (and its metadata) from c into res */
    cache_insert(res, c->entries + j, c->metadata + c->metadata_size * j);
    c->current_size--;
    memmove(c->entries + j, c->entries + j + 1, sizeof(struct cache_entry) * (c->current_size - j));
    memmove(c->metadata + c->metadata_size * j, c->metadata + c->metadata_size * (j + 1), c->metadata_size * (c->current_size - j));
    c->entries[c->current_size].id = NULL;
cache_check(c);
  }

  return res;
}
485

    
486
/*
 * Rebuild a peer cache from the wire format produced by
 * cache_header_dump() + entry_dump(): an 8-byte header (cache size and
 * metadata size, read with int_rcpy) followed by one record per entry
 * (32-bit timestamp, dumped node ID, optional metadata blob).  'size'
 * is the total buffer length; the function asserts that the records
 * exactly fill it.
 *
 * NOTE(review): cache_init()'s result is not NULL-checked, and a
 * malformed buffer advertising more records than cache_size would
 * overrun res->entries — presumably the input is trusted; confirm.
 */
struct peer_cache *entries_undump(const uint8_t *buff, int size)
{
  struct peer_cache *res;
  int i = 0;
  const uint8_t *p = buff;
  uint8_t *meta;
  int cache_size, metadata_size;

  cache_size = int_rcpy(buff);
  metadata_size = int_rcpy(buff + 4);
  p = buff + 8;
  res = cache_init(cache_size, metadata_size, 0);
  meta = res->metadata;
  while (p - buff < size) {
    int len;

    res->entries[i].timestamp = int_rcpy(p);
    p += sizeof(uint32_t);
    res->entries[i++].id = nodeid_undump(p, &len);
    p += len;                  /* nodeid_undump() reports how many bytes it consumed */
    if (metadata_size) {
      memcpy(meta, p, metadata_size);
      p += metadata_size;
      meta += metadata_size;
    }
  }
  res->current_size = i;
  assert(p - buff == size);    /* records must exactly fill the buffer */

  return res;
}
517

    
518
int cache_header_dump(uint8_t *b, const struct peer_cache *c, int include_me)
519
{
520
  int_cpy(b, c->cache_size + (include_me ? 1 : 0));
521
  int_cpy(b + 4, c->metadata_size);
522

    
523
  return 8;
524
}
525

    
526
int entry_dump(uint8_t *b, const struct peer_cache *c, int i, size_t max_write_size)
527
{
528
  int res;
529
  int size = 0;
530
 
531
  if (i && (i >= c->cache_size - 1)) {
532
    return 0;
533
  }
534
  int_cpy(b, c->entries[i].timestamp);
535
  size = +4;
536
  res = nodeid_dump(b + size, c->entries[i].id, max_write_size - size);
537
  if (res < 0 ) {
538
    return -1;
539
  }
540
  size += res;
541
  if (c->metadata_size) {
542
    if (c->metadata_size > max_write_size - size) {
543
      return -1;
544
    }
545
    memcpy(b + size, c->metadata + c->metadata_size * i, c->metadata_size);
546
    size += c->metadata_size;
547
  }
548

    
549
  return size;
550
}
551

    
552
/*
 * Build a new cache containing c's entries (minus 'target', if given)
 * re-ordered by the ranking function: each entry is placed after every
 * already-inserted entry that 'rank' judges better (returns 2).  With
 * rank == NULL entries are ordered by decreasing timestamp instead.
 * Node IDs are duplicated, so c is left untouched.  Returns the new
 * cache, or NULL on allocation failure.
 */
struct peer_cache *cache_rank (const struct peer_cache *c, ranking_function rank, const struct nodeID *target, const void *target_meta)
{
  struct peer_cache *res;
  int i,j,pos;

  res = cache_init(c->cache_size, c->metadata_size, c->max_timestamp);
  if (res == NULL) {
    return res;
  }

  for (i = 0; i < c->current_size; i++) {
    if (!target || !nodeid_equal(c->entries[i].id,target)) {
      /* Count how many already-placed entries rank better than entry i */
      pos = 0;
      for (j=0; j<res->current_size;j++) {
        if (((rank != NULL) && rank(target_meta, c->metadata+(c->metadata_size * i), res->metadata+(res->metadata_size * j)) == 2) ||
            ((rank == NULL) && res->entries[j].timestamp < c->entries[i].timestamp)) {
          pos++;
        }
      }
      /* Open a hole at pos and insert entry i there */
      if (c->metadata_size) {
        memmove(res->metadata + (pos + 1) * res->metadata_size, res->metadata + pos * res->metadata_size, (res->current_size - pos) * res->metadata_size);
        memcpy(res->metadata + pos * res->metadata_size, c->metadata+(c->metadata_size * i), res->metadata_size);
      }
      for (j = res->current_size; j > pos; j--) {
        res->entries[j] = res->entries[j - 1];
      }
      res->entries[pos].id = nodeid_dup(c->entries[i].id);
      res->entries[pos].timestamp = c->entries[i].timestamp;
      res->current_size++;
    }
  }

  return res;
}
586

    
587
/*
 * Merge c1 and c2 into a newly allocated cache.  All of c1's entries
 * are taken first; c2's entries are then appended unless already
 * present, in which case the fresher (smaller) timestamp and its
 * metadata win.  *size receives the resulting entry count.  Returns
 * NULL when the metadata sizes differ or allocation fails.
 *
 * NOTE: this function STEALS the node IDs of the entries it takes
 * (the corresponding entries[].id pointers in c1/c2 are set to NULL)
 * even though the parameters are declared const — callers must not use
 * the source caches' IDs afterwards.
 */
struct peer_cache *cache_union(const struct peer_cache *c1, const struct peer_cache *c2, int *size)
{
  int n, pos;
  struct peer_cache *new_cache;
  uint8_t *meta;

  if (c1->metadata_size != c2->metadata_size) {
    return NULL;
  }

  new_cache = cache_init(c1->current_size + c2->current_size, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;

  /* Take everything from c1, stealing the IDs */
  for (n = 0; n < c1->current_size; n++) {
    if (new_cache->metadata_size) {
      memcpy(meta, c1->metadata + n * c1->metadata_size, c1->metadata_size);
      meta += new_cache->metadata_size;
    }
    new_cache->entries[new_cache->current_size++] = c1->entries[n];
    c1->entries[n].id = NULL;
  }
  
  /* Add c2's entries, preferring the fresher copy on duplicates */
  for (n = 0; n < c2->current_size; n++) {
    pos = in_cache(new_cache, &c2->entries[n]);
    if (pos >= 0 && new_cache->entries[pos].timestamp > c2->entries[n].timestamp) {
      cache_metadata_update(new_cache, c2->entries[n].id, c2->metadata + n * c2->metadata_size, c2->metadata_size);
      new_cache->entries[pos].timestamp = c2->entries[n].timestamp;
    }
    if (pos < 0) {
      if (new_cache->metadata_size) {
        memcpy(meta, c2->metadata + n * c2->metadata_size, c2->metadata_size);
        meta += new_cache->metadata_size;
      }
      new_cache->entries[new_cache->current_size++] = c2->entries[n];
      c2->entries[n].id = NULL;
    }
  }
  *size = new_cache->current_size;

  return new_cache;
}
632

    
633

    
634
int cache_max_size(const struct peer_cache *c)
635
{
636
  return c->cache_size;
637
}
638

    
639
int cache_current_size(const struct peer_cache *c)
640
{
641
  return c->current_size;
642
}
643

    
644
int cache_resize (struct peer_cache *c, int size)
645
{
646
  int dif = size - c->cache_size;
647

    
648
  if (!dif) {
649
    return c->current_size;
650
  }
651

    
652
  c->entries = realloc(c->entries, sizeof(struct cache_entry) * size);
653
  if (dif > 0) {
654
    memset(c->entries + c->cache_size, 0, sizeof(struct cache_entry) * dif);
655
  } else if (c->current_size > size) {
656
    c->current_size = size;
657
  }
658

    
659
  if (c->metadata_size) {
660
    c->metadata = realloc(c->metadata, c->metadata_size * size);
661
    if (dif > 0) {
662
      memset(c->metadata + c->metadata_size * c->cache_size, 0, c->metadata_size * dif);
663
    }
664
  }
665

    
666
  c->cache_size = size;
667

    
668
  return c->current_size;
669
}
670
  
671
/*
 * Build a new cache of 'newsize' slots by interleaving c1 and c2,
 * always taking the entry with the smaller (fresher) timestamp next
 * and skipping nodes already taken.  *source is set to a bitmask
 * recording where the surviving entries came from (0x01 = c1,
 * 0x02 = c2).  Returns NULL on allocation failure.
 *
 * NOTE: like cache_union(), this STEALS the node IDs of the entries it
 * takes (the corresponding entries[].id in c1/c2 are set to NULL)
 * despite the const parameters — the source caches must not be used
 * for their IDs afterwards.
 */
struct peer_cache *merge_caches(const struct peer_cache *c1, const struct peer_cache *c2, int newsize, int *source)
{
  int n1, n2;
  struct peer_cache *new_cache;
  uint8_t *meta;

  new_cache = cache_init(newsize, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;
  *source = 0;
  for (n1 = 0, n2 = 0; new_cache->current_size < new_cache->cache_size;) {
    /* Both inputs exhausted: done */
    if ((n1 == c1->current_size) && (n2 == c2->current_size)) {
      return new_cache;
    }
    if (n1 == c1->current_size) {
      /* Only c2 left */
      if (in_cache(new_cache, &c2->entries[n2]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c2->entries[n2];
        c2->entries[n2].id = NULL;
        *source |= 0x02;
      }
      n2++;
    } else if (n2 == c2->current_size) {
      /* Only c1 left */
      if (in_cache(new_cache, &c1->entries[n1]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c1->entries[n1];
        c1->entries[n1].id = NULL;
        *source |= 0x01;
      }
      n1++;
    } else {
      /* Both available: take the fresher (smaller timestamp) entry */
      if (c2->entries[n2].timestamp > c1->entries[n1].timestamp) {
        if (in_cache(new_cache, &c1->entries[n1]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c1->entries[n1];
          c1->entries[n1].id = NULL;
          *source |= 0x01;
        }
        n1++;
      } else {
        if (in_cache(new_cache, &c2->entries[n2]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c2->entries[n2];
          c2->entries[n2].id = NULL;
          *source |= 0x02;
        }
        n2++;
      }
    }
  }

  return new_cache;
}
739

    
740
void cache_check(const struct peer_cache *c)
741
{
742
  int i, j;
743
  int ts = 0;
744

    
745
  for (i = 0; i < c->current_size; i++) {
746
    if (c->entries[i].timestamp < ts) {
747
      fprintf(stderr, "WTF!!!! %d.ts=%d > %d.ts=%d!!!\n",
748
              i-1, ts, i, c->entries[i].timestamp);
749
      *((char *)0) = 1;
750
    }
751
    ts = c->entries[i].timestamp;
752
    for (j = i + 1; j < c->current_size; j++) {
753
      assert(!nodeid_equal(c->entries[i].id, c->entries[j].id));
754
    }
755
  }
756
}
757

    
758
void cache_log(const struct peer_cache *c, const char *name){
759
  char addr[256];
760
  int i;
761
  const char default_name[] = "none";
762
  const char *actual_name = (name)? name : default_name;
763
  fprintf(stderr, "### dumping cache (%s)\n", actual_name);
764
  fprintf(stderr, "\tcache_size=%d, current_size=%d\n", c->cache_size, c->current_size);
765
  for (i = 0; i < c->current_size; i++) {
766
    node_addr(c->entries[i].id, addr, 256);
767
    fprintf(stderr, "\t%d: %s[%d]\n", i, addr, c->entries[i].timestamp);
768
  }
769
  fprintf(stderr, "\n-----------------------------\n");
770
}