grapes / src / Cache / topocache.c @ 2d4b0f20

/*
 *  Copyright (c) 2010 Luca Abeni
 *
 *  This is free software; see lgpl-2.1.txt
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <stdio.h>

#include "net_helper.h"
#include "topocache.h"
#include "int_coding.h"

struct cache_entry {
  struct nodeID *id;
  uint32_t timestamp;
};

struct peer_cache {
  struct cache_entry *entries;
  int cache_size;
  int current_size;
  int metadata_size;
  uint8_t *metadata;
  int max_timestamp;
};

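/*
 * The entries[] array is kept ordered by increasing timestamp (freshest
 * entries first); cache_check() verifies this invariant.  metadata is a
 * parallel array holding metadata_size bytes for the entry at the same index.
 */

/*
 * Insert entry e (taking ownership of e->id) at the position given by its
 * timestamp.  If the same nodeID is already cached with an older (larger)
 * timestamp, the old entry is replaced and moved up; if the cached copy is
 * at least as fresh, -1 is returned and the caller keeps ownership of e->id.
 * Returns the insertion position, or -2 when the cache is full.
 */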
static int cache_insert_or_update(struct peer_cache *c, struct cache_entry *e, const void *meta)
{
  int i, position;

  if (c->current_size == c->cache_size) {
    return -2;
  }
  position = 0;
  for (i = 0; i < c->current_size; i++) {
if (e->id == NULL) {fprintf(stderr, "e->ID = NULL!!!\n"); *((char *)0) = 1;}
if (c->entries[i].id == NULL) {fprintf(stderr, "entries[%d]->ID = NULL!!!\n", i); exit(-1);}
    if (c->entries[i].timestamp <= e->timestamp) {
      position = i + 1;
    }
    if (nodeid_equal(e->id, c->entries[i].id)) {
      if (c->entries[i].timestamp > e->timestamp) {
        /* The cached copy is older: shift the entries between position and i
           down by one slot, then store the new entry at position */
        nodeid_free(c->entries[i].id);
        if (position < i) {
          memmove(c->entries + position + 1, c->entries + position, sizeof(struct cache_entry) * (i - position));
          memmove(c->metadata + (position + 1) * c->metadata_size, c->metadata + position * c->metadata_size, (i - position) * c->metadata_size);
        }
        c->entries[position] = *e;
        memcpy(c->metadata + position * c->metadata_size, meta, c->metadata_size);

        return position;
      }

      return -1;
    }
  }

  if (position != c->current_size) {
    memmove(c->entries + position + 1, c->entries + position, sizeof(struct cache_entry) * (c->current_size - position));
    memmove(c->metadata + (position + 1) * c->metadata_size, c->metadata + position * c->metadata_size, (c->current_size - position) * c->metadata_size);
  }
  c->current_size++;
  c->entries[position] = *e;
  memcpy(c->metadata + position * c->metadata_size, meta, c->metadata_size);

  return position;
}

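/*
 * Like cache_insert_or_update(), but never updates: returns -1 if the nodeID
 * is already cached (whatever its timestamp), -2 if the cache is full, and
 * the insertion position otherwise.
 */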
static int cache_insert(struct peer_cache *c, struct cache_entry *e, const void *meta)
{
  int i, position;

  if (c->current_size == c->cache_size) {
    return -2;
  }
  position = 0;
  for (i = 0; i < c->current_size; i++) {
if (e->id == NULL) {fprintf(stderr, "e->ID = NULL!!!\n"); *((char *)0) = 1;}
if (c->entries[i].id == NULL) {fprintf(stderr, "entries[%d]->ID = NULL!!!\n", i); exit(-1);}
    if (nodeid_equal(e->id, c->entries[i].id)) {
      return -1;
    }
    if (c->entries[i].timestamp <= e->timestamp) {
      position = i + 1;
    }
  }

  if (position != c->current_size) {
    memmove(c->entries + position + 1, c->entries + position, sizeof(struct cache_entry) * (c->current_size - position));
    memmove(c->metadata + (position + 1) * c->metadata_size, c->metadata + position * c->metadata_size, (c->current_size - position) * c->metadata_size);
  }
  c->current_size++;
  c->entries[position] = *e;
  memcpy(c->metadata + position * c->metadata_size, meta, c->metadata_size);

  return position;
}

struct nodeID *nodeid(const struct peer_cache *c, int i)
{
  if (i < c->current_size) {
    return c->entries[i].id;
  }

  return NULL;
}

const void *get_metadata(const struct peer_cache *c, int *size)
{
  *size = c->metadata_size;
  return c->metadata;
}

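/*
 * Overwrite the metadata associated with peer p.  Returns 1 if p is in the
 * cache, 0 if it is not, and -3 if meta_size is zero or does not match the
 * cache's metadata size.
 */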
int cache_metadata_update(struct peer_cache *c, const struct nodeID *p, const void *meta, int meta_size)
{
  int i;

  if (!meta_size || meta_size != c->metadata_size) {
    return -3;
  }
  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, p)) {
      memcpy(c->metadata + i * meta_size, meta, meta_size);
      return 1;
    }
  }

  return 0;
}

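/*
 * Add neighbour to the cache with a fresh timestamp (1), duplicating its
 * nodeID.  If the node is already cached: with a ranking function the old
 * entry is removed and re-inserted at its ranked position, without one only
 * its metadata is refreshed and -1 is returned.  The position is chosen by
 * counting the entries that the ranking function prefers; f() returning 2
 * appears to mean that the existing entry ranks ahead of the new one.
 * Returns -3 on a metadata size mismatch, -2 when the cache is full, and the
 * new cache size on success.
 */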
int cache_add_ranked(struct peer_cache *c, struct nodeID *neighbour, const void *meta, int meta_size, ranking_function f, const void *tmeta)
{
  int i, pos = 0;

  if (meta_size && meta_size != c->metadata_size) {
    return -3;
  }
  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, neighbour)) {
      if (f != NULL) {
        cache_del(c, neighbour);
        if (i == c->current_size) break;
      } else {
        cache_metadata_update(c, neighbour, meta, meta_size);
        return -1;
      }
    }
    if ((f != NULL) && f(tmeta, meta, c->metadata + (c->metadata_size * i)) == 2) {
      pos++;
    }
  }
  if (c->current_size == c->cache_size) {
    return -2;
  }
  if (c->metadata_size) {
    memmove(c->metadata + (pos + 1) * c->metadata_size, c->metadata + pos * c->metadata_size, (c->current_size - pos) * c->metadata_size);
    if (meta_size) {
      memcpy(c->metadata + pos * c->metadata_size, meta, meta_size);
    } else {
      memset(c->metadata + pos * c->metadata_size, 0, c->metadata_size);
    }
  }
  for (i = c->current_size; i > pos; i--) {
    c->entries[i] = c->entries[i - 1];
  }
  c->entries[pos].id = nodeid_dup(neighbour);
  c->entries[pos].timestamp = 1;
  c->current_size++;

  return c->current_size;
}

int cache_add(struct peer_cache *c, struct nodeID *neighbour, const void *meta, int meta_size)
{
  return cache_add_ranked(c, neighbour, meta, meta_size, NULL, NULL);
}

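/*
 * Remove neighbour from the cache, freeing its nodeID and compacting the
 * entries and metadata arrays.  Returns the resulting cache size.
 */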
int cache_del(struct peer_cache *c, const struct nodeID *neighbour)
{
  int i;
  int found = 0;

  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, neighbour)) {
      nodeid_free(c->entries[i].id);
      c->current_size--;
      found = 1;
      if (c->metadata_size && (i < c->current_size)) {
        memmove(c->metadata + c->metadata_size * i,
                c->metadata + c->metadata_size * (i + 1),
                c->metadata_size * (c->current_size - i));
      }
    }
    if (found && (i < c->current_size)) {
      c->entries[i] = c->entries[i + 1];
    }
  }

  return c->current_size;
}

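/*
 * Age the cache by one step: every entry's timestamp is incremented, and as
 * soon as an entry reaches max_timestamp it is expired together with all the
 * entries that follow it (which cannot be fresher, since the cache is
 * ordered by timestamp).
 */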
void cache_update(struct peer_cache *c)
{
  int i;

  for (i = 0; i < c->current_size; i++) {
    if (c->max_timestamp && (c->entries[i].timestamp == c->max_timestamp)) {
      int j = i;

      while (j < c->current_size && c->entries[j].id) {
        nodeid_free(c->entries[j].id);
        c->entries[j++].id = NULL;
      }
      c->current_size = i;      /* The cache is ordered by timestamp...
                                   all the other entries will be older than
                                   this one, so remove all of them
                                */
    } else {
      c->entries[i].timestamp++;
    }
  }
}

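/*
 * Allocate a cache with room for n entries, each carrying metadata_size
 * bytes of opaque metadata; a non-zero max_timestamp is the age at which
 * entries are expired by cache_update().  Returns NULL on allocation
 * failure.  A minimal usage sketch (some_node is a hypothetical nodeID
 * obtained elsewhere, e.g. from net_helper):
 *
 *   struct peer_cache *c = cache_init(32, 0, 10);
 *   cache_add(c, some_node, NULL, 0);
 *   cache_update(c);            // once per gossip cycle
 *   cache_free(c);
 */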
struct peer_cache *cache_init(int n, int metadata_size, int max_timestamp)
{
  struct peer_cache *res;

  res = malloc(sizeof(struct peer_cache));
  if (res == NULL) {
    return NULL;
  }
  res->max_timestamp = max_timestamp;
  res->cache_size = n;
  res->current_size = 0;
  res->entries = malloc(sizeof(struct cache_entry) * n);
  if (res->entries == NULL) {
    free(res);

    return NULL;
  }

  memset(res->entries, 0, sizeof(struct cache_entry) * n);
  if (metadata_size) {
    res->metadata = malloc(metadata_size * n);
  } else {
    res->metadata = NULL;
  }

  if (res->metadata) {
    res->metadata_size = metadata_size;
    memset(res->metadata, 0, metadata_size * n);
  } else {
    res->metadata_size = 0;
  }

  return res;
}

void cache_free(struct peer_cache *c)
{
  int i;

  for (i = 0; i < c->current_size; i++) {
    if (c->entries[i].id) {
      nodeid_free(c->entries[i].id);
    }
  }
  free(c->entries);
  free(c->metadata);
  free(c);
}

static int in_cache(const struct peer_cache *c, const struct cache_entry *elem)
{
  int i;

  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, elem->id)) {
      return i;
    }
  }

  return -1;
}

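/*
 * Return the nodeID of a randomly chosen entry (and, if meta is not NULL, a
 * pointer to its metadata).  When max is non-zero and smaller than the cache
 * size, the choice is restricted to the first max + 1 entries (the freshest
 * ones).  Returns NULL when the cache is empty.
 */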
struct nodeID *rand_peer(const struct peer_cache *c, void **meta, int max)
{
  int j;

  if (c->current_size == 0) {
    return NULL;
  }
  if (!max || max >= c->current_size)
    max = c->current_size;
  else
    ++max;
  j = ((double)rand() / (double)RAND_MAX) * max;

  if (meta) {
    *meta = c->metadata + (j * c->metadata_size);
  }

  return c->entries[j].id;
}

struct nodeID *last_peer(const struct peer_cache *c)
{
  if (c->current_size == 0) {
    return NULL;
  }

  return c->entries[c->current_size - 1].id;
}

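/*
 * Copy entries from src into dst, scanning src in order (freshest first),
 * until dst holds target_size entries or src is exhausted.  Returns the new
 * size of dst, or -1 if dst already holds target_size entries.
 */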
int cache_fill_ordered(struct peer_cache *dst, const struct peer_cache *src, int target_size)
{
  struct cache_entry *e_orig, e_dup;
  int count, j, err;
cache_check(dst);
cache_check(src);
  if (target_size <= 0 || target_size > dst->cache_size) {
    target_size = dst->cache_size;
  }

  if (dst->current_size >= target_size) return -1;

  count = 0;
  j = 0;
  while (dst->current_size < target_size && src->current_size > count) {
    count++;

    e_orig = src->entries + j;
    e_dup.id = nodeid_dup(e_orig->id);
    e_dup.timestamp = e_orig->timestamp;

    err = cache_insert_or_update(dst, &e_dup, src->metadata + src->metadata_size * j);
    if (err == -1) {
      /* Cache entry is fresher */
      nodeid_free(e_dup.id);
    }

    j++;
  }
cache_check(dst);
cache_check(src);
  return dst->current_size;
}

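/*
 * Same as cache_fill_ordered(), but src entries are drawn in random order.
 */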
int cache_fill_rand(struct peer_cache *dst, const struct peer_cache *src, int target_size)
{
  int added[src->current_size];
  struct cache_entry *e_orig, e_dup;
  int count, j, err;
cache_check(dst);
cache_check(src);
  if (target_size <= 0 || target_size > dst->cache_size) {
    target_size = dst->cache_size;
  }

  if (dst->current_size >= target_size) return -1;

  memset(added, 0, sizeof(int) * src->current_size);
  count = 0;
  while (dst->current_size < target_size && src->current_size > count) {
    j = ((double)rand() / (double)RAND_MAX) * src->current_size;
    if (added[j] != 0) continue;
    added[j] = 1;
    count++;

    e_orig = src->entries + j;

    e_dup.id = nodeid_dup(e_orig->id);
    e_dup.timestamp = e_orig->timestamp;

    err = cache_insert_or_update(dst, &e_dup, src->metadata + src->metadata_size * j);
    if (err == -1) {
      /* Cache entry is fresher */
      nodeid_free(e_dup.id);
    }
  }
cache_check(dst);
cache_check(src);
  return dst->current_size;
}

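/*
 * Extract up to n randomly chosen entries from c into a newly allocated
 * cache, skipping the nodeIDs listed in except[]; the extracted entries are
 * removed from c.  Each distinct excluded node that is hit reduces the
 * number of entries extracted.
 */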
struct peer_cache *rand_cache_except(struct peer_cache *c, int n,
                                     struct nodeID *except[], int len)
{
  struct peer_cache *res;
  struct cache_entry *e;
  int present[len];
  int count = 0;

cache_check(c);

  memset(present, 0, sizeof(int) * len);

  if (c->current_size < n) {
    n = c->current_size;
  }
  res = cache_init(n, c->metadata_size, c->max_timestamp);
  while (res->current_size < (n - count)) {
    int j, i, flag;

    j = ((double)rand() / (double)RAND_MAX) * c->current_size;
    e = c->entries + j;

    flag = 0;
    for (i = 0; i < len; i++) {
      if (nodeid_equal(e->id, except[i])) {
        if (present[i] == 0) {
          count++;
          present[i] = 1;
        }
        flag = 1;
        break;
      }
    }
    if (flag) continue;

    cache_insert(res, c->entries + j, c->metadata + c->metadata_size * j);
    c->current_size--;
    memmove(c->entries + j, c->entries + j + 1, sizeof(struct cache_entry) * (c->current_size - j));
    memmove(c->metadata + c->metadata_size * j, c->metadata + c->metadata_size * (j + 1), c->metadata_size * (c->current_size - j));
    c->entries[c->current_size].id = NULL;
cache_check(c);
  }

  return res;
}

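/*
 * Like rand_cache_except() with an empty exclusion list: move n randomly
 * chosen entries from c into a newly allocated cache.
 */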
struct peer_cache *rand_cache(struct peer_cache *c, int n)
{
  struct peer_cache *res;

cache_check(c);
  if (c->current_size < n) {
    n = c->current_size;
  }
  res = cache_init(n, c->metadata_size, c->max_timestamp);

  while (res->current_size < n) {
    int j;

    j = ((double)rand() / (double)RAND_MAX) * c->current_size;
    cache_insert(res, c->entries + j, c->metadata + c->metadata_size * j);
    c->current_size--;
    memmove(c->entries + j, c->entries + j + 1, sizeof(struct cache_entry) * (c->current_size - j));
    memmove(c->metadata + c->metadata_size * j, c->metadata + c->metadata_size * (j + 1), c->metadata_size * (c->current_size - j));
    c->entries[c->current_size].id = NULL;
cache_check(c);
  }

  return res;
}

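/*
 * Rebuild a cache from its serialized form: an 8-byte header (cache size and
 * metadata size, as written by cache_header_dump()) followed by one record
 * per entry, each holding a 4-byte timestamp, a dumped nodeID and, if
 * present, metadata_size bytes of metadata (as written by entry_dump()).
 */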
struct peer_cache *entries_undump(const uint8_t *buff, int size)
{
  struct peer_cache *res;
  int i = 0;
  const uint8_t *p = buff;
  uint8_t *meta;
  int cache_size, metadata_size;

  cache_size = int_rcpy(buff);
  metadata_size = int_rcpy(buff + 4);
  p = buff + 8;
  res = cache_init(cache_size, metadata_size, 0);
  meta = res->metadata;
  while (p - buff < size) {
    int len;

    res->entries[i].timestamp = int_rcpy(p);
    p += sizeof(uint32_t);
    res->entries[i++].id = nodeid_undump(p, &len);
    p += len;
    if (metadata_size) {
      memcpy(meta, p, metadata_size);
      p += metadata_size;
      meta += metadata_size;
    }
  }
  res->current_size = i;
if (p - buff != size) { fprintf(stderr, "Waz!! %d != %d\n", (int)(p - buff), size); exit(-1);}

  return res;
}

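/*
 * Write the 8-byte serialization header (cache size, optionally counting the
 * local node, and metadata size).  Returns the number of bytes written.
 */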
int cache_header_dump(uint8_t *b, const struct peer_cache *c, int include_me)
{
  int_cpy(b, c->cache_size + (include_me ? 1 : 0));
  int_cpy(b + 4, c->metadata_size);

  return 8;
}

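/*
 * Serialize entry i (timestamp, nodeID and metadata) into b, writing at most
 * max_write_size bytes.  Returns the number of bytes written, -1 if the
 * entry does not fit, and 0 when i is past the last usable slot.
 */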
int entry_dump(uint8_t *b, const struct peer_cache *c, int i, size_t max_write_size)
{
  int res;
  int size = 0;

  if (i && (i >= c->cache_size - 1)) {
    return 0;
  }
  int_cpy(b, c->entries[i].timestamp);
  size += 4;
  res = nodeid_dump(b + size, c->entries[i].id, max_write_size - size);
  if (res < 0) {
    return -1;
  }
  size += res;
  if (c->metadata_size) {
    if (c->metadata_size > max_write_size - size) {
      return -1;
    }
    memcpy(b + size, c->metadata + c->metadata_size * i, c->metadata_size);
    size += c->metadata_size;
  }

  return size;
}

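/*
 * Return a new cache containing copies of c's entries (optionally skipping
 * target), ordered according to the ranking function, or by timestamp when
 * rank is NULL.
 */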
struct peer_cache *cache_rank(const struct peer_cache *c, ranking_function rank, const struct nodeID *target, const void *target_meta)
{
  struct peer_cache *res;
  int i, j, pos;

  res = cache_init(c->cache_size, c->metadata_size, c->max_timestamp);
  if (res == NULL) {
    return res;
  }

  for (i = 0; i < c->current_size; i++) {
    if (!target || !nodeid_equal(c->entries[i].id, target)) {
      pos = 0;
      for (j = 0; j < res->current_size; j++) {
        if (((rank != NULL) && rank(target_meta, c->metadata + (c->metadata_size * i), res->metadata + (res->metadata_size * j)) == 2) ||
            ((rank == NULL) && res->entries[j].timestamp < c->entries[i].timestamp)) {
          pos++;
        }
      }
      if (c->metadata_size) {
        memmove(res->metadata + (pos + 1) * res->metadata_size, res->metadata + pos * res->metadata_size, (res->current_size - pos) * res->metadata_size);
        memcpy(res->metadata + pos * res->metadata_size, c->metadata + (c->metadata_size * i), res->metadata_size);
      }
      for (j = res->current_size; j > pos; j--) {
        res->entries[j] = res->entries[j - 1];
      }
      res->entries[pos].id = nodeid_dup(c->entries[i].id);
      res->entries[pos].timestamp = c->entries[i].timestamp;
      res->current_size++;
    }
  }

  return res;
}

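/*
 * Merge c1 and c2 into a newly allocated cache: all of c1's entries plus the
 * c2 entries that are not already present; when a node appears in both and
 * c2's copy is fresher, its timestamp and metadata win.  The nodeIDs are
 * moved (not duplicated) out of c1 and c2, so the source caches are
 * consumed.  The resulting size is stored in *size.
 */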
struct peer_cache *cache_union(const struct peer_cache *c1, const struct peer_cache *c2, int *size)
{
  int n, pos;
  struct peer_cache *new_cache;
  uint8_t *meta;

  if (c1->metadata_size != c2->metadata_size) {
    return NULL;
  }

  new_cache = cache_init(c1->current_size + c2->current_size, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;

  for (n = 0; n < c1->current_size; n++) {
    if (new_cache->metadata_size) {
      memcpy(meta, c1->metadata + n * c1->metadata_size, c1->metadata_size);
      meta += new_cache->metadata_size;
    }
    new_cache->entries[new_cache->current_size++] = c1->entries[n];
    c1->entries[n].id = NULL;
  }

  for (n = 0; n < c2->current_size; n++) {
    pos = in_cache(new_cache, &c2->entries[n]);
    if (pos >= 0 && new_cache->entries[pos].timestamp > c2->entries[n].timestamp) {
      cache_metadata_update(new_cache, c2->entries[n].id, c2->metadata + n * c2->metadata_size, c2->metadata_size);
      new_cache->entries[pos].timestamp = c2->entries[n].timestamp;
    }
    if (pos < 0) {
      if (new_cache->metadata_size) {
        memcpy(meta, c2->metadata + n * c2->metadata_size, c2->metadata_size);
        meta += new_cache->metadata_size;
      }
      new_cache->entries[new_cache->current_size++] = c2->entries[n];
      c2->entries[n].id = NULL;
    }
  }
  *size = new_cache->current_size;

  return new_cache;
}

int cache_max_size(const struct peer_cache *c)
{
  return c->cache_size;
}

int cache_current_size(const struct peer_cache *c)
{
  return c->current_size;
}

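/*
 * Grow or shrink the cache to hold size entries; new slots are zeroed and,
 * when shrinking, current_size is clamped to the new size.  Returns the
 * current number of entries.
 */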
int cache_resize(struct peer_cache *c, int size)
{
  int dif = size - c->cache_size;

  if (!dif) {
    return c->current_size;
  }

  c->entries = realloc(c->entries, sizeof(struct cache_entry) * size);
  if (dif > 0) {
    memset(c->entries + c->cache_size, 0, sizeof(struct cache_entry) * dif);
  } else if (c->current_size > size) {
    c->current_size = size;
  }

  if (c->metadata_size) {
    c->metadata = realloc(c->metadata, c->metadata_size * size);
    if (dif > 0) {
      memset(c->metadata + c->metadata_size * c->cache_size, 0, c->metadata_size * dif);
    }
  }

  c->cache_size = size;

  return c->current_size;
}

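/*
 * Build a new cache of at most newsize entries by merging c1 and c2,
 * preferring fresher entries (smaller timestamps) and skipping duplicates.
 * *source records with bits 0x01 and 0x02 whether entries were taken from c1
 * and/or c2.  As in cache_union(), the nodeIDs are moved out of the source
 * caches.
 */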
struct peer_cache *merge_caches(const struct peer_cache *c1, const struct peer_cache *c2, int newsize, int *source)
{
  int n1, n2;
  struct peer_cache *new_cache;
  uint8_t *meta;

  new_cache = cache_init(newsize, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;
  *source = 0;
  for (n1 = 0, n2 = 0; new_cache->current_size < new_cache->cache_size;) {
    if ((n1 == c1->current_size) && (n2 == c2->current_size)) {
      return new_cache;
    }
    if (n1 == c1->current_size) {
      if (in_cache(new_cache, &c2->entries[n2]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c2->entries[n2];
        c2->entries[n2].id = NULL;
        *source |= 0x02;
      }
      n2++;
    } else if (n2 == c2->current_size) {
      if (in_cache(new_cache, &c1->entries[n1]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c1->entries[n1];
        c1->entries[n1].id = NULL;
        *source |= 0x01;
      }
      n1++;
    } else {
      if (c2->entries[n2].timestamp > c1->entries[n1].timestamp) {
        if (in_cache(new_cache, &c1->entries[n1]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c1->entries[n1];
          c1->entries[n1].id = NULL;
          *source |= 0x01;
        }
        n1++;
      } else {
        if (in_cache(new_cache, &c2->entries[n2]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c2->entries[n2];
          c2->entries[n2].id = NULL;
          *source |= 0x02;
        }
        n2++;
      }
    }
  }

  return new_cache;
}

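/*
 * Debugging helpers: cache_check() crashes (through a NULL-pointer write) if
 * the cache is not ordered by timestamp or contains duplicate nodeIDs;
 * cache_log() dumps the cache content to stderr.
 */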
void cache_check(const struct peer_cache *c)
{
  int i, j;
  int ts = 0;

  for (i = 0; i < c->current_size; i++) {
    if (c->entries[i].timestamp < ts) {
      fprintf(stderr, "WTF!!!! %d.ts=%d > %d.ts=%d!!!\n",
              i - 1, ts, i, c->entries[i].timestamp);
      *((char *)0) = 1;
    }
    ts = c->entries[i].timestamp;
    for (j = i + 1; j < c->current_size; j++) {
      if (nodeid_equal(c->entries[i].id, c->entries[j].id)) {
        fprintf(stderr, "WTF!!!! %d = %d!!!\n", i, j);
        *((char *)0) = 1;
      }
    }
  }
}

void cache_log(const struct peer_cache *c, const char *name)
{
  char addr[256];
  int i;
  const char default_name[] = "none";
  const char *actual_name = (name) ? name : default_name;

  fprintf(stderr, "### dumping cache (%s)\n", actual_name);
  fprintf(stderr, "\tcache_size=%d, current_size=%d\n", c->cache_size, c->current_size);
  for (i = 0; i < c->current_size; i++) {
    node_addr(c->entries[i].id, addr, 256);
    fprintf(stderr, "\t%d: %s[%d]\n", i, addr, c->entries[i].timestamp);
  }
  fprintf(stderr, "\n-----------------------------\n");
}