grapes/src/Cache/topocache.c @ 848dc803

/*
 *  Copyright (c) 2010 Luca Abeni
 *
 *  This is free software; see lgpl-2.1.txt
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <stdio.h>
#undef NDEBUG
#include <assert.h>

#include "net_helper.h"
#include "topocache.h"
#include "int_coding.h"

struct cache_entry {
  struct nodeID *id;
  uint32_t timestamp;
};
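
/*
 * Set of known peers: up to cache_size entries, current_size of which are
 * in use.  A lower timestamp means a fresher entry; cache_update()/cache_delay()
 * age the entries and, when max_timestamp is non-zero, drop the ones that
 * become too old.  metadata, when metadata_size != 0, is a parallel array
 * storing metadata_size bytes for each entry.
 */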
struct peer_cache {
  struct cache_entry *entries;
  int cache_size;
  int current_size;
  int metadata_size;
  uint8_t *metadata;
  int max_timestamp;
};

static int cache_insert(struct peer_cache *c, struct cache_entry *e, const void *meta)
{
  int i, position;

  if (c->current_size == c->cache_size) {
    return -2;
  }
  position = 0;
  for (i = 0; i < c->current_size; i++) {
    assert(e->id);
    assert(c->entries[i].id);
    if (c->entries[i].timestamp <= e->timestamp) {
      position = i + 1;
    }
    if (nodeid_equal(e->id, c->entries[i].id)) {
      if (c->entries[i].timestamp > e->timestamp) {
        assert(i >= position);
        nodeid_free(c->entries[i].id);
        if (position != i) {
          memmove(c->entries + position + 1, c->entries + position, sizeof(struct cache_entry) * (i - position));
          memmove(c->metadata + (position + 1) * c->metadata_size, c->metadata + position * c->metadata_size, (i - position) * c->metadata_size);
        }
        /* Store the fresher entry at its sorted position, after shifting the
           intervening entries up by one slot */
        c->entries[position] = *e;
        memcpy(c->metadata + position * c->metadata_size, meta, c->metadata_size);

        return position;
      }

      return -1;
    }
  }

  if (position != c->current_size) {
    memmove(c->entries + position + 1, c->entries + position, sizeof(struct cache_entry) * (c->current_size - position));
    memmove(c->metadata + (position + 1) * c->metadata_size, c->metadata + position * c->metadata_size, (c->current_size - position) * c->metadata_size);
  }
  c->current_size++;
  c->entries[position] = *e;
  memcpy(c->metadata + position * c->metadata_size, meta, c->metadata_size);

  return position;
}

struct nodeID *nodeid(const struct peer_cache *c, int i)
{
  if (i < c->current_size) {
    return c->entries[i].id;
  }

  return NULL;
}

const void *get_metadata(const struct peer_cache *c, int *size)
{
  *size = c->metadata_size;
  return c->metadata;
}

int cache_metadata_update(struct peer_cache *c, const struct nodeID *p, const void *meta, int meta_size)
{
  int i;

  if (!meta_size || meta_size != c->metadata_size) {
    return -3;
  }
  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, p)) {
      memcpy(c->metadata + i * meta_size, meta, meta_size);
      return 1;
    }
  }

  return 0;
}

int cache_add_ranked(struct peer_cache *c, struct nodeID *neighbour, const void *meta, int meta_size, ranking_function f, const void *tmeta)
{
  int i, pos = 0;

  if (meta_size && meta_size != c->metadata_size) {
    return -3;
  }
  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, neighbour)) {
      if (f != NULL) {
        cache_del(c,neighbour);
        if (i == c->current_size) break;
      } else {
        cache_metadata_update(c,neighbour,meta,meta_size);
        return -1;
      }
    }
    if ((f != NULL) && f(tmeta, meta, c->metadata+(c->metadata_size * i)) == 2) {
      pos++;
    }
  }
  if (c->current_size == c->cache_size) {
    return -2;
  }
  if (c->metadata_size) {
    memmove(c->metadata + (pos + 1) * c->metadata_size, c->metadata + pos * c->metadata_size, (c->current_size - pos) * c->metadata_size);
    if (meta_size) {
      memcpy(c->metadata + pos * c->metadata_size, meta, meta_size);
    } else {
      memset(c->metadata + pos * c->metadata_size, 0, c->metadata_size);
    }
  }
  for (i = c->current_size; i > pos; i--) {
    c->entries[i] = c->entries[i - 1];
  }
  c->entries[pos].id = nodeid_dup(neighbour);
  c->entries[pos].timestamp = 1;
  c->current_size++;

  return c->current_size;
}

int cache_add(struct peer_cache *c, struct nodeID *neighbour, const void *meta, int meta_size)
{
  return cache_add_ranked(c, neighbour, meta, meta_size, NULL, NULL);
}

int cache_del(struct peer_cache *c, const struct nodeID *neighbour)
{
  int i;
  int found = 0;

  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, neighbour)) {
      nodeid_free(c->entries[i].id);
      c->current_size--;
      found = 1;
      if (c->metadata_size && (i < c->current_size)) {
        memmove(c->metadata + c->metadata_size * i,
                c->metadata + c->metadata_size * (i + 1),
                c->metadata_size * (c->current_size - i));
      }
    }
    if (found && (i < c->current_size)) {
      c->entries[i] = c->entries[i + 1];
    }
  }

  return c->current_size;
}

void cache_delay(struct peer_cache *c, int dts)
{
  int i;

  for (i = 0; i < c->current_size; i++) {
    if (c->max_timestamp && (c->entries[i].timestamp + dts > c->max_timestamp)) {
      int j = i;

      while(j < c->current_size && c->entries[j].id) {
        nodeid_free(c->entries[j].id);
        c->entries[j++].id = NULL;
      }
      c->current_size = i;        /* The cache is ordered by timestamp...
                                     all the other entries will be older than
                                     this one, so remove all of them
                                   */
    } else {
      c->entries[i].timestamp = c->entries[i].timestamp + dts > 0 ? c->entries[i].timestamp + dts : 0;
    }
  }
}

void cache_update(struct peer_cache *c)
{
  cache_delay(c, 1);
}


struct peer_cache *cache_init(int n, int metadata_size, int max_timestamp)
{
  struct peer_cache *res;

  res = malloc(sizeof(struct peer_cache));
  if (res == NULL) {
    return NULL;
  }
  res->max_timestamp = max_timestamp;
  res->cache_size = n;
  res->current_size = 0;
  res->entries = malloc(sizeof(struct cache_entry) * n);
  if (res->entries == NULL) {
    free(res);

    return NULL;
  }

  memset(res->entries, 0, sizeof(struct cache_entry) * n);
  if (metadata_size) {
    res->metadata = malloc(metadata_size * n);
  } else {
    res->metadata = NULL;
  }

  if (res->metadata) {
    res->metadata_size = metadata_size;
    memset(res->metadata, 0, metadata_size * n);
  } else {
    res->metadata_size = 0;
  }

  return res;
}

struct peer_cache *cache_copy(const struct peer_cache *c1)
{
  int n;
  struct peer_cache *new_cache;

  new_cache = cache_init(c1->current_size, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  for (n = 0; n < c1->current_size; n++) {
    new_cache->entries[new_cache->current_size].id = nodeid_dup(c1->entries[n].id);
    new_cache->entries[new_cache->current_size++].timestamp = c1->entries[n].timestamp;
  }
  if (new_cache->metadata_size) {
    memcpy(new_cache->metadata, c1->metadata, c1->metadata_size * c1->current_size);
  }

  return new_cache;
}

void cache_free(struct peer_cache *c)
{
  int i;

  for (i = 0; i < c->current_size; i++) {
    if (c->entries[i].id) {
      nodeid_free(c->entries[i].id);
    }
  }
  free(c->entries);
  free(c->metadata);
  free(c);
}

static int in_cache(const struct peer_cache *c, const struct cache_entry *elem)
{
  int i;

  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, elem->id)) {
      return i;
    }
  }

  return -1;
}

struct nodeID *rand_peer(const struct peer_cache *c, void **meta, int max)
{
  int j;

  if (c->current_size == 0) {
    return NULL;
  }
  if (!max || max >= c->current_size)
    max = c->current_size;
  else
    ++max;
  j = (rand() / (RAND_MAX + 1.0)) * max;    /* uniform index in [0, max) */

  if (meta) {
    *meta = c->metadata + (j * c->metadata_size);
  }

  return c->entries[j].id;
}

struct nodeID *last_peer(const struct peer_cache *c)
{
  if (c->current_size == 0) {
    return NULL;
  }

  return c->entries[c->current_size - 1].id;
}

struct peer_cache *rand_cache(struct peer_cache *c, int n)
{
  struct peer_cache *res;

  cache_check(c);
  if (c->current_size < n) {
    n = c->current_size;
  }
  res = cache_init(n, c->metadata_size, c->max_timestamp);

  while(res->current_size < n) {
    int j;

    j = (rand() / (RAND_MAX + 1.0)) * c->current_size;    /* uniform index in [0, current_size) */
    cache_insert(res, c->entries + j, c->metadata + c->metadata_size * j);
    c->current_size--;
    memmove(c->entries + j, c->entries + j + 1, sizeof(struct cache_entry) * (c->current_size - j));
    memmove(c->metadata + c->metadata_size * j, c->metadata + c->metadata_size * (j + 1), c->metadata_size * (c->current_size - j));
    c->entries[c->current_size].id = NULL;
    cache_check(c);
  }

  return res;
}

struct peer_cache *entries_undump(const uint8_t *buff, int size)
{
  struct peer_cache *res;
  int i = 0;
  const uint8_t *p = buff;
  uint8_t *meta;
  int cache_size, metadata_size;

  cache_size = int_rcpy(buff);
  metadata_size = int_rcpy(buff + 4);
  p = buff + 8;
  res = cache_init(cache_size, metadata_size, 0);
  meta = res->metadata;
  while (p - buff < size) {
    int len;

    res->entries[i].timestamp = int_rcpy(p);
    p += sizeof(uint32_t);
    res->entries[i++].id = nodeid_undump(p, &len);
    p += len;
    if (metadata_size) {
      memcpy(meta, p, metadata_size);
      p += metadata_size;
      meta += metadata_size;
    }
  }
  res->current_size = i;
  assert(p - buff == size);

  return res;
}

int cache_header_dump(uint8_t *b, const struct peer_cache *c, int include_me)
{
  int_cpy(b, c->cache_size + (include_me ? 1 : 0));
  int_cpy(b + 4, c->metadata_size);

  return 8;
}

int entry_dump(uint8_t *b, const struct peer_cache *c, int i, size_t max_write_size)
{
  int res;
  int size = 0;

  if (i && (i >= c->cache_size - 1)) {
    return 0;
  }
  int_cpy(b, c->entries[i].timestamp);
  size += 4;
  res = nodeid_dump(b + size, c->entries[i].id, max_write_size - size);
  if (res < 0) {
    return -1;
  }
  size += res;
  if (c->metadata_size) {
    if (c->metadata_size > max_write_size - size) {
      return -1;
    }
    memcpy(b + size, c->metadata + c->metadata_size * i, c->metadata_size);
    size += c->metadata_size;
  }

  return size;
}
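
/*
 * Sketch (not part of the original file): cache_header_dump(), entry_dump()
 * and entries_undump() define the wire format of a cache.  A whole cache can
 * be serialized and parsed back roughly like this ("buff" and BUFFSIZE are
 * placeholder names, error handling is omitted):
 *
 *   uint8_t buff[BUFFSIZE];
 *   int pos, i, res;
 *
 *   pos = cache_header_dump(buff, c, 0);
 *   for (i = 0; nodeid(c, i) != NULL; i++) {
 *     res = entry_dump(buff + pos, c, i, BUFFSIZE - pos);
 *     if (res < 0) {
 *       break;                    // not enough room left for this entry
 *     }
 *     pos += res;
 *   }
 *   // ...send the first pos bytes; the receiver rebuilds the cache with:
 *   struct peer_cache *remote = entries_undump(buff, pos);
 */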

struct peer_cache *cache_rank (const struct peer_cache *c, ranking_function rank, const struct nodeID *target, const void *target_meta)
{
  struct peer_cache *res;
  int i, j, pos;

  res = cache_init(c->cache_size, c->metadata_size, c->max_timestamp);
  if (res == NULL) {
    return res;
  }

  for (i = 0; i < c->current_size; i++) {
    if (!target || !nodeid_equal(c->entries[i].id, target)) {
      pos = 0;
      for (j = 0; j < res->current_size; j++) {
        if (((rank != NULL) && rank(target_meta, c->metadata+(c->metadata_size * i), res->metadata+(res->metadata_size * j)) == 2) ||
            ((rank == NULL) && res->entries[j].timestamp < c->entries[i].timestamp)) {
          pos++;
        }
      }
      if (c->metadata_size) {
        memmove(res->metadata + (pos + 1) * res->metadata_size, res->metadata + pos * res->metadata_size, (res->current_size - pos) * res->metadata_size);
        memcpy(res->metadata + pos * res->metadata_size, c->metadata+(c->metadata_size * i), res->metadata_size);
      }
      for (j = res->current_size; j > pos; j--) {
        res->entries[j] = res->entries[j - 1];
      }
      res->entries[pos].id = nodeid_dup(c->entries[i].id);
      res->entries[pos].timestamp = c->entries[i].timestamp;
      res->current_size++;
    }
  }

  return res;
}

struct peer_cache *cache_union(const struct peer_cache *c1, const struct peer_cache *c2, int *size)
{
  int n, pos;
  struct peer_cache *new_cache;
  uint8_t *meta;

  if (c1->metadata_size != c2->metadata_size) {
    return NULL;
  }

  new_cache = cache_init(c1->current_size + c2->current_size, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;

  for (n = 0; n < c1->current_size; n++) {
    if (new_cache->metadata_size) {
      memcpy(meta, c1->metadata + n * c1->metadata_size, c1->metadata_size);
      meta += new_cache->metadata_size;
    }
    new_cache->entries[new_cache->current_size++] = c1->entries[n];
    c1->entries[n].id = NULL;
  }

  for (n = 0; n < c2->current_size; n++) {
    pos = in_cache(new_cache, &c2->entries[n]);
    if (pos >= 0 && new_cache->entries[pos].timestamp > c2->entries[n].timestamp) {
      cache_metadata_update(new_cache, c2->entries[n].id, c2->metadata + n * c2->metadata_size, c2->metadata_size);
      new_cache->entries[pos].timestamp = c2->entries[n].timestamp;
    }
    if (pos < 0) {
      if (new_cache->metadata_size) {
        memcpy(meta, c2->metadata + n * c2->metadata_size, c2->metadata_size);
        meta += new_cache->metadata_size;
      }
      new_cache->entries[new_cache->current_size++] = c2->entries[n];
      c2->entries[n].id = NULL;
    }
  }
  *size = new_cache->current_size;

  return new_cache;
}

int cache_resize (struct peer_cache *c, int size)
{
  int dif = size - c->cache_size;

  if (!dif) {
    return c->current_size;
  }

  c->entries = realloc(c->entries, sizeof(struct cache_entry) * size);
  if (dif > 0) {
    memset(c->entries + c->cache_size, 0, sizeof(struct cache_entry) * dif);
  } else if (c->current_size > size) {
    c->current_size = size;
  }

  if (c->metadata_size) {
    c->metadata = realloc(c->metadata, c->metadata_size * size);
    if (dif > 0) {
      memset(c->metadata + c->metadata_size * c->cache_size, 0, c->metadata_size * dif);
    }
  }

  c->cache_size = size;

  return c->current_size;
}

struct peer_cache *merge_caches(const struct peer_cache *c1, const struct peer_cache *c2, int newsize, int *source)
{
  int n1, n2;
  struct peer_cache *new_cache;
  uint8_t *meta;

  new_cache = cache_init(newsize, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;
  *source = 0;
  for (n1 = 0, n2 = 0; new_cache->current_size < new_cache->cache_size;) {
    if ((n1 == c1->current_size) && (n2 == c2->current_size)) {
      return new_cache;
    }
    if (n1 == c1->current_size) {
      if (in_cache(new_cache, &c2->entries[n2]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c2->entries[n2];
        c2->entries[n2].id = NULL;
        *source |= 0x02;
      }
      n2++;
    } else if (n2 == c2->current_size) {
      if (in_cache(new_cache, &c1->entries[n1]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c1->entries[n1];
        c1->entries[n1].id = NULL;
        *source |= 0x01;
      }
      n1++;
    } else {
      if (c2->entries[n2].timestamp > c1->entries[n1].timestamp) {
        if (in_cache(new_cache, &c1->entries[n1]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c1->entries[n1];
          c1->entries[n1].id = NULL;
          *source |= 0x01;
        }
        n1++;
      } else {
        if (in_cache(new_cache, &c2->entries[n2]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c2->entries[n2];
          c2->entries[n2].id = NULL;
          *source |= 0x02;
        }
        n2++;
      }
    }
  }

  return new_cache;
}

void cache_check(const struct peer_cache *c)
{
  int i, j;

  for (i = 0; i < c->current_size; i++) {
    for (j = i + 1; j < c->current_size; j++) {
      assert(!nodeid_equal(c->entries[i].id, c->entries[j].id));
    }
  }
}
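
Usage sketch (not part of topocache.c): the typical lifecycle of a peer_cache as suggested by the API above. create_node() is assumed to be the net_helper.h constructor for a nodeID; everything else is defined in this file.

#include "net_helper.h"
#include "topocache.h"

int main(void)
{
  struct peer_cache *cache;
  struct nodeID *peer;

  cache = cache_init(32, 0, 10);            /* 32 slots, no metadata, max_timestamp 10 */
  peer = create_node("192.168.0.2", 6666);  /* assumed net_helper.h constructor */

  cache_add(cache, peer, NULL, 0);          /* the cache stores its own copy of the nodeID */
  nodeid_free(peer);

  cache_update(cache);                      /* age every entry by one round */
  peer = rand_peer(cache, NULL, 0);         /* random neighbour; owned by the cache, do not free */

  cache_free(cache);

  return 0;
}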