Statistics
| Branch: | Revision:

grapes / src / Cache / topocache.c @ 505990dd

History | View | Annotate | Download (14.7 KB)

1
/*
2
 *  Copyright (c) 2010 Luca Abeni
3
 *
4
 *  This is free software; see lgpl-2.1.txt
5
 */
6

    
7
#include <stdint.h>
8
#include <stdlib.h>
9
#include <string.h>
10

    
11
#include <stdio.h>
12
#undef NDEBUG
13
#include <assert.h>
14

    
15
#include "net_helper.h"
16
#include "topocache.h"
17
#include "int_coding.h"
18

    
19
/* One cache slot: a node identifier plus the logical age of the
   information about that node (a lower timestamp means fresher). */
struct cache_entry {
  struct nodeID *id;    /* owned by the cache; freed on eviction */
  uint32_t timestamp;   /* incremented each round by cache_update() */
};
23

    
24
/* A peer cache ("view"): an array of cache_entry kept sorted by
   increasing timestamp, with an optional parallel array of fixed-size
   per-entry metadata. */
struct peer_cache {
  struct cache_entry *entries;  /* sorted by increasing timestamp */
  int cache_size;               /* allocated capacity (slots) */
  int current_size;             /* occupied slots */
  int metadata_size;            /* bytes of metadata per entry (0 = none) */
  uint8_t *metadata;            /* cache_size * metadata_size bytes, parallel to entries */
  int max_timestamp;            /* age limit used by cache_update() (0 = no limit) */
};
32

    
33
static int cache_insert(struct peer_cache *c, struct cache_entry *e, const void *meta)
34
{
35
  int i, position;
36

    
37
  if (c->current_size == c->cache_size) {
38
    return -2;
39
  }
40
  position = 0;
41
  for (i = 0; i < c->current_size; i++) {
42
    assert(e->id);
43
    assert(c->entries[i].id);
44
    if (c->entries[i].timestamp <= e->timestamp) {
45
      position = i + 1;
46
    }
47
    if (nodeid_equal(e->id, c->entries[i].id)) {
48
      if (c->entries[i].timestamp > e->timestamp) {
49
        assert(i >= position);
50
        nodeid_free(c->entries[i].id);
51
        c->entries[i] = *e;
52
        memcpy(c->metadata + i * c->metadata_size, meta, c->metadata_size);
53
        if (position != i) {
54
          memmove(c->entries + position + 1, c->entries + position, sizeof(struct cache_entry) * (i - position));
55
          memmove(c->metadata + (position + 1) * c->metadata_size, c->metadata + position * c->metadata_size, (i -position) * c->metadata_size);
56
        }
57

    
58
        return position;
59
      }
60

    
61
      return -1;
62
    }
63
  }
64

    
65
  if (position != c->current_size) {
66
    memmove(c->entries + position + 1, c->entries + position, sizeof(struct cache_entry) * (c->current_size - position));
67
    memmove(c->metadata + (position + 1) * c->metadata_size, c->metadata + position * c->metadata_size, (c->current_size - position) * c->metadata_size);
68
  }
69
  c->current_size++;
70
  c->entries[position] = *e;
71
  memcpy(c->metadata + position * c->metadata_size, meta, c->metadata_size);
72

    
73
  return position;
74
}
75

    
76
struct nodeID *nodeid(const struct peer_cache *c, int i)
77
{
78
  if (i < c->current_size) {
79
    return c->entries[i].id;
80
  }
81

    
82
  return NULL;
83
}
84

    
85
const void *get_metadata(const struct peer_cache *c, int *size)
86
{
87
  *size = c->metadata_size;
88
  return c->metadata;
89
}
90

    
91
int cache_metadata_update(struct peer_cache *c, const struct nodeID *p, const void *meta, int meta_size)
92
{
93
  int i;
94

    
95
  if (!meta_size || meta_size != c->metadata_size) {
96
    return -3;
97
  }
98
  for (i = 0; i < c->current_size; i++) {
99
    if (nodeid_equal(c->entries[i].id, p)) {
100
      memcpy(c->metadata + i * meta_size, meta, meta_size);
101
      return 1;
102
    }
103
  }
104

    
105
  return 0;
106
}
107

    
108
/*
 * Add neighbour to the cache at the position chosen by the ranking
 * function f (f(tmeta, meta, other_meta) == 2 means the cached entry
 * outranks the new one, pushing the insertion point down).
 * With f == NULL the entry is inserted at the head; if the node is
 * already cached only its metadata is refreshed and -1 is returned.
 * Returns the new cache size on success, -2 if the cache is full,
 * -3 on a metadata size mismatch.
 */
int cache_add_ranked(struct peer_cache *c, struct nodeID *neighbour, const void *meta, int meta_size, ranking_function f, const void *tmeta)
{
  int i, pos = 0;

  if (meta_size && meta_size != c->metadata_size) {
    return -3;
  }
  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, neighbour)) {
      if (f != NULL) {
        /* Ranked add: drop the stale copy, then re-insert below.
           NOTE(review): after cache_del() the entries have shifted left,
           so the next iteration (i+1) skips one entry's ranking test —
           looks like an off-by-one to confirm against upstream. */
        cache_del(c,neighbour);
        if (i == c->current_size) break;
      } else {
          /* Unranked add of a known node: just refresh its metadata. */
          cache_metadata_update(c,neighbour,meta,meta_size);
          return -1;
      }
    }
    /* Each cached entry that outranks the new one moves the insertion
       point one slot further down. */
    if ((f != NULL) && f(tmeta, meta, c->metadata+(c->metadata_size * i)) == 2) {
      pos++;
    }
  }
  if (c->current_size == c->cache_size) {
    return -2;
  }
  /* Open a hole at pos in the metadata area; zero-fill it when the
     caller supplied no metadata. */
  if (c->metadata_size) {
    memmove(c->metadata + (pos + 1) * c->metadata_size, c->metadata + pos * c->metadata_size, (c->current_size - pos) * c->metadata_size);
    if (meta_size) {
      memcpy(c->metadata + pos * c->metadata_size, meta, meta_size);
    } else {
      memset(c->metadata + pos * c->metadata_size, 0, c->metadata_size);
    }
  }
  /* Shift the entries right by one and store a duplicate of neighbour. */
  for (i = c->current_size; i > pos; i--) {
    c->entries[i] = c->entries[i - 1];
  }
  c->entries[pos].id = nodeid_dup(neighbour);
  c->entries[pos].timestamp = 1;
  c->current_size++;

  return c->current_size;
}
149

    
150
int cache_add(struct peer_cache *c, struct nodeID *neighbour, const void *meta, int meta_size)
151
{
152
  return cache_add_ranked(c, neighbour, meta, meta_size, NULL, NULL);
153
}
154

    
155
/*
 * Remove neighbour from the cache, if present, releasing its nodeID and
 * compacting both the entry array and the metadata area.
 * Returns the number of entries remaining in the cache.
 */
int cache_del(struct peer_cache *c, const struct nodeID *neighbour)
{
  int i;
  int found = 0;

  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, neighbour)) {
      nodeid_free(c->entries[i].id);
      c->current_size--;
      found = 1;
      /* Metadata is compacted in a single memmove here; the entries
         themselves are slid left one at a time by the loop below. */
      if (c->metadata_size && (i < c->current_size)) {
        memmove(c->metadata + c->metadata_size * i,
                c->metadata + c->metadata_size * (i + 1),
                c->metadata_size * (c->current_size - i));
      }
    }
    /* After the match has been removed, shift every following entry
       left by one as the loop advances. */
    if (found && (i < c->current_size)) {
      c->entries[i] = c->entries[i + 1];
    }
  }

  return c->current_size;
}
178

    
179
/*
 * Age the cache by one round: every entry's timestamp is incremented.
 * When max_timestamp is set and an entry has reached it, that entry and
 * everything after it are freed and dropped — the array is ordered by
 * timestamp, so all following entries are at least as old.
 */
void cache_update(struct peer_cache *c)
{
  int i;
  
  for (i = 0; i < c->current_size; i++) {
    if (c->max_timestamp && (c->entries[i].timestamp == c->max_timestamp)) {
      int j = i;

      /* Free this entry and every (older) one after it. */
      while(j < c->current_size && c->entries[j].id) {
        nodeid_free(c->entries[j].id);
        c->entries[j++].id = NULL;
      }
      c->current_size = i;        /* The cache is ordered by timestamp...
                                   all the other entries will be older than
                                   this one, so remove all of them
                                */
    } else {
      c->entries[i].timestamp++;
    }
  }
}
200

    
201
struct peer_cache *cache_init(int n, int metadata_size, int max_timestamp)
202
{
203
  struct peer_cache *res;
204

    
205
  res = malloc(sizeof(struct peer_cache));
206
  if (res == NULL) {
207
    return NULL;
208
  }
209
  res->max_timestamp = max_timestamp;
210
  res->cache_size = n;
211
  res->current_size = 0;
212
  res->entries = malloc(sizeof(struct cache_entry) * n);
213
  if (res->entries == NULL) {
214
    free(res);
215

    
216
    return NULL;
217
  }
218
  
219
  memset(res->entries, 0, sizeof(struct cache_entry) * n);
220
  if (metadata_size) {
221
    res->metadata = malloc(metadata_size * n);
222
  } else {
223
    res->metadata = NULL;
224
  }
225

    
226
  if (res->metadata) {
227
    res->metadata_size = metadata_size;
228
    memset(res->metadata, 0, metadata_size * n);
229
  } else {
230
    res->metadata_size = 0;
231
  }
232

    
233
  return res;
234
}
235

    
236
void cache_free(struct peer_cache *c)
237
{
238
  int i;
239

    
240
  for (i = 0; i < c->current_size; i++) {
241
    if(c->entries[i].id) {
242
      nodeid_free(c->entries[i].id);
243
    }
244
  }
245
  free(c->entries);
246
  free(c->metadata);
247
  free(c);
248
}
249

    
250
static int in_cache(const struct peer_cache *c, const struct cache_entry *elem)
251
{
252
  int i;
253

    
254
  for (i = 0; i < c->current_size; i++) {
255
    if (nodeid_equal(c->entries[i].id, elem->id)) {
256
      return i;
257
    }
258
  }
259

    
260
  return -1;
261
}
262

    
263
struct nodeID *rand_peer(const struct peer_cache *c, void **meta, int max)
264
{
265
  int j;
266

    
267
  if (c->current_size == 0) {
268
    return NULL;
269
  }
270
  if (!max || max >= c->current_size)
271
    max = c->current_size;
272
  else
273
    ++max;
274
  j = ((double)rand() / (double)RAND_MAX) * max;
275

    
276
  if (meta) {
277
    *meta = c->metadata + (j * c->metadata_size);
278
  }
279

    
280
  return c->entries[j].id;
281
}
282

    
283
struct nodeID *last_peer(const struct peer_cache *c)
284
{
285
  if (c->current_size == 0) {
286
    return NULL;
287
  }
288

    
289
  return c->entries[c->current_size - 1].id;
290
}
291

    
292
struct peer_cache *rand_cache(struct peer_cache *c, int n)
293
{
294
  struct peer_cache *res;
295

    
296
cache_check(c);
297
  if (c->current_size < n) {
298
    n = c->current_size;
299
  }
300
  res = cache_init(n, c->metadata_size, c->max_timestamp);
301

    
302
  while(res->current_size < n) {
303
    int j;
304

    
305
    j = ((double)rand() / (double)RAND_MAX) * c->current_size;
306
    cache_insert(res, c->entries + j, c->metadata + c->metadata_size * j);
307
    c->current_size--;
308
    memmove(c->entries + j, c->entries + j + 1, sizeof(struct cache_entry) * (c->current_size - j));
309
    memmove(c->metadata + c->metadata_size * j, c->metadata + c->metadata_size * (j + 1), c->metadata_size * (c->current_size - j));
310
    c->entries[c->current_size].id = NULL;
311
cache_check(c);
312
  }
313

    
314
  return res;
315
}
316

    
317
struct peer_cache *entries_undump(const uint8_t *buff, int size)
318
{
319
  struct peer_cache *res;
320
  int i = 0;
321
  const uint8_t *p = buff;
322
  uint8_t *meta;
323
  int cache_size, metadata_size;
324

    
325
  cache_size = int_rcpy(buff);
326
  metadata_size = int_rcpy(buff + 4);
327
  p = buff + 8;
328
  res = cache_init(cache_size, metadata_size, 0);
329
  meta = res->metadata;
330
  while (p - buff < size) {
331
    int len;
332

    
333
    res->entries[i].timestamp = int_rcpy(p);
334
    p += sizeof(uint32_t);
335
    res->entries[i++].id = nodeid_undump(p, &len);
336
    p += len;
337
    if (metadata_size) {
338
      memcpy(meta, p, metadata_size);
339
      p += metadata_size;
340
      meta += metadata_size;
341
    }
342
  }
343
  res->current_size = i;
344
  assert(p - buff == size);
345

    
346
  return res;
347
}
348

    
349
int cache_header_dump(uint8_t *b, const struct peer_cache *c, int include_me)
350
{
351
  int_cpy(b, c->cache_size + (include_me ? 1 : 0));
352
  int_cpy(b + 4, c->metadata_size);
353

    
354
  return 8;
355
}
356

    
357
int entry_dump(uint8_t *b, const struct peer_cache *c, int i, size_t max_write_size)
358
{
359
  int res;
360
  int size = 0;
361
 
362
  if (i && (i >= c->cache_size - 1)) {
363
    return 0;
364
  }
365
  int_cpy(b, c->entries[i].timestamp);
366
  size = +4;
367
  res = nodeid_dump(b + size, c->entries[i].id, max_write_size - size);
368
  if (res < 0 ) {
369
    return -1;
370
  }
371
  size += res;
372
  if (c->metadata_size) {
373
    if (c->metadata_size > max_write_size - size) {
374
      return -1;
375
    }
376
    memcpy(b + size, c->metadata + c->metadata_size * i, c->metadata_size);
377
    size += c->metadata_size;
378
  }
379

    
380
  return size;
381
}
382

    
383
/*
 * Build a new cache containing a ranked copy of c, excluding target.
 * Ordering: with a ranking function, rank(target_meta, new, cached) == 2
 * pushes the new entry below the cached one; with rank == NULL entries
 * are ordered by increasing timestamp.  nodeIDs are duplicated, so c is
 * left untouched.  Returns the new cache (caller frees it with
 * cache_free()), or NULL on allocation failure.
 */
struct peer_cache *cache_rank (const struct peer_cache *c, ranking_function rank, const struct nodeID *target, const void *target_meta)
{
  struct peer_cache *res;
  int i,j,pos;

  res = cache_init(c->cache_size, c->metadata_size, c->max_timestamp);
  if (res == NULL) {
    return res;
  }

  for (i = 0; i < c->current_size; i++) {
    if (!target || !nodeid_equal(c->entries[i].id,target)) {
      /* Count how many already-placed entries must stay ahead of i. */
      pos = 0;
      for (j=0; j<res->current_size;j++) {
        if (((rank != NULL) && rank(target_meta, c->metadata+(c->metadata_size * i), res->metadata+(res->metadata_size * j)) == 2) ||
            ((rank == NULL) && res->entries[j].timestamp < c->entries[i].timestamp)) {
          pos++;
        }
      }
      /* Open a hole at pos in metadata and entries, then drop the copy in. */
      if (c->metadata_size) {
        memmove(res->metadata + (pos + 1) * res->metadata_size, res->metadata + pos * res->metadata_size, (res->current_size - pos) * res->metadata_size);
        memcpy(res->metadata + pos * res->metadata_size, c->metadata+(c->metadata_size * i), res->metadata_size);
      }
      for (j = res->current_size; j > pos; j--) {
        res->entries[j] = res->entries[j - 1];
      }
      res->entries[pos].id = nodeid_dup(c->entries[i].id);
      res->entries[pos].timestamp = c->entries[i].timestamp;
      res->current_size++;
    }
  }

  return res;
}
417

    
418
/*
 * Union of c1 and c2 into a new cache sized to hold every entry of both.
 * All of c1 is copied first; c2's entries are appended when not already
 * present, and for nodes appearing in both the fresher (smaller)
 * timestamp and its metadata win.  *size receives the resulting count.
 * NOTE(review): nodeID ownership is *moved* out of c1/c2 (their id slots
 * are set to NULL) despite the const qualifiers — callers must treat
 * both inputs as consumed.
 * Returns NULL when the metadata sizes differ or allocation fails.
 */
struct peer_cache *cache_union(const struct peer_cache *c1, const struct peer_cache *c2, int *size)
{
  int n, pos;
  struct peer_cache *new_cache;
  uint8_t *meta;

  if (c1->metadata_size != c2->metadata_size) {
    return NULL;
  }

  new_cache = cache_init(c1->current_size + c2->current_size, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;

  /* Take everything from c1, stealing the id pointers. */
  for (n = 0; n < c1->current_size; n++) {
    if (new_cache->metadata_size) {
      memcpy(meta, c1->metadata + n * c1->metadata_size, c1->metadata_size);
      meta += new_cache->metadata_size;
    }
    new_cache->entries[new_cache->current_size++] = c1->entries[n];
    c1->entries[n].id = NULL;
  }
  
  for (n = 0; n < c2->current_size; n++) {
    pos = in_cache(new_cache, &c2->entries[n]);
    /* Duplicate node: keep the fresher timestamp and its metadata. */
    if (pos >= 0 && new_cache->entries[pos].timestamp > c2->entries[n].timestamp) {
      cache_metadata_update(new_cache, c2->entries[n].id, c2->metadata + n * c2->metadata_size, c2->metadata_size);
      new_cache->entries[pos].timestamp = c2->entries[n].timestamp;
    }
    if (pos < 0) {
      if (new_cache->metadata_size) {
        memcpy(meta, c2->metadata + n * c2->metadata_size, c2->metadata_size);
        meta += new_cache->metadata_size;
      }
      new_cache->entries[new_cache->current_size++] = c2->entries[n];
      c2->entries[n].id = NULL;
    }
  }
  *size = new_cache->current_size;

  return new_cache;
}
463

    
464
int cache_resize (struct peer_cache *c, int size)
465
{
466
  int dif = size - c->cache_size;
467

    
468
  if (!dif) {
469
    return c->current_size;
470
  }
471

    
472
  c->entries = realloc(c->entries, sizeof(struct cache_entry) * size);
473
  if (dif > 0) {
474
    memset(c->entries + c->cache_size, 0, sizeof(struct cache_entry) * dif);
475
  } else if (c->current_size > size) {
476
    c->current_size = size;
477
  }
478

    
479
  if (c->metadata_size) {
480
    c->metadata = realloc(c->metadata, c->metadata_size * size);
481
    if (dif > 0) {
482
      memset(c->metadata + c->metadata_size * c->cache_size, 0, c->metadata_size * dif);
483
    }
484
  }
485

    
486
  c->cache_size = size;
487

    
488
  return c->current_size;
489
}
490
  
491
/*
 * Merge c1 and c2 into a new cache of capacity newsize, repeatedly
 * taking the entry with the smaller (fresher) timestamp from the head
 * of either input and skipping nodes already present in the result.
 * *source is filled as a bitmask: 0x01 set when something was taken
 * from c1, 0x02 when something was taken from c2.
 * NOTE(review): the nodeID pointers are moved (not duplicated) into the
 * result and the source slots are NULLed, despite the const qualifiers
 * — callers must treat both inputs as consumed.
 * Returns the new cache, or NULL on allocation failure.
 */
struct peer_cache *merge_caches(const struct peer_cache *c1, const struct peer_cache *c2, int newsize, int *source)
{
  int n1, n2;
  struct peer_cache *new_cache;
  uint8_t *meta;

  new_cache = cache_init(newsize, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;
  *source = 0;
  for (n1 = 0, n2 = 0; new_cache->current_size < new_cache->cache_size;) {
    if ((n1 == c1->current_size) && (n2 == c2->current_size)) {
      /* Both inputs exhausted before the result filled up. */
      return new_cache;
    }
    if (n1 == c1->current_size) {
      /* Only c2 has entries left. */
      if (in_cache(new_cache, &c2->entries[n2]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c2->entries[n2];
        c2->entries[n2].id = NULL;
        *source |= 0x02;
      }
      n2++;
    } else if (n2 == c2->current_size) {
      /* Only c1 has entries left. */
      if (in_cache(new_cache, &c1->entries[n1]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c1->entries[n1];
        c1->entries[n1].id = NULL;
        *source |= 0x01;
      }
      n1++;
    } else {
      /* Both have entries: take the fresher head (smaller timestamp). */
      if (c2->entries[n2].timestamp > c1->entries[n1].timestamp) {
        if (in_cache(new_cache, &c1->entries[n1]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c1->entries[n1];
          c1->entries[n1].id = NULL;
          *source |= 0x01;
        }
        n1++;
      } else {
        if (in_cache(new_cache, &c2->entries[n2]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c2->entries[n2];
          c2->entries[n2].id = NULL;
          *source |= 0x02;
        }
        n2++;
      }
    }
  }

  return new_cache;
}
559

    
560
void cache_check(const struct peer_cache *c)
561
{
562
  int i, j;
563

    
564
  for (i = 0; i < c->current_size; i++) {
565
    for (j = i + 1; j < c->current_size; j++) {
566
      assert(!nodeid_equal(c->entries[i].id, c->entries[j].id));
567
    }
568
  }
569
}