Statistics
| Branch: | Revision:

grapes / src / Cache / topocache.c @ 8738ac23

History | View | Annotate | Download (14.4 KB)

1
/*
2
 *  Copyright (c) 2010 Luca Abeni
3
 *
4
 *  This is free software; see lgpl-2.1.txt
5
 */
6

    
7
#include <stdint.h>
8
#include <stdlib.h>
9
#include <string.h>
10

    
11
#include <stdio.h>
12

    
13
#include "net_helper.h"
14
#include "topocache.h"
15
#include "int_coding.h"
16

    
17
/* One cache slot: a peer identifier plus the logical timestamp ("age")
   used to keep the cache ordered and to expire old entries. */
struct cache_entry {
  struct nodeID *id;    /* owned by the cache; freed on removal (see cache_del/cache_free) */
  uint32_t timestamp;   /* incremented by cache_update(); lower = fresher */
};
21

    
22
/* A bounded set of peers, kept ordered by timestamp, with an optional
   parallel array of fixed-size per-entry metadata. */
struct peer_cache {
  struct cache_entry *entries;  /* cache_size slots; first current_size are valid */
  int cache_size;               /* capacity (number of slots) */
  int current_size;             /* number of entries currently stored */
  int metadata_size;            /* bytes of metadata per entry; 0 = no metadata */
  uint8_t *metadata;            /* parallel to entries: entry i's metadata at i * metadata_size */
  int max_timestamp;            /* entries reaching this age are purged by cache_update(); 0 = never */
};
30

    
31
static int cache_insert(struct peer_cache *c, struct cache_entry *e, const void *meta)
32
{
33
  int i, position;
34

    
35
  if (c->current_size == c->cache_size) {
36
    return -2;
37
  }
38
  position = 0;
39
  for (i = 0; i < c->current_size; i++) {
40
if (e->id == NULL) {fprintf(stderr, "e->ID = NULL!!!\n"); *((char *)0) = 1;}
41
if (c->entries[i].id == NULL) {fprintf(stderr, "entries[%d]->ID = NULL!!!\n", i); exit(-1);}
42
    if (nodeid_equal(e->id, c->entries[i].id)) {
43
      return -1;
44
    }
45
    if (c->entries[i].timestamp <= e->timestamp) {
46
      position = i + 1;
47
    }
48
  }
49

    
50
  memmove(c->entries + position + 1, c->entries + position, sizeof(struct cache_entry) * (c->current_size - position));
51
  memmove(c->metadata + (position + 1) * c->metadata_size, c->metadata + position * c->metadata_size, (c->current_size - position) * c->metadata_size);
52
  c->current_size++;
53
  c->entries[position] = *e;
54
  memcpy(c->metadata + position * c->metadata_size, meta, c->metadata_size);
55

    
56
  return position;
57
}
58

    
59
struct nodeID *nodeid(const struct peer_cache *c, int i)
60
{
61
  if (i < c->current_size) {
62
    return c->entries[i].id;
63
  }
64

    
65
  return NULL;
66
}
67

    
68
const void *get_metadata(const struct peer_cache *c, int *size)
69
{
70
  *size = c->metadata_size;
71
  return c->metadata;
72
}
73

    
74
int cache_metadata_update(struct peer_cache *c, const struct nodeID *p, const void *meta, int meta_size)
75
{
76
  int i;
77

    
78
  if (!meta_size || meta_size != c->metadata_size) {
79
    return -3;
80
  }
81
  for (i = 0; i < c->current_size; i++) {
82
    if (nodeid_equal(c->entries[i].id, p)) {
83
      memcpy(c->metadata + i * meta_size, meta, meta_size);
84
      return 1;
85
    }
86
  }
87

    
88
  return 0;
89
}
90

    
91
/*
 * Add neighbour to the cache at a position chosen by the ranking
 * function f (entries for which f(...) == 2 are kept in front of it);
 * when f is NULL the neighbour's metadata is just refreshed if already
 * present.  The new entry gets timestamp 1.
 * Returns the new cache size, -1 if the node was already cached (f NULL),
 * -2 if the cache is full, or -3 on a metadata size mismatch.
 */
int cache_add_ranked(struct peer_cache *c, struct nodeID *neighbour, const void *meta, int meta_size, ranking_function f, const void *tmeta)
{
  int i, pos = 0;

  if (meta_size && meta_size != c->metadata_size) {
    return -3;
  }
  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, neighbour)) {
      if (f != NULL) {
        /* Ranked mode: drop the old copy, then keep scanning the
           (now shifted) entries from the same index */
        cache_del(c,neighbour);
        if (i == c->current_size) break;
      } else {
          cache_metadata_update(c,neighbour,meta,meta_size);
          return -1;
      }
    }
    /* Count entries that the ranking function prefers over the new one:
       they determine the insertion position */
    if ((f != NULL) && f(tmeta, meta, c->metadata+(c->metadata_size * i)) == 2) {
      pos++;
    }
  }
  if (c->current_size == c->cache_size) {
    return -2;
  }
  if (c->metadata_size) {
    /* Open a metadata slot at pos; zero it if no metadata was supplied */
    memmove(c->metadata + (pos + 1) * c->metadata_size, c->metadata + pos * c->metadata_size, (c->current_size - pos) * c->metadata_size);
    if (meta_size) {
      memcpy(c->metadata + pos * c->metadata_size, meta, meta_size);
    } else {
      memset(c->metadata + pos * c->metadata_size, 0, c->metadata_size);
    }
  }
  /* Shift entries right to open slot pos */
  for (i = c->current_size; i > pos; i--) {
    c->entries[i] = c->entries[i - 1];
  }
  c->entries[pos].id = nodeid_dup(neighbour);
  c->entries[pos].timestamp = 1;
  c->current_size++;

  return c->current_size;
}
132

    
133
int cache_add(struct peer_cache *c, struct nodeID *neighbour, const void *meta, int meta_size)
134
{
135
  return cache_add_ranked(c, neighbour, meta, meta_size, NULL, NULL);
136
}
137

    
138
/*
 * Remove neighbour from the cache, compacting the entries (and their
 * metadata) that follow it.  Returns the resulting cache size.
 */
int cache_del(struct peer_cache *c, const struct nodeID *neighbour)
{
  int i;
  int found = 0;

  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, neighbour)) {
      nodeid_free(c->entries[i].id);
      c->current_size--;
      found = 1;
      /* Close the gap in the parallel metadata array */
      if (c->metadata_size && (i < c->current_size)) {
        memmove(c->metadata + c->metadata_size * i,
                c->metadata + c->metadata_size * (i + 1),
                c->metadata_size * (c->current_size - i));
      }
    }
    /* After a removal, shift each following entry one slot left
       (done incrementally as the loop advances) */
    if (found && (i < c->current_size)) {
      c->entries[i] = c->entries[i + 1];
    }
  }

  return c->current_size;
}
161

    
162
void cache_update(struct peer_cache *c)
163
{
164
  int i;
165
  
166
  for (i = 0; i < c->current_size; i++) {
167
    if (c->max_timestamp && (c->entries[i].timestamp == c->max_timestamp)) {
168
      int j = i;
169

    
170
      while(j < c->current_size && c->entries[j].id) {
171
        nodeid_free(c->entries[j].id);
172
        c->entries[j++].id = NULL;
173
      }
174
      c->current_size = i;        /* The cache is ordered by timestamp...
175
                                   all the other entries wiil be older than
176
                                   this one, so remove all of them
177
                                */
178
    } else {
179
      c->entries[i].timestamp++;
180
    }
181
  }
182
}
183

    
184
struct peer_cache *cache_init(int n, int metadata_size, int max_timestamp)
185
{
186
  struct peer_cache *res;
187

    
188
  res = malloc(sizeof(struct peer_cache));
189
  if (res == NULL) {
190
    return NULL;
191
  }
192
  res->max_timestamp = max_timestamp;
193
  res->cache_size = n;
194
  res->current_size = 0;
195
  res->entries = malloc(sizeof(struct cache_entry) * n);
196
  if (res->entries == NULL) {
197
    free(res);
198

    
199
    return NULL;
200
  }
201
  
202
  memset(res->entries, 0, sizeof(struct cache_entry) * n);
203
  if (metadata_size) {
204
    res->metadata = malloc(metadata_size * n);
205
  } else {
206
    res->metadata = NULL;
207
  }
208

    
209
  if (res->metadata) {
210
    res->metadata_size = metadata_size;
211
    memset(res->metadata, 0, metadata_size * n);
212
  } else {
213
    res->metadata_size = 0;
214
  }
215

    
216
  return res;
217
}
218

    
219
void cache_free(struct peer_cache *c)
220
{
221
  int i;
222

    
223
  for (i = 0; i < c->current_size; i++) {
224
    if(c->entries[i].id) {
225
      nodeid_free(c->entries[i].id);
226
    }
227
  }
228
  free(c->entries);
229
  free(c->metadata);
230
  free(c);
231
}
232

    
233
static int in_cache(const struct peer_cache *c, const struct cache_entry *elem)
234
{
235
  int i;
236

    
237
  for (i = 0; i < c->current_size; i++) {
238
    if (nodeid_equal(c->entries[i].id, elem->id)) {
239
      return i;
240
    }
241
  }
242

    
243
  return -1;
244
}
245

    
246
struct nodeID *rand_peer(const struct peer_cache *c, void **meta, int max)
247
{
248
  int j;
249

    
250
  if (c->current_size == 0) {
251
    return NULL;
252
  }
253
  if (!max || max >= c->current_size)
254
    max = c->current_size;
255
  else
256
    ++max;
257
  j = ((double)rand() / (double)RAND_MAX) * max;
258

    
259
  if (meta) {
260
    *meta = c->metadata + (j * c->metadata_size);
261
  }
262

    
263
  return c->entries[j].id;
264
}
265

    
266
struct nodeID *last_peer(const struct peer_cache *c)
267
{
268
  if (c->current_size == 0) {
269
    return NULL;
270
  }
271

    
272
  return c->entries[c->current_size - 1].id;
273
}
274

    
275
struct peer_cache *rand_cache(struct peer_cache *c, int n)
276
{
277
  struct peer_cache *res;
278

    
279
cache_check(c);
280
  if (c->current_size < n) {
281
    n = c->current_size;
282
  }
283
  res = cache_init(n, c->metadata_size, c->max_timestamp);
284

    
285
  while(res->current_size < n) {
286
    int j;
287

    
288
    j = ((double)rand() / (double)RAND_MAX) * c->current_size;
289
    cache_insert(res, c->entries + j, c->metadata + c->metadata_size * j);
290
    c->current_size--;
291
    memmove(c->entries + j, c->entries + j + 1, sizeof(struct cache_entry) * (c->current_size - j));
292
    memmove(c->metadata + c->metadata_size * j, c->metadata + c->metadata_size * (j + 1), c->metadata_size * (c->current_size - j));
293
    c->entries[c->current_size].id = NULL;
294
cache_check(c);
295
  }
296

    
297
  return res;
298
}
299

    
300
struct peer_cache *entries_undump(const uint8_t *buff, int size)
301
{
302
  struct peer_cache *res;
303
  int i = 0;
304
  const uint8_t *p = buff;
305
  uint8_t *meta;
306
  int cache_size, metadata_size;
307

    
308
  cache_size = int_rcpy(buff);
309
  metadata_size = int_rcpy(buff + 4);
310
  p = buff + 8;
311
  res = cache_init(cache_size, metadata_size, 0);
312
  meta = res->metadata;
313
  while (p - buff < size) {
314
    int len;
315

    
316
    res->entries[i].timestamp = int_rcpy(p);
317
    p += sizeof(uint32_t);
318
    res->entries[i++].id = nodeid_undump(p, &len);
319
    p += len;
320
    if (metadata_size) {
321
      memcpy(meta, p, metadata_size);
322
      p += metadata_size;
323
      meta += metadata_size;
324
    }
325
  }
326
  res->current_size = i;
327
if (p - buff != size) { fprintf(stderr, "Waz!! %d != %d\n", (int)(p - buff), size); exit(-1);}
328

    
329
  return res;
330
}
331

    
332
int cache_header_dump(uint8_t *b, const struct peer_cache *c, int include_me)
333
{
334
  int_cpy(b, c->cache_size + (include_me ? 1 : 0));
335
  int_cpy(b + 4, c->metadata_size);
336

    
337
  return 8;
338
}
339

    
340
int entry_dump(uint8_t *b, const struct peer_cache *c, int i, size_t max_write_size)
341
{
342
  int res;
343
  int size = 0;
344
 
345
  if (i && (i >= c->cache_size - 1)) {
346
    return 0;
347
  }
348
  int_cpy(b, c->entries[i].timestamp);
349
  size = +4;
350
  res = nodeid_dump(b + size, c->entries[i].id, max_write_size - size);
351
  if (res < 0 ) {
352
    return -1;
353
  }
354
  size += res;
355
  if (c->metadata_size) {
356
    if (c->metadata_size > max_write_size - size) {
357
      return -1;
358
    }
359
    memcpy(b + size, c->metadata + c->metadata_size * i, c->metadata_size);
360
    size += c->metadata_size;
361
  }
362

    
363
  return size;
364
}
365

    
366
/*
 * Build a new cache containing copies of c's entries (excluding target,
 * if given), ordered by the ranking function: each entry is placed after
 * every already-inserted entry for which rank(...) == 2.  When rank is
 * NULL the entries are ordered by non-decreasing timestamp instead.
 * The nodeIDs are duplicated, so c is left untouched.
 * Returns the new cache, or NULL on allocation failure.
 */
struct peer_cache *cache_rank (const struct peer_cache *c, ranking_function rank, const struct nodeID *target, const void *target_meta)
{
  struct peer_cache *res;
  int i,j,pos;

  res = cache_init(c->cache_size, c->metadata_size, c->max_timestamp);
  if (res == NULL) {
    return res;
  }

  for (i = 0; i < c->current_size; i++) {
    if (!target || !nodeid_equal(c->entries[i].id,target)) {
      pos = 0;
      /* Count how many already-inserted entries must precede entry i */
      for (j=0; j<res->current_size;j++) {
        if (((rank != NULL) && rank(target_meta, c->metadata+(c->metadata_size * i), res->metadata+(res->metadata_size * j)) == 2) ||
            ((rank == NULL) && res->entries[j].timestamp < c->entries[i].timestamp)) {
          pos++;
        }
      }
      /* Open slot pos in the metadata array and copy entry i's metadata */
      if (c->metadata_size) {
        memmove(res->metadata + (pos + 1) * res->metadata_size, res->metadata + pos * res->metadata_size, (res->current_size - pos) * res->metadata_size);
        memcpy(res->metadata + pos * res->metadata_size, c->metadata+(c->metadata_size * i), res->metadata_size);
      }
      /* Shift entries right to open slot pos */
      for (j = res->current_size; j > pos; j--) {
        res->entries[j] = res->entries[j - 1];
      }
      res->entries[pos].id = nodeid_dup(c->entries[i].id);
      res->entries[pos].timestamp = c->entries[i].timestamp;
      res->current_size++;
    }
  }

  return res;
}
400

    
401
/*
 * Merge all the entries of c1 and c2 into a new cache: c1's entries are
 * copied first; entries of c2 already present only refresh the stored
 * timestamp/metadata when c2's copy has a lower (fresher) timestamp.
 * NOTE: ownership of the nodeIDs is transferred to the new cache — the
 * id pointers inside c1 and c2 are set to NULL, so both source caches
 * are consumed despite the const qualifier on the struct pointers.
 * On return *size holds the number of entries in the new cache.
 * Returns NULL when the metadata sizes differ or allocation fails.
 */
struct peer_cache *cache_union(const struct peer_cache *c1, const struct peer_cache *c2, int *size)
{
  int n, pos;
  struct peer_cache *new_cache;
  uint8_t *meta;

  if (c1->metadata_size != c2->metadata_size) {
    return NULL;
  }

  new_cache = cache_init(c1->current_size + c2->current_size, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;

  /* Take everything from c1, stealing the nodeIDs */
  for (n = 0; n < c1->current_size; n++) {
    if (new_cache->metadata_size) {
      memcpy(meta, c1->metadata + n * c1->metadata_size, c1->metadata_size);
      meta += new_cache->metadata_size;
    }
    new_cache->entries[new_cache->current_size++] = c1->entries[n];
    c1->entries[n].id = NULL;
  }
  
  /* Add c2's entries, refreshing duplicates when c2's copy is fresher */
  for (n = 0; n < c2->current_size; n++) {
    pos = in_cache(new_cache, &c2->entries[n]);
    if (pos >= 0 && new_cache->entries[pos].timestamp > c2->entries[n].timestamp) {
      cache_metadata_update(new_cache, c2->entries[n].id, c2->metadata + n * c2->metadata_size, c2->metadata_size);
      new_cache->entries[pos].timestamp = c2->entries[n].timestamp;
    }
    if (pos < 0) {
      if (new_cache->metadata_size) {
        memcpy(meta, c2->metadata + n * c2->metadata_size, c2->metadata_size);
        meta += new_cache->metadata_size;
      }
      new_cache->entries[new_cache->current_size++] = c2->entries[n];
      c2->entries[n].id = NULL;
    }
  }
  *size = new_cache->current_size;

  return new_cache;
}
446

    
447
int cache_resize (struct peer_cache *c, int size)
448
{
449
  int dif = size - c->cache_size;
450

    
451
  if (!dif) {
452
    return c->current_size;
453
  }
454

    
455
  c->entries = realloc(c->entries, sizeof(struct cache_entry) * size);
456
  if (dif > 0) {
457
    memset(c->entries + c->cache_size, 0, sizeof(struct cache_entry) * dif);
458
  } else if (c->current_size > size) {
459
    c->current_size = size;
460
  }
461

    
462
  if (c->metadata_size) {
463
    c->metadata = realloc(c->metadata, c->metadata_size * size);
464
    if (dif > 0) {
465
      memset(c->metadata + c->metadata_size * c->cache_size, 0, c->metadata_size * dif);
466
    }
467
  }
468

    
469
  c->cache_size = size;
470

    
471
  return c->current_size;
472
}
473
  
474
/*
 * Fill a new cache of capacity newsize by drawing entries from c1 and
 * c2, at each step preferring the entry with the lower timestamp and
 * skipping nodes already copied.  *source is set to a bitmask of where
 * entries were actually taken from: 0x01 = c1, 0x02 = c2.
 * NOTE: ownership of the copied nodeIDs moves to the new cache (the
 * source id pointers are NULLed), so c1 and c2 are consumed despite
 * the const qualifier on the struct pointers.
 * Returns the new cache, or NULL on allocation failure.
 */
struct peer_cache *merge_caches(const struct peer_cache *c1, const struct peer_cache *c2, int newsize, int *source)
{
  int n1, n2;
  struct peer_cache *new_cache;
  uint8_t *meta;

  new_cache = cache_init(newsize, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;
  *source = 0;
  for (n1 = 0, n2 = 0; new_cache->current_size < new_cache->cache_size;) {
    /* Both sources exhausted: done */
    if ((n1 == c1->current_size) && (n2 == c2->current_size)) {
      return new_cache;
    }
    if (n1 == c1->current_size) {
      /* Only c2 has entries left */
      if (in_cache(new_cache, &c2->entries[n2]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c2->entries[n2];
        c2->entries[n2].id = NULL;
        *source |= 0x02;
      }
      n2++;
    } else if (n2 == c2->current_size) {
      /* Only c1 has entries left */
      if (in_cache(new_cache, &c1->entries[n1]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c1->entries[n1];
        c1->entries[n1].id = NULL;
        *source |= 0x01;
      }
      n1++;
    } else {
      /* Both have entries: take the one with the lower timestamp */
      if (c2->entries[n2].timestamp > c1->entries[n1].timestamp) {
        if (in_cache(new_cache, &c1->entries[n1]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c1->entries[n1];
          c1->entries[n1].id = NULL;
          *source |= 0x01;
        }
        n1++;
      } else {
        if (in_cache(new_cache, &c2->entries[n2]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c2->entries[n2];
          c2->entries[n2].id = NULL;
          *source |= 0x02;
        }
        n2++;
      }
    }
  }

  return new_cache;
}
542

    
543
void cache_check(const struct peer_cache *c)
544
{
545
  int i, j;
546

    
547
  for (i = 0; i < c->current_size; i++) {
548
    for (j = i + 1; j < c->current_size; j++) {
549
      if (nodeid_equal(c->entries[i].id, c->entries[j].id)) {
550
        fprintf(stderr, "WTF!!!! %d = %d!!!\n", i, j);
551
        *((char *)0) = 1;
552
      }
553
    }
554
  }
555
}