grapes / src / TopologyManager / topocache.c @ b71e9607

/*
 *  Copyright (c) 2010 Luca Abeni
 *
 *  This is free software; see lgpl-2.1.txt
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <stdio.h>

#include "net_helper.h"
#include "topocache.h"
#include "int_coding.h"

/* A cached neighbour: its nodeID and the age of the entry
   (the timestamp is incremented by cache_update()) */
struct cache_entry {
  struct nodeID *id;
  uint32_t timestamp;
};

/* A bounded array of cache entries, kept ordered by timestamp, plus an
   optional fixed-size metadata record per entry; entries whose age reaches
   max_timestamp are purged by cache_update() */
struct peer_cache {
  struct cache_entry *entries;
  int cache_size;
  int current_size;
  int metadata_size;
  uint8_t *metadata;
  int max_timestamp;
};

/* Insert entry e (and its metadata) into the cache, keeping the timestamp
   ordering; returns the insert position, -1 if the node is already cached,
   -2 if the cache is full */
static int cache_insert(struct peer_cache *c, struct cache_entry *e, const void *meta)
{
  int i, position;

  if (c->current_size == c->cache_size) {
    return -2;
  }
  position = c->current_size;
  for (i = 0; i < c->current_size; i++) {
    if (e->id == NULL) {
      fprintf(stderr, "e->ID = NULL!!!\n");
      abort();
    }
    if (c->entries[i].id == NULL) {
      fprintf(stderr, "entries[%d]->ID = NULL!!!\n", i);
      exit(-1);
    }
    if (nodeid_equal(e->id, c->entries[i].id)) {
      return -1;
    }
    if (e->timestamp < c->entries[i].timestamp) {
      position = i;
    }
  }

  memmove(c->entries + position + 1, c->entries + position, sizeof(struct cache_entry) * (c->current_size - position));
  memmove(c->metadata + (position + 1) * c->metadata_size, c->metadata + position * c->metadata_size, (c->current_size - position) * c->metadata_size);
  c->current_size++;
  c->entries[position] = *e;
  memcpy(c->metadata + position * c->metadata_size, meta, c->metadata_size);

  return position;
}

/* Return the nodeID stored in entry i, or NULL if i is out of range */
struct nodeID *nodeid(const struct peer_cache *c, int i)
{
  if (i < c->current_size) {
    return c->entries[i].id;
  }

  return NULL;
}

/* Return a pointer to the whole metadata area and the per-entry size */
const void *get_metadata(const struct peer_cache *c, int *size)
{
  *size = c->metadata_size;
  return c->metadata;
}

/* Overwrite the metadata associated with peer p; returns 1 on success,
   0 if p is not in the cache, -3 if meta_size does not match */
int cache_metadata_update(struct peer_cache *c, struct nodeID *p, const void *meta, int meta_size)
{
  int i;

  if (!meta_size || meta_size != c->metadata_size) {
    return -3;
  }
  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, p)) {
      memcpy(c->metadata + i * meta_size, meta, meta_size);
      return 1;
    }
  }

  return 0;
}
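
/*
 * Illustrative usage sketch (not part of the original file): how a caller
 * might walk the cache through nodeid() and get_metadata(). node_addr()
 * from net_helper.h is assumed here to return a printable address string.
 */
#if 0
static void example_print_cache(const struct peer_cache *c)
{
  int i, meta_size;
  const uint8_t *meta = get_metadata(c, &meta_size);
  struct nodeID *n;

  for (i = 0; (n = nodeid(c, i)) != NULL; i++) {
    const uint8_t *m = meta + i * meta_size;   /* this entry's metadata */

    fprintf(stderr, "peer %d: %s (%d metadata bytes at %p)\n",
            i, node_addr(n), meta_size, (const void *)m);
  }
}
#endif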

/* Add a new neighbour, placing it according to the ranking function f
   (when f is NULL, duplicates just get their metadata refreshed).
   Returns the new cache size, -1 if the neighbour was already cached,
   -2 if the cache is full, -3 on a metadata size mismatch */
int cache_add_ranked(struct peer_cache *c, struct nodeID *neighbour, const void *meta, int meta_size, ranking_function f, const void *tmeta)
{
  int i, pos = 0;

  if (meta_size && meta_size != c->metadata_size) {
    return -3;
  }
  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, neighbour)) {
      if (f != NULL) {
        cache_del(c, neighbour);
        if (i == c->current_size) break;
      } else {
        cache_metadata_update(c, neighbour, meta, meta_size);
        return -1;
      }
    }
    if ((f != NULL) && f(tmeta, meta, c->metadata + (c->metadata_size * i)) == 2) {
      pos++;
    }
  }
  if (c->current_size == c->cache_size) {
    return -2;
  }
  if (c->metadata_size) {
    memmove(c->metadata + (pos + 1) * c->metadata_size, c->metadata + pos * c->metadata_size, (c->current_size - pos) * c->metadata_size);
    if (meta_size) {
      memcpy(c->metadata + pos * c->metadata_size, meta, meta_size);
    } else {
      memset(c->metadata + pos * c->metadata_size, 0, c->metadata_size);
    }
  }
  for (i = c->current_size; i > pos; i--) {
    c->entries[i] = c->entries[i - 1];
  }
  c->entries[pos].id = nodeid_dup(neighbour);
  c->entries[pos].timestamp = 1;
  c->current_size++;

  return c->current_size;
}

/* Add a new neighbour without any ranking */
int cache_add(struct peer_cache *c, struct nodeID *neighbour, const void *meta, int meta_size)
{
  return cache_add_ranked(c, neighbour, meta, meta_size, NULL, NULL);
}
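
/*
 * Illustrative usage sketch (not part of the original file): adding a
 * neighbour with a small per-peer metadata record. The example_meta struct
 * is hypothetical, and create_node() from net_helper.h is assumed to build
 * a nodeID from an address and port; the cache must have been created with
 * cache_init(..., sizeof(struct example_meta), ...).
 */
#if 0
struct example_meta {
  uint32_t capacity;   /* hypothetical application-defined field */
};

static int example_add_neighbour(struct peer_cache *c, const char *addr, int port)
{
  struct example_meta m = { .capacity = 100 };
  struct nodeID *n = create_node(addr, port);
  int res;

  if (n == NULL) {
    return -1;
  }
  res = cache_add(c, n, &m, sizeof(m));   /* -1 if already cached, -2 if full */
  nodeid_free(n);                         /* cache_add duplicates the nodeID */

  return res;
}
#endif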

/* Remove neighbour from the cache (if present), compacting the entries and
   metadata arrays; returns the new cache size */
int cache_del(struct peer_cache *c, struct nodeID *neighbour)
{
  int i;
  int found = 0;

  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, neighbour)) {
      nodeid_free(c->entries[i].id);
      c->current_size--;
      found = 1;
      if (c->metadata_size && (i < c->current_size)) {
        memmove(c->metadata + c->metadata_size * i,
                c->metadata + c->metadata_size * (i + 1),
                c->metadata_size * (c->current_size - i));
      }
    }
    if (found && (i < c->current_size)) {
      c->entries[i] = c->entries[i + 1];
    }
  }

  return c->current_size;
}

/* Age every entry by one; when an entry reaches max_timestamp it is dropped,
   together with everything after it */
void cache_update(struct peer_cache *c)
{
  int i;

  for (i = 0; i < c->current_size; i++) {
    if (c->max_timestamp && (c->entries[i].timestamp == c->max_timestamp)) {
      int j = i;

      while (j < c->current_size && c->entries[j].id) {
        nodeid_free(c->entries[j].id);
        c->entries[j++].id = NULL;
      }
      c->current_size = i;        /* The cache is ordered by timestamp...
                                     all the other entries will be older than
                                     this one, so remove all of them
                                   */
    } else {
      c->entries[i].timestamp++;
    }
  }
}
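
/*
 * Illustrative usage sketch (not part of the original file): the kind of
 * periodic maintenance a gossip protocol would perform with cache_del() and
 * cache_update(). The function name and the "dead peer" trigger are
 * hypothetical.
 */
#if 0
static void example_forget_peer(struct peer_cache *local_view, struct nodeID *dead)
{
  cache_del(local_view, dead);   /* no-op if the peer is not cached */
  cache_update(local_view);      /* age the remaining entries */
}
#endif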

/* Allocate a cache for at most n entries, each with metadata_size bytes of
   metadata; entries aged past max_timestamp are purged by cache_update() */
struct peer_cache *cache_init(int n, int metadata_size, int max_timestamp)
{
  struct peer_cache *res;

  res = malloc(sizeof(struct peer_cache));
  if (res == NULL) {
    return NULL;
  }
  res->max_timestamp = max_timestamp;
  res->cache_size = n;
  res->current_size = 0;
  res->entries = malloc(sizeof(struct cache_entry) * n);
  if (res->entries == NULL) {
    free(res);

    return NULL;
  }

  memset(res->entries, 0, sizeof(struct cache_entry) * n);
  if (metadata_size) {
    res->metadata = malloc(metadata_size * n);
  } else {
    res->metadata = NULL;
  }

  if (res->metadata) {
    res->metadata_size = metadata_size;
    memset(res->metadata, 0, metadata_size * n);
  } else {
    res->metadata_size = 0;
  }

  return res;
}

/* Free the cache, all the cached nodeIDs, and the metadata area */
void cache_free(struct peer_cache *c)
{
  int i;

  for (i = 0; i < c->current_size; i++) {
    if (c->entries[i].id) {
      nodeid_free(c->entries[i].id);
    }
  }
  free(c->entries);
  free(c->metadata);
  free(c);
}
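
/*
 * Illustrative usage sketch (not part of the original file): creating,
 * filling and destroying a cache. The sizes, ageing threshold, address and
 * port are arbitrary; create_node() from net_helper.h is assumed as in the
 * earlier sketch.
 */
#if 0
static void example_lifecycle(void)
{
  /* room for 20 peers, no per-peer metadata, drop entries aged past 10 */
  struct peer_cache *c = cache_init(20, 0, 10);
  struct nodeID *n;

  if (c == NULL) {
    return;
  }
  n = create_node("127.0.0.1", 6666);
  if (n) {
    cache_add(c, n, NULL, 0);
    nodeid_free(n);
  }
  cache_free(c);   /* frees the entries, the cached nodeIDs and the metadata */
}
#endif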

/* Return the index of elem in the cache, or -1 if it is not there */
static int in_cache(const struct peer_cache *c, const struct cache_entry *elem)
{
  int i;

  for (i = 0; i < c->current_size; i++) {
    if (nodeid_equal(c->entries[i].id, elem->id)) {
      return i;
    }
  }

  return -1;
}

/* Return a random peer from the cache (restricted to roughly the first max
   entries when 0 < max < the cache size); optionally return a pointer to
   its metadata */
struct nodeID *rand_peer(struct peer_cache *c, void **meta, int max)
{
  int j;

  if (c->current_size == 0) {
    return NULL;
  }
  if (!max || max >= c->current_size)
    max = c->current_size;
  else
    ++max;
  j = ((double)rand() / (double)RAND_MAX) * max;

  if (meta) {
    *meta = c->metadata + (j * c->metadata_size);
  }

  return c->entries[j].id;
}

/* Return the oldest (last) peer in the cache */
struct nodeID *last_peer(struct peer_cache *c)
{
  if (c->current_size == 0) {
    return NULL;
  }

  return c->entries[c->current_size - 1].id;
}

/* Extract up to n random entries from c and move them (entries and
   metadata) into a newly allocated cache */
struct peer_cache *rand_cache(struct peer_cache *c, int n)
{
  struct peer_cache *res;

  cache_check(c);
  if (c->current_size < n) {
    n = c->current_size;
  }
  res = cache_init(n, c->metadata_size, c->max_timestamp);

  while (res->current_size < n) {
    int j;

    j = ((double)rand() / (double)RAND_MAX) * c->current_size;
    cache_insert(res, c->entries + j, c->metadata + c->metadata_size * j);
    c->current_size--;
    memmove(c->entries + j, c->entries + j + 1, sizeof(struct cache_entry) * (c->current_size - j));
    memmove(c->metadata + c->metadata_size * j, c->metadata + c->metadata_size * (j + 1), c->metadata_size * (c->current_size - j));
    c->entries[c->current_size].id = NULL;
    cache_check(c);
  }

  return res;
}
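
/*
 * Illustrative usage sketch (not part of the original file): selecting a
 * gossip target from the local view with rand_peer(). A protocol could also
 * use last_peer() to pick the oldest entry, or rand_cache() to build the
 * random subset of the view to send; the function name is hypothetical.
 */
#if 0
static void example_pick_target(struct peer_cache *local_view)
{
  void *meta;
  struct nodeID *target = rand_peer(local_view, &meta, 0);   /* any cached peer */

  if (target == NULL) {
    return;                      /* empty view */
  }
  /* ... send a gossip message to target; meta points at its metadata ... */
}
#endif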

/* Rebuild a cache from its serialized form (as produced by
   cache_header_dump() followed by a sequence of entry_dump()s) */
struct peer_cache *entries_undump(const uint8_t *buff, int size)
{
  struct peer_cache *res;
  int i = 0;
  const uint8_t *p = buff;
  uint8_t *meta;
  int cache_size, metadata_size;

  cache_size = int_rcpy(buff);
  metadata_size = int_rcpy(buff + 4);
  p = buff + 8;
  res = cache_init(cache_size, metadata_size, 0);
  meta = res->metadata;
  while (p - buff < size) {
    int len;

    res->entries[i].timestamp = int_rcpy(p);
    p += sizeof(uint32_t);
    res->entries[i++].id = nodeid_undump(p, &len);
    p += len;
    if (metadata_size) {
      memcpy(meta, p, metadata_size);
      p += metadata_size;
      meta += metadata_size;
    }
  }
  res->current_size = i;
  if (p - buff != size) {
    fprintf(stderr, "Waz!! %d != %d\n", (int)(p - buff), size);
    exit(-1);
  }

  return res;
}

/* Write the serialization header (cache size and metadata size);
   returns the number of bytes written (8) */
int cache_header_dump(uint8_t *b, const struct peer_cache *c, int include_me)
{
  int_cpy(b, c->cache_size + (include_me ? 1 : 0));
  int_cpy(b + 4, c->metadata_size);

  return 8;
}

/* Serialize entry i (timestamp, nodeID, metadata) into b; returns the number
   of bytes written, 0 when i is past the dumpable range, -1 on error */
int entry_dump(uint8_t *b, struct peer_cache *c, int i, size_t max_write_size)
{
  int res;
  int size = 0;

  if (i && (i >= c->cache_size - 1)) {
    return 0;
  }
  int_cpy(b, c->entries[i].timestamp);
  size = 4;
  res = nodeid_dump(b + size, c->entries[i].id, max_write_size - size);
  if (res < 0) {
    fprintf(stderr, "cavolo1\n");
    return -1;
  }
  size += res;
  if (c->metadata_size) {
    if (c->metadata_size > max_write_size - size) {
      fprintf(stderr, "cavolo2\n");
      return -1;
    }
    memcpy(b + size, c->metadata + c->metadata_size * i, c->metadata_size);
    size += c->metadata_size;
  }

  return size;
}
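
/*
 * Illustrative usage sketch (not part of the original file): serializing a
 * cache into a message buffer with cache_header_dump()/entry_dump() and
 * parsing it back with entries_undump(). The buffer size is arbitrary and
 * n_entries is assumed to be the number of entries actually stored.
 */
#if 0
static int example_roundtrip(struct peer_cache *c, int n_entries)
{
  uint8_t buff[1024];
  int pos, i;
  struct peer_cache *copy;

  pos = cache_header_dump(buff, c, 0);
  for (i = 0; i < n_entries; i++) {
    int res = entry_dump(buff + pos, c, i, sizeof(buff) - pos);

    if (res <= 0) {
      break;
    }
    pos += res;
  }
  copy = entries_undump(buff, pos);   /* expects exactly pos serialized bytes */
  if (copy == NULL) {
    return -1;
  }
  cache_free(copy);

  return pos;                         /* total number of serialized bytes */
}
#endif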

/* Build a new cache with the entries of c (except target, if given) sorted
   according to the rank function, or by increasing timestamp when rank is
   NULL */
struct peer_cache *cache_rank(const struct peer_cache *c, ranking_function rank, const struct nodeID *target, const void *target_meta)
{
  struct peer_cache *res;
  int i, j, pos;

  res = cache_init(c->cache_size, c->metadata_size, c->max_timestamp);
  if (res == NULL) {
    return res;
  }

  for (i = 0; i < c->current_size; i++) {
    if (!target || !nodeid_equal(c->entries[i].id, target)) {
      pos = 0;
      for (j = 0; j < res->current_size; j++) {
        if (((rank != NULL) && rank(target_meta, c->metadata + (c->metadata_size * i), res->metadata + (res->metadata_size * j)) == 2) ||
            ((rank == NULL) && res->entries[j].timestamp < c->entries[i].timestamp)) {
          pos++;
        }
      }
      if (c->metadata_size) {
        memmove(res->metadata + (pos + 1) * res->metadata_size, res->metadata + pos * res->metadata_size, (res->current_size - pos) * res->metadata_size);
        memcpy(res->metadata + pos * res->metadata_size, c->metadata + (c->metadata_size * i), res->metadata_size);
      }
      for (j = res->current_size; j > pos; j--) {
        res->entries[j] = res->entries[j - 1];
      }
      res->entries[pos].id = nodeid_dup(c->entries[i].id);
      res->entries[pos].timestamp = c->entries[i].timestamp;
      res->current_size++;
    }
  }

  return res;
}

/* Merge c1 and c2 into a newly allocated cache large enough to hold all the
   entries; duplicates keep the freshest timestamp and metadata, and the
   resulting size is returned through *size. The nodeIDs are moved (not
   copied) out of c1 and c2 */
struct peer_cache *cache_union(struct peer_cache *c1, struct peer_cache *c2, int *size)
{
  int n, pos;
  struct peer_cache *new_cache;
  uint8_t *meta;

  if (c1->metadata_size != c2->metadata_size) {
    return NULL;
  }

  new_cache = cache_init(c1->current_size + c2->current_size, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;

  for (n = 0; n < c1->current_size; n++) {
    if (new_cache->metadata_size) {
      memcpy(meta, c1->metadata + n * c1->metadata_size, c1->metadata_size);
      meta += new_cache->metadata_size;
    }
    new_cache->entries[new_cache->current_size++] = c1->entries[n];
    c1->entries[n].id = NULL;
  }

  for (n = 0; n < c2->current_size; n++) {
    pos = in_cache(new_cache, &c2->entries[n]);
    if (pos >= 0 && new_cache->entries[pos].timestamp > c2->entries[n].timestamp) {
      cache_metadata_update(new_cache, c2->entries[n].id, c2->metadata + n * c2->metadata_size, c2->metadata_size);
      new_cache->entries[pos].timestamp = c2->entries[n].timestamp;
    }
    if (pos < 0) {
      if (new_cache->metadata_size) {
        memcpy(meta, c2->metadata + n * c2->metadata_size, c2->metadata_size);
        meta += new_cache->metadata_size;
      }
      new_cache->entries[new_cache->current_size++] = c2->entries[n];
      c2->entries[n].id = NULL;
    }
  }
  *size = new_cache->current_size;

  return new_cache;
}
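
/*
 * Illustrative usage sketch (not part of the original file): merging the
 * local view with a view received from a neighbour through cache_union().
 * Since cache_union() moves the nodeIDs out of both inputs, both caches are
 * freed afterwards; the function name is hypothetical.
 */
#if 0
static struct peer_cache *example_merge_views(struct peer_cache *local_view,
                                              struct peer_cache *received)
{
  int size;
  struct peer_cache *merged = cache_union(local_view, received, &size);

  if (merged == NULL) {
    return local_view;           /* e.g. metadata sizes did not match */
  }
  cache_free(local_view);
  cache_free(received);

  return merged;
}
#endif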

/* Grow or shrink the cache to hold size entries; returns the (possibly
   truncated) number of currently stored entries */
int cache_resize(struct peer_cache *c, int size)
{
  int dif = size - c->cache_size;

  if (!dif) {
    return c->current_size;
  }

  c->entries = realloc(c->entries, sizeof(struct cache_entry) * size);
  if (dif > 0) {
    memset(c->entries + c->cache_size, 0, sizeof(struct cache_entry) * dif);
  } else if (c->current_size > size) {
    c->current_size = size;
  }

  if (c->metadata_size) {
    c->metadata = realloc(c->metadata, c->metadata_size * size);
    if (dif > 0) {
      memset(c->metadata + c->metadata_size * c->cache_size, 0, c->metadata_size * dif);
    }
  }

  c->cache_size = size;

  return c->current_size;
}

/* Merge c1 and c2 into a new cache of at most newsize entries, taking the
   freshest entries first; *source records whether entries were taken from
   c1 (bit 0), c2 (bit 1), or both. The nodeIDs are moved out of c1 and c2 */
struct peer_cache *merge_caches(struct peer_cache *c1, struct peer_cache *c2, int newsize, int *source)
{
  int n1, n2;
  struct peer_cache *new_cache;
  uint8_t *meta;

  new_cache = cache_init(newsize, c1->metadata_size, c1->max_timestamp);
  if (new_cache == NULL) {
    return NULL;
  }

  meta = new_cache->metadata;
  *source = 0;
  for (n1 = 0, n2 = 0; new_cache->current_size < new_cache->cache_size;) {
    if ((n1 == c1->current_size) && (n2 == c2->current_size)) {
      return new_cache;
    }
    if (n1 == c1->current_size) {
      if (in_cache(new_cache, &c2->entries[n2]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c2->entries[n2];
        c2->entries[n2].id = NULL;
        *source |= 0x02;
      }
      n2++;
    } else if (n2 == c2->current_size) {
      if (in_cache(new_cache, &c1->entries[n1]) < 0) {
        if (new_cache->metadata_size) {
          memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
          meta += new_cache->metadata_size;
        }
        new_cache->entries[new_cache->current_size++] = c1->entries[n1];
        c1->entries[n1].id = NULL;
        *source |= 0x01;
      }
      n1++;
    } else {
      if (c2->entries[n2].timestamp > c1->entries[n1].timestamp) {
        if (in_cache(new_cache, &c1->entries[n1]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c1->metadata + n1 * c1->metadata_size, c1->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c1->entries[n1];
          c1->entries[n1].id = NULL;
          *source |= 0x01;
        }
        n1++;
      } else {
        if (in_cache(new_cache, &c2->entries[n2]) < 0) {
          if (new_cache->metadata_size) {
            memcpy(meta, c2->metadata + n2 * c2->metadata_size, c2->metadata_size);
            meta += new_cache->metadata_size;
          }
          new_cache->entries[new_cache->current_size++] = c2->entries[n2];
          c2->entries[n2].id = NULL;
          *source |= 0x02;
        }
        n2++;
      }
    }
  }

  return new_cache;
}
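
/*
 * Illustrative usage sketch (not part of the original file): a bounded merge
 * with merge_caches(), keeping the result at the local view size and
 * inspecting the source bits afterwards. The variable and function names are
 * hypothetical.
 */
#if 0
static struct peer_cache *example_bounded_merge(struct peer_cache *local_view,
                                                struct peer_cache *received,
                                                int view_size)
{
  int source = 0;
  struct peer_cache *merged = merge_caches(local_view, received, view_size, &source);

  if (merged == NULL) {
    return local_view;
  }
  if (source & 0x02) {
    /* at least one entry of the merged view came from the received cache */
  }
  cache_free(local_view);
  cache_free(received);

  return merged;
}
#endif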

/* Consistency check: abort if the same nodeID appears twice in the cache */
void cache_check(const struct peer_cache *c)
{
  int i, j;

  for (i = 0; i < c->current_size; i++) {
    for (j = i + 1; j < c->current_size; j++) {
      if (nodeid_equal(c->entries[i].id, c->entries[j].id)) {
        fprintf(stderr, "WTF!!!! %d = %d!!!\n", i, j);
        abort();
      }
    }
  }
}