Revision 2d4b0f20 src/Cache/topocache.c

View differences:

src/Cache/topocache.c
@@ -47,17 +47,16 @@
     }
     if (nodeid_equal(e->id, c->entries[i].id)) {
       if (c->entries[i].timestamp > e->timestamp) {
-        if (position >= i) {
-          c->entries[i].timestamp = e->timestamp;
-          memcpy(c->metadata + i * c->metadata_size, meta, c->metadata_size);
-        } else {
-          nodeid_free(c->entries[i].id);
+        nodeid_free(c->entries[i].id);
+        c->entries[position] = *e;
+        memcpy(c->metadata + position * c->metadata_size, meta, c->metadata_size);
+
+        if (position < i) {
           memmove(c->entries + position + 1, c->entries + position, sizeof(struct cache_entry) * (i - position));
           memmove(c->metadata + (position + 1) * c->metadata_size, c->metadata + position * c->metadata_size, (i - position) * c->metadata_size);
-          c->entries[position] = *e;
-          memcpy(c->metadata + position * c->metadata_size, meta, c->metadata_size);
         }
-      }
-      return position;
+
+        return position;
+      } else return -1;
     }
   }
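For orientation, the branch rewritten above is the duplicate-handling path of cache_insert_or_update. The cache is kept ordered by timestamp, and a smaller timestamp apparently means fresher, judging by the comparison against e->timestamp. When the same nodeID is found at index i with an older timestamp, the stale slot is released and the incoming entry is moved to its ordered slot, with memmove shifting the entries in between. When the cached copy is already at least as fresh, the function now returns -1 and leaves ownership of e->id with the caller, which is what the callers below start checking for. A minimal standalone sketch of those semantics, with made-up toy_* names in place of the GRAPES types:

/* Standalone sketch, not the project's code: duplicate handling in a
 * timestamp-ordered cache. toy_entry and toy_insert_or_update are invented
 * names, and a smaller timestamp is treated as fresher, matching the
 * comparison in the hunk above. */
#include <stdio.h>
#include <string.h>

struct toy_entry {
  int id;                 /* stands in for the struct nodeID pointer */
  unsigned int timestamp;
};

/* Returns the slot the fresher entry ends up in, or -1 when the cached
 * copy is already at least as fresh and nothing is stored. */
static int toy_insert_or_update(struct toy_entry *cache, int size,
                                const struct toy_entry *e)
{
  int i, position = 0;

  for (i = 0; i < size; i++) {
    if (cache[i].timestamp <= e->timestamp) {
      position = i + 1;   /* e sorts after every entry at least as fresh */
    }
    if (cache[i].id != e->id) {
      continue;
    }
    if (cache[i].timestamp <= e->timestamp) {
      return -1;          /* cached entry is fresher: keep it */
    }
    /* Drop the stale copy at i and re-insert e at its ordered slot:
     * shift [position, i) up by one, then write e at position. */
    memmove(cache + position + 1, cache + position,
            sizeof(struct toy_entry) * (i - position));
    cache[position] = *e;
    return position;
  }
  return -1;              /* unknown id; inserting new ids is omitted here */
}

int main(void)
{
  struct toy_entry cache[] = { { 1, 2 }, { 2, 5 }, { 3, 9 } };
  struct toy_entry fresher = { 3, 1 };   /* newer sighting of id 3 */
  int pos = toy_insert_or_update(cache, 3, &fresher);

  if (pos >= 0) {
    printf("id 3 now at slot %d with timestamp %u\n", pos, cache[pos].timestamp);
  }
  return 0;
}

The point of the -1 return is ownership: on success the cache keeps whatever e->id refers to, on -1 the caller still owns it, and the cache_fill_* changes below rely on exactly that.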

  
@@ -323,7 +322,7 @@
 int cache_fill_ordered(struct peer_cache *dst, const struct peer_cache *src, int target_size)
 {
-  struct cache_entry *e_orig, *e_dup;
-  int count, j;
+  struct cache_entry *e_orig, e_dup;
+  int count, j, err;
   cache_check(dst);
   cache_check(src);
   if (target_size <= 0 || target_size > dst->cache_size) {
@@ -338,13 +337,15 @@
     count++;
 
     e_orig = src->entries + j;
-    e_dup = malloc(sizeof(struct cache_entry));
-    if (!e_dup) return -1;
+    e_dup.id = nodeid_dup(e_orig->id);
+    e_dup.timestamp = e_orig->timestamp;
 
-    e_dup->id = nodeid_dup(e_orig->id);
-    e_dup->timestamp = e_orig->timestamp;
+    err = cache_insert_or_update(dst, &e_dup, src->metadata + src->metadata_size * j);
+    if (err == -1) {
+      /* Cache entry is fresher */
+      nodeid_free(e_dup.id);
+    }
 
-    cache_insert_or_update(dst, e_dup, src->metadata + src->metadata_size * j);
     j++;
   }
   cache_check(dst);
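In cache_fill_ordered the per-entry malloc is gone. The old loop heap-allocated a cache_entry wrapper that the lines shown here never free, and, at least in the code shown, nothing released the nodeid_dup'd identifier when the destination already held a fresher copy. The new loop fills a stack cache_entry, passes its address, and checks the result: on success the cache has taken ownership of e_dup.id, while on -1 ("Cache entry is fresher") the caller frees the duplicate itself. A standalone sketch of that ownership convention, with made-up toy_* names standing in for the GRAPES calls:

/* Standalone sketch, not the project's code: who frees the duplicated id.
 * toy_id, toy_id_dup and toy_insert are invented stand-ins. */
#include <stdio.h>
#include <stdlib.h>

struct toy_id { char name[16]; };

/* Mimics nodeid_dup(): returns a heap copy that the caller initially owns. */
static struct toy_id *toy_id_dup(const struct toy_id *src)
{
  struct toy_id *dup = malloc(sizeof(*dup));

  if (dup) {
    *dup = *src;
  }
  return dup;
}

/* One-slot pretend cache: returns 0 when it stores (and from then on owns)
 * the id, -1 when it keeps its current entry, i.e. the toy version of the
 * "Cache entry is fresher" case. */
static int toy_insert(struct toy_id **slot, struct toy_id *id, int fresher)
{
  if (!fresher) {
    return -1;
  }
  free(*slot);
  *slot = id;
  return 0;
}

int main(void)
{
  struct toy_id seed = { "peer-42" };
  struct toy_id *slot = NULL;
  int round;

  for (round = 0; round < 2; round++) {
    struct toy_id *dup = toy_id_dup(&seed);   /* like nodeid_dup() */

    if (!dup) {
      return 1;
    }
    if (toy_insert(&slot, dup, round == 0) == -1) {
      free(dup);   /* rejected: the duplicate is still ours to release */
    }
  }
  printf("cached id: %s\n", slot->name);
  free(slot);
  return 0;
}

This is also why the struct itself no longer needs to come from the heap: judging by the c->entries[position] = *e assignment in the first hunk, cache_insert_or_update copies the cache_entry into its own array, so a stack variable is enough.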
@@ -355,8 +356,8 @@
 int cache_fill_rand(struct peer_cache *dst, const struct peer_cache *src, int target_size)
 {
   int added[src->current_size];
-  struct cache_entry *e_orig, *e_dup;
-  int count, j;
+  struct cache_entry *e_orig, e_dup;
+  int count, j, err;
   cache_check(dst);
   cache_check(src);
   if (target_size <= 0 || target_size > dst->cache_size) {
@@ -374,13 +375,15 @@
     count++;
 
     e_orig = src->entries + j;
-    e_dup = malloc(sizeof(struct cache_entry));
-    if (!e_dup) return -1;
 
-    e_dup->id = nodeid_dup(e_orig->id);
-    e_dup->timestamp = e_orig->timestamp;
+    e_dup.id = nodeid_dup(e_orig->id);
+    e_dup.timestamp = e_orig->timestamp;
 
-    cache_insert_or_update(dst, e_dup, src->metadata + src->metadata_size * j);
+    err = cache_insert_or_update(dst, &e_dup, src->metadata + src->metadata_size * j);
+    if (err == -1) {
+      /* Cache entry is fresher */
+      nodeid_free(e_dup.id);
+    }
   }
   cache_check(dst);
   cache_check(src);
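cache_fill_rand gets the same per-entry treatment as cache_fill_ordered: a stack-allocated e_dup, a result check, and nodeid_free() on the fresher path. The two fillers differ only in how the source slot j is chosen, and the added[] array in the context above presumably marks slots that were already copied so a random pick is not used twice. A throwaway sketch of that selection step, toy code rather than the project's (the real code sizes the array with a src->current_size VLA; a fixed size is used here so it can be zero-initialized):

/* Standalone sketch, not the project's code: drawing distinct random source
 * slots with a marker array. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  const int src_size = 8;     /* stands in for src->current_size */
  const int target = 4;
  int added[8] = { 0 };       /* 1 = this source slot was already copied */
  int picked = 0;

  srand(42);                  /* fixed seed so the output is reproducible */
  while (picked < target) {
    int j = rand() % src_size;

    if (added[j]) {
      continue;               /* already taken, draw again */
    }
    added[j] = 1;
    picked++;
    printf("copy source slot %d\n", j);
  }
  return 0;
}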
