Statistics
| Branch: | Revision:

iof-bird-daemon / nest / rt-table.c @ 094d2bdb

History | View | Annotate | Download (52.4 KB)

1
/*
2
 *        BIRD -- Routing Tables
3
 *
4
 *        (c) 1998--2000 Martin Mares <mj@ucw.cz>
5
 *
6
 *        Can be freely distributed and used under the terms of the GNU GPL.
7
 */
8

    
9
/**
10
 * DOC: Routing tables
11
 *
12
 * Routing tables are probably the most important structures BIRD uses. They
13
 * hold all the information about known networks, the associated routes and
14
 * their attributes.
15
 *
16
 * There are multiple routing tables (a primary one together with any
17
 * number of secondary ones if requested by the configuration). Each table
18
 * is basically a FIB containing entries describing the individual
19
 * destination networks. For each network (represented by structure &net),
20
 * there is a one-way linked list of route entries (&rte), the first entry
21
 * on the list being the best one (i.e., the one we currently use
22
 * for routing), the order of the other ones is undetermined.
23
 *
24
 * The &rte contains information specific to the route (preference, protocol
25
 * metrics, time of last modification etc.) and a pointer to a &rta structure
26
 * (see the route attribute module for a precise explanation) holding the
27
 * remaining route attributes which are expected to be shared by multiple
28
 * routes in order to conserve memory.
29
 */
30

    
31
#undef LOCAL_DEBUG
32

    
33
#include "nest/bird.h"
34
#include "nest/route.h"
35
#include "nest/protocol.h"
36
#include "nest/cli.h"
37
#include "nest/iface.h"
38
#include "lib/resource.h"
39
#include "lib/event.h"
40
#include "lib/string.h"
41
#include "conf/conf.h"
42
#include "filter/filter.h"
43
#include "lib/string.h"
44
#include "lib/alloca.h"
45

    
46
pool *rt_table_pool;
47

    
48
static slab *rte_slab;
49
static linpool *rte_update_pool;
50

    
51
static list routing_tables;
52

    
53
static void rt_format_via(rte *e, byte *via);
54
static void rt_free_hostcache(rtable *tab);
55
static void rt_notify_hostcache(rtable *tab, net *net);
56
static void rt_update_hostcache(rtable *tab);
57
static void rt_next_hop_update(rtable *tab);
58

    
59
static inline void rt_schedule_gc(rtable *tab);
60

    
61
static inline struct ea_list *
62
make_tmp_attrs(struct rte *rt, struct linpool *pool)
63
{
64
  struct ea_list *(*mta)(struct rte *rt, struct linpool *pool);
65
  mta = rt->attrs->src->proto->make_tmp_attrs;
66
  return mta ? mta(rt, rte_update_pool) : NULL;
67
}
68

    
69
/* Like fib_route(), but skips empty net entries */
70
static net *
71
net_route(rtable *tab, ip_addr a, int len)
72
{
73
  ip_addr a0;
74
  net *n;
75

    
76
  while (len >= 0)
77
    {
78
      a0 = ipa_and(a, ipa_mkmask(len));
79
      n = fib_find(&tab->fib, &a0, len);
80
      if (n && n->routes)
81
        return n;
82
      len--;
83
    }
84
  return NULL;
85
}
86

    
87
static void
88
rte_init(struct fib_node *N)
89
{
90
  net *n = (net *) N;
91

    
92
  N->flags = 0;
93
  n->routes = NULL;
94
}
95

    
96
/**
97
 * rte_find - find a route
98
 * @net: network node
99
 * @src: route source
100
 *
101
 * The rte_find() function returns a route for destination @net
102
 * which is from route source @src.
103
 */
104
rte *
105
rte_find(net *net, struct rte_src *src)
106
{
107
  rte *e = net->routes;
108

    
109
  while (e && e->attrs->src != src)
110
    e = e->next;
111
  return e;
112
}
113

    
114
/**
115
 * rte_get_temp - get a temporary &rte
116
 * @a: attributes to assign to the new route (a &rta; in case it's
117
 * un-cached, rte_update() will create a cached copy automatically)
118
 *
119
 * Create a temporary &rte and bind it with the attributes @a.
120
 * Also set route preference to the default preference set for
121
 * the protocol.
122
 */
123
rte *
124
rte_get_temp(rta *a)
125
{
126
  rte *e = sl_alloc(rte_slab);
127

    
128
  e->attrs = a;
129
  e->flags = 0;
130
  e->pref = a->src->proto->preference;
131
  return e;
132
}
133

    
134
/* Copy-on-write helper - make a private writable copy of route @r,
   taking a new reference on its (shared) attribute block. */
rte *
rte_do_cow(rte *r)
{
  rte *copy;

  copy = sl_alloc(rte_slab);
  memcpy(copy, r, sizeof(rte));
  copy->attrs = rta_clone(r->attrs);
  copy->flags = 0;

  return copy;
}
144

    
145
static int                                /* Actually better or at least as good as */
146
rte_better(rte *new, rte *old)
147
{
148
  int (*better)(rte *, rte *);
149

    
150
  if (!old)
151
    return 1;
152
  if (new->pref > old->pref)
153
    return 1;
154
  if (new->pref < old->pref)
155
    return 0;
156
  if (new->attrs->src->proto->proto != old->attrs->src->proto->proto)
157
    {
158
      /*
159
       *  If the user has configured protocol preferences, so that two different protocols
160
       *  have the same preference, try to break the tie by comparing addresses. Not too
161
       *  useful, but keeps the ordering of routes unambiguous.
162
       */
163
      return new->attrs->src->proto->proto > old->attrs->src->proto->proto;
164
    }
165
  if (better = new->attrs->src->proto->rte_better)
166
    return better(new, old);
167
  return 0;
168
}
169

    
170
/* Emit one L_TRACE log line describing route @e on behalf of protocol @p;
   @dir is '>' for incoming and '<' for outgoing events, @msg a short verdict. */
static void
rte_trace(struct proto *p, rte *e, int dir, char *msg)
{
  byte via[STD_ADDRESS_P_LENGTH+32];

  rt_format_via(e, via);
  log(L_TRACE "%s %c %s %I/%d %s", p->name, dir, msg, e->net->n.prefix, e->net->n.pxlen, via);
}
178

    
179
static inline void
180
rte_trace_in(unsigned int flag, struct proto *p, rte *e, char *msg)
181
{
182
  if (p->debug & flag)
183
    rte_trace(p, e, '>', msg);
184
}
185

    
186
static inline void
187
rte_trace_out(unsigned int flag, struct proto *p, rte *e, char *msg)
188
{
189
  if (p->debug & flag)
190
    rte_trace(p, e, '<', msg);
191
}
192

    
193
/*
 * export_filter - run the export path (import_control hook + out_filter)
 * on route @rt0 for the protocol behind announce hook @ah.
 *
 * Returns the (possibly filter-modified) route on acceptance, NULL on
 * rejection. If the filter produced a private copy of the route, that
 * copy is stored in *@rt_free and must be freed by the caller with
 * rte_free(). When @tmpa is NULL, temporary attributes are generated
 * internally. When @silent is set, no statistics are updated and no
 * trace messages are logged (used for re-evaluating old routes).
 */
static rte *
export_filter(struct announce_hook *ah, rte *rt0, rte **rt_free, ea_list **tmpa, int silent)
{
  struct proto *p = ah->proto;
  struct filter *filter = ah->out_filter;
  struct proto_stats *stats = ah->stats;
  ea_list *tmpb = NULL;
  rte *rt;
  int v;

  rt = rt0;
  *rt_free = NULL;

  /* If caller does not care for eattrs, we prepare one internally */
  if (!tmpa)
    {
      tmpb = make_tmp_attrs(rt, rte_update_pool);
      tmpa = &tmpb;
    }

  /* Protocol may veto (v < 0) or force-accept (v > 0) before filtering */
  v = p->import_control ? p->import_control(p, &rt, tmpa, rte_update_pool) : 0;
  if (v < 0)
    {
      if (silent)
        goto reject;

      stats->exp_updates_rejected++;
      rte_trace_out(D_FILTERS, p, rt, "rejected by protocol");
      goto reject;
    }
  if (v > 0)
    {
      if (!silent)
        rte_trace_out(D_FILTERS, p, rt, "forced accept by protocol");
      goto accept;
    }

  v = filter && ((filter == FILTER_REJECT) ||
                 (f_run(filter, &rt, tmpa, rte_update_pool, FF_FORCE_TMPATTR) > F_ACCEPT));
  if (v)
    {
      if (silent)
        goto reject;

      stats->exp_updates_filtered++;
      rte_trace_out(D_FILTERS, p, rt, "filtered out");
      goto reject;
    }

 accept:
  /* Filters may have replaced rt by a modified copy; hand it to the caller */
  if (rt != rt0)
    *rt_free = rt;
  return rt;

 reject:
  /* Discard temporary rte */
  if (rt != rt0)
    rte_free(rt);
  return NULL;
}
253

    
254
/*
 * do_rt_notify - deliver one already-filtered route change to the
 * protocol behind @ah, applying the export route limit and updating
 * export statistics. @new/@old are the accepted new/old routes (either
 * may be NULL), @tmpa the temporary attributes of @new, @refeed is set
 * when this delivery is part of a refeed run.
 */
static void
do_rt_notify(struct announce_hook *ah, net *net, rte *new, rte *old, ea_list *tmpa, int refeed)
{
  struct proto *p = ah->proto;
  struct proto_stats *stats = ah->stats;


  /*
   * First, apply export limit.
   *
   * Export route limits has several problems. Because exp_routes
   * counter is reset before refeed, we don't really know whether
   * limit is breached and whether the update is new or not. Therefore
   * the number of really exported routes may exceed the limit
   * temporarily (routes exported before and new routes in refeed).
   *
   * Minor advantage is that if the limit is decreased and refeed is
   * requested, the number of exported routes really decrease.
   *
   * Second problem is that with export limits, we don't know whether
   * old was really exported (it might be blocked by limit). When a
   * withdraw is exported, we announce it even when the previous
   * update was blocked. This is not a big issue, but the same problem
   * is in updating exp_routes counter. Therefore, to be consistent in
   * increases and decreases of exp_routes, we count exported routes
   * regardless of blocking by limits.
   *
   * Similar problem is in handling updates - when a new route is
   * received and blocking is active, the route would be blocked, but
   * when an update for the route will be received later, the update
   * would be propagated (as old != NULL). Therefore, we have to block
   * also non-new updates (contrary to import blocking).
   */

  struct proto_limit *l = ah->out_limit;
  if (l && new)
    {
      if ((!old || refeed) && (stats->exp_routes >= l->limit))
        proto_notify_limit(ah, l, stats->exp_routes);

      if (l->state == PLS_BLOCKED)
        {
          stats->exp_routes++;        /* see note above */
          stats->exp_updates_rejected++;
          rte_trace_out(D_FILTERS, p, new, "rejected [limit]");
          /* Blocked update degrades to a withdraw of the old route */
          new = NULL;

          if (!old)
            return;
        }
    }


  if (new)
    stats->exp_updates_accepted++;
  else
    stats->exp_withdraws_accepted++;

  /* Hack: We do not decrease exp_routes during refeed, we instead
     reset exp_routes at the start of refeed. */
  if (new)
    stats->exp_routes++;
  if (old && !refeed)
    stats->exp_routes--;

  if (p->debug & D_ROUTES)
    {
      if (new && old)
        rte_trace_out(D_ROUTES, p, new, "replaced");
      else if (new)
        rte_trace_out(D_ROUTES, p, new, "added");
      else if (old)
        rte_trace_out(D_ROUTES, p, old, "removed");
    }
  if (!new)
    p->rt_notify(p, ah->table, net, NULL, old, NULL);
  else if (tmpa)
    {
      /* Temporarily splice new's cached eattrs onto the end of the
         temporary list for the duration of the callback, then unlink. */
      ea_list *t = tmpa;
      while (t->next)
        t = t->next;
      t->next = new->attrs->eattrs;
      p->rt_notify(p, ah->table, net, new, old, tmpa);
      t->next = NULL;
    }
  else
    p->rt_notify(p, ah->table, net, new, old, new->attrs->eattrs);
}
342

    
343
/*
 * rt_notify_basic - export-path handling for RA_OPTIMAL / RA_ANY
 * announcements: run @new and @old through the export filter and, if
 * anything survived, deliver the change via do_rt_notify().
 */
static void
rt_notify_basic(struct announce_hook *ah, net *net, rte *new, rte *old, ea_list *tmpa, int refeed)
{
  // struct proto *p = ah->proto;
  struct proto_stats *stats = ah->stats;

  /* Filter-created private copies to dispose of at the end */
  rte *new_free = NULL;
  rte *old_free = NULL;

  if (new)
    stats->exp_updates_received++;
  else
    stats->exp_withdraws_received++;

  /*
   * This is a tricky part - we don't know whether route 'old' was
   * exported to protocol 'p' or was filtered by the export filter.
   * We try to run the export filter to know this to have a correct
   * value in 'old' argument of rte_update (and proper filter value)
   *
   * FIXME - this is broken because 'configure soft' may change
   * filters but keep routes. Refeed is expected to be called after
   * change of the filters and with old == new, therefore we do not
   * even try to run the filter on an old route, This may lead to 
   * 'spurious withdraws' but ensure that there are no 'missing
   * withdraws'.
   *
   * This is not completely safe as there is a window between
   * reconfiguration and the end of refeed - if a newly filtered
   * route disappears during this period, proper withdraw is not
   * sent (because old would be also filtered) and the route is
   * not refeeded (because it disappeared before that).
   */

  if (new)
    new = export_filter(ah, new, &new_free, &tmpa, 0);

  /* Old route is re-filtered silently - no stats, no trace */
  if (old && !refeed)
    old = export_filter(ah, old, &old_free, NULL, 1);

  /* FIXME - This is broken because of incorrect 'old' value (see above) */
  if (!new && !old)
    return;

  do_rt_notify(ah, net, new, old, tmpa, refeed);

  /* Discard temporary rte's */
  if (new_free)
    rte_free(new_free);
  if (old_free)
    rte_free(old_free);
}
395

    
396
/*
 * rt_notify_accepted - export-path handling for RA_ACCEPTED
 * announcements (sorted tables): the exported route is the first one
 * in the net's list accepted by the export filter, so on every change
 * we must determine both the new and the old "first accepted" route
 * and notify the protocol only if they differ. @feed is 0 for a
 * regular update, 1 for initial feed, 2 for refeed.
 */
static void
rt_notify_accepted(struct announce_hook *ah, net *net, rte *new_changed, rte *old_changed, rte *before_old,
                   ea_list *tmpa, int feed)
{
  // struct proto *p = ah->proto;
  struct proto_stats *stats = ah->stats;

  rte *new_best = NULL;
  rte *old_best = NULL;
  rte *new_free = NULL;
  rte *old_free = NULL;
  rte *r;

  /* Used to track whether we met old_changed position. If it is NULL
     it was the first and met it implicitly before current best route. */
  int old_meet = (old_changed && !before_old) ? 1 : 0;

  if (new_changed)
    stats->exp_updates_received++;
  else
    stats->exp_withdraws_received++;

  /* First, find the new_best route - first accepted by filters */
  for (r=net->routes; r; r=r->next)
    {
      if (new_best = export_filter(ah, r, &new_free, &tmpa, 0))
        break;

      /* Note if we walked around the position of old_changed route */
      if (r == before_old)
        old_meet = 1;
    }

  /* 
   * Second, handle the feed case. That means we do not care for
   * old_best. It is NULL for feed, and the new_best for refeed. 
   * For refeed, there is a hack similar to one in rt_notify_basic()
   * to ensure withdraws in case of changed filters
   */
  if (feed)
    {
      if (feed == 2)        /* refeed */
        old_best = new_best ? new_best : net->routes;
      else
        old_best = NULL;

      if (!new_best && !old_best)
        return;

      goto found;
    }

  /*
   * Now, we find the old_best route. Generally, it is the same as the
   * new_best, unless new_best is the same as new_changed or
   * old_changed is accepted before new_best.
   *
   * There are four cases:
   *
   * - We would find and accept old_changed before new_best, therefore
   *   old_changed is old_best. In remaining cases we suppose this
   *   is not true.
   *
   * - We found no new_best, therefore there is also no old_best and
   *   we ignore this withdraw.
   *
   * - We found new_best different than new_changed, therefore
   *   old_best is the same as new_best and we ignore this update.
   *
   * - We found new_best the same as new_changed, therefore it cannot
   *   be old_best and we have to continue search for old_best.
   */

  /* First case */
  if (old_meet)
    if (old_best = export_filter(ah, old_changed, &old_free, NULL, 1))
      goto found;

  /* Second case */
  if (!new_best)
    return;

  /* Third case, we use r instead of new_best, because export_filter() could change it */
  if (r != new_changed)
    {
      if (new_free)
        rte_free(new_free);
      return;
    }

  /* Fourth case */
  for (r=r->next; r; r=r->next)
    {
      if (old_best = export_filter(ah, r, &old_free, NULL, 1))
        goto found;

      if (r == before_old)
        if (old_best = export_filter(ah, old_changed, &old_free, NULL, 1))
          goto found;
    }

  /* Implicitly, old_best is NULL and new_best is non-NULL */

 found:
  do_rt_notify(ah, net, new_best, old_best, tmpa, (feed == 2));

  /* Discard temporary rte's */
  if (new_free)
    rte_free(new_free);
  if (old_free)
    rte_free(old_free);
}
508

    
509
/**
510
 * rte_announce - announce a routing table change
511
 * @tab: table the route has been added to
512
 * @type: type of route announcement (RA_OPTIMAL or RA_ANY)
513
 * @net: network in question
514
 * @new: the new route to be announced
515
 * @old: the previous route for the same network
516
 * @tmpa: a list of temporary attributes belonging to the new route
517
 *
518
 * This function gets a routing table update and announces it
519
 * to all protocols that acccepts given type of route announcement
520
 * and are connected to the same table by their announcement hooks.
521
 *
522
 * Route announcement of type RA_OPTIMAL si generated when optimal
523
 * route (in routing table @tab) changes. In that case @old stores the
524
 * old optimal route.
525
 *
526
 * Route announcement of type RA_ANY si generated when any route (in
527
 * routing table @tab) changes In that case @old stores the old route
528
 * from the same protocol.
529
 *
530
 * For each appropriate protocol, we first call its import_control()
531
 * hook which performs basic checks on the route (each protocol has a
532
 * right to veto or force accept of the route before any filter is
533
 * asked) and adds default values of attributes specific to the new
534
 * protocol (metrics, tags etc.).  Then it consults the protocol's
535
 * export filter and if it accepts the route, the rt_notify() hook of
536
 * the protocol gets called.
537
 */
538
static void
539
rte_announce(rtable *tab, unsigned type, net *net, rte *new, rte *old, rte *before_old, ea_list *tmpa)
540
{
541
  struct announce_hook *a;
542

    
543
  if (type == RA_OPTIMAL)
544
    {
545
      if (new)
546
        new->attrs->src->proto->stats.pref_routes++;
547
      if (old)
548
        old->attrs->src->proto->stats.pref_routes--;
549

    
550
      if (tab->hostcache)
551
        rt_notify_hostcache(tab, net);
552
    }
553

    
554
  WALK_LIST(a, tab->hooks)
555
    {
556
      ASSERT(a->proto->core_state == FS_HAPPY || a->proto->core_state == FS_FEEDING);
557
      if (a->proto->accept_ra_types == type)
558
        if (type == RA_ACCEPTED)
559
          rt_notify_accepted(a, net, new, old, before_old, tmpa, 0);
560
        else
561
          rt_notify_basic(a, net, new, old, tmpa, 0);
562
    }
563
}
564

    
565
static inline int
566
rte_validate(rte *e)
567
{
568
  int c;
569
  net *n = e->net;
570

    
571
  if ((n->n.pxlen > BITS_PER_IP_ADDRESS) || !ip_is_prefix(n->n.prefix,n->n.pxlen))
572
    {
573
      log(L_WARN "Ignoring bogus prefix %I/%d received via %s",
574
          n->n.prefix, n->n.pxlen, e->sender->proto->name);
575
      return 0;
576
    }
577

    
578
  c = ipa_classify_net(n->n.prefix);
579
  if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK))
580
    {
581
      log(L_WARN "Ignoring bogus route %I/%d received via %s",
582
          n->n.prefix, n->n.pxlen, e->sender->proto->name);
583
      return 0;
584
    }
585

    
586
  return 1;
587
}
588

    
589
/**
590
 * rte_free - delete a &rte
591
 * @e: &rte to be deleted
592
 *
593
 * rte_free() deletes the given &rte from the routing table it's linked to.
594
 */
595
void
596
rte_free(rte *e)
597
{
598
  if (rta_is_cached(e->attrs))
599
    rta_free(e->attrs);
600
  sl_free(rte_slab, e);
601
}
602

    
603
static inline void
604
rte_free_quick(rte *e)
605
{
606
  rta_free(e->attrs);
607
  sl_free(rte_slab, e);
608
}
609

    
610
static int
611
rte_same(rte *x, rte *y)
612
{
613
  return
614
    x->attrs == y->attrs &&
615
    x->flags == y->flags &&
616
    x->pflags == y->pflags &&
617
    x->pref == y->pref &&
618
    (!x->attrs->src->proto->rte_same || x->attrs->src->proto->rte_same(x, y));
619
}
620

    
621
/*
 * rte_recalculate - the core of route insertion/withdrawal. Replaces
 * (or removes, when @new is NULL) the route from source @src in @net's
 * route list, keeps the list ordered (best route first; fully sorted
 * for sorted tables), updates import statistics and limits, and
 * announces the resulting changes via rte_announce(). Takes ownership
 * of @new (it is freed on every rejection path).
 */
static void
rte_recalculate(struct announce_hook *ah, net *net, rte *new, ea_list *tmpa, struct rte_src *src)
{
  struct proto *p = ah->proto;
  struct rtable *table = ah->table;
  struct proto_stats *stats = ah->stats;
  rte *before_old = NULL;
  rte *old_best = net->routes;
  rte *old = NULL;
  rte **k;

  k = &net->routes;                        /* Find and remove original route from the same protocol */
  while (old = *k)
    {
      if (old->attrs->src == src)
        {
          /* If there is the same route in the routing table but from
           * a different sender, then there are two paths from the
           * source protocol to this routing table through transparent
           * pipes, which is not allowed.
           *
           * We log that and ignore the route. If it is withdraw, we
           * ignore it completely (there might be 'spurious withdraws',
           * see FIXME in do_rte_announce())
           */
          if (old->sender->proto != p)
            {
              if (new)
                {
                  log(L_ERR "Pipe collision detected when sending %I/%d to table %s",
                      net->n.prefix, net->n.pxlen, table->name);
                  rte_free_quick(new);
                }
              return;
            }

          if (new && rte_same(old, new))
            {
              /* No changes, ignore the new route */
              stats->imp_updates_ignored++;
              rte_trace_in(D_ROUTES, p, new, "ignored");
              rte_free_quick(new);
#ifdef CONFIG_RIP
              /* lastmod is used internally by RIP as the last time
                 when the route was received. */
              if (src->proto->proto == &proto_rip)
                old->lastmod = now;
#endif
              return;
            }
          /* Unlink the old route; it is freed at the very end */
          *k = old->next;
          break;
        }
      k = &old->next;
      before_old = old;
    }

  /* before_old is meaningful only when an old route was actually found */
  if (!old)
    before_old = NULL;

  if (!old && !new)
    {
      stats->imp_withdraws_ignored++;
      return;
    }

  /* Apply the import route limit only to genuinely new routes */
  struct proto_limit *l = ah->in_limit;
  if (l && !old && new)
    {
      if (stats->imp_routes >= l->limit)
        proto_notify_limit(ah, l, stats->imp_routes);

      if (l->state == PLS_BLOCKED)
        {
          stats->imp_updates_ignored++;
          rte_trace_in(D_FILTERS, p, new, "ignored [limit]");
          rte_free_quick(new);
          return;
        }
    }

  if (new)
    stats->imp_updates_accepted++;
  else
    stats->imp_withdraws_accepted++;

  if (new)
    stats->imp_routes++;
  if (old)
    stats->imp_routes--;

  if (table->config->sorted)
    {
      /* If routes are sorted, just insert new route to appropriate position */
      if (new)
        {
          if (before_old && !rte_better(new, before_old))
            k = &before_old->next;
          else
            k = &net->routes;

          for (; *k; k=&(*k)->next)
            if (rte_better(new, *k))
              break;

          new->next = *k;
          *k = new;
        }
    }
  else
    {
      /* If routes are not sorted, find the best route and move it on
         the first position. There are several optimized cases. */

      if (src->proto->rte_recalculate && src->proto->rte_recalculate(table, net, new, old, old_best))
        goto do_recalculate;

      if (new && rte_better(new, old_best))
        {
          /* The first case - the new route is cleary optimal,
             we link it at the first position */

          new->next = net->routes;
          net->routes = new;
        }
      else if (old == old_best)
        {
          /* The second case - the old best route disappeared, we add the
             new route (if we have any) to the list (we don't care about
             position) and then we elect the new optimal route and relink
             that route at the first position and announce it. New optimal
             route might be NULL if there is no more routes */

        do_recalculate:
          /* Add the new route to the list */
          if (new)
            {
              new->next = net->routes;
              net->routes = new;
            }

          /* Find a new optimal route (if there is any) */
          if (net->routes)
            {
              rte **bp = &net->routes;
              for (k=&(*bp)->next; *k; k=&(*k)->next)
                if (rte_better(*k, *bp))
                  bp = k;

              /* And relink it */
              rte *best = *bp;
              *bp = best->next;
              best->next = net->routes;
              net->routes = best;
            }
        }
      else if (new)
        {
          /* The third case - the new route is not better than the old
             best route (therefore old_best != NULL) and the old best
             route was not removed (therefore old_best == net->routes).
             We just link the new route after the old best route. */

          ASSERT(net->routes != NULL);
          new->next = net->routes->next;
          net->routes->next = new;
        }
      /* The fourth (empty) case - suboptimal route was removed, nothing to do */
    }

  if (new)
    new->lastmod = now;

  /* Log the route change */
  if (new)
    rte_trace_in(D_ROUTES, p, new, net->routes == new ? "added [best]" : "added");

  if (!new && (p->debug & D_ROUTES))
    {
      if (old != old_best)
        rte_trace_in(D_ROUTES, p, old, "removed");
      else if (net->routes)
        rte_trace_in(D_ROUTES, p, old, "removed [replaced]");
      else
        rte_trace_in(D_ROUTES, p, old, "removed [sole]");
    }

  /* Propagate the route change */
  rte_announce(table, RA_ANY, net, new, old, NULL, tmpa);
  if (net->routes != old_best)
    rte_announce(table, RA_OPTIMAL, net, net->routes, old_best, NULL, tmpa);
  if (table->config->sorted)
    rte_announce(table, RA_ACCEPTED, net, new, old, before_old, tmpa);

  /* Schedule garbage collection when the net became empty and both the
     operation counter and the minimum-interval conditions are met */
  if (!net->routes &&
      (table->gc_counter++ >= table->config->gc_max_ops) &&
      (table->gc_time + table->config->gc_min_time <= now))
    rt_schedule_gc(table);

  if (old)
    {
      if (p->rte_remove)
        p->rte_remove(net, old);
      rte_free_quick(old);
    }
  if (new)
    {
      if (p->rte_insert)
        p->rte_insert(net, new);
    }
}
832

    
833
static int rte_update_nest_cnt;                /* Nesting counter to allow recursive updates */
834

    
835
static inline void
836
rte_update_lock(void)
837
{
838
  rte_update_nest_cnt++;
839
}
840

    
841
static inline void
842
rte_update_unlock(void)
843
{
844
  if (!--rte_update_nest_cnt)
845
    lp_flush(rte_update_pool);
846
}
847

    
848
/**
849
 * rte_update - enter a new update to a routing table
850
 * @table: table to be updated
851
 * @ah: pointer to table announce hook
852
 * @net: network node
853
 * @p: protocol submitting the update
854
 * @src: protocol originating the update
855
 * @new: a &rte representing the new route or %NULL for route removal.
856
 *
857
 * This function is called by the routing protocols whenever they discover
858
 * a new route or wish to update/remove an existing route. The right announcement
859
 * sequence is to build route attributes first (either un-cached with @aflags set
860
 * to zero or a cached one using rta_lookup(); in this case please note that
861
 * you need to increase the use count of the attributes yourself by calling
862
 * rta_clone()), call rte_get_temp() to obtain a temporary &rte, fill in all
863
 * the appropriate data and finally submit the new &rte by calling rte_update().
864
 *
865
 * @src specifies the protocol that originally created the route and the meaning
866
 * of protocol-dependent data of @new. If @new is not %NULL, @src have to be the
867
 * same value as @new->attrs->proto. @p specifies the protocol that called
868
 * rte_update(). In most cases it is the same protocol as @src. rte_update()
869
 * stores @p in @new->sender;
870
 *
871
 * When rte_update() gets any route, it automatically validates it (checks,
872
 * whether the network and next hop address are valid IP addresses and also
873
 * whether a normal routing protocol doesn't try to smuggle a host or link
874
 * scope route to the table), converts all protocol dependent attributes stored
875
 * in the &rte to temporary extended attributes, consults import filters of the
876
 * protocol to see if the route should be accepted and/or its attributes modified,
877
 * stores the temporary attributes back to the &rte.
878
 *
879
 * Now, having a "public" version of the route, we
880
 * automatically find any old route defined by the protocol @src
881
 * for network @n, replace it by the new one (or removing it if @new is %NULL),
882
 * recalculate the optimal route for this destination and finally broadcast
883
 * the change (if any) to all routing protocols by calling rte_announce().
884
 *
885
 * All memory used for attribute lists and other temporary allocations is taken
886
 * from a special linear pool @rte_update_pool and freed when rte_update()
887
 * finishes.
888
 */
889

    
890
void
rte_update2(struct announce_hook *ah, net *net, rte *new, struct rte_src *src)
{
  struct proto *p = ah->proto;
  struct proto_stats *stats = ah->stats;
  struct filter *filter = ah->in_filter;
  ea_list *tmpa = NULL;

  /* All temporary allocations below come from rte_update_pool and are
     released by the matching rte_update_unlock() */
  rte_update_lock();
  if (new)
    {
      new->sender = ah;

      stats->imp_updates_received++;
      if (!rte_validate(new))
        {
          rte_trace_in(D_FILTERS, p, new, "invalid");
          stats->imp_updates_invalid++;
          goto drop;
        }
      if (filter == FILTER_REJECT)
        {
          stats->imp_updates_filtered++;
          rte_trace_in(D_FILTERS, p, new, "filtered out");
          goto drop;
        }

      tmpa = make_tmp_attrs(new, rte_update_pool);
      if (filter)
        {
          ea_list *old_tmpa = tmpa;
          int fr = f_run(filter, &new, &tmpa, rte_update_pool, 0);
          if (fr > F_ACCEPT)
            {
              stats->imp_updates_filtered++;
              rte_trace_in(D_FILTERS, p, new, "filtered out");
              goto drop;
            }
          /* Filter modified the temporary attributes - let the source
             protocol store them back into its private rte fields */
          if (tmpa != old_tmpa && src->proto->store_tmp_attrs)
            src->proto->store_tmp_attrs(new, tmpa);
        }

      if (!rta_is_cached(new->attrs)) /* Need to copy attributes */
        new->attrs = rta_lookup(new->attrs);
      new->flags |= REF_COW;
    }
  else
    {
      stats->imp_withdraws_received++;

      /* A withdraw without a known net or source carries no information */
      if (!net || !src)
        {
          stats->imp_withdraws_ignored++;
          rte_update_unlock();
          return;
        }
    }

  rte_recalculate(ah, net, new, tmpa, src);
  rte_update_unlock();
  return;

drop:
  /* Rejected update is converted to a withdraw of the existing route */
  rte_free(new);
  rte_recalculate(ah, net, NULL, NULL, src);
  rte_update_unlock();
}
957

    
958
/* Independent call to rte_announce(), used from next hop
   recalculation, outside of rte_update(). new must be non-NULL */
static inline void 
rte_announce_i(rtable *tab, unsigned type, net *n, rte *new, rte *old)
{
  ea_list *tmpa;

  /* Lock/unlock so the temporary attributes live in rte_update_pool */
  rte_update_lock();
  tmpa = make_tmp_attrs(new, rte_update_pool);
  rte_announce(tab, type, n, new, old, NULL, tmpa);
  rte_update_unlock();
}
970

    
971
void
972
rte_discard(rtable *t, rte *old)        /* Non-filtered route deletion, used during garbage collection */
973
{
974
  rte_update_lock();
975
  rte_recalculate(old->sender, old->net, NULL, NULL, old->attrs->src);
976
  rte_update_unlock();
977
}
978

    
979
/**
 * rte_dump - dump a route
 * @e: &rte to be dumped
 *
 * This function dumps contents of a &rte to debug output.
 */
void
rte_dump(rte *e)
{
  net *n = e->net;
  debug("%-1I/%2d ", n->n.prefix, n->n.pxlen);
  /* KF = kernel/FIB flags, PF = protocol flags, lm = seconds since last change */
  debug("KF=%02x PF=%02x pref=%d lm=%d ", n->n.flags, e->pflags, e->pref, now-e->lastmod);
  rta_dump(e->attrs);
  /* Let the originating protocol dump its private attributes, if it can */
  if (e->attrs->src->proto->proto->dump_attrs)
    e->attrs->src->proto->proto->dump_attrs(e);
  debug("\n");
}
996

    
997
/**
 * rt_dump - dump a routing table
 * @t: routing table to be dumped
 *
 * This function dumps contents of a given routing table to debug output.
 */
void
rt_dump(rtable *t)
{
  rte *e;
  net *n;
  struct announce_hook *a;

  debug("Dump of routing table <%s>\n", t->name);
#ifdef DEBUGGING
  fib_check(&t->fib);
#endif
  /* Walk all networks; each net carries a linked list of its routes */
  FIB_WALK(&t->fib, fn)
    {
      n = (net *) fn;
      for(e=n->routes; e; e=e->next)
        rte_dump(e);
    }
  FIB_WALK_END;
  /* List the protocols subscribed to announcements from this table */
  WALK_LIST(a, t->hooks)
    debug("\tAnnounces routes to protocol %s\n", a->proto->name);
  debug("\n");
}
1025

    
1026
/**
1027
 * rt_dump_all - dump all routing tables
1028
 *
1029
 * This function dumps contents of all routing tables to debug output.
1030
 */
1031
void
1032
rt_dump_all(void)
1033
{
1034
  rtable *t;
1035

    
1036
  WALK_LIST(t, routing_tables)
1037
    rt_dump(t);
1038
}
1039

    
1040
static inline void
1041
rt_schedule_gc(rtable *tab)
1042
{
1043
  if (tab->gc_scheduled)
1044
    return;
1045

    
1046
  tab->gc_scheduled = 1;
1047
  ev_schedule(tab->rt_event);
1048
}
1049

    
1050
static inline void
1051
rt_schedule_hcu(rtable *tab)
1052
{
1053
  if (tab->hcu_scheduled)
1054
    return;
1055

    
1056
  tab->hcu_scheduled = 1;
1057
  ev_schedule(tab->rt_event);
1058
}
1059

    
1060
/* Request a next hop update pass. nhu_state is a tiny state machine:
   0 = idle, 1 = scheduled, 2 = running, 3 = running with another
   pass requested. The event is only scheduled on the idle->scheduled
   transition; during a run, setting bit 0 asks for a re-run. */
static inline void
rt_schedule_nhu(rtable *tab)
{
  if (tab->nhu_state == 0)
    ev_schedule(tab->rt_event);

  /* state change 0->1, 2->3 */
  tab->nhu_state |= 1;
}
1069

    
1070
/* Remove FIB entries that no longer hold any routes. Run from the
   table maintenance event when GC has been scheduled. */
static void
rt_prune_nets(rtable *tab)
{
  struct fib_iterator fit;
  int ncnt = 0, ndel = 0;

#ifdef DEBUGGING
  fib_check(&tab->fib);
#endif

  FIB_ITERATE_INIT(&fit, &tab->fib);
again:
  FIB_ITERATE_START(&tab->fib, &fit, f)
    {
      net *n = (net *) f;
      ncnt++;
      if (!n->routes)                /* Orphaned FIB entry */
        {
          /* Deleting invalidates the walk - park the iterator,
             delete, and restart from the saved position */
          FIB_ITERATE_PUT(&fit, f);
          fib_delete(&tab->fib, f);
          ndel++;
          goto again;
        }
    }
  FIB_ITERATE_END(f);
  DBG("Pruned %d of %d networks\n", ndel, ncnt);

  /* Reset GC bookkeeping so the next cycle starts fresh */
  tab->gc_counter = 0;
  tab->gc_time = now;
  tab->gc_scheduled = 0;
}
1101

    
1102
/* Table maintenance event hook - dispatches whichever deferred jobs
   (hostcache update, next hop update, garbage collection) have been
   scheduled since the last run. */
static void
rt_event(void *ptr)
{
  rtable *tab = ptr;

  if (tab->hcu_scheduled)
    rt_update_hostcache(tab);

  if (tab->nhu_state)
    rt_next_hop_update(tab);

  if (tab->gc_scheduled)
    {
      rt_prune_nets(tab);
      rt_prune_sources(); // FIXME this should be moved to independent event
    }
}
1119

    
1120
void
1121
rt_setup(pool *p, rtable *t, char *name, struct rtable_config *cf)
1122
{
1123
  bzero(t, sizeof(*t));
1124
  fib_init(&t->fib, p, sizeof(net), 0, rte_init);
1125
  t->name = name;
1126
  t->config = cf;
1127
  init_list(&t->hooks);
1128
  if (cf)
1129
    {
1130
      t->rt_event = ev_new(p);
1131
      t->rt_event->hook = rt_event;
1132
      t->rt_event->data = t;
1133
      t->gc_time = now;
1134
    }
1135
}
1136

    
1137
/**
1138
 * rt_init - initialize routing tables
1139
 *
1140
 * This function is called during BIRD startup. It initializes the
1141
 * routing table module.
1142
 */
1143
void
1144
rt_init(void)
1145
{
1146
  rta_init();
1147
  rt_table_pool = rp_new(&root_pool, "Routing tables");
1148
  rte_update_pool = lp_new(rt_table_pool, 4080);
1149
  rte_slab = sl_new(rt_table_pool, sizeof(rte));
1150
  init_list(&routing_tables);
1151
}
1152

    
1153

    
1154
/* Called from proto_schedule_flush_loop() only,
1155
   ensuring that all prune states are zero */
1156
void
1157
rt_schedule_prune_all(void)
1158
{
1159
  rtable *t;
1160

    
1161
  WALK_LIST(t, routing_tables)
1162
    t->prune_state = 1;
1163
}
1164

    
1165
/*
 * rt_prune_step - one resumable step of table pruning.
 *
 * Discards routes whose sending protocol is neither happy nor
 * feeding (i.e. it is being flushed) and deletes FIB entries left
 * without routes. At most *max_feed routes are discarded per call;
 * the walk position is kept in tab->prune_fit so it can resume.
 * Returns 1 when the table is fully pruned, 0 when interrupted.
 */
static inline int
rt_prune_step(rtable *tab, int *max_feed)
{
  struct fib_iterator *fit = &tab->prune_fit;

  DBG("Pruning route table %s\n", tab->name);
#ifdef DEBUGGING
  fib_check(&tab->fib);
#endif

  if (tab->prune_state == 0)        /* Nothing scheduled */
    return 1;

  if (tab->prune_state == 1)        /* Freshly scheduled - start a new walk */
    {
      FIB_ITERATE_INIT(fit, &tab->fib);
      tab->prune_state = 2;
    }

again:
  FIB_ITERATE_START(&tab->fib, fit, fn)
    {
      net *n = (net *) fn;
      rte *e;

    rescan:
      for (e=n->routes; e; e=e->next)
        if (e->sender->proto->core_state != FS_HAPPY &&
            e->sender->proto->core_state != FS_FEEDING)
          {
            if (*max_feed <= 0)
              {
                /* Budget exhausted - park the iterator and yield */
                FIB_ITERATE_PUT(fit, fn);
                return 0;
              }

            rte_discard(tab, e);
            (*max_feed)--;

            /* The route list was modified under us - rescan it */
            goto rescan;
          }
      if (!n->routes)                /* Orphaned FIB entry */
        {
          FIB_ITERATE_PUT(fit, fn);
          fib_delete(&tab->fib, fn);
          goto again;
        }
    }
  FIB_ITERATE_END(fn);

#ifdef DEBUGGING
  fib_check(&tab->fib);
#endif

  tab->prune_state = 0;
  return 1;
}
1222

    
1223
/**
1224
 * rt_prune_loop - prune routing tables
1225
 * @tab: routing table to be pruned
1226
 *
1227
 * The prune loop scans routing tables and removes routes belonging to
1228
 * inactive protocols and also stale network entries. Returns 1 when
1229
 * all such routes are pruned. It is a part of the protocol flushing
1230
 * loop.
1231
 */
1232
int
1233
rt_prune_loop(void)
1234
{
1235
  rtable *t;
1236
  int max_feed = 512;
1237

    
1238
  WALK_LIST(t, routing_tables)
1239
    if (! rt_prune_step(t, &max_feed))
1240
      return 0;
1241

    
1242
  return 1;
1243
}
1244

    
1245
/* Called before parsing a configuration - create the implicit
   'master' table so it exists even when not mentioned explicitly. */
void
rt_preconfig(struct config *c)
{
  struct symbol *s = cf_find_symbol("master");

  init_list(&c->tables);
  c->master_rtc = rt_new_table(s);
}
1253

    
1254

    
1255
/* 
1256
 * Some functions for handling internal next hop updates
1257
 * triggered by rt_schedule_nhu().
1258
 */
1259

    
1260
static inline int
1261
rta_next_hop_outdated(rta *a)
1262
{
1263
  struct hostentry *he = a->hostentry;
1264

    
1265
  if (!he)
1266
    return 0;
1267

    
1268
  if (!he->src)
1269
    return a->dest != RTD_UNREACHABLE;
1270

    
1271
  return (a->iface != he->src->iface) || !ipa_equal(a->gw, he->gw) ||
1272
    (a->dest != he->dest) || (a->igp_metric != he->igp_metric) ||
1273
    !mpnh_same(a->nexthops, he->src->nexthops);
1274
}
1275

    
1276
static inline void
1277
rta_apply_hostentry(rta *a, struct hostentry *he)
1278
{
1279
  a->hostentry = he;
1280
  a->iface = he->src ? he->src->iface : NULL;
1281
  a->gw = he->gw;
1282
  a->dest = he->dest;
1283
  a->igp_metric = he->igp_metric;
1284
  a->nexthops = he->src ? he->src->nexthops : NULL;
1285
}
1286

    
1287
/* Build a replacement rte for @old with next hop data refreshed from
   its hostentry. Returns a newly allocated rte with cached attrs. */
static inline rte *
rt_next_hop_update_rte(rtable *tab, rte *old)
{
  rta a;
  /* Work on a stack copy of the attrs, then re-cache them */
  memcpy(&a, old->attrs, sizeof(rta));
  rta_apply_hostentry(&a, old->attrs->hostentry);
  a.aflags = 0;        /* Clear cache-related flags before rta_lookup() */

  rte *e = sl_alloc(rte_slab);
  memcpy(e, old, sizeof(rte));
  e->attrs = rta_lookup(&a);

  return e;
}
1301

    
1302
/*
 * rt_next_hop_update_net - refresh recursive next hops for one network.
 *
 * Replaces every route with outdated next hop data by a rebuilt copy,
 * re-announces affected routes, recomputes the best route and moves it
 * to the head of the list. Returns the number of routes updated
 * (used by the caller as a work-budget estimate).
 */
static inline int
rt_next_hop_update_net(rtable *tab, net *n)
{
  rte **k, *e, *new, *old_best, **new_best;
  int count = 0;
  int free_old_best = 0;

  old_best = n->routes;
  if (!old_best)
    return 0;

  /* First pass: replace outdated routes in place */
  for (k = &n->routes; e = *k; k = &e->next)
    if (rta_next_hop_outdated(e->attrs))
      {
        new = rt_next_hop_update_rte(tab, e);
        *k = new;

        rte_announce_i(tab, RA_ANY, n, new, e);
        rte_trace_in(D_ROUTES, new->sender->proto, new, "updated");

        /* Call a pre-comparison hook */
        /* Not really an efficient way to compute this */
        if (e->attrs->src->proto->rte_recalculate)
          e->attrs->src->proto->rte_recalculate(tab, n, new, e, NULL);

        /* old_best is still needed for the final announcement below */
        if (e != old_best)
          rte_free_quick(e);
        else /* Freeing of the old best rte is postponed */
          free_old_best = 1;

        e = new;        /* Keep the loop's k = &e->next valid */
        count++;
      }

  if (!count)
    return 0;

  /* Find the new best route */
  new_best = NULL;
  for (k = &n->routes; e = *k; k = &e->next)
    {
      if (!new_best || rte_better(e, *new_best))
        new_best = k;
    }

  /* Relink the new best route to the first position */
  new = *new_best;
  if (new != n->routes)
    {
      *new_best = new->next;
      new->next = n->routes;
      n->routes = new;
    }

  /* Announce the new best route */
  if (new != old_best)
    {
      rte_announce_i(tab, RA_OPTIMAL, n, new, old_best);
      rte_trace_in(D_ROUTES, new->sender->proto, new, "updated [best]");
    }

  if (free_old_best)
    rte_free_quick(old_best);

  return count;
}
1368

    
1369
/*
 * rt_next_hop_update - resumable next hop update pass over a table.
 *
 * Driven by the nhu_state machine (see rt_schedule_nhu()): state 1
 * starts a fresh FIB walk, state 2 means running. The walk yields
 * after a bounded amount of work and reschedules itself; a request
 * arriving mid-run (state 3) causes another full pass afterwards.
 */
static void
rt_next_hop_update(rtable *tab)
{
  struct fib_iterator *fit = &tab->nhu_fit;
  int max_feed = 32;        /* Per-invocation work budget */

  if (tab->nhu_state == 0)
    return;

  if (tab->nhu_state == 1)
    {
      FIB_ITERATE_INIT(fit, &tab->fib);
      tab->nhu_state = 2;
    }

  FIB_ITERATE_START(&tab->fib, fit, fn)
    {
      if (max_feed <= 0)
        {
          /* Out of budget - park the iterator and continue later */
          FIB_ITERATE_PUT(fit, fn);
          ev_schedule(tab->rt_event);
          return;
        }
      max_feed -= rt_next_hop_update_net(tab, (net *) fn);
    }
  FIB_ITERATE_END(fn);

  /* state change 2->0, 3->1 */
  tab->nhu_state &= 1;

  /* A re-run was requested while we were walking */
  if (tab->nhu_state > 0)
    ev_schedule(tab->rt_event);
}
1402

    
1403

    
1404
/* Define a new table in the configuration being parsed, binding it
   to symbol @s. Returns the (possibly preexisting) table config. */
struct rtable_config *
rt_new_table(struct symbol *s)
{
  /* Hack that allows to 'redefine' the master table */
  if ((s->class == SYM_TABLE) && (s->def == new_config->master_rtc))
    return s->def;

  struct rtable_config *c = cfg_allocz(sizeof(struct rtable_config));

  cf_define_symbol(s, SYM_TABLE, c);
  c->name = s->name;
  add_tail(&new_config->tables, &c->n);
  /* Default GC thresholds: collect after 1000 ops, at most every 5 s */
  c->gc_max_ops = 1000;
  c->gc_min_time = 5;
  return c;
}
1420

    
1421
/**
 * rt_lock_table - lock a routing table
 * @r: routing table to be locked
 *
 * Lock a routing table, because it's in use by a protocol,
 * preventing it from being freed when it gets undefined in a new
 * configuration.
 */
void
rt_lock_table(rtable *r)
{
  r->use_count++;
}
1434

    
1435
/**
 * rt_unlock_table - unlock a routing table
 * @r: routing table to be unlocked
 *
 * Unlock a routing table formerly locked by rt_lock_table(),
 * that is decrease its use count and delete it if it's scheduled
 * for deletion by configuration changes.
 */
void
rt_unlock_table(rtable *r)
{
  if (!--r->use_count && r->deleted)
    {
      /* Last user gone and the table was removed from config - tear
         it down and release the obstacle blocking config cleanup */
      struct config *conf = r->deleted;
      DBG("Deleting routing table %s\n", r->name);
      if (r->hostcache)
        rt_free_hostcache(r);
      rem_node(&r->n);
      fib_free(&r->fib);
      rfree(r->rt_event);
      mb_free(r);
      config_del_obstacle(conf);
    }
}
1459

    
1460
/**
 * rt_commit - commit new routing table configuration
 * @new: new configuration
 * @old: original configuration or %NULL if it's boot time config
 *
 * Scan differences between @old and @new configuration and modify
 * the routing tables according to these changes. If @new defines a
 * previously unknown table, create it, if it omits a table existing
 * in @old, schedule it for deletion (it gets deleted when all protocols
 * disconnect from it by calling rt_unlock_table()), if it exists
 * in both configurations, leave it unchanged.
 */
void
rt_commit(struct config *new, struct config *old)
{
  struct rtable_config *o, *r;

  DBG("rt_commit:\n");
  if (old)
    {
      WALK_LIST(o, old->tables)
        {
          rtable *ot = o->table;
          if (!ot->deleted)
            {
              struct symbol *sym = cf_find_symbol(o->name);
              if (sym && sym->class == SYM_TABLE && !new->shutdown)
                {
                  /* Table survives - rebind it to the new config */
                  DBG("\t%s: same\n", o->name);
                  r = sym->def;
                  r->table = ot;
                  ot->name = r->name;
                  ot->config = r;
                  if (o->sorted != r->sorted)
                    log(L_WARN "Reconfiguration of rtable sorted flag not implemented");
                }
              else
                {
                  /* Table removed from config - mark deleted; the
                     lock/unlock pair frees it now if nobody uses it */
                  DBG("\t%s: deleted\n", o->name);
                  ot->deleted = old;
                  config_add_obstacle(old);
                  rt_lock_table(ot);
                  rt_unlock_table(ot);
                }
            }
        }
    }

  /* Create tables that are new in this configuration */
  WALK_LIST(r, new->tables)
    if (!r->table)
      {
        rtable *t = mb_alloc(rt_table_pool, sizeof(struct rtable));
        DBG("\t%s: created\n", r->name);
        rt_setup(rt_table_pool, t, r->name, r);
        add_tail(&routing_tables, &t->n);
        r->table = t;
      }
  DBG("\tdone\n");
}
1519

    
1520
static inline void
1521
do_feed_baby(struct proto *p, int type, struct announce_hook *h, net *n, rte *e)
1522
{
1523
  ea_list *tmpa;
1524

    
1525
  rte_update_lock();
1526
  tmpa = make_tmp_attrs(e, rte_update_pool);
1527
  if (type == RA_ACCEPTED)
1528
    rt_notify_accepted(h, n, e, NULL, NULL, tmpa, p->refeeding ? 2 : 1);
1529
  else
1530
    rt_notify_basic(h, n, e, p->refeeding ? e : NULL, tmpa, p->refeeding);
1531
  rte_update_unlock();
1532
}
1533

    
1534
/**
 * rt_feed_baby - advertise routes to a new protocol
 * @p: protocol to be fed
 *
 * This function performs one pass of advertisement of routes to a newly
 * initialized protocol. It's called by the protocol code as long as it
 * has something to do. (We avoid transferring all the routes in single
 * pass in order not to monopolize CPU time.)
 *
 * Returns 1 when feeding is complete (or the protocol went down),
 * 0 when there is more work and it should be called again.
 */
int
rt_feed_baby(struct proto *p)
{
  struct announce_hook *h;
  struct fib_iterator *fit;
  int max_feed = 256;        /* Per-call work budget */

  if (!p->feed_ahook)                        /* Need to initialize first */
    {
      if (!p->ahooks)
        return 1;
      DBG("Announcing routes to new protocol %s\n", p->name);
      p->feed_ahook = p->ahooks;
      fit = p->feed_iterator = mb_alloc(p->pool, sizeof(struct fib_iterator));
      goto next_hook;
    }
  fit = p->feed_iterator;

again:
  h = p->feed_ahook;
  FIB_ITERATE_START(&h->table->fib, fit, fn)
    {
      net *n = (net *) fn;
      rte *e = n->routes;
      if (max_feed <= 0)
        {
          /* Budget exhausted - park the iterator, resume next call */
          FIB_ITERATE_PUT(fit, fn);
          return 0;
        }

      /* RA_OPTIMAL/RA_ACCEPTED protocols get only the best route */
      if ((p->accept_ra_types == RA_OPTIMAL) ||
          (p->accept_ra_types == RA_ACCEPTED))
        if (e)
          {
            if (p->core_state != FS_FEEDING)
              return 1;  /* In the meantime, the protocol fell down. */
            do_feed_baby(p, p->accept_ra_types, h, n, e);
            max_feed--;
          }

      /* RA_ANY protocols get every route of the network */
      if (p->accept_ra_types == RA_ANY)
        for(e = n->routes; e != NULL; e = e->next)
          {
            if (p->core_state != FS_FEEDING)
              return 1;  /* In the meantime, the protocol fell down. */
            do_feed_baby(p, RA_ANY, h, n, e);
            max_feed--;
          }
    }
  FIB_ITERATE_END(fn);
  /* This hook's table is done - move on to the next announce hook */
  p->feed_ahook = h->next;
  if (!p->feed_ahook)
    {
      mb_free(p->feed_iterator);
      p->feed_iterator = NULL;
      return 1;
    }

next_hook:
  h = p->feed_ahook;
  FIB_ITERATE_INIT(fit, &h->table->fib);
  goto again;
}
1606

    
1607
/**
1608
 * rt_feed_baby_abort - abort protocol feeding
1609
 * @p: protocol
1610
 *
1611
 * This function is called by the protocol code when the protocol
1612
 * stops or ceases to exist before the last iteration of rt_feed_baby()
1613
 * has finished.
1614
 */
1615
void
1616
rt_feed_baby_abort(struct proto *p)
1617
{
1618
  if (p->feed_ahook)
1619
    {
1620
      /* Unlink the iterator and exit */
1621
      fit_get(&p->feed_ahook->table->fib, p->feed_iterator);
1622
      p->feed_ahook = NULL;
1623
    }
1624
}
1625

    
1626

    
1627
/* Mix a pointer value into a small hash by folding shifted copies
   of its bits together. Truncation to unsigned is intentional. */
static inline unsigned
ptr_hash(void *ptr)
{
  uintptr_t v = (uintptr_t) ptr;
  return v ^ (v << 8) ^ (v >> 16);
}
1633

    
1634
static inline unsigned
1635
hc_hash(ip_addr a, rtable *dep)
1636
{
1637
  return (ipa_hash(a) ^ ptr_hash(dep)) & 0xffff;
1638
}
1639

    
1640
static inline void
1641
hc_insert(struct hostcache *hc, struct hostentry *he)
1642
{
1643
  unsigned int k = he->hash_key >> hc->hash_shift;
1644
  he->next = hc->hash_table[k];
1645
  hc->hash_table[k] = he;
1646
}
1647

    
1648
static inline void
1649
hc_remove(struct hostcache *hc, struct hostentry *he)
1650
{
1651
  struct hostentry **hep;
1652
  unsigned int k = he->hash_key >> hc->hash_shift;
1653

    
1654
  for (hep = &hc->hash_table[k]; *hep != he; hep = &(*hep)->next);
1655
  *hep = he->next;
1656
}
1657

    
1658
/*
 * Hostcache hash table sizing. The HC_*_MARK macros are operator
 * fragments spliced after a size expression ("hsize HC_HI_MARK"),
 * not complete expressions.
 */
#define HC_DEF_ORDER 10                        /* Initial order: 1 << 10 buckets */
#define HC_HI_MARK *4                        /* Grow when items > size*4 */
#define HC_HI_STEP 2                        /* Grow by two orders (4x) */
#define HC_HI_ORDER 16                        /* Must be at most 16 */
#define HC_LO_MARK /5                        /* Shrink when items < size/5 */
#define HC_LO_STEP 2                        /* Shrink by two orders (4x) */
#define HC_LO_ORDER 10                        /* Never shrink below this order */
1665

    
1666
/* Allocate a zeroed bucket array of 2^order entries and set the
   grow/shrink watermarks for that order. */
static void
hc_alloc_table(struct hostcache *hc, unsigned order)
{
  unsigned hsize = 1 << order;
  hc->hash_order = order;
  hc->hash_shift = 16 - order;        /* Hash keys are 16-bit (see hc_hash()) */
  /* Watermarks; HC_HI_MARK/HC_LO_MARK are spliced operators, see above */
  hc->hash_max = (order >= HC_HI_ORDER) ? ~0 : (hsize HC_HI_MARK);
  hc->hash_min = (order <= HC_LO_ORDER) ?  0 : (hsize HC_LO_MARK);

  hc->hash_table = mb_allocz(rt_table_pool, hsize * sizeof(struct hostentry *));
}
1677

    
1678
static void
1679
hc_resize(struct hostcache *hc, unsigned new_order)
1680
{
1681
  unsigned old_size = 1 << hc->hash_order;
1682
  struct hostentry **old_table = hc->hash_table;
1683
  struct hostentry *he, *hen;
1684
  int i;
1685

    
1686
  hc_alloc_table(hc, new_order);
1687
  for (i = 0; i < old_size; i++)
1688
    for (he = old_table[i]; he != NULL; he=hen)
1689
      {
1690
        hen = he->next;
1691
        hc_insert(hc, he);
1692
      }
1693
  mb_free(old_table);
1694
}
1695

    
1696
static struct hostentry *
1697
hc_new_hostentry(struct hostcache *hc, ip_addr a, ip_addr ll, rtable *dep, unsigned k)
1698
{
1699
  struct hostentry *he = sl_alloc(hc->slab);
1700

    
1701
  he->addr = a;
1702
  he->link = ll;
1703
  he->tab = dep;
1704
  he->hash_key = k;
1705
  he->uc = 0;
1706
  he->src = NULL;
1707

    
1708
  add_tail(&hc->hostentries, &he->ln);
1709
  hc_insert(hc, he);
1710

    
1711
  hc->hash_items++;
1712
  if (hc->hash_items > hc->hash_max)
1713
    hc_resize(hc, hc->hash_order + HC_HI_STEP);
1714

    
1715
  return he;
1716
}
1717

    
1718
static void
1719
hc_delete_hostentry(struct hostcache *hc, struct hostentry *he)
1720
{
1721
  rta_free(he->src);
1722

    
1723
  rem_node(&he->ln);
1724
  hc_remove(hc, he);
1725
  sl_free(hc->slab, he);
1726

    
1727
  hc->hash_items--;
1728
  if (hc->hash_items < hc->hash_min)
1729
    hc_resize(hc, hc->hash_order - HC_LO_STEP);
1730
}
1731

    
1732
static void
1733
rt_init_hostcache(rtable *tab)
1734
{
1735
  struct hostcache *hc = mb_allocz(rt_table_pool, sizeof(struct hostcache));
1736
  init_list(&hc->hostentries);
1737

    
1738
  hc->hash_items = 0;
1739
  hc_alloc_table(hc, HC_DEF_ORDER);
1740
  hc->slab = sl_new(rt_table_pool, sizeof(struct hostentry));
1741

    
1742
  hc->lp = lp_new(rt_table_pool, 1008);
1743
  hc->trie = f_new_trie(hc->lp);
1744

    
1745
  tab->hostcache = hc;
1746
}
1747

    
1748
/* Free the hostcache of a table being deleted. All entries should be
   unused by now; a used one indicates a reference-counting bug. */
static void
rt_free_hostcache(rtable *tab)
{
  struct hostcache *hc = tab->hostcache;

  node *n;
  WALK_LIST(n, hc->hostentries)
    {
      struct hostentry *he = SKIP_BACK(struct hostentry, ln, n);
      rta_free(he->src);

      if (he->uc)
        log(L_ERR "Hostcache is not empty in table %s", tab->name);
    }

  /* Entries live in the slab, so freeing it frees them all */
  rfree(hc->slab);
  rfree(hc->lp);
  mb_free(hc->hash_table);
  mb_free(hc);
}
1768

    
1769
static void
1770
rt_notify_hostcache(rtable *tab, net *net)
1771
{
1772
  struct hostcache *hc = tab->hostcache;
1773

    
1774
  if (tab->hcu_scheduled)
1775
    return;
1776

    
1777
  if (trie_match_prefix(hc->trie, net->n.prefix, net->n.pxlen))
1778
    rt_schedule_hcu(tab);
1779
}
1780

    
1781
static int
1782
if_local_addr(ip_addr a, struct iface *i)
1783
{
1784
  struct ifa *b;
1785

    
1786
  WALK_LIST(b, i->addrs)
1787
    if (ipa_equal(a, b->ip))
1788
      return 1;
1789

    
1790
  return 0;
1791
}
1792

    
1793
/* Extract an IGP metric from a route for recursive next hop
   resolution: an explicit EA_GEN_IGP_METRIC attribute wins, then
   protocol-specific metrics, then 0 for device routes, otherwise
   IGP_METRIC_UNKNOWN. */
static u32 
rt_get_igp_metric(rte *rt)
{
  eattr *ea = ea_find(rt->attrs->eattrs, EA_GEN_IGP_METRIC);

  if (ea)
    return ea->u.data;

  rta *a = rt->attrs;

#ifdef CONFIG_OSPF
  if ((a->source == RTS_OSPF) ||
      (a->source == RTS_OSPF_IA) ||
      (a->source == RTS_OSPF_EXT1))
    return rt->u.ospf.metric1;
#endif

#ifdef CONFIG_RIP
  if (a->source == RTS_RIP)
    return rt->u.rip.metric;
#endif

  /* Device routes */
  if ((a->dest != RTD_ROUTER) && (a->dest != RTD_MULTIPATH))
    return 0;

  return IGP_METRIC_UNKNOWN;
}
1821

    
1822
/*
 * rt_update_hostentry - re-resolve one hostentry against table @tab.
 *
 * Looks up the covering route for the host address, fills in gateway,
 * destination type, source rta and IGP metric, and records the matched
 * prefix range in the trie so future route changes in that range
 * trigger another update. Returns nonzero iff the resolution changed.
 */
static int
rt_update_hostentry(rtable *tab, struct hostentry *he)
{
  rta *old_src = he->src;
  int pxlen = 0;

  /* Reset the hostentry */ 
  he->src = NULL;
  he->gw = IPA_NONE;
  he->dest = RTD_UNREACHABLE;
  he->igp_metric = 0;

  net *n = net_route(tab, he->addr, MAX_PREFIX_LENGTH);
  if (n)
    {
      rta *a = n->routes->attrs;
      pxlen = n->n.pxlen;

      if (a->hostentry)
        {
          /* Recursive route should not depend on another recursive route */
          log(L_WARN "Next hop address %I resolvable through recursive route for %I/%d",
              he->addr, n->n.prefix, pxlen);
          goto done;
        }

      if (a->dest == RTD_DEVICE)
        {
          if (if_local_addr(he->addr, a->iface))
            {
              /* The host address is a local address, this is not valid */
              log(L_WARN "Next hop address %I is a local address of iface %s",
                  he->addr, a->iface->name);
              goto done;
            }

          /* The host is directly reachable, use link as a gateway */
          he->gw = he->link;
          he->dest = RTD_ROUTER;
        }
      else
        {
          /* The host is reachable through some route entry */
          he->gw = a->gw;
          he->dest = a->dest;
        }

      he->src = rta_clone(a);
      he->igp_metric = rt_get_igp_metric(n->routes);
    }

 done:
  /* Add a prefix range to the trie */
  trie_add_prefix(tab->hostcache->trie, he->addr, MAX_PREFIX_LENGTH, pxlen, MAX_PREFIX_LENGTH);

  rta_free(old_src);
  return old_src != he->src;
}
1880

    
1881
static void
1882
rt_update_hostcache(rtable *tab)
1883
{
1884
  struct hostcache *hc = tab->hostcache;
1885
  struct hostentry *he;
1886
  node *n, *x;
1887

    
1888
  /* Reset the trie */
1889
  lp_flush(hc->lp);
1890
  hc->trie = f_new_trie(hc->lp);
1891

    
1892
  WALK_LIST_DELSAFE(n, x, hc->hostentries)
1893
    {
1894
      he = SKIP_BACK(struct hostentry, ln, n);
1895
      if (!he->uc)
1896
        {
1897
          hc_delete_hostentry(hc, he);
1898
          continue;
1899
        }
1900

    
1901
      if (rt_update_hostentry(tab, he))
1902
        rt_schedule_nhu(he->tab);
1903
    }
1904

    
1905
  tab->hcu_scheduled = 0;
1906
}
1907

    
1908
/* Find or create the hostentry tracking address @a (link-local
   companion @ll) in table @tab on behalf of dependent table @dep.
   Creates the hostcache on first use. */
static struct hostentry *
rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep)
{
  struct hostentry *he;

  if (!tab->hostcache)
    rt_init_hostcache(tab);

  unsigned int k = hc_hash(a, dep);
  struct hostcache *hc = tab->hostcache;
  for (he = hc->hash_table[k >> hc->hash_shift]; he != NULL; he = he->next)
    if (ipa_equal(he->addr, a) && (he->tab == dep))
      return he;

  /* Not cached yet - create and resolve a fresh entry */
  he = hc_new_hostentry(hc, a, ll, dep, k);
  rt_update_hostentry(tab, he);
  return he;
}
1926

    
1927
/* Make @a a recursive route: resolve gateway *@gw through table @tab
   and copy the resulting next hop data into the rta. @dep is the
   table that should get a next hop update when resolution changes. */
void
rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr *gw, ip_addr *ll)
{
  rta_apply_hostentry(a, rt_get_hostentry(tab, *gw, *ll, dep));
}
1932

    
1933

    
1934
/*
1935
 *  CLI commands
1936
 */
1937

    
1938
/* Format the next hop of a route into buffer @via for CLI display.
   The buffer must hold at least STD_ADDRESS_P_LENGTH+32 bytes. */
static void
rt_format_via(rte *e, byte *via)
{
  rta *a = e->attrs;

  switch (a->dest)
    {
    case RTD_ROUTER:        bsprintf(via, "via %I on %s", a->gw, a->iface->name); break;
    case RTD_DEVICE:        bsprintf(via, "dev %s", a->iface->name); break;
    case RTD_BLACKHOLE:        bsprintf(via, "blackhole"); break;
    case RTD_UNREACHABLE:        bsprintf(via, "unreachable"); break;
    case RTD_PROHIBIT:        bsprintf(via, "prohibited"); break;
    case RTD_MULTIPATH:        bsprintf(via, "multipath"); break;
    default:                bsprintf(via, "???");        /* Unknown dest type */
    }
}
1954

    
1955
/*
 * rt_show_rte - print one route for the 'show route' CLI command.
 * @ia: network prefix string, printed only for the first route of a net
 * @tmpa: temporary attributes produced by the protocol's hook
 */
static void
rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, ea_list *tmpa)
{
  byte via[STD_ADDRESS_P_LENGTH+32], from[STD_ADDRESS_P_LENGTH+8];
  byte tm[TM_DATETIME_BUFFER_SIZE], info[256];
  rta *a = e->attrs;
  int primary = (e->net->routes == e);        /* First in list = best route */
  int sync_error = (e->net->n.flags & KRF_SYNC_ERROR);
  void (*get_route_info)(struct rte *, byte *buf, struct ea_list *attrs);
  struct mpnh *nh;

  rt_format_via(e, via);
  tm_format_datetime(tm, &config->tf_route, e->lastmod);
  /* Show 'from' only when it differs from the gateway */
  if (ipa_nonzero(a->from) && !ipa_equal(a->from, a->gw))
    bsprintf(from, " from %I", a->from);
  else
    from[0] = 0;

  get_route_info = a->src->proto->proto->get_route_info;
  if (get_route_info || d->verbose)
    {
      /* Need to normalize the extended attributes */
      ea_list *t = tmpa;
      t = ea_append(t, a->eattrs);
      tmpa = alloca(ea_scan(t));
      ea_merge(t, tmpa);
      ea_sort(tmpa);
    }
  if (get_route_info)
    get_route_info(e, info, tmpa);
  else
    bsprintf(info, " (%d)", e->pref);
  cli_printf(c, -1007, "%-18s %s [%s %s%s]%s%s", ia, via, a->src->proto->name,
             tm, from, primary ? (sync_error ? " !" : " *") : "", info);
  /* Additional next hops of a multipath route */
  for (nh = a->nexthops; nh; nh = nh->next)
    cli_printf(c, -1007, "\tvia %I on %s weight %d", nh->gw, nh->iface->name, nh->weight + 1);
  if (d->verbose)
    rta_show(c, a, tmpa);
}
1994

    
1995
/*
 * rt_show_net - show all (matching) routes of one network.
 *
 * Applies the user filter, optional protocol restriction and export
 * simulation to each route, printing those that pass and updating the
 * statistics counters in @d.
 */
static void
rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
{
  rte *e, *ee;
  byte ia[STD_ADDRESS_P_LENGTH+8];
  struct announce_hook *a;
  int ok;

  bsprintf(ia, "%I/%d", n->n.prefix, n->n.pxlen);
  if (n->routes)
    d->net_counter++;
  for(e=n->routes; e; e=e->next)
    {
      struct ea_list *tmpa;
      struct rte_src *src = e->attrs->src;
      struct proto *p1 = d->export_protocol;
      struct proto *p2 = d->show_protocol;
      d->rt_counter++;
      ee = e;        /* Filters may replace e by a private copy */
      rte_update_lock();                /* We use the update buffer for filtering */
      tmpa = make_tmp_attrs(e, rte_update_pool);
      ok = (d->filter == FILTER_ACCEPT || f_run(d->filter, &e, &tmpa, rte_update_pool, FF_FORCE_TMPATTR) <= F_ACCEPT);
      if (p2 && p2 != src->proto) ok = 0;
      if (ok && d->export_mode)
        {
          /* Simulate what the export protocol would do with the route */
          int ic;
          if ((ic = p1->import_control ? p1->import_control(p1, &e, &tmpa, rte_update_pool) : 0) < 0)
            ok = 0;
          else if (!ic && d->export_mode > 1)
            {
              /* FIXME - this shows what should be exported according
                 to current filters, but not what was really exported.
                 'configure soft' command may change the export filter
                 and do not update routes */

              if ((a = proto_find_announce_hook(p1, d->table)) && ((a->out_filter == FILTER_REJECT) ||
                  (a->out_filter && f_run(a->out_filter, &e, &tmpa, rte_update_pool, FF_FORCE_TMPATTR) > F_ACCEPT)))
                ok = 0;
            }
        }
      if (ok)
        {
          d->show_counter++;
          if (d->stats < 2)
            rt_show_rte(c, ia, e, d, tmpa);
          ia[0] = 0;        /* Print the prefix only once per network */
        }
      /* Free the filter-made copy and restore the original route */
      if (e != ee)
      {
        rte_free(e);
        e = ee;
      }
      rte_update_unlock();
      if (d->primary_only)
        break;
    }
}
2052

    
2053
/*
 * rt_show_cont - CLI continuation hook for 'show route'.
 *
 * Prints a bounded batch of networks per invocation, parking the FIB
 * iterator between batches so the CLI stays responsive. Aborts when
 * the configuration changed under us or the export protocol went down.
 */
static void
rt_show_cont(struct cli *c)
{
  struct rt_show_data *d = c->rover;
#ifdef DEBUGGING
  unsigned max = 4;
#else
  unsigned max = 64;
#endif
  struct fib *fib = &d->table->fib;
  struct fib_iterator *it = &d->fit;

  FIB_ITERATE_START(fib, it, f)
    {
      net *n = (net *) f;
      if (d->running_on_config && d->running_on_config != config)
        {
          cli_printf(c, 8004, "Stopped due to reconfiguration");
          goto done;
        }
      if (d->export_protocol &&
          d->export_protocol->core_state != FS_HAPPY &&
          d->export_protocol->core_state != FS_FEEDING)
        {
          cli_printf(c, 8005, "Protocol is down");
          goto done;
        }
      if (!max--)
        {
          /* Batch done - park the iterator, resume on next call */
          FIB_ITERATE_PUT(it, f);
          return;
        }
      rt_show_net(c, n, d);
    }
  FIB_ITERATE_END(f);
  if (d->stats)
    cli_printf(c, 14, "%d of %d routes for %d networks", d->show_counter, d->rt_counter, d->net_counter);
  else
    cli_printf(c, 0, "");
done:
  /* Detach the continuation; the session returns to normal mode */
  c->cont = c->cleanup = NULL;
}
2095

    
2096
static void
2097
rt_show_cleanup(struct cli *c)
2098
{
2099
  struct rt_show_data *d = c->rover;
2100

    
2101
  /* Unlink the iterator */
2102
  fit_get(&d->table->fib, &d->fit);
2103
}
2104

    
2105
void
2106
rt_show(struct rt_show_data *d)
2107
{
2108
  net *n;
2109

    
2110
  if (d->pxlen == 256)
2111
    {
2112
      FIB_ITERATE_INIT(&d->fit, &d->table->fib);
2113
      this_cli->cont = rt_show_cont;
2114
      this_cli->cleanup = rt_show_cleanup;
2115
      this_cli->rover = d;
2116
    }
2117
  else
2118
    {
2119
      if (d->show_for)
2120
        n = net_route(d->table, d->prefix, d->pxlen);
2121
      else
2122
        n = net_find(d->table, d->prefix, d->pxlen);
2123
      if (n)
2124
        {
2125
          rt_show_net(this_cli, n, d);
2126
          cli_msg(0, "");
2127
        }
2128
      else
2129
        cli_msg(8001, "Network not in table");
2130
    }
2131
}
2132

    
2133
/*
2134
 *  Documentation for functions declared inline in route.h
2135
 */
2136
#if 0
2137

2138
/**
2139
 * net_find - find a network entry
2140
 * @tab: a routing table
2141
 * @addr: address of the network
2142
 * @len: length of the network prefix
2143
 *
2144
 * net_find() looks up the given network in routing table @tab and
2145
 * returns a pointer to its &net entry or %NULL if no such network
2146
 * exists.
2147
 */
2148
static inline net *net_find(rtable *tab, ip_addr addr, unsigned len)
2149
{ DUMMY; }
2150

2151
/**
2152
 * net_get - obtain a network entry
2153
 * @tab: a routing table
2154
 * @addr: address of the network
2155
 * @len: length of the network prefix
2156
 *
2157
 * net_get() looks up the given network in routing table @tab and
2158
 * returns a pointer to its &net entry. If no such entry exists, it's
2159
 * created.
2160
 */
2161
static inline net *net_get(rtable *tab, ip_addr addr, unsigned len)
2162
{ DUMMY; }
2163

2164
/**
2165
 * rte_cow - copy a route for writing
2166
 * @r: a route entry to be copied
2167
 *
2168
 * rte_cow() takes a &rte and prepares it for modification. The exact action
2169
 * taken depends on the flags of the &rte -- if it's a temporary entry, it's
2170
 * just returned unchanged, else a new temporary entry with the same contents
2171
 * is created.
2172
 *
2173
 * The primary use of this function is inside the filter machinery -- when
2174
 * a filter wants to modify &rte contents (to change the preference or to
2175
 * attach another set of attributes), it must ensure that the &rte is not
2176
 * shared with anyone else (and especially that it isn't stored in any routing
2177
 * table).
2178
 *
2179
 * Result: a pointer to the new writable &rte.
2180
 */
2181
static inline rte * rte_cow(rte *r)
2182
{ DUMMY; }
2183

2184
#endif