1
/*
2
 *        BIRD -- Routing Tables
3
 *
4
 *        (c) 1998--2000 Martin Mares <mj@ucw.cz>
5
 *
6
 *        Can be freely distributed and used under the terms of the GNU GPL.
7
 */
8

    
9
/**
10
 * DOC: Routing tables
11
 *
12
 * Routing tables are probably the most important structures BIRD uses. They
13
 * hold all the information about known networks, the associated routes and
14
 * their attributes.
15
 *
16
 * There are multiple routing tables (a primary one together with any
17
 * number of secondary ones if requested by the configuration). Each table
18
 * is basically a FIB containing entries describing the individual
19
 * destination networks. For each network (represented by structure &net),
20
 * there is a one-way linked list of route entries (&rte), the first entry
21
 * on the list being the best one (i.e., the one we currently use
22
 * for routing); the order of the other ones is undetermined.
23
 *
24
 * The &rte contains information specific to the route (preference, protocol
25
 * metrics, time of last modification etc.) and a pointer to a &rta structure
26
 * (see the route attribute module for a precise explanation) holding the
27
 * remaining route attributes which are expected to be shared by multiple
28
 * routes in order to conserve memory.
29
 */
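/*
 * Illustrative sketch (added, not part of the original source): looking up
 * a network entry and walking its route list as described above. The first
 * &rte on the list is the currently selected best route; the others follow
 * in no particular order. net_find() is assumed to be the exact-match FIB
 * lookup from nest/route.h.
 */
#if 0
static void
example_dump_net(rtable *tab, ip_addr prefix, int pxlen)
{
  net *n = net_find(tab, prefix, pxlen);
  rte *e;

  for (e = n ? n->routes : NULL; e; e = e->next)
    log(L_INFO "%I/%d via proto %s%s", n->n.prefix, n->n.pxlen,
        e->attrs->proto->name, (e == n->routes) ? " [best]" : "");
}
#endif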
30

    
31
#undef LOCAL_DEBUG
32

    
33
#include "nest/bird.h"
34
#include "nest/route.h"
35
#include "nest/protocol.h"
36
#include "nest/cli.h"
37
#include "nest/iface.h"
38
#include "lib/resource.h"
39
#include "lib/event.h"
40
#include "lib/string.h"
41
#include "conf/conf.h"
42
#include "filter/filter.h"
43
#include "lib/string.h"
44
#include "lib/alloca.h"
45

    
46
pool *rt_table_pool;
47

    
48
static slab *rte_slab;
49
static linpool *rte_update_pool;
50

    
51
static list routing_tables;
52

    
53
static void rt_format_via(rte *e, byte *via);
54
static void rt_free_hostcache(rtable *tab);
55
static void rt_notify_hostcache(rtable *tab, net *net);
56
static void rt_update_hostcache(rtable *tab);
57
static void rt_next_hop_update(rtable *tab);
58

    
59
static inline void rt_schedule_gc(rtable *tab);
60

    
61
/* Like fib_route(), but skips empty net entries */
62
static net *
63
net_route(rtable *tab, ip_addr a, int len)
64
{
65
  ip_addr a0;
66
  net *n;
67

    
68
  while (len >= 0)
69
    {
70
      a0 = ipa_and(a, ipa_mkmask(len));
71
      n = fib_find(&tab->fib, &a0, len);
72
      if (n && rte_is_valid(n->routes))
73
        return n;
74
      len--;
75
    }
76
  return NULL;
77
}
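/*
 * Example (added for illustration): net_route(tab, 192.0.2.1, 32) first
 * looks for an exact /32 entry, then for 192.0.2.0/31, 192.0.2.0/30 and so
 * on, returning the longest matching prefix that both exists in the FIB and
 * carries a valid route; if nothing matches down to /0, it returns NULL.
 */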
78

    
79
static void
80
rte_init(struct fib_node *N)
81
{
82
  net *n = (net *) N;
83

    
84
  N->flags = 0;
85
  n->routes = NULL;
86
}
87

    
88
/**
89
 * rte_find - find a route
90
 * @net: network node
91
 * @p: protocol
92
 *
93
 * The rte_find() function returns a route for destination @net
94
 * which has been defined by protocol @p.
95
 */
96
rte *
97
rte_find(net *net, struct proto *p)
98
{
99
  rte *e = net->routes;
100

    
101
  while (e && e->attrs->proto != p)
102
    e = e->next;
103
  return e;
104
}
105

    
106
/**
107
 * rte_get_temp - get a temporary &rte
108
 * @a: attributes to assign to the new route (a &rta; in case it's
109
 * un-cached, rte_update() will create a cached copy automatically)
110
 *
111
 * Create a temporary &rte and bind it with the attributes @a.
112
 * Also set route preference to the default preference set for
113
 * the protocol.
114
 */
115
rte *
116
rte_get_temp(rta *a)
117
{
118
  rte *e = sl_alloc(rte_slab);
119

    
120
  e->attrs = a;
121
  e->flags = 0;
122
  e->pref = a->proto->preference;
123
  return e;
124
}
125

    
126
rte *
127
rte_do_cow(rte *r)
128
{
129
  rte *e = sl_alloc(rte_slab);
130

    
131
  memcpy(e, r, sizeof(rte));
132
  e->attrs = rta_clone(r->attrs);
133
  e->flags = 0;
134
  return e;
135
}
136

    
137
static int                                /* Actually better or at least as good as */
138
rte_better(rte *new, rte *old)
139
{
140
  int (*better)(rte *, rte *);
141

    
142
  if (!rte_is_valid(old))
143
    return 1;
144
  if (!rte_is_valid(new))
145
    return 0;
146

    
147
  if (new->pref > old->pref)
148
    return 1;
149
  if (new->pref < old->pref)
150
    return 0;
151
  if (new->attrs->proto->proto != old->attrs->proto->proto)
152
    {
153
      /*
154
       *  If the user has configured protocol preferences, so that two different protocols
155
       *  have the same preference, try to break the tie by comparing addresses. Not too
156
       *  useful, but keeps the ordering of routes unambiguous.
157
       */
158
      return new->attrs->proto->proto > old->attrs->proto->proto;
159
    }
160
  if (better = new->attrs->proto->rte_better)
161
    return better(new, old);
162
  return 0;
163
}
164

    
165
static void
166
rte_trace(struct proto *p, rte *e, int dir, char *msg)
167
{
168
  byte via[STD_ADDRESS_P_LENGTH+32];
169

    
170
  rt_format_via(e, via);
171
  log(L_TRACE "%s %c %s %I/%d %s", p->name, dir, msg, e->net->n.prefix, e->net->n.pxlen, via);
172
}
173

    
174
static inline void
175
rte_trace_in(unsigned int flag, struct proto *p, rte *e, char *msg)
176
{
177
  if (p->debug & flag)
178
    rte_trace(p, e, '>', msg);
179
}
180

    
181
static inline void
182
rte_trace_out(unsigned int flag, struct proto *p, rte *e, char *msg)
183
{
184
  if (p->debug & flag)
185
    rte_trace(p, e, '<', msg);
186
}
187

    
188
static rte *
189
export_filter(struct announce_hook *ah, rte *rt0, rte **rt_free, ea_list **tmpa, int silent)
190
{
191
  struct proto *p = ah->proto;
192
  struct filter *filter = ah->out_filter;
193
  struct proto_stats *stats = ah->stats;
194
  ea_list *tmpb = NULL;
195
  rte *rt;
196
  int v;
197

    
198
  rt = rt0;
199
  *rt_free = NULL;
200

    
201
  /* If the caller does not care for eattrs, we prepare one internally */
202
  if (!tmpa)
203
    {
204
      struct proto *src = rt->attrs->proto;
205
      tmpb = src->make_tmp_attrs ? src->make_tmp_attrs(rt, rte_update_pool) : NULL;
206
      tmpa = &tmpb;
207
    }
208

    
209
  v = p->import_control ? p->import_control(p, &rt, tmpa, rte_update_pool) : 0;
210
  if (v < 0)
211
    {
212
      if (silent)
213
        goto reject;
214

    
215
      stats->exp_updates_rejected++;
216
      rte_trace_out(D_FILTERS, p, rt, "rejected by protocol");
217
      goto reject;
218
    }
219
  if (v > 0)
220
    {
221
      if (!silent)
222
        rte_trace_out(D_FILTERS, p, rt, "forced accept by protocol");
223
      goto accept;
224
    }
225

    
226
  v = filter && ((filter == FILTER_REJECT) ||
227
                 (f_run(filter, &rt, tmpa, rte_update_pool, FF_FORCE_TMPATTR) > F_ACCEPT));
228
  if (v)
229
    {
230
      if (silent)
231
        goto reject;
232

    
233
      stats->exp_updates_filtered++;
234
      rte_trace_out(D_FILTERS, p, rt, "filtered out");
235
      goto reject;
236
    }
237

    
238
 accept:
239
  if (rt != rt0)
240
    *rt_free = rt;
241
  return rt;
242

    
243
 reject:
244
  /* Discard temporary rte */
245
  if (rt != rt0)
246
    rte_free(rt);
247
  return NULL;
248
}
249

    
250
static void
251
do_rt_notify(struct announce_hook *ah, net *net, rte *new, rte *old, ea_list *tmpa, int refeed)
252
{
253
  struct proto *p = ah->proto;
254
  struct proto_stats *stats = ah->stats;
255

    
256

    
257
  /*
258
   * First, apply export limit.
259
   *
260
   * Export route limits have several problems. Because the exp_routes
261
   * counter is reset before refeed, we don't really know whether
262
   * limit is breached and whether the update is new or not. Therefore
263
   * the number of really exported routes may exceed the limit
264
   * temporarily (routes exported before and new routes in refeed).
265
   *
266
   * A minor advantage is that if the limit is decreased and refeed is
267
   * requested, the number of exported routes really decreases.
268
   *
269
   * A second problem is that with export limits, we don't know whether
270
   * old was really exported (it might be blocked by limit). When a
271
   * withdraw is exported, we announce it even when the previous
272
   * update was blocked. This is not a big issue, but the same problem
273
   * is in updating exp_routes counter. Therefore, to be consistent in
274
   * increases and decreases of exp_routes, we count exported routes
275
   * regardless of blocking by limits.
276
   *
277
   * A similar problem is in handling updates - when a new route is
278
   * received and blocking is active, the route would be blocked, but
279
   * when an update for the route is received later, the update
280
   * would be propagated (as old != NULL). Therefore, we have to block
281
   * also non-new updates (contrary to import blocking).
282
   */
283

    
284
  struct proto_limit *l = ah->out_limit;
285
  if (l && new)
286
    {
287
      if ((!old || refeed) && (stats->exp_routes >= l->limit))
288
        proto_notify_limit(ah, l, stats->exp_routes);
289

    
290
      if (l->state == PLS_BLOCKED)
291
        {
292
          stats->exp_routes++;        /* see note above */
293
          stats->exp_updates_rejected++;
294
          rte_trace_out(D_FILTERS, p, new, "rejected [limit]");
295
          new = NULL;
296

    
297
          if (!old)
298
            return;
299
        }
300
    }
301

    
302

    
303
  if (new)
304
    stats->exp_updates_accepted++;
305
  else
306
    stats->exp_withdraws_accepted++;
307

    
308
  /* Hack: We do not decrease exp_routes during refeed, we instead
309
     reset exp_routes at the start of refeed. */
310
  if (new)
311
    stats->exp_routes++;
312
  if (old && !refeed)
313
    stats->exp_routes--;
314

    
315
  if (p->debug & D_ROUTES)
316
    {
317
      if (new && old)
318
        rte_trace_out(D_ROUTES, p, new, "replaced");
319
      else if (new)
320
        rte_trace_out(D_ROUTES, p, new, "added");
321
      else if (old)
322
        rte_trace_out(D_ROUTES, p, old, "removed");
323
    }
324
  if (!new)
325
    p->rt_notify(p, ah->table, net, NULL, old, NULL);
326
  else if (tmpa)
327
    {
328
      ea_list *t = tmpa;
329
      while (t->next)
330
        t = t->next;
331
      t->next = new->attrs->eattrs;
332
      p->rt_notify(p, ah->table, net, new, old, tmpa);
333
      t->next = NULL;
334
    }
335
  else
336
    p->rt_notify(p, ah->table, net, new, old, new->attrs->eattrs);
337
}
338

    
339
static void
340
rt_notify_basic(struct announce_hook *ah, net *net, rte *new, rte *old, ea_list *tmpa, int refeed)
341
{
342
  // struct proto *p = ah->proto;
343
  struct proto_stats *stats = ah->stats;
344

    
345
  rte *new_free = NULL;
346
  rte *old_free = NULL;
347

    
348
  if (new)
349
    stats->exp_updates_received++;
350
  else
351
    stats->exp_withdraws_received++;
352

    
353
  /*
354
   * This is a tricky part - we don't know whether route 'old' was
355
   * exported to protocol 'p' or was filtered by the export filter.
356
   * We try to run the export filter to know this to have a correct
357
   * value in 'old' argument of rte_update (and proper filter value)
358
   *
359
   * FIXME - this is broken because 'configure soft' may change
360
   * filters but keep routes. Refeed is expected to be called after
361
   * change of the filters and with old == new, therefore we do not
362
   * even try to run the filter on an old route, This may lead to 
363
   * 'spurious withdraws' but ensure that there are no 'missing
364
   * withdraws'.
365
   *
366
   * This is not completely safe as there is a window between
367
   * reconfiguration and the end of refeed - if a newly filtered
368
   * route disappears during this period, proper withdraw is not
369
   * sent (because old would be also filtered) and the route is
370
   * not refeeded (because it disappeared before that).
371
   */
372

    
373
  if (new)
374
    new = export_filter(ah, new, &new_free, &tmpa, 0);
375

    
376
  if (old && !refeed)
377
    old = export_filter(ah, old, &old_free, NULL, 1);
378

    
379
  /* FIXME - This is broken because of incorrect 'old' value (see above) */
380
  if (!new && !old)
381
    return;
382

    
383
  do_rt_notify(ah, net, new, old, tmpa, refeed);
384

    
385
  /* Discard temporary rte's */
386
  if (new_free)
387
    rte_free(new_free);
388
  if (old_free)
389
    rte_free(old_free);
390
}
391

    
392
static void
393
rt_notify_accepted(struct announce_hook *ah, net *net, rte *new_changed, rte *old_changed, rte *before_old,
394
                   ea_list *tmpa, int feed)
395
{
396
  // struct proto *p = ah->proto;
397
  struct proto_stats *stats = ah->stats;
398

    
399
  rte *new_best = NULL;
400
  rte *old_best = NULL;
401
  rte *new_free = NULL;
402
  rte *old_free = NULL;
403
  rte *r;
404

    
405
  /* Used to track whether we met old_changed position. If before_old is NULL
406
     old_changed was the first and we met it implicitly before current best route. */
407
  int old_meet = old_changed && !before_old;
408

    
409
  /* Note that before_old is either NULL or a valid (not rejected) route.
410
     If old_changed is valid, before_old has to be too. If the old changed route
411
     was not valid, caller must use NULL for both old_changed and before_old. */
412

    
413
  if (new_changed)
414
    stats->exp_updates_received++;
415
  else
416
    stats->exp_withdraws_received++;
417

    
418
  /* First, find the new_best route - first accepted by filters */
419
  for (r=net->routes; rte_is_valid(r); r=r->next)
420
    {
421
      if (new_best = export_filter(ah, r, &new_free, &tmpa, 0))
422
        break;
423

    
424
      /* Note if we walked around the position of old_changed route */
425
      if (r == before_old)
426
        old_meet = 1;
427
    }
428

    
429
  /* 
430
   * Second, handle the feed case. That means we do not care for
431
   * old_best. It is NULL for feed, and the new_best for refeed. 
432
   * For refeed, there is a hack similar to one in rt_notify_basic()
433
   * to ensure withdraws in case of changed filters
434
   */
435
  if (feed)
436
    {
437
      if (feed == 2)        /* refeed */
438
        old_best = new_best ? new_best :
439
          (rte_is_valid(net->routes) ? net->routes : NULL);
440
      else
441
        old_best = NULL;
442

    
443
      if (!new_best && !old_best)
444
        return;
445

    
446
      goto found;
447
    }
448

    
449
  /*
450
   * Now, we find the old_best route. Generally, it is the same as the
451
   * new_best, unless new_best is the same as new_changed or
452
   * old_changed is accepted before new_best.
453
   *
454
   * There are four cases:
455
   *
456
   * - We would find and accept old_changed before new_best, therefore
457
   *   old_changed is old_best. In remaining cases we suppose this
458
   *   is not true.
459
   *
460
   * - We found no new_best, therefore there is also no old_best and
461
   *   we ignore this withdraw.
462
   *
463
   * - We found new_best different than new_changed, therefore
464
   *   old_best is the same as new_best and we ignore this update.
465
   *
466
   * - We found new_best the same as new_changed, therefore it cannot
467
   *   be old_best and we have to continue search for old_best.
468
   */
469

    
470
  /* First case */
471
  if (old_meet)
472
    if (old_best = export_filter(ah, old_changed, &old_free, NULL, 1))
473
      goto found;
474

    
475
  /* Second case */
476
  if (!new_best)
477
    return;
478

    
479
  /* Third case, we use r instead of new_best, because export_filter() could change it */
480
  if (r != new_changed)
481
    {
482
      if (new_free)
483
        rte_free(new_free);
484
      return;
485
    }
486

    
487
  /* Fourth case */
488
  for (r=r->next; rte_is_valid(r); r=r->next)
489
    {
490
      if (old_best = export_filter(ah, r, &old_free, NULL, 1))
491
        goto found;
492

    
493
      if (r == before_old)
494
        if (old_best = export_filter(ah, old_changed, &old_free, NULL, 1))
495
          goto found;
496
    }
497

    
498
  /* Implicitly, old_best is NULL and new_best is non-NULL */
499

    
500
 found:
501
  do_rt_notify(ah, net, new_best, old_best, tmpa, (feed == 2));
502

    
503
  /* Discard temporary rte's */
504
  if (new_free)
505
    rte_free(new_free);
506
  if (old_free)
507
    rte_free(old_free);
508
}
509

    
510
/**
511
 * rte_announce - announce a routing table change
512
 * @tab: table the route has been added to
513
 * @type: type of route announcement (RA_OPTIMAL or RA_ANY)
514
 * @net: network in question
515
 * @new: the new route to be announced
516
 * @old: the previous route for the same network
517
 * @tmpa: a list of temporary attributes belonging to the new route
518
 *
519
 * This function gets a routing table update and announces it
520
 * to all protocols that accept the given type of route announcement
521
 * and are connected to the same table by their announcement hooks.
522
 *
523
 * Route announcement of type RA_OPTIMAL is generated when the optimal
524
 * route (in routing table @tab) changes. In that case @old stores the
525
 * old optimal route.
526
 *
527
 * Route announcement of type RA_ANY is generated when any route (in
528
 * routing table @tab) changes. In that case @old stores the old route
529
 * from the same protocol.
530
 *
531
 * For each appropriate protocol, we first call its import_control()
532
 * hook which performs basic checks on the route (each protocol has a
533
 * right to veto or force accept of the route before any filter is
534
 * asked) and adds default values of attributes specific to the new
535
 * protocol (metrics, tags etc.).  Then it consults the protocol's
536
 * export filter and if it accepts the route, the rt_notify() hook of
537
 * the protocol gets called.
538
 */
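/*
 * Illustrative sketch (added, not part of the original source): the shape
 * of a receiving protocol's rt_notify() hook, which is the final step of
 * the pipeline described above. The parameter list mirrors the calls made
 * from do_rt_notify(); the body is only a placeholder.
 */
#if 0
static void
example_rt_notify(struct proto *p, rtable *table, net *n,
                  rte *new, rte *old, ea_list *attrs)
{
  if (new)
    log(L_TRACE "%s: exported %I/%d", p->name, n->n.prefix, n->n.pxlen);
  else
    log(L_TRACE "%s: withdrawn %I/%d", p->name, n->n.prefix, n->n.pxlen);
}
#endif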
539
static void
540
rte_announce(rtable *tab, unsigned type, net *net, rte *new, rte *old, rte *before_old, ea_list *tmpa)
541
{
542
  if (!rte_is_valid(old))
543
    old = before_old = NULL;
544

    
545
  if (!rte_is_valid(new))
546
    new = NULL;
547

    
548
  if (!old && !new)
549
    return;
550

    
551
  if (type == RA_OPTIMAL)
552
    {
553
      if (new)
554
        new->attrs->proto->stats.pref_routes++;
555
      if (old)
556
        old->attrs->proto->stats.pref_routes--;
557

    
558
      if (tab->hostcache)
559
        rt_notify_hostcache(tab, net);
560
    }
561

    
562
  struct announce_hook *a;
563
  WALK_LIST(a, tab->hooks)
564
    {
565
      ASSERT(a->proto->core_state == FS_HAPPY || a->proto->core_state == FS_FEEDING);
566
      if (a->proto->accept_ra_types == type)
567
        if (type == RA_ACCEPTED)
568
          rt_notify_accepted(a, net, new, old, before_old, tmpa, 0);
569
        else
570
          rt_notify_basic(a, net, new, old, tmpa, 0);
571
    }
572
}
573

    
574
static inline int
575
rte_validate(rte *e)
576
{
577
  int c;
578
  net *n = e->net;
579

    
580
  if ((n->n.pxlen > BITS_PER_IP_ADDRESS) || !ip_is_prefix(n->n.prefix,n->n.pxlen))
581
    {
582
      log(L_WARN "Ignoring bogus prefix %I/%d received via %s",
583
          n->n.prefix, n->n.pxlen, e->sender->proto->name);
584
      return 0;
585
    }
586

    
587
  c = ipa_classify_net(n->n.prefix);
588
  if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK))
589
    {
590
      log(L_WARN "Ignoring bogus route %I/%d received via %s",
591
          n->n.prefix, n->n.pxlen, e->sender->proto->name);
592
      return 0;
593
    }
594

    
595
  return 1;
596
}
597

    
598
/**
599
 * rte_free - delete a &rte
600
 * @e: &rte to be deleted
601
 *
602
 * rte_free() deletes the given &rte from the routing table it's linked to.
603
 */
604
void
605
rte_free(rte *e)
606
{
607
  if (e->attrs->aflags & RTAF_CACHED)
608
    rta_free(e->attrs);
609
  sl_free(rte_slab, e);
610
}
611

    
612
static inline void
613
rte_free_quick(rte *e)
614
{
615
  rta_free(e->attrs);
616
  sl_free(rte_slab, e);
617
}
618

    
619
static int
620
rte_same(rte *x, rte *y)
621
{
622
  return
623
    x->attrs == y->attrs &&
624
    x->flags == y->flags &&
625
    x->pflags == y->pflags &&
626
    x->pref == y->pref &&
627
    (!x->attrs->proto->rte_same || x->attrs->proto->rte_same(x, y));
628
}
629

    
630
static void
631
rte_recalculate(struct announce_hook *ah, net *net, rte *new, ea_list *tmpa, struct proto *src)
632
{
633
  struct proto *p = ah->proto;
634
  struct rtable *table = ah->table;
635
  struct proto_stats *stats = ah->stats;
636
  rte *before_old = NULL;
637
  rte *old_best = net->routes;
638
  rte *old = NULL;
639
  rte **k;
640

    
641
  k = &net->routes;                        /* Find and remove original route from the same protocol */
642
  while (old = *k)
643
    {
644
      if (old->attrs->proto == src)
645
        {
646
          /* If there is the same route in the routing table but from
647
           * a different sender, then there are two paths from the
648
           * source protocol to this routing table through transparent
649
           * pipes, which is not allowed.
650
           *
651
           * We log that and ignore the route. If it is a withdraw, we
652
           * ignore it completely (there might be 'spurious withdraws',
653
           * see FIXME in do_rte_announce())
654
           */
655
          if (old->sender->proto != p)
656
            {
657
              if (new)
658
                {
659
                  log(L_ERR "Pipe collision detected when sending %I/%d to table %s",
660
                      net->n.prefix, net->n.pxlen, table->name);
661
                  rte_free_quick(new);
662
                }
663
              return;
664
            }
665

    
666
          if (new && rte_same(old, new))
667
            {
668
              /* No changes, ignore the new route */
669

    
670
              if (!rte_is_rejected(new))
671
                {
672
                  stats->imp_updates_ignored++;
673
                  rte_trace_in(D_ROUTES, p, new, "ignored");
674
                }
675

    
676
              rte_free_quick(new);
677
#ifdef CONFIG_RIP
678
              /* lastmod is used internally by RIP as the last time
679
                 when the route was received. */
680
              if (src->proto == &proto_rip)
681
                old->lastmod = now;
682
#endif
683
              return;
684
            }
685
          *k = old->next;
686
          break;
687
        }
688
      k = &old->next;
689
      before_old = old;
690
    }
691

    
692
  if (!old)
693
    before_old = NULL;
694

    
695
  if (!old && !new)
696
    {
697
      stats->imp_withdraws_ignored++;
698
      return;
699
    }
700

    
701
  struct proto_limit *l = ah->in_limit;
702
  if (l && !old && new)
703
    {
704
      u32 all_routes = stats->imp_routes + stats->rej_routes;
705

    
706
      if (all_routes >= l->limit)
707
        proto_notify_limit(ah, l, all_routes);
708

    
709
      if (l->state == PLS_BLOCKED)
710
        {
711
          stats->imp_updates_ignored++;
712
          rte_trace_in(D_FILTERS, p, new, "ignored [limit]");
713
          rte_free_quick(new);
714
          return;
715
        }
716
    }
717

    
718
  if (new && !rte_is_rejected(new))
719
    stats->imp_updates_accepted++;
720
  else
721
    stats->imp_withdraws_accepted++;
722

    
723
  if (new)
724
    rte_is_rejected(new) ? stats->rej_routes++ : stats->imp_routes++;
725
  if (old)
726
    rte_is_rejected(old) ? stats->rej_routes-- : stats->imp_routes--;
727

    
728
  if (table->config->sorted)
729
    {
730
      /* If routes are sorted, just insert new route to appropriate position */
731
      if (new)
732
        {
733
          if (before_old && !rte_better(new, before_old))
734
            k = &before_old->next;
735
          else
736
            k = &net->routes;
737

    
738
          for (; *k; k=&(*k)->next)
739
            if (rte_better(new, *k))
740
              break;
741

    
742
          new->next = *k;
743
          *k = new;
744
        }
745
    }
746
  else
747
    {
748
      /* If routes are not sorted, find the best route and move it to
749
         the first position. There are several optimized cases. */
750

    
751
      if (src->rte_recalculate && src->rte_recalculate(table, net, new, old, old_best))
752
        goto do_recalculate;
753

    
754
      if (new && rte_better(new, old_best))
755
        {
756
          /* The first case - the new route is clearly optimal,
757
             we link it at the first position */
758

    
759
          new->next = net->routes;
760
          net->routes = new;
761
        }
762
      else if (old == old_best)
763
        {
764
          /* The second case - the old best route disappeared, we add the
765
             new route (if we have any) to the list (we don't care about
766
             position) and then we elect the new optimal route and relink
767
             that route at the first position and announce it. New optimal
768
             route might be NULL if there are no more routes */
769

    
770
        do_recalculate:
771
          /* Add the new route to the list */
772
          if (new)
773
            {
774
              new->next = net->routes;
775
              net->routes = new;
776
            }
777

    
778
          /* Find a new optimal route (if there is any) */
779
          if (net->routes)
780
            {
781
              rte **bp = &net->routes;
782
              for (k=&(*bp)->next; *k; k=&(*k)->next)
783
                if (rte_better(*k, *bp))
784
                  bp = k;
785

    
786
              /* And relink it */
787
              rte *best = *bp;
788
              *bp = best->next;
789
              best->next = net->routes;
790
              net->routes = best;
791
            }
792
        }
793
      else if (new)
794
        {
795
          /* The third case - the new route is not better than the old
796
             best route (therefore old_best != NULL) and the old best
797
             route was not removed (therefore old_best == net->routes).
798
             We just link the new route after the old best route. */
799

    
800
          ASSERT(net->routes != NULL);
801
          new->next = net->routes->next;
802
          net->routes->next = new;
803
        }
804
      /* The fourth (empty) case - suboptimal route was removed, nothing to do */
805
    }
806

    
807
  if (new)
808
    new->lastmod = now;
809

    
810
  /* Log the route change */
811
  if (new)
812
    rte_trace_in(D_ROUTES, p, new, net->routes == new ? "added [best]" : "added");
813

    
814
  if (!new && (p->debug & D_ROUTES))
815
    {
816
      if (old != old_best)
817
        rte_trace_in(D_ROUTES, p, old, "removed");
818
      else if (net->routes)
819
        rte_trace_in(D_ROUTES, p, old, "removed [replaced]");
820
      else
821
        rte_trace_in(D_ROUTES, p, old, "removed [sole]");
822
    }
823

    
824
  /* Propagate the route change */
825
  rte_announce(table, RA_ANY, net, new, old, NULL, tmpa);
826
  if (net->routes != old_best)
827
    rte_announce(table, RA_OPTIMAL, net, net->routes, old_best, NULL, tmpa);
828
  if (table->config->sorted)
829
    rte_announce(table, RA_ACCEPTED, net, new, old, before_old, tmpa);
830

    
831
  if (!net->routes &&
832
      (table->gc_counter++ >= table->config->gc_max_ops) &&
833
      (table->gc_time + table->config->gc_min_time <= now))
834
    rt_schedule_gc(table);
835

    
836
  if (old)
837
    {
838
      if (p->rte_remove)
839
        p->rte_remove(net, old);
840
      rte_free_quick(old);
841
    }
842
  if (new)
843
    {
844
      if (p->rte_insert)
845
        p->rte_insert(net, new);
846
    }
847
}
848

    
849
static int rte_update_nest_cnt;                /* Nesting counter to allow recursive updates */
850

    
851
static inline void
852
rte_update_lock(void)
853
{
854
  rte_update_nest_cnt++;
855
}
856

    
857
static inline void
858
rte_update_unlock(void)
859
{
860
  if (!--rte_update_nest_cnt)
861
    lp_flush(rte_update_pool);
862
}
863

    
864
/**
865
 * rte_update - enter a new update to a routing table
866
 * @table: table to be updated
867
 * @ah: pointer to table announce hook
868
 * @net: network node
869
 * @p: protocol submitting the update
870
 * @src: protocol originating the update
871
 * @new: a &rte representing the new route or %NULL for route removal.
872
 *
873
 * This function is called by the routing protocols whenever they discover
874
 * a new route or wish to update/remove an existing route. The right announcement
875
 * sequence is to build route attributes first (either un-cached with @aflags set
876
 * to zero or a cached one using rta_lookup(); in this case please note that
877
 * you need to increase the use count of the attributes yourself by calling
878
 * rta_clone()), call rte_get_temp() to obtain a temporary &rte, fill in all
879
 * the appropriate data and finally submit the new &rte by calling rte_update().
880
 *
881
 * @src specifies the protocol that originally created the route and the meaning
882
 * of protocol-dependent data of @new. If @new is not %NULL, @src has to be the
883
 * same value as @new->attrs->proto. @p specifies the protocol that called
884
 * rte_update(). In most cases it is the same protocol as @src. rte_update()
885
 * stores @p in @new->sender.
886
 *
887
 * When rte_update() gets any route, it automatically validates it (checks
888
 * whether the network and next hop address are valid IP addresses and also
889
 * whether a normal routing protocol doesn't try to smuggle a host or link
890
 * scope route to the table), converts all protocol dependent attributes stored
891
 * in the &rte to temporary extended attributes, consults import filters of the
892
 * protocol to see if the route should be accepted and/or its attributes modified,
893
 * stores the temporary attributes back to the &rte.
894
 *
895
 * Now, having a "public" version of the route, we
896
 * automatically find any old route defined by the protocol @src
897
 * for network @n, replace it by the new one (or remove it if @new is %NULL),
898
 * recalculate the optimal route for this destination and finally broadcast
899
 * the change (if any) to all routing protocols by calling rte_announce().
900
 *
901
 * All memory used for attribute lists and other temporary allocations is taken
902
 * from a special linear pool @rte_update_pool and freed when rte_update()
903
 * finishes.
904
 */
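/*
 * Illustrative sketch (added, not part of the original source): the
 * announcement sequence described above as a protocol would typically
 * perform it. The announce hook p->main_ahook and the RTS_STATIC source
 * value are assumptions made for this example only.
 */
#if 0
static void
example_originate(struct proto *p, net *n, ip_addr gw, struct iface *iface)
{
  rta a = {
    .proto = p,
    .source = RTS_STATIC,
    .scope = SCOPE_UNIVERSE,
    .cast = RTC_UNICAST,
    .dest = RTD_ROUTER,
    .gw = gw,
    .iface = iface,
  };                            /* aflags stays zero => un-cached rta */

  rte *e = rte_get_temp(&a);    /* binds @a and sets the default preference */
  e->net = n;
  e->pflags = 0;
  rte_update2(p->main_ahook, n, e, p);  /* caches the rta, recalculates, announces */
}
#endif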
905

    
906
void
907
rte_update2(struct announce_hook *ah, net *net, rte *new, struct proto *src)
908
{
909
  struct proto *p = ah->proto;
910
  struct proto_stats *stats = ah->stats;
911
  struct filter *filter = ah->in_filter;
912
  ea_list *tmpa = NULL;
913

    
914
  rte_update_lock();
915
  if (new)
916
    {
917
      new->sender = ah;
918

    
919
      stats->imp_updates_received++;
920
      if (!rte_validate(new))
921
        {
922
          rte_trace_in(D_FILTERS, p, new, "invalid");
923
          stats->imp_updates_invalid++;
924
          goto drop;
925
        }
926

    
927
      if (filter == FILTER_REJECT)
928
        {
929
          stats->imp_updates_filtered++;
930
          rte_trace_in(D_FILTERS, p, new, "filtered out");
931

    
932
          if (! ah->in_keep_rejected)
933
            goto drop;
934

    
935
          /* new is a private copy, we can modify it */
936
          new->flags |= REF_REJECTED;
937
        }
938
      else
939
        {
940
          if (src->make_tmp_attrs)
941
            tmpa = src->make_tmp_attrs(new, rte_update_pool);
942
          if (filter && (filter != FILTER_REJECT))
943
            {
944
              ea_list *old_tmpa = tmpa;
945
              int fr = f_run(filter, &new, &tmpa, rte_update_pool, 0);
946
              if (fr > F_ACCEPT)
947
                {
948
                  stats->imp_updates_filtered++;
949
                  rte_trace_in(D_FILTERS, p, new, "filtered out");
950

    
951
                  if (! ah->in_keep_rejected)
952
                    goto drop;
953

    
954
                  new->flags |= REF_REJECTED;
955
                }
956
              if (tmpa != old_tmpa && src->store_tmp_attrs)
957
                src->store_tmp_attrs(new, tmpa);
958
            }
959
        }
960

    
961
      if (!(new->attrs->aflags & RTAF_CACHED)) /* Need to copy attributes */
962
        new->attrs = rta_lookup(new->attrs);
963
      new->flags |= REF_COW;
964
    }
965
  else
966
    stats->imp_withdraws_received++;
967

    
968
  rte_recalculate(ah, net, new, tmpa, src);
969
  rte_update_unlock();
970
  return;
971

    
972
drop:
973
  rte_free(new);
974
  rte_recalculate(ah, net, NULL, NULL, src);
975
  rte_update_unlock();
976
}
977

    
978
/* Independent call to rte_announce(), used from next hop
979
   recalculation, outside of rte_update(). new must be non-NULL */
980
static inline void 
981
rte_announce_i(rtable *tab, unsigned type, net *n, rte *new, rte *old)
982
{
983
  struct proto *src;
984
  ea_list *tmpa;
985

    
986
  rte_update_lock();
987
  src = new->attrs->proto;
988
  tmpa = src->make_tmp_attrs ? src->make_tmp_attrs(new, rte_update_pool) : NULL;
989
  rte_announce(tab, type, n, new, old, NULL, tmpa);
990
  rte_update_unlock();
991
}
992

    
993
void
994
rte_discard(rtable *t, rte *old)        /* Non-filtered route deletion, used during garbage collection */
995
{
996
  rte_update_lock();
997
  rte_recalculate(old->sender, old->net, NULL, NULL, old->attrs->proto);
998
  rte_update_unlock();
999
}
1000

    
1001
/**
1002
 * rte_dump - dump a route
1003
 * @e: &rte to be dumped
1004
 *
1005
 * This function dumps the contents of a &rte to debug output.
1006
 */
1007
void
1008
rte_dump(rte *e)
1009
{
1010
  net *n = e->net;
1011
  debug("%-1I/%2d ", n->n.prefix, n->n.pxlen);
1012
  debug("KF=%02x PF=%02x pref=%d lm=%d ", n->n.flags, e->pflags, e->pref, now-e->lastmod);
1013
  rta_dump(e->attrs);
1014
  if (e->attrs->proto->proto->dump_attrs)
1015
    e->attrs->proto->proto->dump_attrs(e);
1016
  debug("\n");
1017
}
1018

    
1019
/**
1020
 * rt_dump - dump a routing table
1021
 * @t: routing table to be dumped
1022
 *
1023
 * This function dumps contents of a given routing table to debug output.
1024
 */
1025
void
1026
rt_dump(rtable *t)
1027
{
1028
  rte *e;
1029
  net *n;
1030
  struct announce_hook *a;
1031

    
1032
  debug("Dump of routing table <%s>\n", t->name);
1033
#ifdef DEBUGGING
1034
  fib_check(&t->fib);
1035
#endif
1036
  FIB_WALK(&t->fib, fn)
1037
    {
1038
      n = (net *) fn;
1039
      for(e=n->routes; e; e=e->next)
1040
        rte_dump(e);
1041
    }
1042
  FIB_WALK_END;
1043
  WALK_LIST(a, t->hooks)
1044
    debug("\tAnnounces routes to protocol %s\n", a->proto->name);
1045
  debug("\n");
1046
}
1047

    
1048
/**
1049
 * rt_dump_all - dump all routing tables
1050
 *
1051
 * This function dumps contents of all routing tables to debug output.
1052
 */
1053
void
1054
rt_dump_all(void)
1055
{
1056
  rtable *t;
1057

    
1058
  WALK_LIST(t, routing_tables)
1059
    rt_dump(t);
1060
}
1061

    
1062
static inline void
1063
rt_schedule_gc(rtable *tab)
1064
{
1065
  if (tab->gc_scheduled)
1066
    return;
1067

    
1068
  tab->gc_scheduled = 1;
1069
  ev_schedule(tab->rt_event);
1070
}
1071

    
1072
static inline void
1073
rt_schedule_hcu(rtable *tab)
1074
{
1075
  if (tab->hcu_scheduled)
1076
    return;
1077

    
1078
  tab->hcu_scheduled = 1;
1079
  ev_schedule(tab->rt_event);
1080
}
1081

    
1082
static inline void
1083
rt_schedule_nhu(rtable *tab)
1084
{
1085
  if (tab->nhu_state == 0)
1086
    ev_schedule(tab->rt_event);
1087

    
1088
  /* state change 0->1, 2->3 */
1089
  tab->nhu_state |= 1;
1090
}
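/*
 * Note added for clarity (an interpretation of the state values used here
 * and in rt_next_hop_update() below): bit 0 of nhu_state means "an update
 * was requested", bit 1 means "an update walk is in progress". So 0->1
 * schedules a fresh walk, 2->3 records that another walk is needed after
 * the current one, and the final 'nhu_state &= 1' maps 2->0 (done) and
 * 3->1 (run again).
 */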
1091

    
1092
static void
1093
rt_prune_nets(rtable *tab)
1094
{
1095
  struct fib_iterator fit;
1096
  int ncnt = 0, ndel = 0;
1097

    
1098
#ifdef DEBUGGING
1099
  fib_check(&tab->fib);
1100
#endif
1101

    
1102
  FIB_ITERATE_INIT(&fit, &tab->fib);
1103
again:
1104
  FIB_ITERATE_START(&tab->fib, &fit, f)
1105
    {
1106
      net *n = (net *) f;
1107
      ncnt++;
1108
      if (!n->routes)                /* Orphaned FIB entry */
1109
        {
1110
          FIB_ITERATE_PUT(&fit, f);
1111
          fib_delete(&tab->fib, f);
1112
          ndel++;
1113
          goto again;
1114
        }
1115
    }
1116
  FIB_ITERATE_END(f);
1117
  DBG("Pruned %d of %d networks\n", ndel, ncnt);
1118

    
1119
  tab->gc_counter = 0;
1120
  tab->gc_time = now;
1121
  tab->gc_scheduled = 0;
1122
}
1123

    
1124
static void
1125
rt_event(void *ptr)
1126
{
1127
  rtable *tab = ptr;
1128

    
1129
  if (tab->hcu_scheduled)
1130
    rt_update_hostcache(tab);
1131

    
1132
  if (tab->nhu_state)
1133
    rt_next_hop_update(tab);
1134

    
1135
  if (tab->gc_scheduled)
1136
    rt_prune_nets(tab);
1137
}
1138

    
1139
void
1140
rt_setup(pool *p, rtable *t, char *name, struct rtable_config *cf)
1141
{
1142
  bzero(t, sizeof(*t));
1143
  fib_init(&t->fib, p, sizeof(net), 0, rte_init);
1144
  t->name = name;
1145
  t->config = cf;
1146
  init_list(&t->hooks);
1147
  if (cf)
1148
    {
1149
      t->rt_event = ev_new(p);
1150
      t->rt_event->hook = rt_event;
1151
      t->rt_event->data = t;
1152
      t->gc_time = now;
1153
    }
1154
}
1155

    
1156
/**
1157
 * rt_init - initialize routing tables
1158
 *
1159
 * This function is called during BIRD startup. It initializes the
1160
 * routing table module.
1161
 */
1162
void
1163
rt_init(void)
1164
{
1165
  rta_init();
1166
  rt_table_pool = rp_new(&root_pool, "Routing tables");
1167
  rte_update_pool = lp_new(rt_table_pool, 4080);
1168
  rte_slab = sl_new(rt_table_pool, sizeof(rte));
1169
  init_list(&routing_tables);
1170
}
1171

    
1172

    
1173
/* Called from proto_schedule_flush_loop() only,
1174
   ensuring that all prune states are zero */
1175
void
1176
rt_schedule_prune_all(void)
1177
{
1178
  rtable *t;
1179

    
1180
  WALK_LIST(t, routing_tables)
1181
    t->prune_state = 1;
1182
}
1183

    
1184
static inline int
1185
rt_prune_step(rtable *tab, int *max_feed)
1186
{
1187
  struct fib_iterator *fit = &tab->prune_fit;
1188

    
1189
  DBG("Pruning route table %s\n", tab->name);
1190
#ifdef DEBUGGING
1191
  fib_check(&tab->fib);
1192
#endif
1193

    
1194
  if (tab->prune_state == 0)
1195
    return 1;
1196

    
1197
  if (tab->prune_state == 1)
1198
    {
1199
      FIB_ITERATE_INIT(fit, &tab->fib);
1200
      tab->prune_state = 2;
1201
    }
1202

    
1203
again:
1204
  FIB_ITERATE_START(&tab->fib, fit, fn)
1205
    {
1206
      net *n = (net *) fn;
1207
      rte *e;
1208

    
1209
    rescan:
1210
      for (e=n->routes; e; e=e->next)
1211
        if (e->sender->proto->core_state != FS_HAPPY &&
1212
            e->sender->proto->core_state != FS_FEEDING)
1213
          {
1214
            if (*max_feed <= 0)
1215
              {
1216
                FIB_ITERATE_PUT(fit, fn);
1217
                return 0;
1218
              }
1219

    
1220
            rte_discard(tab, e);
1221
            (*max_feed)--;
1222

    
1223
            goto rescan;
1224
          }
1225
      if (!n->routes)                /* Orphaned FIB entry */
1226
        {
1227
          FIB_ITERATE_PUT(fit, fn);
1228
          fib_delete(&tab->fib, fn);
1229
          goto again;
1230
        }
1231
    }
1232
  FIB_ITERATE_END(fn);
1233

    
1234
#ifdef DEBUGGING
1235
  fib_check(&tab->fib);
1236
#endif
1237

    
1238
  tab->prune_state = 0;
1239
  return 1;
1240
}
1241

    
1242
/**
1243
 * rt_prune_loop - prune routing tables
1244
 * @tab: routing table to be pruned
1245
 *
1246
 * The prune loop scans routing tables and removes routes belonging to
1247
 * inactive protocols and also stale network entries. Returns 1 when
1248
 * all such routes are pruned. It is a part of the protocol flushing
1249
 * loop.
1250
 */
1251
int
1252
rt_prune_loop(void)
1253
{
1254
  rtable *t;
1255
  int max_feed = 512;
1256

    
1257
  WALK_LIST(t, routing_tables)
1258
    if (! rt_prune_step(t, &max_feed))
1259
      return 0;
1260

    
1261
  return 1;
1262
}
1263

    
1264
void
1265
rt_preconfig(struct config *c)
1266
{
1267
  struct symbol *s = cf_find_symbol("master");
1268

    
1269
  init_list(&c->tables);
1270
  c->master_rtc = rt_new_table(s);
1271
}
1272

    
1273

    
1274
/* 
1275
 * Some functions for handling internal next hop updates
1276
 * triggered by rt_schedule_nhu().
1277
 */
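/*
 * Illustrative sketch (added, not part of the original source): a protocol
 * with recursive next hops (e.g. BGP) binds its rta to a host entry via
 * rta_set_recursive_next_hop(), declared at the end of this file. When the
 * route towards that host changes in the table used for resolution, the
 * hostcache update notices it, rt_schedule_nhu() is called on the dependent
 * table, and the walk below rewrites the dependent rtes through
 * rta_apply_hostentry().
 *
 *   rta_set_recursive_next_hop(p->table, a, igp_table, &next_hop, &link_addr);
 *
 * The names igp_table, next_hop and link_addr are assumptions made for
 * this example only.
 */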
1278

    
1279
static inline int
1280
rta_next_hop_outdated(rta *a)
1281
{
1282
  struct hostentry *he = a->hostentry;
1283

    
1284
  if (!he)
1285
    return 0;
1286

    
1287
  if (!he->src)
1288
    return a->dest != RTD_UNREACHABLE;
1289

    
1290
  return (a->iface != he->src->iface) || !ipa_equal(a->gw, he->gw) ||
1291
    (a->dest != he->dest) || (a->igp_metric != he->igp_metric) ||
1292
    !mpnh_same(a->nexthops, he->src->nexthops);
1293
}
1294

    
1295
static inline void
1296
rta_apply_hostentry(rta *a, struct hostentry *he)
1297
{
1298
  a->hostentry = he;
1299
  a->iface = he->src ? he->src->iface : NULL;
1300
  a->gw = he->gw;
1301
  a->dest = he->dest;
1302
  a->igp_metric = he->igp_metric;
1303
  a->nexthops = he->src ? he->src->nexthops : NULL;
1304
}
1305

    
1306
static inline rte *
1307
rt_next_hop_update_rte(rtable *tab, rte *old)
1308
{
1309
  rta a;
1310
  memcpy(&a, old->attrs, sizeof(rta));
1311
  rta_apply_hostentry(&a, old->attrs->hostentry);
1312
  a.aflags = 0;
1313

    
1314
  rte *e = sl_alloc(rte_slab);
1315
  memcpy(e, old, sizeof(rte));
1316
  e->attrs = rta_lookup(&a);
1317

    
1318
  return e;
1319
}
1320

    
1321
static inline int
1322
rt_next_hop_update_net(rtable *tab, net *n)
1323
{
1324
  rte **k, *e, *new, *old_best, **new_best;
1325
  int count = 0;
1326
  int free_old_best = 0;
1327

    
1328
  old_best = n->routes;
1329
  if (!old_best)
1330
    return 0;
1331

    
1332
  for (k = &n->routes; e = *k; k = &e->next)
1333
    if (rta_next_hop_outdated(e->attrs))
1334
      {
1335
        new = rt_next_hop_update_rte(tab, e);
1336
        *k = new;
1337

    
1338
        rte_announce_i(tab, RA_ANY, n, new, e);
1339
        rte_trace_in(D_ROUTES, new->sender->proto, new, "updated");
1340

    
1341
        /* Call a pre-comparison hook */
1342
        /* Not really an efficient way to compute this */
1343
        if (e->attrs->proto->rte_recalculate)
1344
          e->attrs->proto->rte_recalculate(tab, n, new, e, NULL);
1345

    
1346
        if (e != old_best)
1347
          rte_free_quick(e);
1348
        else /* Freeing of the old best rte is postponed */
1349
          free_old_best = 1;
1350

    
1351
        e = new;
1352
        count++;
1353
      }
1354

    
1355
  if (!count)
1356
    return 0;
1357

    
1358
  /* Find the new best route */
1359
  new_best = NULL;
1360
  for (k = &n->routes; e = *k; k = &e->next)
1361
    {
1362
      if (!new_best || rte_better(e, *new_best))
1363
        new_best = k;
1364
    }
1365

    
1366
  /* Relink the new best route to the first position */
1367
  new = *new_best;
1368
  if (new != n->routes)
1369
    {
1370
      *new_best = new->next;
1371
      new->next = n->routes;
1372
      n->routes = new;
1373
    }
1374

    
1375
  /* Announce the new best route */
1376
  if (new != old_best)
1377
    {
1378
      rte_announce_i(tab, RA_OPTIMAL, n, new, old_best);
1379
      rte_trace_in(D_ROUTES, new->sender->proto, new, "updated [best]");
1380
    }
1381

    
1382
   if (free_old_best)
1383
    rte_free_quick(old_best);
1384

    
1385
  return count;
1386
}
1387

    
1388
static void
1389
rt_next_hop_update(rtable *tab)
1390
{
1391
  struct fib_iterator *fit = &tab->nhu_fit;
1392
  int max_feed = 32;
1393

    
1394
  if (tab->nhu_state == 0)
1395
    return;
1396

    
1397
  if (tab->nhu_state == 1)
1398
    {
1399
      FIB_ITERATE_INIT(fit, &tab->fib);
1400
      tab->nhu_state = 2;
1401
    }
1402

    
1403
  FIB_ITERATE_START(&tab->fib, fit, fn)
1404
    {
1405
      if (max_feed <= 0)
1406
        {
1407
          FIB_ITERATE_PUT(fit, fn);
1408
          ev_schedule(tab->rt_event);
1409
          return;
1410
        }
1411
      max_feed -= rt_next_hop_update_net(tab, (net *) fn);
1412
    }
1413
  FIB_ITERATE_END(fn);
1414

    
1415
  /* state change 2->0, 3->1 */
1416
  tab->nhu_state &= 1;
1417

    
1418
  if (tab->nhu_state > 0)
1419
    ev_schedule(tab->rt_event);
1420
}
1421

    
1422

    
1423
struct rtable_config *
1424
rt_new_table(struct symbol *s)
1425
{
1426
  /* Hack that allows us to 'redefine' the master table */
1427
  if ((s->class == SYM_TABLE) && (s->def == new_config->master_rtc))
1428
    return s->def;
1429

    
1430
  struct rtable_config *c = cfg_allocz(sizeof(struct rtable_config));
1431

    
1432
  cf_define_symbol(s, SYM_TABLE, c);
1433
  c->name = s->name;
1434
  add_tail(&new_config->tables, &c->n);
1435
  c->gc_max_ops = 1000;
1436
  c->gc_min_time = 5;
1437
  return c;
1438
}
1439

    
1440
/**
1441
 * rt_lock_table - lock a routing table
1442
 * @r: routing table to be locked
1443
 *
1444
 * Lock a routing table, because it's in use by a protocol,
1445
 * preventing it from being freed when it gets undefined in a new
1446
 * configuration.
1447
 */
1448
void
1449
rt_lock_table(rtable *r)
1450
{
1451
  r->use_count++;
1452
}
1453

    
1454
/**
1455
 * rt_unlock_table - unlock a routing table
1456
 * @r: routing table to be unlocked
1457
 *
1458
 * Unlock a routing table formerly locked by rt_lock_table(),
1459
 * that is decrease its use count and delete it if it's scheduled
1460
 * for deletion by configuration changes.
1461
 */
1462
void
1463
rt_unlock_table(rtable *r)
1464
{
1465
  if (!--r->use_count && r->deleted)
1466
    {
1467
      struct config *conf = r->deleted;
1468
      DBG("Deleting routing table %s\n", r->name);
1469
      if (r->hostcache)
1470
        rt_free_hostcache(r);
1471
      rem_node(&r->n);
1472
      fib_free(&r->fib);
1473
      rfree(r->rt_event);
1474
      mb_free(r);
1475
      config_del_obstacle(conf);
1476
    }
1477
}
1478

    
1479
/**
1480
 * rt_commit - commit new routing table configuration
1481
 * @new: new configuration
1482
 * @old: original configuration or %NULL if it's boot time config
1483
 *
1484
 * Scan differences between @old and @new configuration and modify
1485
 * the routing tables according to these changes. If @new defines a
1486
 * previously unknown table, create it, if it omits a table existing
1487
 * in @old, schedule it for deletion (it gets deleted when all protocols
1488
 * disconnect from it by calling rt_unlock_table()), if it exists
1489
 * in both configurations, leave it unchanged.
1490
 */
1491
void
1492
rt_commit(struct config *new, struct config *old)
1493
{
1494
  struct rtable_config *o, *r;
1495

    
1496
  DBG("rt_commit:\n");
1497
  if (old)
1498
    {
1499
      WALK_LIST(o, old->tables)
1500
        {
1501
          rtable *ot = o->table;
1502
          if (!ot->deleted)
1503
            {
1504
              struct symbol *sym = cf_find_symbol(o->name);
1505
              if (sym && sym->class == SYM_TABLE && !new->shutdown)
1506
                {
1507
                  DBG("\t%s: same\n", o->name);
1508
                  r = sym->def;
1509
                  r->table = ot;
1510
                  ot->name = r->name;
1511
                  ot->config = r;
1512
                  if (o->sorted != r->sorted)
1513
                    log(L_WARN "Reconfiguration of rtable sorted flag not implemented");
1514
                }
1515
              else
1516
                {
1517
                  DBG("\t%s: deleted\n", o->name);
1518
                  ot->deleted = old;
1519
                  config_add_obstacle(old);
1520
                  rt_lock_table(ot);
1521
                  rt_unlock_table(ot);
1522
                }
1523
            }
1524
        }
1525
    }
1526

    
1527
  WALK_LIST(r, new->tables)
1528
    if (!r->table)
1529
      {
1530
        rtable *t = mb_alloc(rt_table_pool, sizeof(struct rtable));
1531
        DBG("\t%s: created\n", r->name);
1532
        rt_setup(rt_table_pool, t, r->name, r);
1533
        add_tail(&routing_tables, &t->n);
1534
        r->table = t;
1535
      }
1536
  DBG("\tdone\n");
1537
}
1538

    
1539
static inline void
1540
do_feed_baby(struct proto *p, int type, struct announce_hook *h, net *n, rte *e)
1541
{
1542
  struct proto *src = e->attrs->proto;
1543
  ea_list *tmpa;
1544

    
1545
  rte_update_lock();
1546
  tmpa = src->make_tmp_attrs ? src->make_tmp_attrs(e, rte_update_pool) : NULL;
1547
  if (type == RA_ACCEPTED)
1548
    rt_notify_accepted(h, n, e, NULL, NULL, tmpa, p->refeeding ? 2 : 1);
1549
  else
1550
    rt_notify_basic(h, n, e, p->refeeding ? e : NULL, tmpa, p->refeeding);
1551
  rte_update_unlock();
1552
}
1553

    
1554
/**
1555
 * rt_feed_baby - advertise routes to a new protocol
1556
 * @p: protocol to be fed
1557
 *
1558
 * This function performs one pass of advertisement of routes to a newly
1559
 * initialized protocol. It's called by the protocol code as long as it
1560
 * has something to do. (We avoid transferring all the routes in single
1561
 * pass in order not to monopolize CPU time.)
1562
 */
1563
int
1564
rt_feed_baby(struct proto *p)
1565
{
1566
  struct announce_hook *h;
1567
  struct fib_iterator *fit;
1568
  int max_feed = 256;
1569

    
1570
  if (!p->feed_ahook)                        /* Need to initialize first */
1571
    {
1572
      if (!p->ahooks)
1573
        return 1;
1574
      DBG("Announcing routes to new protocol %s\n", p->name);
1575
      p->feed_ahook = p->ahooks;
1576
      fit = p->feed_iterator = mb_alloc(p->pool, sizeof(struct fib_iterator));
1577
      goto next_hook;
1578
    }
1579
  fit = p->feed_iterator;
1580

    
1581
again:
1582
  h = p->feed_ahook;
1583
  FIB_ITERATE_START(&h->table->fib, fit, fn)
1584
    {
1585
      net *n = (net *) fn;
1586
      rte *e = n->routes;
1587
      if (max_feed <= 0)
1588
        {
1589
          FIB_ITERATE_PUT(fit, fn);
1590
          return 0;
1591
        }
1592

    
1593
      /* XXXX perhaps we should change feed for RA_ACCEPTED to not use 'new' */
1594

    
1595
      if ((p->accept_ra_types == RA_OPTIMAL) ||
1596
          (p->accept_ra_types == RA_ACCEPTED))
1597
        if (rte_is_valid(e))
1598
          {
1599
            if (p->core_state != FS_FEEDING)
1600
              return 1;  /* In the meantime, the protocol fell down. */
1601
            do_feed_baby(p, p->accept_ra_types, h, n, e);
1602
            max_feed--;
1603
          }
1604

    
1605
      if (p->accept_ra_types == RA_ANY)
1606
        for(e = n->routes; rte_is_valid(e); e = e->next)
1607
          {
1608
            if (p->core_state != FS_FEEDING)
1609
              return 1;  /* In the meantime, the protocol fell down. */
1610
            do_feed_baby(p, RA_ANY, h, n, e);
1611
            max_feed--;
1612
          }
1613
    }
1614
  FIB_ITERATE_END(fn);
1615
  p->feed_ahook = h->next;
1616
  if (!p->feed_ahook)
1617
    {
1618
      mb_free(p->feed_iterator);
1619
      p->feed_iterator = NULL;
1620
      return 1;
1621
    }
1622

    
1623
next_hook:
1624
  h = p->feed_ahook;
1625
  FIB_ITERATE_INIT(fit, &h->table->fib);
1626
  goto again;
1627
}
1628

    
1629
/**
1630
 * rt_feed_baby_abort - abort protocol feeding
1631
 * @p: protocol
1632
 *
1633
 * This function is called by the protocol code when the protocol
1634
 * stops or ceases to exist before the last iteration of rt_feed_baby()
1635
 * has finished.
1636
 */
1637
void
1638
rt_feed_baby_abort(struct proto *p)
1639
{
1640
  if (p->feed_ahook)
1641
    {
1642
      /* Unlink the iterator and exit */
1643
      fit_get(&p->feed_ahook->table->fib, p->feed_iterator);
1644
      p->feed_ahook = NULL;
1645
    }
1646
}
1647

    
1648

    
1649
static inline unsigned
1650
ptr_hash(void *ptr)
1651
{
1652
  uintptr_t p = (uintptr_t) ptr;
1653
  return p ^ (p << 8) ^ (p >> 16);
1654
}
1655

    
1656
static inline unsigned
1657
hc_hash(ip_addr a, rtable *dep)
1658
{
1659
  return (ipa_hash(a) ^ ptr_hash(dep)) & 0xffff;
1660
}
1661

    
1662
static inline void
1663
hc_insert(struct hostcache *hc, struct hostentry *he)
1664
{
1665
  unsigned int k = he->hash_key >> hc->hash_shift;
1666
  he->next = hc->hash_table[k];
1667
  hc->hash_table[k] = he;
1668
}
1669

    
1670
static inline void
1671
hc_remove(struct hostcache *hc, struct hostentry *he)
1672
{
1673
  struct hostentry **hep;
1674
  unsigned int k = he->hash_key >> hc->hash_shift;
1675

    
1676
  for (hep = &hc->hash_table[k]; *hep != he; hep = &(*hep)->next);
1677
  *hep = he->next;
1678
}
1679

    
1680
#define HC_DEF_ORDER 10
1681
#define HC_HI_MARK *4
1682
#define HC_HI_STEP 2
1683
#define HC_HI_ORDER 16                        /* Must be at most 16 */
1684
#define HC_LO_MARK /5
1685
#define HC_LO_STEP 2
1686
#define HC_LO_ORDER 10
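/*
 * Worked example (added for illustration): with the default order 10 the
 * table has 1 << 10 = 1024 buckets and hash_shift is 16 - 10 = 6, so a
 * 16-bit key k selects bucket k >> 6. The grow mark is then hash_max =
 * 1024 * 4 = 4096 entries (resize by HC_HI_STEP to order 12), while
 * hash_min stays 0 at the default order; only orders above HC_LO_ORDER
 * get a shrink mark of hsize / 5.
 */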
1687

    
1688
static void
1689
hc_alloc_table(struct hostcache *hc, unsigned order)
1690
{
1691
  unsigned hsize = 1 << order;
1692
  hc->hash_order = order;
1693
  hc->hash_shift = 16 - order;
1694
  hc->hash_max = (order >= HC_HI_ORDER) ? ~0 : (hsize HC_HI_MARK);
1695
  hc->hash_min = (order <= HC_LO_ORDER) ?  0 : (hsize HC_LO_MARK);
1696

    
1697
  hc->hash_table = mb_allocz(rt_table_pool, hsize * sizeof(struct hostentry *));
1698
}
1699

    
1700
static void
1701
hc_resize(struct hostcache *hc, unsigned new_order)
1702
{
1703
  unsigned old_size = 1 << hc->hash_order;
1704
  struct hostentry **old_table = hc->hash_table;
1705
  struct hostentry *he, *hen;
1706
  int i;
1707

    
1708
  hc_alloc_table(hc, new_order);
1709
  for (i = 0; i < old_size; i++)
1710
    for (he = old_table[i]; he != NULL; he=hen)
1711
      {
1712
        hen = he->next;
1713
        hc_insert(hc, he);
1714
      }
1715
  mb_free(old_table);
1716
}
1717

    
1718
static struct hostentry *
1719
hc_new_hostentry(struct hostcache *hc, ip_addr a, ip_addr ll, rtable *dep, unsigned k)
1720
{
1721
  struct hostentry *he = sl_alloc(hc->slab);
1722

    
1723
  he->addr = a;
1724
  he->link = ll;
1725
  he->tab = dep;
1726
  he->hash_key = k;
1727
  he->uc = 0;
1728
  he->src = NULL;
1729

    
1730
  add_tail(&hc->hostentries, &he->ln);
1731
  hc_insert(hc, he);
1732

    
1733
  hc->hash_items++;
1734
  if (hc->hash_items > hc->hash_max)
1735
    hc_resize(hc, hc->hash_order + HC_HI_STEP);
1736

    
1737
  return he;
1738
}
1739

    
1740
static void
1741
hc_delete_hostentry(struct hostcache *hc, struct hostentry *he)
1742
{
1743
  rta_free(he->src);
1744

    
1745
  rem_node(&he->ln);
1746
  hc_remove(hc, he);
1747
  sl_free(hc->slab, he);
1748

    
1749
  hc->hash_items--;
1750
  if (hc->hash_items < hc->hash_min)
1751
    hc_resize(hc, hc->hash_order - HC_LO_STEP);
1752
}
1753

    
1754
static void
1755
rt_init_hostcache(rtable *tab)
1756
{
1757
  struct hostcache *hc = mb_allocz(rt_table_pool, sizeof(struct hostcache));
1758
  init_list(&hc->hostentries);
1759

    
1760
  hc->hash_items = 0;
1761
  hc_alloc_table(hc, HC_DEF_ORDER);
1762
  hc->slab = sl_new(rt_table_pool, sizeof(struct hostentry));
1763

    
1764
  hc->lp = lp_new(rt_table_pool, 1008);
1765
  hc->trie = f_new_trie(hc->lp);
1766

    
1767
  tab->hostcache = hc;
1768
}
1769

    
1770
static void
1771
rt_free_hostcache(rtable *tab)
1772
{
1773
  struct hostcache *hc = tab->hostcache;
1774

    
1775
  node *n;
1776
  WALK_LIST(n, hc->hostentries)
1777
    {
1778
      struct hostentry *he = SKIP_BACK(struct hostentry, ln, n);
1779
      rta_free(he->src);
1780

    
1781
      if (he->uc)
1782
        log(L_ERR "Hostcache is not empty in table %s", tab->name);
1783
    }
1784

    
1785
  rfree(hc->slab);
1786
  rfree(hc->lp);
1787
  mb_free(hc->hash_table);
1788
  mb_free(hc);
1789
}
1790

    
1791
static void
1792
rt_notify_hostcache(rtable *tab, net *net)
1793
{
1794
  struct hostcache *hc = tab->hostcache;
1795

    
1796
  if (tab->hcu_scheduled)
1797
    return;
1798

    
1799
  if (trie_match_prefix(hc->trie, net->n.prefix, net->n.pxlen))
1800
    rt_schedule_hcu(tab);
1801
}
1802

    
1803
static int
1804
if_local_addr(ip_addr a, struct iface *i)
1805
{
1806
  struct ifa *b;
1807

    
1808
  WALK_LIST(b, i->addrs)
1809
    if (ipa_equal(a, b->ip))
1810
      return 1;
1811

    
1812
  return 0;
1813
}
1814

    
1815
static u32
rt_get_igp_metric(rte *rt)
{
  eattr *ea = ea_find(rt->attrs->eattrs, EA_GEN_IGP_METRIC);

  if (ea)
    return ea->u.data;

  rta *a = rt->attrs;

#ifdef CONFIG_OSPF
  if ((a->source == RTS_OSPF) ||
      (a->source == RTS_OSPF_IA) ||
      (a->source == RTS_OSPF_EXT1))
    return rt->u.ospf.metric1;
#endif

#ifdef CONFIG_RIP
  if (a->source == RTS_RIP)
    return rt->u.rip.metric;
#endif

  /* Device routes */
  if ((a->dest != RTD_ROUTER) && (a->dest != RTD_MULTIPATH))
    return 0;

  return IGP_METRIC_UNKNOWN;
}

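/* Re-resolve hostentry @he in table @tab; returns nonzero if the route it resolves through has changed */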
static int
rt_update_hostentry(rtable *tab, struct hostentry *he)
{
  rta *old_src = he->src;
  int pxlen = 0;

  /* Reset the hostentry */
  he->src = NULL;
  he->gw = IPA_NONE;
  he->dest = RTD_UNREACHABLE;
  he->igp_metric = 0;

  net *n = net_route(tab, he->addr, MAX_PREFIX_LENGTH);
  if (n)
    {
      rte *e = n->routes;
      rta *a = e->attrs;
      pxlen = n->n.pxlen;

      if (a->hostentry)
        {
          /* Recursive route should not depend on another recursive route */
          log(L_WARN "Next hop address %I resolvable through recursive route for %I/%d",
              he->addr, n->n.prefix, pxlen);
          goto done;
        }

      if (a->dest == RTD_DEVICE)
        {
          if (if_local_addr(he->addr, a->iface))
            {
              /* The host address is a local address, this is not valid */
              log(L_WARN "Next hop address %I is a local address of iface %s",
                  he->addr, a->iface->name);
              goto done;
            }

          /* The host is directly reachable, use link as a gateway */
          he->gw = he->link;
          he->dest = RTD_ROUTER;
        }
      else
        {
          /* The host is reachable through some route entry */
          he->gw = a->gw;
          he->dest = a->dest;
        }

      he->src = rta_clone(a);
      he->igp_metric = rt_get_igp_metric(e);
    }

 done:
  /* Add a prefix range to the trie */
  trie_add_prefix(tab->hostcache->trie, he->addr, MAX_PREFIX_LENGTH, pxlen, MAX_PREFIX_LENGTH);

  rta_free(old_src);
  return old_src != he->src;
}

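/* Rebuild the hostcache trie and re-resolve all hostentries of table @tab, dropping
 * unused ones and scheduling next hop updates in dependent tables when needed */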
static void
rt_update_hostcache(rtable *tab)
{
  struct hostcache *hc = tab->hostcache;
  struct hostentry *he;
  node *n, *x;

  /* Reset the trie */
  lp_flush(hc->lp);
  hc->trie = f_new_trie(hc->lp);

  WALK_LIST_DELSAFE(n, x, hc->hostentries)
    {
      he = SKIP_BACK(struct hostentry, ln, n);
      if (!he->uc)
        {
          hc_delete_hostentry(hc, he);
          continue;
        }

      if (rt_update_hostentry(tab, he))
        rt_schedule_nhu(he->tab);
    }

  tab->hcu_scheduled = 0;
}

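/* Look up a hostentry for address @a and dependent table @dep, creating and resolving it if necessary */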
static struct hostentry *
rt_find_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep)
{
  struct hostentry *he;

  if (!tab->hostcache)
    rt_init_hostcache(tab);

  unsigned int k = hc_hash(a, dep);
  struct hostcache *hc = tab->hostcache;
  for (he = hc->hash_table[k >> hc->hash_shift]; he != NULL; he = he->next)
    if (ipa_equal(he->addr, a) && (he->tab == dep))
      return he;

  he = hc_new_hostentry(hc, a, ll, dep, k);
  rt_update_hostentry(tab, he);
  return he;
}

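/**
 * rta_set_recursive_next_hop - bind a recursive next hop to route attributes
 * @dep: the table whose routes depend on the resolved next hop (it gets a next
 * hop update scheduled whenever the resolution changes)
 * @a: the route attributes to be updated
 * @tab: the table in which the next hop address is resolved
 * @gw: the recursive next hop address
 * @ll: the link address used as the gateway when @gw turns out to be directly reachable
 *
 * The function finds (or creates) a &hostentry for @gw in the hostcache of @tab
 * and attaches it to @a, so that the effective next hop of the route follows
 * the best route for @gw in @tab.
 */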
void
rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr *gw, ip_addr *ll)
{
  rta_apply_hostentry(a, rt_find_hostentry(tab, *gw, *ll, dep));
}

/*
 *  CLI commands
 */

static void
rt_format_via(rte *e, byte *via)
{
  rta *a = e->attrs;

  switch (a->dest)
    {
    case RTD_ROUTER:        bsprintf(via, "via %I on %s", a->gw, a->iface->name); break;
    case RTD_DEVICE:        bsprintf(via, "dev %s", a->iface->name); break;
    case RTD_BLACKHOLE:        bsprintf(via, "blackhole"); break;
    case RTD_UNREACHABLE:        bsprintf(via, "unreachable"); break;
    case RTD_PROHIBIT:        bsprintf(via, "prohibited"); break;
    case RTD_MULTIPATH:        bsprintf(via, "multipath"); break;
    default:                bsprintf(via, "???");
    }
}

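/* Print a single route @e in the 'show route' output format */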
static void
rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, ea_list *tmpa)
{
  byte via[STD_ADDRESS_P_LENGTH+32], from[STD_ADDRESS_P_LENGTH+8];
  byte tm[TM_DATETIME_BUFFER_SIZE], info[256];
  rta *a = e->attrs;
  int primary = (e->net->routes == e);
  int sync_error = (e->net->n.flags & KRF_SYNC_ERROR);
  struct mpnh *nh;

  rt_format_via(e, via);
  tm_format_datetime(tm, &config->tf_route, e->lastmod);
  if (ipa_nonzero(a->from) && !ipa_equal(a->from, a->gw))
    bsprintf(from, " from %I", a->from);
  else
    from[0] = 0;
  if (a->proto->proto->get_route_info || d->verbose)
    {
      /* Need to normalize the extended attributes */
      ea_list *t = tmpa;
      t = ea_append(t, a->eattrs);
      tmpa = alloca(ea_scan(t));
      ea_merge(t, tmpa);
      ea_sort(tmpa);
    }
  if (a->proto->proto->get_route_info)
    a->proto->proto->get_route_info(e, info, tmpa);
  else
    bsprintf(info, " (%d)", e->pref);
  cli_printf(c, -1007, "%-18s %s [%s %s%s]%s%s", ia, via, a->proto->name,
             tm, from, primary ? (sync_error ? " !" : " *") : "", info);
  for (nh = a->nexthops; nh; nh = nh->next)
    cli_printf(c, -1007, "\tvia %I on %s weight %d", nh->gw, nh->iface->name, nh->weight + 1);
  if (d->verbose)
    rta_show(c, a, tmpa);
}

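/* Print the routes of network @n that match the filter and export conditions given in @d */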
static void
rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
{
  rte *e, *ee;
  byte ia[STD_ADDRESS_P_LENGTH+8];
  struct announce_hook *a;
  int ok;

  bsprintf(ia, "%I/%d", n->n.prefix, n->n.pxlen);

  for(e=n->routes; e; e=e->next)
    {
      if (rte_is_rejected(e) != d->rejected)
        continue;

      struct ea_list *tmpa;
      struct proto *p0 = e->attrs->proto;
      struct proto *p1 = d->export_protocol;
      struct proto *p2 = d->show_protocol;

      if (ia[0])
        d->net_counter++;
      d->rt_counter++;
      ee = e;
      rte_update_lock();                /* We use the update buffer for filtering */
      tmpa = p0->make_tmp_attrs ? p0->make_tmp_attrs(e, rte_update_pool) : NULL;
      ok = (d->filter == FILTER_ACCEPT || f_run(d->filter, &e, &tmpa, rte_update_pool, FF_FORCE_TMPATTR) <= F_ACCEPT);
      if (p2 && p2 != p0) ok = 0;
      if (ok && d->export_mode)
        {
          int ic;
          if ((ic = p1->import_control ? p1->import_control(p1, &e, &tmpa, rte_update_pool) : 0) < 0)
            ok = 0;
          else if (!ic && d->export_mode > 1)
            {
              /* FIXME - this shows what should be exported according
                 to current filters, but not what was really exported.
                 The 'configure soft' command may change the export filter
                 without updating the routes. */

              if ((a = proto_find_announce_hook(p1, d->table)) && ((a->out_filter == FILTER_REJECT) ||
                  (a->out_filter && f_run(a->out_filter, &e, &tmpa, rte_update_pool, FF_FORCE_TMPATTR) > F_ACCEPT)))
                ok = 0;
            }
        }
      if (ok)
        {
          d->show_counter++;
          if (d->stats < 2)
            rt_show_rte(c, ia, e, d, tmpa);
          ia[0] = 0;
        }
      if (e != ee)
      {
        rte_free(e);
        e = ee;
      }
      rte_update_unlock();
      if (d->primary_only)
        break;
    }
}

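/* Continuation hook of an asynchronous 'show route' dump; processes a limited
 * batch of networks per call and keeps the iterator position between calls */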
static void
rt_show_cont(struct cli *c)
{
  struct rt_show_data *d = c->rover;
#ifdef DEBUGGING
  unsigned max = 4;
#else
  unsigned max = 64;
#endif
  struct fib *fib = &d->table->fib;
  struct fib_iterator *it = &d->fit;

  FIB_ITERATE_START(fib, it, f)
    {
      net *n = (net *) f;
      if (d->running_on_config && d->running_on_config != config)
        {
          cli_printf(c, 8004, "Stopped due to reconfiguration");
          goto done;
        }
      if (d->export_protocol &&
          d->export_protocol->core_state != FS_HAPPY &&
          d->export_protocol->core_state != FS_FEEDING)
        {
          cli_printf(c, 8005, "Protocol is down");
          goto done;
        }
      if (!max--)
        {
          FIB_ITERATE_PUT(it, f);
          return;
        }
      rt_show_net(c, n, d);
    }
  FIB_ITERATE_END(f);
  if (d->stats)
    cli_printf(c, 14, "%d of %d routes for %d networks", d->show_counter, d->rt_counter, d->net_counter);
  else
    cli_printf(c, 0, "");
done:
  c->cont = c->cleanup = NULL;
}

static void
rt_show_cleanup(struct cli *c)
{
  struct rt_show_data *d = c->rover;

  /* Unlink the iterator */
  fit_get(&d->table->fib, &d->fit);
}

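/* Entry point of the 'show route' command; dumps the whole table asynchronously
 * or shows the requested network directly */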
void
rt_show(struct rt_show_data *d)
{
  net *n;

  if (d->pxlen == 256)
    {
      FIB_ITERATE_INIT(&d->fit, &d->table->fib);
      this_cli->cont = rt_show_cont;
      this_cli->cleanup = rt_show_cleanup;
      this_cli->rover = d;
    }
  else
    {
      if (d->show_for)
        n = net_route(d->table, d->prefix, d->pxlen);
      else
        n = net_find(d->table, d->prefix, d->pxlen);
      if (n)
        {
          rt_show_net(this_cli, n, d);
          cli_msg(0, "");
        }
      else
        cli_msg(8001, "Network not in table");
    }
}

/*
 *  Documentation for functions declared inline in route.h
 */
#if 0

/**
 * net_find - find a network entry
 * @tab: a routing table
 * @addr: address of the network
 * @len: length of the network prefix
 *
 * net_find() looks up the given network in routing table @tab and
 * returns a pointer to its &net entry or %NULL if no such network
 * exists.
 */
static inline net *net_find(rtable *tab, ip_addr addr, unsigned len)
{ DUMMY; }

/**
 * net_get - obtain a network entry
 * @tab: a routing table
 * @addr: address of the network
 * @len: length of the network prefix
 *
 * net_get() looks up the given network in routing table @tab and
 * returns a pointer to its &net entry. If no such entry exists, it's
 * created.
 */
static inline net *net_get(rtable *tab, ip_addr addr, unsigned len)
{ DUMMY; }

/**
 * rte_cow - copy a route for writing
 * @r: a route entry to be copied
 *
 * rte_cow() takes a &rte and prepares it for modification. The exact action
 * taken depends on the flags of the &rte -- if it's a temporary entry, it's
 * just returned unchanged, else a new temporary entry with the same contents
 * is created.
 *
 * The primary use of this function is inside the filter machinery -- when
 * a filter wants to modify &rte contents (to change the preference or to
 * attach another set of attributes), it must ensure that the &rte is not
 * shared with anyone else (and especially that it isn't stored in any routing
 * table).
 *
 * Result: a pointer to the new writable &rte.
 */
static inline rte * rte_cow(rte *r)
{ DUMMY; }

#endif