Revision 51270ce4

View differences:

INSTALL
3 3

  
4 4
- This is not (yet) a 'release'; things may be broken.
5 5

  
6
- Mininet is not currently 'installed.' If you want to install it,
7
  so that you can 'import mininet', place mininet.py somewhere in your
8
  python path.
6
- To install mininet, with root privileges:
7

  
8
  python setup.py install
9

  
10
  This places the mininet package in /usr/lib/python-2.5/site-packages/,
11
  so that 'import mininet' will work.
9 12
  
10 13
- A functional netns binary is required to run mininet, but currently you
11 14
  have to compile it and install it yourself from the included .c file:
cleanup
1
#!/usr/bin/python
2

  
3
"""
4
Unfortunately, Mininet and OpenFlow don't always clean up
5
properly after themselves. Until they do (or until cleanup
6
functionality is integrated into the python code), this
7
script may be used to get rid of unwanted garbage. It may
8
also get rid of 'false positives', but hopefully nothing
9
irreplaceable!
10
"""
11

  
12
from subprocess import Popen, PIPE
13
import re
14

  
15
from mininet import quietRun
16

  
17
def sh( cmd ):
   "Echo a command, run it in a shell, and return its output."
   print( cmd )
   process = Popen( [ '/bin/sh', '-c', cmd ], stdout=PIPE )
   return process.communicate()[ 0 ]
22

  
23
def cleanUpScreens():
   "Kill leftover screen sessions whose names match mininet's pattern."
   pattern = r'(\d+.[hsc]\d+)'
   for line in sh( 'screen -ls' ).split( '\n' ):
      match = re.search( pattern, line )
      if match:
         quietRun( 'screen -S ' + match.group( 1 ) + ' -X kill' )
31
 
32
def cleanup():
33
   """Clean up junk which might be left over from old runs;
34
      do fast stuff before slow dp and link removal!"""
35
      
36
   print "*** Removing excess controllers/ofprotocols/ofdatapaths/pings/noxes"
37
   zombies = 'controller ofprotocol ofdatapath ping nox_core lt-nox_core '
38
   zombies += 'udpbwtest'
39
   # Note: real zombie processes can't actually be killed, since they 
40
   # are already (un)dead. Then again,
41
   # you can't connect to them either, so they're mostly harmless.
42
   sh( 'killall -9 ' + zombies + ' 2> /dev/null' )
43

  
44
   print "*** Removing junk from /tmp"
45
   sh( 'rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log' )
46

  
47
   print "*** Removing old screen sessions"
48
   cleanUpScreens()
49

  
50
   print "*** Removing excess kernel datapaths"
51
   dps = sh( "ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'" ).split( '\n')
52
   for dp in dps: 
53
      if dp != '': sh( 'dpctl deldp ' + dp )
54
      
55
   print "*** Removing all links of the pattern foo-ethX"
56
   links = sh( "ip link show | egrep -o '(\w+-eth\w+)'" ).split( '\n' )
57
   for link in links: 
58
      if link != '': sh( "ip link del " + link )
59

  
60
   print "*** Cleanup complete."
61

  
62
# Run a full cleanup when invoked as a script.
if __name__ == "__main__":
   cleanup()
examples/cli.py
2 2

  
3 3
"Create a tree network and run the CLI on it."
4 4

  
5
from mininet import init, TreeNet, Cli
5
from mininet.mininet import init, TreeNet, Cli
6 6

  
7 7
if __name__ == '__main__':
8 8
   init()
examples/grid.py
2 2

  
3 3
"Instantiate a Grid network and use NOX as the controller."
4 4

  
5
from mininet import init, Controller, GridNet, Cli
5
from mininet.mininet import init, Controller, GridNet, Cli
6 6

  
7 7
class NoxController( Controller ):
8 8
   def __init__( self, name, **kwargs ):
examples/linearbandwidth.py
15 15
your controller to support 100 switches (or more.)
16 16
"""
17 17
   
18
from mininet import init, Network, defaultNames, Host, Switch
19
from mininet import createLink, flush, iperf, pingTestVerbose, Cli
18
from mininet.mininet import init, Network, defaultNames, Host, Switch
19
from mininet.mininet import createLink, flush, iperf, pingTestVerbose, Cli
20 20

  
21 21
class LinearNet( Network ):
22 22
   def __init__( self, switchCount, **kwargs ):
examples/multitest.py
2 2

  
3 3
"Run multiple tests on a network."
4 4
   
5
from mininet import init, TreeNet, pingTestVerbose, iperfTest, Cli
5
from mininet.mininet import init, TreeNet, pingTestVerbose, iperfTest, Cli
6 6

  
7 7
if __name__ == '__main__':
8 8
   init()
examples/nox.py
2 2

  
3 3
"Instantiate a Tree network and use NOX as the controller."
4 4

  
5
from mininet import init, Controller, TreeNet, Cli
5
from mininet.mininet import init, Controller, TreeNet, Cli
6 6

  
7 7
class NoxController( Controller ):
8 8
   def __init__( self, name, **kwargs ):
examples/ripcordtest.py
5 5
import ripcord
6 6
from ripcord.topo import FatTreeTopo
7 7

  
8
from mininet import init, Controller, Network, Host, nameGen, Cli
9
from mininet import createLink, flush
8
from mininet.mininet import init, Controller, Network, Host, nameGen, Cli
9
from mininet.mininet import createLink, flush
10 10

  
11 11
class NoxController( Controller ):
12
   def __init__( self, name, kernel=False **kwargs ):
12
   def __init__( self, name, kernel=False, **kwargs ):
13 13
      Controller.__init__( self, name, kernel=kernel,
14 14
         controller='nox_core', 
15
         cargs='-v --libdir=/usr/local/lib -i ptcp: routing', 
15
         cargs='-v --libdir=/usr/local/lib -i ptcp:', 
16 16
         cdir='/usr/local/bin', **kwargs)
17 17
   
18 18
class FatTree( Network ):
examples/scratchnet.py
6 6
but it exposes the configuration details and allows customization.
7 7
"""
8 8

  
9
from mininet import init, Node, createLink
9
from mininet.mininet import init, Node, createLink
10 10

  
11 11
def scratchNet( cname='controller', cargs='ptcp:'):
12 12
   # Create Network
examples/scratchnetuser.py
8 8
This version uses the user datapath.
9 9
"""
10 10

  
11
from mininet import init, Node, createLink
11
from mininet.mininet import init, Node, createLink
12 12

  
13 13
def scratchNetUser( cname='controller', cargs='ptcp:'):
14 14
   # Create Network
examples/sshd.py
11 11
"""
12 12

  
13 13
import sys ; readline = sys.stdin.readline
14
from mininet import init, Node, createLink, TreeNet, Cli
14
from mininet.mininet import init, Node, createLink, TreeNet, Cli
15 15

  
16 16
def nets( hosts ):
17 17
   "Return list of networks (/24) for hosts."
examples/tree1024.py
7 7
and running sysctl -p.
8 8
"""
9 9
   
10
from mininet import init, TreeNet, Cli
10
from mininet.mininet import init, TreeNet, Cli
11 11

  
12 12
if __name__ == '__main__':
13 13
   init()
examples/treeping64.py
2 2

  
3 3
"Create a 64-node tree network, and test connectivity using ping."
4 4
   
5
from mininet import init, TreeNet, pingTestVerbose
5
from mininet.mininet import init, TreeNet, pingTestVerbose
6 6

  
7 7
def treePing64():
8 8
   results = {}
examples/xterms.py
7 7

  
8 8
import os, re
9 9
from subprocess import Popen
10
from mininet import init, TreeNet, quietRun
10
from mininet.mininet import init, TreeNet, quietRun
11 11

  
12 12
def makeXterm( node, title ):
13 13
   "Run screen on a node, and hook up an xterm."
mininet.py
1
#!/usr/bin/python
2

  
3
"""
4
Mininet: A simple networking testbed for OpenFlow!
5

  
6
Mininet creates scalable OpenFlow test networks by using
7
process-based virtualization and network namespaces. 
8

  
9
Simulated hosts are created as processes in separate network
10
namespaces. This allows a complete OpenFlow network to be simulated on
11
top of a single Linux kernel.
12

  
13
Each host has:
14
   A virtual console (pipes to a shell)
15
   A virtual interfaces (half of a veth pair)
16
   A parent shell (and possibly some child processes) in a namespace
17
   
18
Hosts have a network interface which is configured via ifconfig/ip
19
link/etc. with data network IP addresses (e.g. 192.168.123.2 )
20

  
21
This version supports both the kernel and user space datapaths
22
from the OpenFlow reference implementation.
23

  
24
In kernel datapath mode, the controller and switches are simply
25
processes in the root namespace.
26

  
27
Kernel OpenFlow datapaths are instantiated using dpctl(8), and are
28
attached to the one side of a veth pair; the other side resides in the
29
host namespace. In this mode, switch processes can simply connect to the
30
controller via the loopback interface.
31

  
32
In user datapath mode, the controller and switches are full-service
33
nodes that live in their own network namespaces and have management
34
interfaces and IP addresses on a control network (e.g. 10.0.123.1,
35
currently routed although it could be bridged.)
36

  
37
In addition to a management interface, user mode switches also have
38
several switch interfaces, halves of veth pairs whose other halves
39
reside in the host nodes that the switches are connected to.
40

  
41
Naming:
42
   Host nodes are named h1-hN
43
   Switch nodes are named s0-sN
44
   Interfaces are named {nodename}-eth0 .. {nodename}-ethN,
45

  
46
Thoughts/TBD:
47

  
48
   It should be straightforward to add a function to read
49
   OpenFlowVMS spec files, but I haven't done so yet.
50
   For the moment, specifying configurations and tests in Python
51
   is straightforward and relatively concise.
52
   Soon, we may want to split the various subsystems (core,
53
   topology/network, cli, tests, etc.) into multiple modules.
54
   Currently nox support is in nox.py.
55
   We'd like to support OpenVSwitch as well as the reference
56
   implementation.
57
   
58
Bob Lantz
59
rlantz@cs.stanford.edu
60

  
61
History:
62
11/19/09 Initial revision (user datapath only)
63
11/19/09 Mininet demo at OpenFlow SWAI meeting
64
12/08/09 Kernel datapath support complete
65
12/09/09 Moved controller and switch routines into classes
66
12/12/09 Added subdivided network driver workflow
67
12/13/09 Added support for custom controller and switch classes
68
"""
69

  
70
from subprocess import call, check_call, Popen, PIPE, STDOUT
71
from time import sleep
72
import os, re, signal, sys, select
73
flush = sys.stdout.flush
74
from resource import setrlimit, RLIMIT_NPROC, RLIMIT_NOFILE
75

  
76
# Utility routines to make it easier to run commands
77

  
78
def run( cmd ):
   "Run a command via subprocess.call(), returning its exit code."
   args = cmd.split( ' ' )
   return call( args )
81

  
82
def checkRun( cmd ):
   "Run a command via subprocess.check_call(), raising on nonzero exit."
   args = cmd.split( ' ' )
   check_call( args )
85
   
86
def quietRun( cmd ):
   """Run a command, routing stderr to stdout, and return its output.
      cmd may be a string (split on single spaces) or an argument list."""
   if isinstance( cmd, str ): cmd = cmd.split( ' ' )
   popen = Popen( cmd, stdout=PIPE, stderr=STDOUT )
   # We can't use Popen.communicate() because it uses
   # select(), which can't handle
   # high file descriptor numbers! poll() can, however.
   output = ''
   readable = select.poll()
   readable.register( popen.stdout )
   while True:
      while readable.poll():
         data = popen.stdout.read( 1024 )
         if not isinstance( data, str ):
            # Python 3 pipes yield bytes; decode so we always return str.
            # (On Python 2, bytes is str, so this branch never runs.)
            data = data.decode()
         if len( data ) == 0: break
         output += data
      popen.poll()
      if popen.returncode is not None: break
   # Fix: close the pipe so long runs don't leak file descriptors
   popen.stdout.close()
   return output
104
   
105
class Node( object ):
106
   """A virtual network node is simply a shell in a network namespace.
107
      We communicate with it using pipes."""
108
   inToNode = {}
109
   outToNode = {}
110
   def __init__( self, name, inNamespace=True ):
111
      self.name = name
112
      closeFds = False # speed vs. memory use
113
      # xpg_echo is needed so we can echo our sentinel in sendCmd
114
      cmd = [ '/bin/bash', '-O', 'xpg_echo' ]
115
      self.inNamespace = inNamespace
116
      if self.inNamespace: cmd = [ 'netns' ] + cmd
117
      self.shell = Popen( cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT,
118
         close_fds=closeFds )
119
      self.stdin = self.shell.stdin
120
      self.stdout = self.shell.stdout
121
      self.pollOut = select.poll() 
122
      self.pollOut.register( self.stdout )
123
      # Maintain mapping between file descriptors and nodes
124
      # This could be useful for monitoring multiple nodes
125
      # using select.poll()
126
      self.outToNode[ self.stdout.fileno() ] = self
127
      self.inToNode[ self.stdin.fileno() ] = self
128
      self.pid = self.shell.pid
129
      self.intfCount = 0
130
      self.intfs = []
131
      self.ips = {}
132
      self.connection = {}
133
      self.waiting = False
134
      self.execed = False
135
   def fdToNode( self, f ):
136
      node = self.outToNode.get( f )
137
      return node or self.inToNode.get( f )
138
   def cleanup( self ):
139
      # Help python collect its garbage
140
      self.shell = None
141
   # Subshell I/O, commands and control
142
   def read( self, max ): return os.read( self.stdout.fileno(), max )
143
   def write( self, data ): os.write( self.stdin.fileno(), data )
144
   def terminate( self ):
145
      os.kill( self.pid, signal.SIGKILL )
146
      self.cleanup()
147
   def stop( self ): self.terminate()
148
   def waitReadable( self ): self.pollOut.poll()
149
   def sendCmd( self, cmd ):
150
      """Send a command, followed by a command to echo a sentinel,
151
         and return without waiting for the command to complete."""
152
      assert not self.waiting
153
      if cmd[ -1 ] == '&':
154
         separator = '&'
155
         cmd = cmd[ : -1 ]
156
      else: separator = ';'
157
      if isinstance( cmd, list): cmd = ' '.join( cmd )
158
      self.write( cmd + separator + " echo -n '\\0177' \n")
159
      self.waiting = True
160
   def monitor( self ):
161
      "Monitor a command's output, returning (done, data)."
162
      assert self.waiting
163
      self.waitReadable()
164
      data = self.read( 1024 )
165
      if len( data ) > 0 and data[ -1 ] == chr( 0177 ):
166
         self.waiting = False
167
         return True, data[ : -1 ]
168
      else:
169
         return False, data
170
   def sendInt( self ):
171
      "Send ^C, hopefully interrupting a running subprocess."
172
      self.write( chr( 3 ) )
173
   def waitOutput( self ):
174
      """Wait for a command to complete (signaled by a sentinel
175
      character, ASCII(127) appearing in the output stream) and return
176
      the output, including trailing newline."""
177
      assert self.waiting
178
      output = ""
179
      while True:
180
         self.waitReadable()
181
         data = self.read( 1024 )
182
         if len(data) > 0  and data[ -1 ] == chr( 0177 ): 
183
            output += data[ : -1 ]
184
            break
185
         else: output += data
186
      self.waiting = False
187
      return output
188
   def cmd( self, cmd ):
189
      "Send a command, wait for output, and return it."
190
      self.sendCmd( cmd )
191
      return self.waitOutput()
192
   def cmdPrint( self, cmd ):
193
      "Call cmd, printing the command and output"
194
      print "***", self.name, ":", cmd
195
      result = self.cmd( cmd )
196
      print result,
197
      return result
198
   # Interface management, configuration, and routing
199
   def intfName( self, n):
200
      "Construct a canonical interface name node-intf for interface N."
201
      return self.name + '-eth' + `n`
202
   def newIntf( self ):
203
      "Reserve and return a new interface name for this node."
204
      intfName = self.intfName( self.intfCount)
205
      self.intfCount += 1
206
      self.intfs += [ intfName ]
207
      return intfName
208
   def setIP( self, intf, ip, bits ):
209
      "Set an interface's IP address."
210
      result = self.cmd( [ 'ifconfig', intf, ip + bits, 'up' ] )
211
      self.ips[ intf ] = ip
212
      return result
213
   def setHostRoute( self, ip, intf ):
214
      "Add a route to the given IP address via intf."
215
      return self.cmd( 'route add -host ' + ip + ' dev ' + intf )
216
   def setDefaultRoute( self, intf ):
217
      "Set the default route to go through intf."
218
      self.cmd( 'ip route flush' )
219
      return self.cmd( 'route add default ' + intf )
220
   def IP( self ):
221
      "Return IP address of first interface"
222
      if len( self.intfs ) > 0:
223
         return self.ips.get( self.intfs[ 0 ], None )
224
   def intfIsUp( self, intf ):
225
      "Check if one of our interfaces is up."
226
      return 'UP' in self.cmd( 'ifconfig ' + self.intfs[ 0 ] )
227
   # Other methods  
228
   def __str__( self ): 
229
      result = self.name
230
      result += ": IP=" + self.IP() + " intfs=" + ','.join( self.intfs )
231
      result += " waiting=" +  `self.waiting`
232
      return result
233

  
234

  
235

  
236
class Host( Node ):
   "A Host is simply a Node used as a network endpoint."
   pass
239
      
240
class Controller( Node ):
   """A Controller is a Node that is running (or has execed) an
      OpenFlow controller."""
   def __init__( self, name, kernel=True, controller='controller',
      cargs='-v ptcp:', cdir=None ):
      "Create a controller; in kernel mode it lives in the root namespace."
      self.controller = controller
      self.cargs = cargs
      self.cdir = cdir
      Node.__init__( self, name, inNamespace=( not kernel ) )
   def start( self ):
      "Start <controller> <args> on controller, logging to /tmp/cN.log"
      cout = '/tmp/' + self.name + '.log'
      if self.cdir is not None:
         self.cmdPrint( 'cd ' + self.cdir )
      # Background the controller, sending both streams to the log file
      self.cmdPrint( self.controller + ' ' + self.cargs +
         ' 1> ' + cout + ' 2> ' + cout + ' &' )
      self.execed = False # XXX Until I fix it
   def stop( self, controller='controller' ):
      "Stop controller cprog on controller"
      self.cmd( "kill %" + controller )
      self.terminate()
261
         
262
class Switch( Node ):
263
   """A Switch is a Node that is running (or has execed)
264
      an OpenFlow switch."""
265
   def __init__( self, name, datapath=None ):
266
      self.dp = datapath
267
      Node.__init__( self, name, inNamespace=( datapath == None ) )
268
   def startUserDatapath( self, controller ):
269
      """Start OpenFlow reference user datapath, 
270
         logging to /tmp/sN-{ofd,ofp}.log"""
271
      ofdlog = '/tmp/' + self.name + '-ofd.log'
272
      ofplog = '/tmp/' + self.name + '-ofp.log'
273
      self.cmd( 'ifconfig lo up' )
274
      intfs = self.intfs[ 1 : ] # 0 is mgmt interface
275
      self.cmdPrint( 'ofdatapath -i ' + ','.join( intfs ) +
276
       ' ptcp: 1> ' + ofdlog + ' 2> '+ ofdlog + ' &' )
277
      self.cmdPrint( 'ofprotocol tcp:' + controller.IP() +
278
         ' tcp:localhost --fail=closed 1> ' + ofplog + ' 2>' + ofplog + ' &' )
279
   def stopUserDatapath( self ):
280
      "Stop OpenFlow reference user datapath."
281
      self.cmd( "kill %ofdatapath" )
282
      self.cmd( "kill %ofprotocol" )
283
   def startKernelDatapath( self, controller):
284
      "Start up switch using OpenFlow reference kernel datapath."
285
      ofplog = '/tmp/' + self.name + '-ofp.log'
286
      quietRun( 'ifconfig lo up' )
287
      # Delete local datapath if it exists;
288
      # then create a new one monitoring the given interfaces
289
      quietRun( 'dpctl deldp ' + self.dp )
290
      self.cmdPrint( 'dpctl adddp ' + self.dp )
291
      self.cmdPrint( 'dpctl addif ' + self.dp + ' ' + ' '.join( self.intfs ) )
292
      # Run protocol daemon
293
      self.cmdPrint( 'ofprotocol' +
294
         ' ' + self.dp + ' tcp:127.0.0.1 ' + 
295
         ' --fail=closed 1> ' + ofplog + ' 2>' + ofplog + ' &' )
296
      self.execed = False # XXX until I fix it
297
   def stopKernelDatapath( self ):
298
      "Terminate a switch using OpenFlow reference kernel datapath."
299
      quietRun( 'dpctl deldp ' + self.dp )
300
      # In theory the interfaces should go away after we shut down.
301
      # However, this takes time, so we're better off to remove them
302
      # explicitly so that we won't get errors if we run before they
303
      # have been removed by the kernel. Unfortunately this is very slow.
304
      self.cmd( 'kill %ofprotocol')
305
      for intf in self.intfs:
306
         quietRun( 'ip link del ' + intf )
307
         sys.stdout.write( '.' ) ; flush()
308
   def start( self, controller ): 
309
      if self.dp is None: self.startUserDatapath( controller )
310
      else: self.startKernelDatapath( controller )
311
   def stop( self ):
312
      if self.dp is None: self.stopUserDatapath()
313
      else: self.stopKernelDatapath()
314
   def sendCmd( self, cmd ):
315
      if not self.execed: return Node.sendCmd( self, cmd )
316
      else: print "*** Error:", self.name, "has execed and cannot accept commands"
317
   def monitor( self ):
318
      if not self.execed: return Node.monitor( self )
319
      else: return True, ''
320
         
321
# Interface management
322
# 
323
# Interfaces are managed as strings which are simply the
324
# interface names, of the form "nodeN-ethM".
325
#
326
# To connect nodes, we create a pair of veth interfaces, and then place them
327
# in the pair of nodes that we want to communicate. We then update the node's
328
# list of interfaces and connectivity map.
329
#
330
# For the kernel datapath, switch interfaces
331
# live in the root namespace and thus do not have to be
332
# explicitly moved.
333

  
334
def makeIntfPair( intf1, intf2 ):
   "Create a veth pair connecting intf1 and intf2."
   # Get rid of any stale interfaces with the same names
   for intf in ( intf1, intf2 ):
      quietRun( 'ip link del ' + intf )
   # Create the new pair
   return checkRun( 'ip link add name ' + intf1 +
      ' type veth peer name ' + intf2 )
342
   
343
def moveIntf( intf, node ):
344
   "Move intf to node."
345
   cmd = 'ip link set ' + intf + ' netns ' + `node.pid`
346
   quietRun( cmd )
347
   links = node.cmd( 'ip link show' )
348
   if not intf in links:
349
      print "*** Error: moveIntf:", intf, "not successfully moved to",
350
      print node.name,":"
351
      return False
352
   return True
353

  
354
def retry( n, fn, *args):
   "Call fn( *args ) until it succeeds, giving up (and exiting) after n tries."
   attempts = 0
   while not fn( *args ) and attempts < n:
      sleep( 1 )
      sys.stdout.write( "*** retrying...\n" )
      sys.stdout.flush()
      attempts += 1
   if attempts >= n:
      sys.stdout.write( "*** giving up\n" )
      exit( 1 )
364
   
365
def createLink( node1, node2 ):
   "Wire node1 and node2 together with a veth pair; return the intf names."
   intf1, intf2 = node1.newIntf(), node2.newIntf()
   makeIntfPair( intf1, intf2 )
   # Interfaces only need moving when the node has its own namespace;
   # the move can transiently fail, so retry a few times.
   if node1.inNamespace: retry( 3, moveIntf, intf1, node1 )
   if node2.inNamespace: retry( 3, moveIntf, intf2, node2 )
   # Record connectivity in both directions
   node1.connection[ intf1 ] = ( node2, intf2 )
   node2.connection[ intf2 ] = ( node1, intf1 )
   return intf1, intf2
375

  
376
# Handy utilities
377
 
378
def createNodes( name, count ):
   "Create and return a list of count Nodes named name0 .. name<count-1>."
   return [ Node( name + str( i ) ) for i in range( count ) ]
383
     
384
def dumpNodes( nodes ):
385
   "Dump ifconfig of each node."
386
   for node in nodes:
387
      print "*** Dumping node", node.name
388
      print node.cmd( 'ip link show' )
389
      print node.cmd( 'route' )
390
   
391
def ipGen( A, B, c, d ):
   """Generate successive class-B IP addresses starting at A.B.c.d,
      rolling the low octets over at 254 and stopping when c exceeds 254."""
   while True:
      yield '%d.%d.%d.%d' % ( A, B, c, d )
      d += 1
      if d > 254:
         d = 1
         c += 1
         if c > 254:
            return
400

  
401
def nameGen( prefix ):
   "Generate names prefix0, prefix1, prefix2, ..."
   i = 0
   while True:
      yield prefix + str( i )
      i += 1
405
      
406
# Control network support:
407
#
408
# Create an explicit control network. Currently this is only
409
# used by the user datapath configuration.
410
#
411
# Notes:
412
#
413
# 1. If the controller and switches are in the same (e.g. root)
414
#    namespace, they can just use the loopback connection.
415
#    We may wish to do this for the user datapath as well as the
416
#    kernel datapath.
417
#
418
# 2. If we can get unix domain sockets to work, we can use them
419
#    instead of an explicit control network.
420
#
421
# 3. Instead of routing, we could bridge or use "in-band" control.
422
#
423
# 4. Even if we dispense with this in general, it could still be
424
#    useful for people who wish to simulate a separate control
425
#    network (since real networks may need one!)
426

  
427
def configureRoutedControlNetwork( controller, switches, ips):
428
   """Configure a routed control network on controller and switches,
429
      for use with the user datapath."""
430
   cip = ips.next()
431
   print controller.name, '<->',
432
   for switch in switches:
433
      print switch.name, ; flush()
434
      sip = ips.next()
435
      sintf = switch.intfs[ 0 ]
436
      node, cintf = switch.connection[ sintf ]
437
      if node != controller:
438
         print "*** Error: switch", switch.name, 
439
         print "not connected to correct controller"
440
         exit( 1 )
441
      controller.setIP( cintf, cip,  '/24' )
442
      switch.setIP( sintf, sip, '/24' )
443
      controller.setHostRoute( sip, cintf )
444
      switch.setHostRoute( cip, sintf )
445
   print
446
   print "*** Testing control network"
447
   while not controller.intfIsUp( controller.intfs[ 0 ] ):
448
      print "*** Waiting for ", controller.intfs[ 0 ], "to come up"
449
      sleep( 1 )
450
   for switch in switches:
451
      while not switch.intfIsUp( switch.intfs[ 0 ] ):
452
         print "*** Waiting for ", switch.intfs[ 0 ], "to come up"
453
         sleep( 1 )
454
      if pingTest( hosts=[ switch, controller ] ) != 0:
455
         print "*** Error: control network test failed"
456
         exit( 1 )
457

  
458
def configHosts( hosts, ips ):
459
   "Configure a set of hosts, starting at IP address a.b.c.d"
460
   for host in hosts:
461
      hintf = host.intfs[ 0 ]
462
      host.setIP( hintf, ips.next(), '/24' )
463
      host.setDefaultRoute( hintf )
464
      # You're low priority, dude!
465
      quietRun( 'renice +18 -p ' + `host.pid` )
466
      print host.name, ; flush()
467
   print
468
 
469
# Test driver and topologies
470

  
471
class Network( object ):
472
   "Network topology (and test driver) base class."
473
   def __init__( self,
474
      kernel=True, 
475
      Controller=Controller, Switch=Switch, 
476
      hostIpGen=ipGen, hostIpStart=( 192, 168, 123, 1 ) ):
477
      self.kernel = kernel
478
      self.Controller = Controller
479
      self.Switch = Switch
480
      self.hostIps = apply( hostIpGen, hostIpStart )
481
      # Check for kernel modules
482
      modules = quietRun( 'lsmod' )
483
      if not kernel and 'tun' not in modules:
484
         print "*** Error: kernel module tun not loaded:",
485
         print " user datapath not supported"
486
         exit( 1 )
487
      if kernel and 'ofdatapath' not in modules:
488
         print "*** Error: kernel module ofdatapath not loaded:",
489
         print " kernel datapath not supported"
490
         exit( 1 )
491
      # Create network, but don't start things up yet!
492
      self.prepareNet()
493
   def configureControlNetwork( self,
494
      ipGen=ipGen, ipStart = (10, 0, 123, 1 ) ):
495
      ips = apply( ipGen, ipStart )
496
      configureRoutedControlNetwork( self.controllers[ 0 ],
497
         self.switches, ips = ips)
498
   def configHosts( self ):
499
      configHosts( self.hosts, self.hostIps )
500
   def prepareNet( self ):
501
      """Create a network by calling makeNet as follows: 
502
         (switches, hosts ) = makeNet()
503
         Create a controller here as well."""
504
      kernel = self.kernel
505
      if kernel: print "*** Using kernel datapath"
506
      else: print "*** Using user datapath"
507
      print "*** Creating controller"
508
      self.controller = self.Controller( 'c0', kernel=kernel )
509
      self.controllers = [ self.controller ]
510
      print "*** Creating network"
511
      self.switches, self.hosts = self.makeNet( self.controller )
512
      print
513
      if not kernel:
514
         print "*** Configuring control network"
515
         self.configureControlNetwork()
516
      print "*** Configuring hosts"
517
      self.configHosts()
518
   def start( self ):
519
      "Start controller and switches"
520
      print "*** Starting controller"
521
      for controller in self.controllers:
522
         controller.start()
523
      print "*** Starting", len( self.switches ), "switches"
524
      for switch in self.switches:
525
         switch.start( self.controllers[ 0 ] )
526
   def stop( self ):
527
      "Stop the controller(s), switches and hosts"
528
      print "*** Stopping hosts"
529
      for host in self.hosts: 
530
         host.terminate()
531
      print "*** Stopping switches"
532
      for switch in self.switches:
533
         print switch.name, ; flush()
534
         switch.stop()
535
      print
536
      print "*** Stopping controller"
537
      for controller in self.controllers:
538
         controller.stop(); controller.terminate()
539
      print
540
      print "*** Test complete"
541
   def runTest( self, test ):
542
      "Run a given test, called as test( controllers, switches, hosts)"
543
      return test( self.controllers, self.switches, self.hosts )
544
   def run( self, test ):
545
      """Perform a complete start/test/stop cycle; test is of the form
546
         test( controllers, switches, hosts )"""
547
      self.start()
548
      print "*** Running test"
549
      result = self.runTest( test )
550
      self.stop()
551
      return result
552
   def interact( self ):
553
      "Create a network and run our simple CLI."
554
      self.run( self, Cli )
555
   
556
def defaultNames( snames=None, hnames=None, dpnames=None ):
   "Return ( snames, hnames, dpnames ), creating a fresh generator for any None."
   snames = snames if snames is not None else nameGen( 's' )
   hnames = hnames if hnames is not None else nameGen( 'h' )
   dpnames = dpnames if dpnames is not None else nameGen( 'nl:' )
   return snames, hnames, dpnames
562

  
563
# Tree network
564

  
565
class TreeNet( Network ):
566
   "A tree-structured network with the specified depth and fanout"
567
   def __init__( self, depth, fanout, **kwargs):
568
      self.depth, self.fanout = depth, fanout
569
      Network.__init__( self, **kwargs )
570
   def treeNet( self, controller, depth, fanout, snames=None,
571
      hnames=None, dpnames=None ):
572
      """Return a tree network of the given depth and fanout as a triple:
573
         ( root, switches, hosts ), using the given switch, host and
574
         datapath name generators, with the switches connected to the given
575
         controller. If kernel=True, use the kernel datapath; otherwise the
576
         user datapath will be used."""
577
      # Ugly, but necessary (?) since defaults are only evaluated once
578
      snames, hnames, dpnames = defaultNames( snames, hnames, dpnames )
579
      if ( depth == 0 ):
580
         host = Host( hnames.next() )
581
         print host.name, ; flush()
582
         return host, [], [ host ]
583
      dp = dpnames.next() if self.kernel else None
584
      switch = Switch( snames.next(), dp )
585
      if not self.kernel: createLink( switch, controller )
586
      print switch.name, ; flush()
587
      switches, hosts = [ switch ], []
588
      for i in range( 0, fanout ):
589
         child, slist, hlist = self.treeNet( controller, 
590
            depth - 1, fanout, snames, hnames, dpnames )
591
         createLink( switch, child )
592
         switches += slist
593
         hosts += hlist
594
      return switch, switches, hosts
595
   def makeNet( self, controller ):
596
      root, switches, hosts = self.treeNet( controller,
597
         self.depth, self.fanout )
598
      return switches, hosts
599
   
600
# Grid network
601

  
602
class GridNet( Network ):
603
   """An N x M grid/mesh network of switches, with hosts at the edges.
604
      This class also demonstrates creating a somewhat complicated
605
      topology."""
606
   def __init__( self, n, m, linear=False, **kwargs ):
607
      self.n, self.m, self.linear = n, m, linear and m == 1
608
      Network.__init__( self, **kwargs )
609
   def makeNet( self, controller ):
610
      snames, hnames, dpnames = defaultNames()
611
      n, m = self.n, self.m
612
      hosts = []
613
      switches = []
614
      kernel = self.kernel
615
      rows = []
616
      if not self.linear:
617
         print "*** gridNet: creating", n, "x", m, "grid of switches" ; flush()
618
      for y in range( 0, m ):
619
         row = []
620
         for x in range( 0, n ):
621
            dp = dpnames.next() if kernel else None
622
            switch = Switch( snames.next(), dp )
623
            if not kernel: createLink( switch, controller )
624
            row.append( switch )
625
            switches += [ switch ]
626
            print switch.name, ; flush()
627
         rows += [ row ]
628
      # Hook up rows
629
      for row in rows:
630
         previous = None
631
         for switch in row:
632
            if previous is not None:
633
               createLink( switch, previous )
634
            previous = switch
635
         h1, h2 = Host( hnames.next() ), Host( hnames.next() )
636
         createLink( h1, row[ 0 ] )
637
         createLink( h2, row[ -1 ] )
638
         hosts += [ h1, h2 ]
639
         print h1.name, h2.name, ; flush()
640
      # Return here if we're using this to make a linear network
641
      if self.linear: return switches, hosts
642
      # Hook up columns
643
      for x in range( 0, n ):
644
         previous = None
645
         for y in range( 0, m ):
646
            switch = rows[ y ][ x ]
647
            if previous is not None:
648
               createLink( switch, previous )
649
            previous = switch
650
         h1, h2 = Host( hnames.next() ), Host( hnames.next() )
651
         createLink( h1, rows[ 0 ][ x ] )
652
         createLink( h2, rows[ -1 ][ x ] )
653
         hosts += [ h1, h2 ]
654
         print h1.name, h2.name, ; flush()
655
      return switches, hosts
656

  
657
class LinearNet( GridNet ):
   "A network consisting of two hosts connected by a string of switches."
   def __init__( self, switchCount, **kwargs ):
      # Implemented as a degenerate ( switchCount x 1 ) linear grid
      self.switchCount = switchCount
      GridNet.__init__( self, switchCount, 1, linear=True, **kwargs )
662
      
663
# Tests
664

  
665
def parsePing( pingOutput ):
   """Parse ping output and return ( sent, received ) packet counts.
      Exits the program if the output cannot be parsed."""
   r = r'(\d+) packets transmitted, (\d+) received'
   m = re.search( r, pingOutput )
   # 'is None' rather than '== None' (PEP 8 identity comparison)
   if m is None:
      # Single-argument parenthesized print yields identical output
      # under both python 2 and python 3
      print( "*** Error: could not parse ping output: " + pingOutput )
      exit( 1 )
   sent, received = int( m.group( 1 ) ), int( m.group( 2 ) )
   return sent, received
674
   
675
def pingTest( controllers=[], switches=[], hosts=[], verbose=False ):
676
   "Test that each host can reach every other host."
677
   packets = 0 ; lost = 0
678
   for node in hosts:
679
      if verbose: 
680
         print node.name, "->", ; flush()
681
      for dest in hosts: 
682
         if node != dest:
683
            result = node.cmd( 'ping -c1 ' + dest.IP() )
684
            sent, received = parsePing( result )
685
            packets += sent
686
            if received > sent:
687
               print "*** Error: received too many packets"
688
               print result
689
               node.cmdPrint( 'route' )
690
               exit( 1 )
691
            lost += sent - received
692
            if verbose: 
693
               print ( dest.name if received else "X" ), ; flush()
694
      if verbose: print
695
   ploss = 100 * lost/packets
696
   if verbose:
697
      print "%d%% packet loss (%d/%d lost)" % ( ploss, lost, packets )
698
      flush()
699
   return ploss
700

  
701
def pingTestVerbose( controllers, switches, hosts ):
   "Run pingTest verbosely and return a packet-loss summary string."
   ploss = pingTest( controllers, switches, hosts, verbose=True )
   return "%d %% packet loss" % ploss
704
 
705
def parseIperf( iperfOutput ):
   "Parse iperf output and return the reported bandwidth string."
   match = re.search( r'([\d\.]+ \w+/sec)', iperfOutput )
   if match:
      return match.group( 1 )
   return "could not parse iperf output"
710
    
711
def iperf( hosts, verbose=False ):
712
   "Run iperf between two hosts."
713
   assert len( hosts ) == 2
714
   host1, host2 = hosts[ 0 ], hosts[ 1 ]
715
   host1.cmd( 'killall -9 iperf') # XXX shouldn't be global killall
716
   server = host1.cmd( 'iperf -s &' )
717
   if verbose: print server ; flush()
718
   client = host2.cmd( 'iperf -t 5 -c ' + host1.IP() )
719
   if verbose: print client ; flush()
720
   server = host1.cmd( 'kill -9 %iperf' )
721
   if verbose: print server; flush()
722
   return [ parseIperf( server ), parseIperf( client ) ]
723
   
724
def iperfTest( controllers, switches, hosts, verbose=False ):
725
   "Simple iperf test between two hosts."
726
   if verbose: print "*** Starting ping test"   
727
   h0, hN = hosts[ 0 ], hosts[ -1 ]
728
   print "*** iperfTest: Testing bandwidth between", 
729
   print h0.name, "and", hN.name
730
   result = iperf( [ h0, hN], verbose )
731
   print "*** result:", result
732
   return result
733

  
734
# Simple CLI
735

  
736
class Cli( object ):
737
   "Simple command-line interface to talk to nodes."
738
   cmds = [ '?', 'help', 'nodes', 'sh', 'pingtest', 'iperf', 'net', 'exit' ]
739
   def __init__( self, controllers, switches, hosts ):
740
      self.controllers = controllers
741
      self.switches = switches
742
      self.hosts = hosts
743
      self.nodemap = {}
744
      self.nodelist = controllers + switches + hosts
745
      for node in self.nodelist:
746
         self.nodemap[ node.name ] = node
747
      self.run()
748
   # Commands
749
   def help( self, args ):
750
      "Semi-useful help for CLI"
751
      print "Available commands are:", self.cmds
752
      print
753
      print "You may also send a command to a node using:"
754
      print "  <node> command {args}"
755
      print "For example:"
756
      print "  mininet> h0 ifconfig"
757
      print
758
      print "The interpreter automatically substitutes IP addresses"
759
      print "for node names, so commands like"
760
      print "  mininet> h0 ping -c1 h1"
761
      print "should work."
762
      print
763
      print "Interactive commands are not really supported yet,"
764
      print "so please limit commands to ones that do not"
765
      print "require user interaction and will terminate"
766
      print "after a reasonable amount of time."
767
   def nodes( self, args ):
768
      "List available nodes"
769
      print "available nodes are:", [ node.name for node in self.nodelist]
770
   def sh( self, args ):
771
      "Run an external shell command"
772
      call( [ 'sh', '-c' ] + args )
773
   def pingtest( self, args ):
774
      pingTest( self.controllers, self.switches, self.hosts, verbose=True )
775
   def net( self, args ):
776
      for switch in self.switches:
777
         print switch.name, "<->",
778
         for intf in switch.intfs:
779
            node, remoteIntf = switch.connection[ intf ]
780
            print node.name,
781
         print
782
   def iperf( self, args ):
783
      if len( args ) != 2:
784
         print "usage: iperf <h1> <h2>"
785
         return
786
      for host in args:
787
         if host not in self.nodemap:
788
            print "iperf: cannot find host:", host
789
            return
790
      iperf( [ self.nodemap[ h ] for h in args ], verbose=True )
791
   # Interpreter
792
   def run( self ):
793
      "Read and execute commands."
794
      print "*** cli: starting"
795
      while True:
796
         print "mininet> ", ; flush()
797
         input = sys.stdin.readline()
798
         if input == '': break
799
         if input[ -1 ] == '\n': input = input[ : -1 ]
800
         cmd = input.split( ' ' )
801
         first = cmd[ 0 ]
802
         rest = cmd[ 1: ]
803
         if first in self.cmds and hasattr( self, first ):
804
            getattr( self, first )( rest )
805
         elif first in self.nodemap and rest != []:
806
            node = self.nodemap[ first ]
807
            # Substitute IP addresses for node names in command
808
            rest = [ self.nodemap[ arg ].IP() if arg in self.nodemap else arg
809
               for arg in rest ]
810
            rest = ' '.join( rest )
811
            # Interactive commands don't work yet, and
812
            # there are still issues with control-c
813
            print "***", node.name, ": running", rest
814
            node.sendCmd( rest )
815
            while True:
816
               try:
817
                  done, data = node.monitor()
818
                  print data,
819
                  if done: break
820
               except KeyboardInterrupt: node.sendInt()
821
            print
822
         elif first == '': pass
823
         elif first in [ 'exit', 'quit' ]: break
824
         elif first == '?': self.help( rest )
825
         else: print "cli: unknown node or command: <", first, ">"
826
      print "*** cli: exiting"
827
   
828
def fixLimits():
   "Fix ridiculously small resource limits."
   # Raise ( soft, hard ) limits for process count and open files
   for res, limits in ( ( RLIMIT_NPROC, ( 4096, 8192 ) ),
         ( RLIMIT_NOFILE, ( 16384, 32768 ) ) ):
      setrlimit( res, limits )
832

  
833
def init():
834
   "Initialize Mininet."
835
   if os.getuid() != 0: 
836
      # Note: this script must be run as root 
837
      # Perhaps we should do so automatically!
838
      print "*** Mininet must run as root."; exit( 1 )
839
   # If which produces no output, then netns is not in the path.
840
   # May want to loosen this to handle netns in the current dir.
841
   if not quietRun( [ 'which', 'netns' ] ):
842
       raise Exception( "Could not find netns; see INSTALL" )
843
   fixLimits()
844

  
845
if __name__ == '__main__':
846
   init()
847
   results = {}
848
   print "*** Welcome to Mininet!"
849
   print "*** Look in examples/ for more examples\n"
850
   print "*** Testing Mininet with kernel and user datapath"
851
   for datapath in [ 'kernel', 'user' ]:
852
      k = datapath == 'kernel'
853
      network = TreeNet( depth=2, fanout=4, kernel=k)
854
      result = network.run( pingTestVerbose )
855
      results[ datapath ] = result
856
   print "*** Test results:", results
mininet/cleanup
1
#!/usr/bin/python
2

  
3
"""
4
Unfortunately, Mininet and OpenFlow don't always clean up
5
properly after themselves. Until they do (or until cleanup
6
functionality is integrated into the python code), this
7
script may be used to get rid of unwanted garbage. It may
8
also get rid of 'false positives', but hopefully nothing
9
irreplaceable!
10
"""
11

  
12
from subprocess import Popen, PIPE
13
import re
14

  
15
from mininet import quietRun
16

  
17
def sh( cmd ):
   "Print a command and send it to the shell"
   print( cmd )
   shell = Popen( [ '/bin/sh', '-c', cmd ], stdout=PIPE )
   return shell.communicate()[ 0 ]
22

  
23
def cleanUpScreens():
   "Remove moldy old screen sessions."
   # Session names of interest look like: <pid>.{h,s,c}<N>
   pattern = r'(\d+.[hsc]\d+)'
   for line in sh( 'screen -ls' ).split( '\n' ):
      m = re.search( pattern, line )
      if m is not None:
         quietRun( 'screen -S ' + m.group( 1 ) + ' -X kill' )
31
 
32
def cleanup():
33
   """Clean up junk which might be left over from old runs;
34
      do fast stuff before slow dp and link removal!"""
35
      
36
   print "*** Removing excess controllers/ofprotocols/ofdatapaths/pings/noxes"
37
   zombies = 'controller ofprotocol ofdatapath ping nox_core lt-nox_core '
38
   zombies += 'udpbwtest'
39
   # Note: real zombie processes can't actually be killed, since they 
40
   # are already (un)dead. Then again,
41
   # you can't connect to them either, so they're mostly harmless.
42
   sh( 'killall -9 ' + zombies + ' 2> /dev/null' )
43

  
44
   print "*** Removing junk from /tmp"
45
   sh( 'rm -f /tmp/vconn* /tmp/vlogs* /tmp/*.out /tmp/*.log' )
46

  
47
   print "*** Removing old screen sessions"
48
   cleanUpScreens()
49

  
50
   print "*** Removing excess kernel datapaths"
51
   dps = sh( "ps ax | egrep -o 'dp[0-9]+' | sed 's/dp/nl:/'" ).split( '\n')
52
   for dp in dps: 
53
      if dp != '': sh( 'dpctl deldp ' + dp )
54
      
55
   print "*** Removing all links of the pattern foo-ethX"
56
   links = sh( "ip link show | egrep -o '(\w+-eth\w+)'" ).split( '\n' )
57
   for link in links: 
58
      if link != '': sh( "ip link del " + link )
59

  
60
   print "*** Cleanup complete."
61

  
62
if __name__ == '__main__':
   cleanup()
mininet/mininet.py
1
#!/usr/bin/python
2

  
3
"""
4
Mininet: A simple networking testbed for OpenFlow!
5

  
6
Mininet creates scalable OpenFlow test networks by using
7
process-based virtualization and network namespaces. 
8

  
9
Simulated hosts are created as processes in separate network
10
namespaces. This allows a complete OpenFlow network to be simulated on
11
top of a single Linux kernel.
12

  
13
Each host has:
14
   A virtual console (pipes to a shell)
15
   A virtual interface (half of a veth pair)
16
   A parent shell (and possibly some child processes) in a namespace
17
   
18
Hosts have a network interface which is configured via ifconfig/ip
19
link/etc. with data network IP addresses (e.g. 192.168.123.2 )
20

  
21
This version supports both the kernel and user space datapaths
22
from the OpenFlow reference implementation.
23

  
24
In kernel datapath mode, the controller and switches are simply
25
processes in the root namespace.
26

  
27
Kernel OpenFlow datapaths are instantiated using dpctl(8), and are
28
attached to one side of a veth pair; the other side resides in the
29
host namespace. In this mode, switch processes can simply connect to the
30
controller via the loopback interface.
31

  
32
In user datapath mode, the controller and switches are full-service
33
nodes that live in their own network namespaces and have management
34
interfaces and IP addresses on a control network (e.g. 10.0.123.1,
35
currently routed although it could be bridged.)
36

  
37
In addition to a management interface, user mode switches also have
38
several switch interfaces, halves of veth pairs whose other halves
39
reside in the host nodes that the switches are connected to.
40

  
41
Naming:
42
   Host nodes are named h1-hN
43
   Switch nodes are named s0-sN
44
   Interfaces are named {nodename}-eth0 .. {nodename}-ethN,
45

  
46
Thoughts/TBD:
47

  
48
   It should be straightforward to add a function to read
49
   OpenFlowVMS spec files, but I haven't done so yet.
50
   For the moment, specifying configurations and tests in Python
51
   is straightforward and relatively concise.
52
   Soon, we may want to split the various subsystems (core,
53
   topology/network, cli, tests, etc.) into multiple modules.
54
   Currently nox support is in nox.py.
55
   We'd like to support OpenVSwitch as well as the reference
56
   implementation.
57
   
58
Bob Lantz
59
rlantz@cs.stanford.edu
60

  
61
History:
62
11/19/09 Initial revision (user datapath only)
63
11/19/09 Mininet demo at OpenFlow SWAI meeting
64
12/08/09 Kernel datapath support complete
65
12/09/09 Moved controller and switch routines into classes
66
12/12/09 Added subdivided network driver workflow
67
12/13/09 Added support for custom controller and switch classes
68
"""
69

  
70
from subprocess import call, check_call, Popen, PIPE, STDOUT
71
from time import sleep
72
import os, re, signal, sys, select
73
flush = sys.stdout.flush
74
from resource import setrlimit, RLIMIT_NPROC, RLIMIT_NOFILE
75

  
76
# Utility routines to make it easier to run commands
77

  
78
def run( cmd ):
   "Simple interface to subprocess.call()"
   args = cmd.split( ' ' )
   return call( args )
81

  
82
def checkRun( cmd ):
   "Simple interface to subprocess.check_call()"
   # Raises CalledProcessError if the command exits with nonzero status
   args = cmd.split( ' ' )
   check_call( args )
85
   
86
def quietRun( cmd ):
   "Run a command, routing stderr to stdout, and return the output."
   if isinstance( cmd, str ): cmd = cmd.split( ' ' )
   popen = Popen( cmd, stdout=PIPE, stderr=STDOUT)
   # We can't use Popen.communicate() because it uses select(),
   # which can't handle high file descriptor numbers!
   # poll() can, so we drain the pipe ourselves.
   output = ''
   poller = select.poll()
   poller.register( popen.stdout )
   while True:
      # Drain whatever output is currently available
      while poller.poll():
         chunk = popen.stdout.read( 1024 )
         if not chunk: break
         output += chunk
      # Stop once the process has actually exited
      if popen.poll() is not None: break
   return output
104
   
105
class Node( object ):
   """A virtual network node is simply a shell in a network namespace.
      We communicate with it using pipes."""
   # Class-level maps shared by ALL nodes: stdin/stdout file descriptor
   # number -> owning Node (populated in __init__, used by fdToNode)
   inToNode = {}
   outToNode = {}
   def __init__( self, name, inNamespace=True ):
      self.name = name
      closeFds = False # speed vs. memory use
      # xpg_echo is needed so we can echo our sentinel in sendCmd
      cmd = [ '/bin/bash', '-O', 'xpg_echo' ]
      self.inNamespace = inNamespace
      # Wrap the shell in netns to place it in its own network namespace
      if self.inNamespace: cmd = [ 'netns' ] + cmd
      self.shell = Popen( cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT,
         close_fds=closeFds )
      self.stdin = self.shell.stdin
      self.stdout = self.shell.stdout
      self.pollOut = select.poll()
      self.pollOut.register( self.stdout )
      # Maintain mapping between file descriptors and nodes
      # This could be useful for monitoring multiple nodes
      # using select.poll()
      self.outToNode[ self.stdout.fileno() ] = self
      self.inToNode[ self.stdin.fileno() ] = self
      self.pid = self.shell.pid
      # Interface bookkeeping: count, names, per-interface IPs, and
      # per-interface ( remoteNode, remoteIntf ) connections
      self.intfCount = 0
      self.intfs = []
      self.ips = {}
      self.connection = {}
      # waiting is True while a sendCmd awaits its sentinel
      self.waiting = False
      self.execed = False
   def fdToNode( self, f ):
      "Return the Node that owns file descriptor f, if any."
      node = self.outToNode.get( f )
      return node or self.inToNode.get( f )
   def cleanup( self ):
      "Drop our reference to the subshell."
      # Help python collect its garbage
      self.shell = None
   # Subshell I/O, commands and control
   # NOTE(review): parameter name 'max' shadows the builtin
   def read( self, max ): return os.read( self.stdout.fileno(), max )
   def write( self, data ): os.write( self.stdin.fileno(), data )
   def terminate( self ):
      "Kill our subshell with SIGKILL and clean up."
      os.kill( self.pid, signal.SIGKILL )
      self.cleanup()
   def stop( self ): self.terminate()
   def waitReadable( self ): self.pollOut.poll()
   def sendCmd( self, cmd ):
      """Send a command, followed by a command to echo a sentinel,
         and return without waiting for the command to complete."""
      assert not self.waiting
      # A trailing '&' backgrounds the command, so the sentinel echo
      # must be joined with '&' rather than ';'
      if cmd[ -1 ] == '&':
         separator = '&'
         cmd = cmd[ : -1 ]
      else: separator = ';'
      if isinstance( cmd, list): cmd = ' '.join( cmd )
      # The sentinel is ASCII 127 (octal 0177); requires xpg_echo
      self.write( cmd + separator + " echo -n '\\0177' \n")
      self.waiting = True
   def monitor( self ):
      "Monitor a command's output, returning (done, data)."
      assert self.waiting
      self.waitReadable()
      data = self.read( 1024 )
      # The command is done when the sentinel char ends the output
      if len( data ) > 0 and data[ -1 ] == chr( 0177 ):
         self.waiting = False
         return True, data[ : -1 ]
      else:
         return False, data
   def sendInt( self ):
      "Send ^C, hopefully interrupting a running subprocess."
      self.write( chr( 3 ) )
   def waitOutput( self ):
      """Wait for a command to complete (signaled by a sentinel
      character, ASCII(127) appearing in the output stream) and return
      the output, including trailing newline."""
      assert self.waiting
      output = ""
      while True:
         self.waitReadable()
         data = self.read( 1024 )
         if len(data) > 0  and data[ -1 ] == chr( 0177 ): 
            output += data[ : -1 ]
            break
         else: output += data
      self.waiting = False
      return output
   def cmd( self, cmd ):
      "Send a command, wait for output, and return it."
      self.sendCmd( cmd )
      return self.waitOutput()
   def cmdPrint( self, cmd ):
      "Call cmd, printing the command and output"
      print "***", self.name, ":", cmd
      result = self.cmd( cmd )
      print result,
      return result
   # Interface management, configuration, and routing
   def intfName( self, n):
      "Construct a canonical interface name node-intf for interface N."
      return self.name + '-eth' + `n`
   def newIntf( self ):
      "Reserve and return a new interface name for this node."
      intfName = self.intfName( self.intfCount)
      self.intfCount += 1
      self.intfs += [ intfName ]
      return intfName
   def setIP( self, intf, ip, bits ):
      "Set an interface's IP address via ifconfig; returns its output."
      result = self.cmd( [ 'ifconfig', intf, ip + bits, 'up' ] )
      self.ips[ intf ] = ip
      return result
   def setHostRoute( self, ip, intf ):
      "Add a route to the given IP address via intf."
      return self.cmd( 'route add -host ' + ip + ' dev ' + intf )
   def setDefaultRoute( self, intf ):
      "Set the default route to go through intf."
      self.cmd( 'ip route flush' )
      return self.cmd( 'route add default ' + intf )
   def IP( self ):
      "Return IP address of first interface"
      # Implicitly returns None when there are no interfaces
      if len( self.intfs ) > 0:
         return self.ips.get( self.intfs[ 0 ], None )
   def intfIsUp( self, intf ):
      "Check if one of our interfaces is up."
      # NOTE(review): 'intf' is ignored; this always checks
      # self.intfs[ 0 ] - confirm whether that is intended
      return 'UP' in self.cmd( 'ifconfig ' + self.intfs[ 0 ] )
   # Other methods  
   def __str__( self ): 
      result = self.name
      result += ": IP=" + self.IP() + " intfs=" + ','.join( self.intfs )
      result += " waiting=" +  `self.waiting`
      return result
233

  
234

  
235

  
236
class Host( Node ):
   "A Host is simply a Node, with no extra behavior."
239
      
240
class Controller( Node ):
   """A Controller is a Node that is running (or has execed) an
      OpenFlow controller."""
   def __init__( self, name, kernel=True, controller='controller',
      cargs='-v ptcp:', cdir=None ):
      # Remember how to launch the controller program
      self.controller = controller
      self.cargs = cargs
      self.cdir = cdir
      # With the kernel datapath, the controller stays in the root namespace
      Node.__init__( self, name, inNamespace=( not kernel ) )
   def start( self ):
      "Start <controller> <args> on controller, logging to /tmp/cN.log"
      logfile = '/tmp/' + self.name + '.log'
      if self.cdir is not None:
         self.cmdPrint( 'cd ' + self.cdir )
      launch = ( self.controller + ' ' + self.cargs +
         ' 1> ' + logfile + ' 2> ' + logfile + ' &' )
      self.cmdPrint( launch )
      self.execed = False # XXX Until I fix it
   def stop( self, controller='controller' ):
      "Stop controller cprog on controller"
      self.cmd( "kill %" + controller )
      self.terminate()
261
         
262
class Switch( Node ):
263
   """A Switch is a Node that is running (or has execed)
264
      an OpenFlow switch."""
265
   def __init__( self, name, datapath=None ):
266
      self.dp = datapath
267
      Node.__init__( self, name, inNamespace=( datapath == None ) )
268
   def startUserDatapath( self, controller ):
269
      """Start OpenFlow reference user datapath, 
270
         logging to /tmp/sN-{ofd,ofp}.log"""
271
      ofdlog = '/tmp/' + self.name + '-ofd.log'
272
      ofplog = '/tmp/' + self.name + '-ofp.log'
273
      self.cmd( 'ifconfig lo up' )
274
      intfs = self.intfs[ 1 : ] # 0 is mgmt interface
275
      self.cmdPrint( 'ofdatapath -i ' + ','.join( intfs ) +
276
       ' ptcp: 1> ' + ofdlog + ' 2> '+ ofdlog + ' &' )
277
      self.cmdPrint( 'ofprotocol tcp:' + controller.IP() +
278
         ' tcp:localhost --fail=closed 1> ' + ofplog + ' 2>' + ofplog + ' &' )
279
   def stopUserDatapath( self ):
280
      "Stop OpenFlow reference user datapath."
281
      self.cmd( "kill %ofdatapath" )
282
      self.cmd( "kill %ofprotocol" )
283
   def startKernelDatapath( self, controller):
284
      "Start up switch using OpenFlow reference kernel datapath."
285
      ofplog = '/tmp/' + self.name + '-ofp.log'
286
      quietRun( 'ifconfig lo up' )
287
      # Delete local datapath if it exists;
288
      # then create a new one monitoring the given interfaces
289
      quietRun( 'dpctl deldp ' + self.dp )
290
      self.cmdPrint( 'dpctl adddp ' + self.dp )
291
      self.cmdPrint( 'dpctl addif ' + self.dp + ' ' + ' '.join( self.intfs ) )
292
      # Run protocol daemon
293
      self.cmdPrint( 'ofprotocol' +
294
         ' ' + self.dp + ' tcp:127.0.0.1 ' + 
295
         ' --fail=closed 1> ' + ofplog + ' 2>' + ofplog + ' &' )
296
      self.execed = False # XXX until I fix it
297
   def stopKernelDatapath( self ):
... This diff was truncated because it exceeds the maximum size that can be displayed.

Also available in: Unified diff