mininet / mininet / net.py @ d5886525
History | View | Annotate | Download (17.9 KB)
1 |
"""
|
---|---|
2 |
|
3 |
Mininet: A simple networking testbed for OpenFlow!
|
4 |
|
5 |
author: Bob Lantz (rlantz@cs.stanford.edu)
|
6 |
author: Brandon Heller (brandonh@stanford.edu)
|
7 |
|
8 |
Mininet creates scalable OpenFlow test networks by using
|
9 |
process-based virtualization and network namespaces.
|
10 |
|
11 |
Simulated hosts are created as processes in separate network
|
12 |
namespaces. This allows a complete OpenFlow network to be simulated on
|
13 |
top of a single Linux kernel.
|
14 |
|
15 |
Each host has:
|
16 |
|
17 |
A virtual console (pipes to a shell)
|
18 |
A virtual interface (half of a veth pair)
|
19 |
A parent shell (and possibly some child processes) in a namespace
|
20 |
|
21 |
Hosts have a network interface which is configured via ifconfig/ip
|
22 |
link/etc.
|
23 |
|
24 |
This version supports both the kernel and user space datapaths
|
25 |
from the OpenFlow reference implementation.
|
26 |
|
27 |
In kernel datapath mode, the controller and switches are simply
|
28 |
processes in the root namespace.
|
29 |
|
30 |
Kernel OpenFlow datapaths are instantiated using dpctl(8), and are
|
31 |
attached to one side of a veth pair; the other side resides in the
|
32 |
host namespace. In this mode, switch processes can simply connect to the
|
33 |
controller via the loopback interface.
|
34 |
|
35 |
In user datapath mode, the controller and switches are full-service
|
36 |
nodes that live in their own network namespaces and have management
|
37 |
interfaces and IP addresses on a control network (e.g. 10.0.123.1,
|
38 |
currently routed although it could be bridged.)
|
39 |
|
40 |
In addition to a management interface, user mode switches also have
|
41 |
several switch interfaces, halves of veth pairs whose other halves
|
42 |
reside in the host nodes that the switches are connected to.
|
43 |
|
44 |
Naming:
|
45 |
|
46 |
Host nodes are named h1-hN
|
47 |
Switch nodes are named s0-sN
|
48 |
Interfaces are named { nodename }-eth0 .. { nodename }-ethN
|
49 |
|
50 |
"""
|
51 |
|
52 |
import os |
53 |
import re |
54 |
import signal |
55 |
from time import sleep |
56 |
|
57 |
from mininet.cli import CLI |
58 |
from mininet.log import info, error |
59 |
from mininet.node import KernelSwitch, OVSKernelSwitch |
60 |
from mininet.util import quietRun, fixLimits |
61 |
from mininet.util import makeIntfPair, moveIntf |
62 |
from mininet.xterm import cleanUpScreens, makeXterms |
63 |
|
64 |
DATAPATHS = [ 'kernel' ] #[ 'user', 'kernel' ] |
65 |
|
66 |
def init():
    """Initialize Mininet.
       Verify that we are running as root and that the netns utility
       is available, then raise OS resource limits.
       Exits or raises on failure."""
    if os.getuid() != 0:
        # Note: this script must be run as root
        # Perhaps we should do so automatically!
        # BUG FIX: parenthesized print works under both Python 2 and 3;
        # the bare print statement is Python-2-only.
        print( "*** Mininet must run as root." )
        exit( 1 )
    # If which produces no output, then netns is not in the path.
    # May want to loosen this to handle netns in the current dir.
    if not quietRun( [ 'which', 'netns' ] ):
        raise Exception( "Could not find netns; see INSTALL" )
    fixLimits()
78 |
|
79 |
class Mininet( object ): |
80 |
"Network emulation with hosts spawned in network namespaces."
|
81 |
|
82 |
    def __init__( self, topo, switch, host, controller, cparams,
                 build=True, xterms=False, cleanup=False,
                 inNamespace=False,
                 autoSetMacs=False, autoStaticArp=False ):
        """Create Mininet object.
           topo: Topo object
           switch: Switch class
           host: Host class
           controller: Controller class
           cparams: ControllerParams object
           build: build now from topo?
           xterms: if build now, spawn xterms?
           cleanup: if build now, cleanup before creating?
           inNamespace: spawn switches and controller in net namespaces?
           autoSetMacs: set MAC addrs to DPIDs?
           autoStaticArp: set all-pairs static MAC addrs?"""
        self.topo = topo
        self.switch = switch
        self.host = host
        self.controller = controller
        self.cparams = cparams
        self.nodes = {} # dpid to Node{ Host, Switch } objects
        self.controllers = {} # controller name to Controller objects
        self.dps = 0 # number of created kernel datapaths
        self.inNamespace = inNamespace
        self.xterms = xterms
        self.cleanup = cleanup
        self.autoSetMacs = autoSetMacs
        self.autoStaticArp = autoStaticArp

        self.terms = [] # list of spawned xterm processes

        if build:
            self.build()
|
116 |
|
117 |
def _addHost( self, dpid ): |
118 |
"""Add host.
|
119 |
dpid: DPID of host to add"""
|
120 |
host = self.host( 'h_' + self.topo.name( dpid ) ) |
121 |
# for now, assume one interface per host.
|
122 |
host.intfs.append( 'h_' + self.topo.name( dpid ) + '-eth0' ) |
123 |
self.nodes[ dpid ] = host
|
124 |
#info( '%s ' % host.name )
|
125 |
|
126 |
def _addSwitch( self, dpid ): |
127 |
"""Add switch.
|
128 |
dpid: DPID of switch to add"""
|
129 |
sw = None
|
130 |
swDpid = None
|
131 |
if self.autoSetMacs: |
132 |
swDpid = dpid |
133 |
if self.switch is KernelSwitch or self.switch is OVSKernelSwitch: |
134 |
sw = self.switch( 's_' + self.topo.name( dpid ), dp = self.dps, |
135 |
dpid = swDpid ) |
136 |
self.dps += 1 |
137 |
else:
|
138 |
sw = self.switch( 's_' + self.topo.name( dpid ) ) |
139 |
self.nodes[ dpid ] = sw
|
140 |
|
141 |
def _addLink( self, src, dst ): |
142 |
"""Add link.
|
143 |
src: source DPID
|
144 |
dst: destination DPID"""
|
145 |
srcPort, dstPort = self.topo.port( src, dst )
|
146 |
srcNode = self.nodes[ src ]
|
147 |
dstNode = self.nodes[ dst ]
|
148 |
srcIntf = srcNode.intfName( srcPort ) |
149 |
dstIntf = dstNode.intfName( dstPort ) |
150 |
makeIntfPair( srcIntf, dstIntf ) |
151 |
srcNode.intfs.append( srcIntf ) |
152 |
dstNode.intfs.append( dstIntf ) |
153 |
srcNode.ports[ srcPort ] = srcIntf |
154 |
dstNode.ports[ dstPort ] = dstIntf |
155 |
#info( '\n' )
|
156 |
#info( 'added intf %s to src node %x\n' % ( srcIntf, src ) )
|
157 |
#info( 'added intf %s to dst node %x\n' % ( dstIntf, dst ) )
|
158 |
if srcNode.inNamespace:
|
159 |
#info( 'moving src w/inNamespace set\n' )
|
160 |
moveIntf( srcIntf, srcNode ) |
161 |
if dstNode.inNamespace:
|
162 |
#info( 'moving dst w/inNamespace set\n' )
|
163 |
moveIntf( dstIntf, dstNode ) |
164 |
srcNode.connection[ srcIntf ] = ( dstNode, dstIntf ) |
165 |
dstNode.connection[ dstIntf ] = ( srcNode, srcIntf ) |
166 |
|
167 |
def _addController( self, controller ): |
168 |
"""Add controller.
|
169 |
controller: Controller class"""
|
170 |
controller = self.controller( 'c0', self.inNamespace ) |
171 |
if controller: # allow controller-less setups |
172 |
self.controllers[ 'c0' ] = controller |
173 |
|
174 |
# Control network support:
|
175 |
#
|
176 |
# Create an explicit control network. Currently this is only
|
177 |
# used by the user datapath configuration.
|
178 |
#
|
179 |
# Notes:
|
180 |
#
|
181 |
# 1. If the controller and switches are in the same ( e.g. root )
|
182 |
# namespace, they can just use the loopback connection.
|
183 |
# We may wish to do this for the user datapath as well as the
|
184 |
# kernel datapath.
|
185 |
#
|
186 |
# 2. If we can get unix domain sockets to work, we can use them
|
187 |
# instead of an explicit control network.
|
188 |
#
|
189 |
# 3. Instead of routing, we could bridge or use 'in-band' control.
|
190 |
#
|
191 |
# 4. Even if we dispense with this in general, it could still be
|
192 |
# useful for people who wish to simulate a separate control
|
193 |
# network ( since real networks may need one! )
|
194 |
|
195 |
    def _configureControlNetwork( self ):
        """Configure the control network.
           Currently only a routed control network is supported."""
        self._configureRoutedControlNetwork()
|
198 |
|
199 |
def _configureRoutedControlNetwork( self ): |
200 |
"""Configure a routed control network on controller and switches.
|
201 |
For use with the user datapath only right now.
|
202 |
TODO( brandonh ) test this code!
|
203 |
"""
|
204 |
# params were: controller, switches, ips
|
205 |
|
206 |
controller = self.controllers[ 'c0' ] |
207 |
info( '%s <-> ' % controller.name )
|
208 |
for switchDpid in self.topo.switches(): |
209 |
switch = self.nodes[ switchDpid ]
|
210 |
info( '%s ' % switch.name )
|
211 |
sip = self.topo.ip( switchDpid )#ips.next() |
212 |
sintf = switch.intfs[ 0 ]
|
213 |
node, cintf = switch.connection[ sintf ] |
214 |
if node != controller:
|
215 |
error( '*** Error: switch %s not connected to correct'
|
216 |
'controller' %
|
217 |
switch.name ) |
218 |
exit( 1 ) |
219 |
controller.setIP( cintf, self.cparams.ip, '/' + |
220 |
self.cparams.subnetSize )
|
221 |
switch.setIP( sintf, sip, '/' + self.cparams.subnetSize ) |
222 |
controller.setHostRoute( sip, cintf ) |
223 |
switch.setHostRoute( self.cparams.ip, sintf )
|
224 |
info( '\n' )
|
225 |
info( '*** Testing control network\n' )
|
226 |
while not controller.intfIsUp( controller.intfs[ 0 ] ): |
227 |
info( '*** Waiting for %s to come up\n',
|
228 |
controller.intfs[ 0 ] )
|
229 |
sleep( 1 )
|
230 |
for switchDpid in self.topo.switches(): |
231 |
switch = self.nodes[ switchDpid ]
|
232 |
while not switch.intfIsUp( switch.intfs[ 0 ] ): |
233 |
info( '*** Waiting for %s to come up\n' %
|
234 |
switch.intfs[ 0 ] )
|
235 |
sleep( 1 )
|
236 |
if self.ping( hosts=[ switch, controller ] ) != 0: |
237 |
error( '*** Error: control network test failed\n' )
|
238 |
exit( 1 ) |
239 |
info( '\n' )
|
240 |
|
241 |
def _configHosts( self ): |
242 |
"Configure a set of hosts."
|
243 |
# params were: hosts, ips
|
244 |
for hostDpid in self.topo.hosts(): |
245 |
host = self.nodes[ hostDpid ]
|
246 |
hintf = host.intfs[ 0 ]
|
247 |
host.setIP( hintf, self.topo.ip( hostDpid ),
|
248 |
'/' + str( self.cparams.subnetSize ) ) |
249 |
host.setDefaultRoute( hintf ) |
250 |
# You're low priority, dude!
|
251 |
quietRun( 'renice +18 -p ' + repr( host.pid ) ) |
252 |
info( '%s ', host.name )
|
253 |
info( '\n' )
|
254 |
|
255 |
def build( self ): |
256 |
"""Build mininet.
|
257 |
At the end of this function, everything should be connected
|
258 |
and up."""
|
259 |
if self.cleanup: |
260 |
pass # cleanup |
261 |
# validate topo?
|
262 |
info( '*** Adding controller\n' )
|
263 |
self._addController( self.controller ) |
264 |
info( '*** Creating network\n' )
|
265 |
info( '*** Adding hosts:\n' )
|
266 |
for host in sorted( self.topo.hosts() ): |
267 |
self._addHost( host )
|
268 |
info( '0x%x ' % host )
|
269 |
info( '\n*** Adding switches:\n' )
|
270 |
for switch in sorted( self.topo.switches() ): |
271 |
self._addSwitch( switch )
|
272 |
info( '0x%x ' % switch )
|
273 |
info( '\n*** Adding edges:\n' )
|
274 |
for src, dst in sorted( self.topo.edges() ): |
275 |
self._addLink( src, dst )
|
276 |
info( '(0x%x, 0x%x) ' % ( src, dst ) )
|
277 |
info( '\n' )
|
278 |
|
279 |
if self.inNamespace: |
280 |
info( '*** Configuring control network\n' )
|
281 |
self._configureControlNetwork()
|
282 |
|
283 |
info( '*** Configuring hosts\n' )
|
284 |
self._configHosts()
|
285 |
|
286 |
if self.xterms: |
287 |
self.startXterms()
|
288 |
if self.autoSetMacs: |
289 |
self.setMacs()
|
290 |
if self.autoStaticArp: |
291 |
self.staticArp()
|
292 |
|
293 |
def switchNodes( self ): |
294 |
"Return switch nodes."
|
295 |
return [ self.nodes[ dpid ] for dpid in self.topo.switches() ] |
296 |
|
297 |
def hostNodes( self ): |
298 |
"Return host nodes."
|
299 |
return [ self.nodes[ dpid ] for dpid in self.topo.hosts() ] |
300 |
|
301 |
def startXterms( self ): |
302 |
"Start an xterm for each node in the topo."
|
303 |
info( "*** Running xterms on %s\n" % os.environ[ 'DISPLAY' ] ) |
304 |
cleanUpScreens() |
305 |
self.terms += makeXterms( self.controllers.values(), 'controller' ) |
306 |
self.terms += makeXterms( self.switchNodes(), 'switch' ) |
307 |
self.terms += makeXterms( self.hostNodes(), 'host' ) |
308 |
|
309 |
def stopXterms( self ): |
310 |
"Kill each xterm."
|
311 |
# Kill xterms
|
312 |
for term in self.terms: |
313 |
os.kill( term.pid, signal.SIGKILL ) |
314 |
cleanUpScreens() |
315 |
|
316 |
def setMacs( self ): |
317 |
"""Set MAC addrs to correspond to datapath IDs on hosts.
|
318 |
Assume that the host only has one interface."""
|
319 |
for dpid in self.topo.hosts(): |
320 |
hostNode = self.nodes[ dpid ]
|
321 |
hostNode.setMAC( hostNode.intfs[ 0 ], dpid )
|
322 |
|
323 |
def staticArp( self ): |
324 |
"Add all-pairs ARP entries to remove the need to handle broadcast."
|
325 |
for src in self.topo.hosts(): |
326 |
srcNode = self.nodes[ src ]
|
327 |
for dst in self.topo.hosts(): |
328 |
if src != dst:
|
329 |
srcNode.setARP( dst, dst ) |
330 |
|
331 |
def start( self ): |
332 |
"Start controller and switches"
|
333 |
info( '*** Starting controller\n' )
|
334 |
for cnode in self.controllers.values(): |
335 |
cnode.start() |
336 |
info( '*** Starting %s switches\n' % len( self.topo.switches() ) ) |
337 |
for switchDpid in self.topo.switches(): |
338 |
switch = self.nodes[ switchDpid ]
|
339 |
#info( 'switch = %s' % switch )
|
340 |
info( '0x%x ' % switchDpid )
|
341 |
switch.start( self.controllers )
|
342 |
info( '\n' )
|
343 |
|
344 |
def stop( self ): |
345 |
"Stop the controller(s), switches and hosts"
|
346 |
if self.terms: |
347 |
info( '*** Stopping %i terms\n' % len( self.terms ) ) |
348 |
self.stopXterms()
|
349 |
info( '*** Stopping %i hosts\n' % len( self.topo.hosts() ) ) |
350 |
for hostDpid in self.topo.hosts(): |
351 |
host = self.nodes[ hostDpid ]
|
352 |
info( '%s ' % host.name )
|
353 |
host.terminate() |
354 |
info( '\n' )
|
355 |
info( '*** Stopping %i switches\n' % len( self.topo.switches() ) ) |
356 |
for switchDpid in self.topo.switches(): |
357 |
switch = self.nodes[ switchDpid ]
|
358 |
info( '%s' % switch.name )
|
359 |
switch.stop() |
360 |
info( '\n' )
|
361 |
info( '*** Stopping controller\n' )
|
362 |
for cnode in self.controllers.values(): |
363 |
cnode.stop() |
364 |
info( '*** Test complete\n' )
|
365 |
|
366 |
def run( self, test, **params ): |
367 |
"Perform a complete start/test/stop cycle."
|
368 |
self.start()
|
369 |
info( '*** Running test\n' )
|
370 |
result = getattr( self, test )( **params ) |
371 |
self.stop()
|
372 |
return result
|
373 |
|
374 |
@staticmethod
|
375 |
def _parsePing( pingOutput ): |
376 |
"Parse ping output and return packets sent, received."
|
377 |
r = r'(\d+) packets transmitted, (\d+) received'
|
378 |
m = re.search( r, pingOutput ) |
379 |
if m == None: |
380 |
error( '*** Error: could not parse ping output: %s\n' %
|
381 |
pingOutput ) |
382 |
exit( 1 ) |
383 |
sent, received = int( m.group( 1 ) ), int( m.group( 2 ) ) |
384 |
return sent, received
|
385 |
|
386 |
def ping( self, hosts=None ): |
387 |
"""Ping between all specified hosts.
|
388 |
hosts: list of host DPIDs
|
389 |
returns: ploss packet loss percentage"""
|
390 |
#self.start()
|
391 |
# check if running - only then, start?
|
392 |
packets = 0
|
393 |
lost = 0
|
394 |
ploss = None
|
395 |
if not hosts: |
396 |
hosts = self.topo.hosts()
|
397 |
info( '*** Ping: testing ping reachability\n' )
|
398 |
for nodeDpid in hosts: |
399 |
node = self.nodes[ nodeDpid ]
|
400 |
info( '%s -> ' % node.name )
|
401 |
for destDpid in hosts: |
402 |
dest = self.nodes[ destDpid ]
|
403 |
if node != dest:
|
404 |
result = node.cmd( 'ping -c1 ' + dest.IP() )
|
405 |
sent, received = self._parsePing( result )
|
406 |
packets += sent |
407 |
if received > sent:
|
408 |
error( '*** Error: received too many packets' )
|
409 |
error( '%s' % result )
|
410 |
node.cmdPrint( 'route' )
|
411 |
exit( 1 ) |
412 |
lost += sent - received |
413 |
info( ( '%s ' % dest.name ) if received else 'X ' ) |
414 |
info( '\n' )
|
415 |
ploss = 100 * lost / packets
|
416 |
info( "*** Results: %i%% dropped (%d/%d lost)\n" %
|
417 |
( ploss, lost, packets ) ) |
418 |
return ploss
|
419 |
|
420 |
def pingAll( self ): |
421 |
"""Ping between all hosts.
|
422 |
returns: ploss packet loss percentage"""
|
423 |
return self.ping() |
424 |
|
425 |
def pingPair( self ): |
426 |
"""Ping between first two hosts, useful for testing.
|
427 |
returns: ploss packet loss percentage"""
|
428 |
hostsSorted = sorted( self.topo.hosts() ) |
429 |
hosts = [ hostsSorted[ 0 ], hostsSorted[ 1 ] ] |
430 |
return self.ping( hosts=hosts ) |
431 |
|
432 |
@staticmethod
|
433 |
def _parseIperf( iperfOutput ): |
434 |
"""Parse iperf output and return bandwidth.
|
435 |
iperfOutput: string
|
436 |
returns: result string"""
|
437 |
r = r'([\d\.]+ \w+/sec)'
|
438 |
m = re.search( r, iperfOutput ) |
439 |
if m:
|
440 |
return m.group( 1 ) |
441 |
else:
|
442 |
raise Exception( 'could not parse iperf output' ) |
443 |
|
444 |
    def iperf( self, hosts=None, l4Type='TCP', udpBw='10M',
              verbose=False ):
        """Run iperf between two hosts.
           hosts: list of host DPIDs; if None, uses opposite hosts
           l4Type: string, one of [ TCP, UDP ]
           udpBw: target bandwidth passed to iperf -b (UDP mode only)
           verbose: verbose printing
           returns: results two-element array of server and client speeds
           (prefixed with udpBw in UDP mode)"""
        if not hosts:
            # Default to the two "opposite" ends of the sorted host list.
            hostsSorted = sorted( self.topo.hosts() )
            hosts = [ hostsSorted[ 0 ], hostsSorted[ -1 ] ]
        else:
            assert len( hosts ) == 2
        host0 = self.nodes[ hosts[ 0 ] ]
        host1 = self.nodes[ hosts[ 1 ] ]
        info( '*** Iperf: testing ' + l4Type + ' bandwidth between ' )
        info( "%s and %s\n" % ( host0.name, host1.name ) )
        # Kill any stale iperf processes left over from earlier runs.
        host0.cmd( 'killall -9 iperf' )
        iperfArgs = 'iperf '
        bwArgs = ''
        if l4Type == 'UDP':
            iperfArgs += '-u '
            bwArgs = '-b ' + udpBw + ' '
        elif l4Type != 'TCP':
            raise Exception( 'Unexpected l4 type: %s' % l4Type )
        # Start the server in the background on host0...
        server = host0.cmd( iperfArgs + '-s &' )
        if verbose:
            info( '%s\n' % server )
        # ...run the client on host1 against it for 5 seconds...
        client = host1.cmd( iperfArgs + '-t 5 -c ' + host0.IP() + ' ' +
                            bwArgs )
        if verbose:
            info( '%s\n' % client )
        # ...then kill the server and parse both reports.
        server = host0.cmd( 'killall -9 iperf' )
        if verbose:
            info( '%s\n' % server )
        result = [ self._parseIperf( server ), self._parseIperf( client ) ]
        if l4Type == 'UDP':
            result.insert( 0, udpBw )
        info( '*** Results: %s\n' % result )
        return result
|
483 |
|
484 |
def iperfUdp( self, udpBw='10M' ): |
485 |
"Run iperf UDP test."
|
486 |
return self.iperf( l4Type='UDP', udpBw=udpBw ) |
487 |
|
488 |
def interact( self ): |
489 |
"Start network and run our simple CLI."
|
490 |
self.start()
|
491 |
result = CLI( self )
|
492 |
self.stop()
|
493 |
return result
|