mininet / mininet / net.py @ 92a28881
History | View | Annotate | Download (30.5 KB)
1 |
"""
|
---|---|
2 |
|
3 |
Mininet: A simple networking testbed for OpenFlow/SDN!
|
4 |
|
5 |
author: Bob Lantz (rlantz@cs.stanford.edu)
|
6 |
author: Brandon Heller (brandonh@stanford.edu)
|
7 |
|
8 |
Mininet creates scalable OpenFlow test networks by using
|
9 |
process-based virtualization and network namespaces.
|
10 |
|
11 |
Simulated hosts are created as processes in separate network
|
12 |
namespaces. This allows a complete OpenFlow network to be simulated on
|
13 |
top of a single Linux kernel.
|
14 |
|
15 |
Each host has:
|
16 |
|
17 |
A virtual console (pipes to a shell)
|
18 |
A virtual interface (half of a veth pair)
|
19 |
A parent shell (and possibly some child processes) in a namespace
|
20 |
|
21 |
Hosts have a network interface which is configured via ifconfig/ip
|
22 |
link/etc.
|
23 |
|
24 |
This version supports both the kernel and user space datapaths
|
25 |
from the OpenFlow reference implementation (openflowswitch.org)
|
26 |
as well as OpenVSwitch (openvswitch.org.)
|
27 |
|
28 |
In kernel datapath mode, the controller and switches are simply
|
29 |
processes in the root namespace.
|
30 |
|
31 |
Kernel OpenFlow datapaths are instantiated using dpctl(8), and are
|
32 |
attached to the one side of a veth pair; the other side resides in the
|
33 |
host namespace. In this mode, switch processes can simply connect to the
|
34 |
controller via the loopback interface.
|
35 |
|
36 |
In user datapath mode, the controller and switches can be full-service
|
37 |
nodes that live in their own network namespaces and have management
|
38 |
interfaces and IP addresses on a control network (e.g. 192.168.123.1,
|
39 |
currently routed although it could be bridged.)
|
40 |
|
41 |
In addition to a management interface, user mode switches also have
|
42 |
several switch interfaces, halves of veth pairs whose other halves
|
43 |
reside in the host nodes that the switches are connected to.
|
44 |
|
45 |
Consistent, straightforward naming is important in order to easily
|
46 |
identify hosts, switches and controllers, both from the CLI and
|
47 |
from program code. Interfaces are named to make it easy to identify
|
48 |
which interfaces belong to which node.
|
49 |
|
50 |
The basic naming scheme is as follows:
|
51 |
|
52 |
Host nodes are named h1-hN
|
53 |
Switch nodes are named s1-sN
|
54 |
Controller nodes are named c0-cN
|
55 |
Interfaces are named {nodename}-eth0 .. {nodename}-ethN
|
56 |
|
57 |
Note: If the network topology is created using mininet.topo, then
|
58 |
node numbers are unique among hosts and switches (e.g. we have
|
59 |
h1..hN and SN..SN+M) and also correspond to their default IP addresses
|
60 |
of 10.x.y.z/8 where x.y.z is the base-256 representation of N for
|
61 |
hN. This mapping allows easy determination of a node's IP
|
62 |
address from its name, e.g. h1 -> 10.0.0.1, h257 -> 10.0.1.1.
|
63 |
|
64 |
Note also that 10.0.0.1 can often be written as 10.1 for short, e.g.
|
65 |
"ping 10.1" is equivalent to "ping 10.0.0.1".
|
66 |
|
67 |
Currently we wrap the entire network in a 'mininet' object, which
|
68 |
constructs a simulated network based on a network topology created
|
69 |
using a topology object (e.g. LinearTopo) from mininet.topo or
|
70 |
mininet.topolib, and a Controller which the switches will connect
|
71 |
to. Several configuration options are provided for functions such as
|
72 |
automatically setting MAC addresses, populating the ARP table, or
|
73 |
even running a set of terminals to allow direct interaction with nodes.
|
74 |
|
75 |
After the network is created, it can be started using start(), and a
|
76 |
variety of useful tasks may be performed, including basic connectivity
|
77 |
and bandwidth tests and running the mininet CLI.
|
78 |
|
79 |
Once the network is up and running, test code can easily get access
|
80 |
to host and switch objects which can then be used for arbitrary
|
81 |
experiments, typically involving running a series of commands on the
|
82 |
hosts.
|
83 |
|
84 |
After all desired tests or activities have been completed, the stop()
|
85 |
method may be called to shut down the network.
|
86 |
|
87 |
"""
|
88 |
|
89 |
import os |
90 |
import re |
91 |
import select |
92 |
import signal |
93 |
from time import sleep |
94 |
from itertools import chain |
95 |
|
96 |
from mininet.cli import CLI |
97 |
from mininet.log import info, error, debug, output |
98 |
from mininet.node import Host, OVSKernelSwitch, Controller |
99 |
from mininet.link import Link, Intf |
100 |
from mininet.util import quietRun, fixLimits, numCores, ensureRoot |
101 |
from mininet.util import macColonHex, ipStr, ipParse, netParse, ipAdd |
102 |
from mininet.term import cleanUpScreens, makeTerms |
103 |
|
104 |
# Mininet version: should be consistent with README and LICENSE
# (checked by external packaging/release tooling, so update all three together)
VERSION = "2.1.0+"
|
106 |
|
107 |
class Mininet( object ):
    "Network emulation with hosts spawned in network namespaces."

    def __init__( self, topo=None, switch=OVSKernelSwitch, host=Host,
                  controller=Controller, link=Link, intf=Intf,
                  build=True, xterms=False, cleanup=False, ipBase='10.0.0.0/8',
                  inNamespace=False,
                  autoSetMacs=False, autoStaticArp=False, autoPinCpus=False,
                  listenPort=None ):
        """Create Mininet object.
           topo: Topo (topology) object or None
           switch: default Switch class
           host: default Host class/constructor
           controller: default Controller class/constructor
           link: default Link class/constructor
           intf: default Intf class/constructor
           ipBase: base IP address for hosts,
           build: build now from topo?
           xterms: if build now, spawn xterms?
           cleanup: if build now, cleanup before creating?
           inNamespace: spawn switches and controller in net namespaces?
           autoSetMacs: set MAC addrs automatically like IP addresses?
           autoStaticArp: set all-pairs static MAC addrs?
           autoPinCpus: pin hosts to (real) cores (requires CPULimitedHost)?
           listenPort: base listening port to open; will be incremented for
               each additional switch in the net if inNamespace=False"""
        # Default classes used by addHost/addSwitch/addController/addLink
        self.topo = topo
        self.switch = switch
        self.host = host
        self.controller = controller
        self.link = link
        self.intf = intf
        self.ipBase = ipBase
        # Split the base network into (numeric address, prefix length)
        # so addHost can allocate sequential IPs from it
        self.ipBaseNum, self.prefixLen = netParse( self.ipBase )
        self.nextIP = 1  # start for address allocation
        self.inNamespace = inNamespace
        self.xterms = xterms
        self.cleanup = cleanup
        self.autoSetMacs = autoSetMacs
        self.autoStaticArp = autoStaticArp
        self.autoPinCpus = autoPinCpus
        self.numCores = numCores()
        self.nextCore = 0  # next core for pinning hosts to CPUs
        self.listenPort = listenPort

        # Node bookkeeping: ordered lists by role...
        self.hosts = []
        self.switches = []
        self.controllers = []

        self.nameToNode = {}  # name to Node (Host/Switch) objects

        self.terms = []  # list of spawned xterm processes

        # One-time global setup (root check, resource limits);
        # must happen before any nodes are created
        Mininet.init()  # Initialize Mininet if necessary

        self.built = False
        if topo and build:
            self.build()
|
165 |
|
166 |
def addHost( self, name, cls=None, **params ): |
167 |
"""Add host.
|
168 |
name: name of host to add
|
169 |
cls: custom host class/constructor (optional)
|
170 |
params: parameters for host
|
171 |
returns: added host"""
|
172 |
# Default IP and MAC addresses
|
173 |
defaults = { 'ip': ipAdd( self.nextIP, |
174 |
ipBaseNum=self.ipBaseNum,
|
175 |
prefixLen=self.prefixLen ) +
|
176 |
'/%s' % self.prefixLen } |
177 |
if self.autoSetMacs: |
178 |
defaults[ 'mac'] = macColonHex( self.nextIP ) |
179 |
if self.autoPinCpus: |
180 |
defaults[ 'cores' ] = self.nextCore |
181 |
self.nextCore = ( self.nextCore + 1 ) % self.numCores |
182 |
self.nextIP += 1 |
183 |
defaults.update( params ) |
184 |
if not cls: |
185 |
cls = self.host
|
186 |
h = cls( name, **defaults ) |
187 |
self.hosts.append( h )
|
188 |
self.nameToNode[ name ] = h
|
189 |
return h
|
190 |
|
191 |
def addSwitch( self, name, cls=None, **params ): |
192 |
"""Add switch.
|
193 |
name: name of switch to add
|
194 |
cls: custom switch class/constructor (optional)
|
195 |
returns: added switch
|
196 |
side effect: increments listenPort ivar ."""
|
197 |
defaults = { 'listenPort': self.listenPort, |
198 |
'inNamespace': self.inNamespace } |
199 |
defaults.update( params ) |
200 |
if not cls: |
201 |
cls = self.switch
|
202 |
sw = cls( name, **defaults ) |
203 |
if not self.inNamespace and self.listenPort: |
204 |
self.listenPort += 1 |
205 |
self.switches.append( sw )
|
206 |
self.nameToNode[ name ] = sw
|
207 |
return sw
|
208 |
|
209 |
    def addController( self, name='c0', controller=None, **params ):
        """Add controller.
           name: controller name, or an already-constructed Controller
                 object (in which case its own name is used)
           controller: Controller class (defaults to self.controller)
           returns: the controller that was added (may be None for
                    controller-less setups)"""
        # Get controller class
        if not controller:
            controller = self.controller
        # Construct new controller if one is not given
        if isinstance(name, Controller):
            # Caller passed a ready-made controller object as 'name'
            controller_new = name
            # Pylint thinks controller is a str()
            # pylint: disable=E1103
            name = controller_new.name
            # pylint: enable=E1103
        else:
            controller_new = controller( name, **params )
        # Add new controller to net
        if controller_new:  # allow controller-less setups
            self.controllers.append( controller_new )
            self.nameToNode[ name ] = controller_new
        return controller_new
|
229 |
|
230 |
# BL: We now have four ways to look up nodes
|
231 |
# This may (should?) be cleaned up in the future.
|
232 |
def getNodeByName( self, *args ): |
233 |
"Return node(s) with given name(s)"
|
234 |
if len( args ) == 1: |
235 |
return self.nameToNode[ args[ 0 ] ] |
236 |
return [ self.nameToNode[ n ] for n in args ] |
237 |
|
238 |
def get( self, *args ): |
239 |
"Convenience alias for getNodeByName"
|
240 |
return self.getNodeByName( *args ) |
241 |
|
242 |
# Even more convenient syntax for node lookup and iteration
|
243 |
def __getitem__( self, key ): |
244 |
"""net [ name ] operator: Return node(s) with given name(s)"""
|
245 |
return self.nameToNode[ key ] |
246 |
|
247 |
def __iter__( self ): |
248 |
"return iterator over node names"
|
249 |
for node in chain( self.hosts, self.switches, self.controllers ): |
250 |
yield node.name
|
251 |
|
252 |
def __len__( self ): |
253 |
"returns number of nodes in net"
|
254 |
return ( len( self.hosts ) + len( self.switches ) + |
255 |
len( self.controllers ) ) |
256 |
|
257 |
def __contains__( self, item ): |
258 |
"returns True if net contains named node"
|
259 |
return item in self.nameToNode |
260 |
|
261 |
def keys( self ): |
262 |
"return a list of all node names or net's keys"
|
263 |
return list( self ) |
264 |
|
265 |
def values( self ): |
266 |
"return a list of all nodes or net's values"
|
267 |
return [ self[name] for name in self ] |
268 |
|
269 |
def items( self ): |
270 |
"return (key,value) tuple list for every node in net"
|
271 |
return zip( self.keys(), self.values() ) |
272 |
|
273 |
def addLink( self, node1, node2, port1=None, port2=None, |
274 |
cls=None, **params ):
|
275 |
""""Add a link from node1 to node2
|
276 |
node1: source node
|
277 |
node2: dest node
|
278 |
port1: source port
|
279 |
port2: dest port
|
280 |
returns: link object"""
|
281 |
defaults = { 'port1': port1,
|
282 |
'port2': port2,
|
283 |
'intf': self.intf } |
284 |
defaults.update( params ) |
285 |
if not cls: |
286 |
cls = self.link
|
287 |
return cls( node1, node2, **defaults )
|
288 |
|
289 |
    def configHosts( self ):
        """Configure a set of hosts: set their default IP/MAC (when a
           default interface exists) and bring up loopback."""
        for host in self.hosts:
            info( host.name + ' ' )
            intf = host.defaultIntf()
            if intf:
                host.configDefault()
            else:
                # Don't configure nonexistent intf
                host.configDefault( ip=None, mac=None )
            # You're low priority, dude!
            # BL: do we want to do this here or not?
            # May not make sense if we have CPU limiting...
            # quietRun( 'renice +18 -p ' + repr( host.pid ) )
            # This may not be the right place to do this, but
            # it needs to be done somewhere.
            host.cmd( 'ifconfig lo up' )
        info( '\n' )
|
307 |
|
308 |
    def buildFromTopo( self, topo=None ):
        """Build mininet from a topology object
           At the end of this function, everything should be connected
           and up.
           Order matters: controllers, then hosts, then switches, then
           links (links look nodes up by name, so nodes must exist)."""

        # Possibly we should clean up here and/or validate
        # the topo
        if self.cleanup:
            pass

        info( '*** Creating network\n' )

        if not self.controllers and self.controller:
            # Add a default controller
            info( '*** Adding controller\n' )
            classes = self.controller
            # self.controller may be a single class or a list of classes
            if type( classes ) is not list:
                classes = [ classes ]
            for i, cls in enumerate( classes ):
                self.addController( 'c%d' % i, cls )

        info( '*** Adding hosts:\n' )
        for hostName in topo.hosts():
            self.addHost( hostName, **topo.nodeInfo( hostName ) )
            info( hostName + ' ' )

        info( '\n**** Adding switches:\n' ) if False else info( '\n*** Adding switches:\n' )
        for switchName in topo.switches():
            self.addSwitch( switchName, **topo.nodeInfo( switchName) )
            info( switchName + ' ' )

        info( '\n*** Adding links:\n' )
        for srcName, dstName in topo.links(sort=True):
            src, dst = self.nameToNode[ srcName ], self.nameToNode[ dstName ]
            params = topo.linkInfo( srcName, dstName )
            srcPort, dstPort = topo.port( srcName, dstName )
            self.addLink( src, dst, srcPort, dstPort, **params )
            info( '(%s, %s) ' % ( src.name, dst.name ) )

        info( '\n' )
|
348 |
|
349 |
def configureControlNetwork( self ): |
350 |
"Control net config hook: override in subclass"
|
351 |
raise Exception( 'configureControlNetwork: ' |
352 |
'should be overriden in subclass', self ) |
353 |
|
354 |
    def build( self ):
        """Build mininet: instantiate from topo (if any), configure the
           control network and hosts, then optional xterms/static ARP."""
        if self.topo:
            self.buildFromTopo( self.topo )
        # An in-namespace control plane needs an explicit control network
        if self.inNamespace:
            self.configureControlNetwork()
        info( '*** Configuring hosts\n' )
        self.configHosts()
        if self.xterms:
            self.startTerms()
        if self.autoStaticArp:
            self.staticArp()
        self.built = True
367 |
|
368 |
    def startTerms( self ):
        """Start a terminal for each node.
           Requires an X display; silently returns (with an error
           message) when DISPLAY is unset."""
        if 'DISPLAY' not in os.environ:
            error( "Error starting terms: Cannot connect to display\n" )
            return
        info( "*** Running terms on %s\n" % os.environ[ 'DISPLAY' ] )
        cleanUpScreens()
        # Spawned terminals are tracked so stopXterms can kill them later
        self.terms += makeTerms( self.controllers, 'controller' )
        self.terms += makeTerms( self.switches, 'switch' )
        self.terms += makeTerms( self.hosts, 'host' )
378 |
|
379 |
def stopXterms( self ): |
380 |
"Kill each xterm."
|
381 |
for term in self.terms: |
382 |
os.kill( term.pid, signal.SIGKILL ) |
383 |
cleanUpScreens() |
384 |
|
385 |
    def staticArp( self ):
        """Add all-pairs ARP entries to remove the need to handle broadcast.
           For every ordered (src, dst) host pair, install dst's IP->MAC
           mapping into src's ARP table."""
        for src in self.hosts:
            for dst in self.hosts:
                if src != dst:
                    src.setARP( ip=dst.IP(), mac=dst.MAC() )
391 |
|
392 |
    def start( self ):
        """Start controller and switches.
           Builds the network first if it hasn't been built yet."""
        if not self.built:
            self.build()
        info( '*** Starting controller\n' )
        for controller in self.controllers:
            controller.start()
        info( '*** Starting %s switches\n' % len( self.switches ) )
        for switch in self.switches:
            info( switch.name + ' ')
            # Each switch is told about all controllers so it can connect
            switch.start( self.controllers )
        info( '\n' )
|
404 |
|
405 |
def stop( self ): |
406 |
"Stop the controller(s), switches and hosts"
|
407 |
if self.terms: |
408 |
info( '*** Stopping %i terms\n' % len( self.terms ) ) |
409 |
self.stopXterms()
|
410 |
info( '*** Stopping %i switches\n' % len( self.switches ) ) |
411 |
swclass = type( self.switches[ 0 ] ) |
412 |
if False and self.switches and hasattr( swclass, 'batchShutdown' ): |
413 |
swclass.batchShutdown( self.switches )
|
414 |
for switch in self.switches: |
415 |
info( switch.name + ' ' )
|
416 |
switch.stop() |
417 |
info( '\n' )
|
418 |
info( '*** Stopping %i hosts\n' % len( self.hosts ) ) |
419 |
for host in self.hosts: |
420 |
info( host.name + ' ' )
|
421 |
host.terminate() |
422 |
info( '\n' )
|
423 |
info( '*** Stopping %i controllers\n' % len( self.controllers ) ) |
424 |
for controller in self.controllers: |
425 |
info( controller.name + ' ' )
|
426 |
controller.stop() |
427 |
info( '\n*** Done\n' )
|
428 |
|
429 |
def run( self, test, *args, **kwargs ): |
430 |
"Perform a complete start/test/stop cycle."
|
431 |
self.start()
|
432 |
info( '*** Running test\n' )
|
433 |
result = test( *args, **kwargs ) |
434 |
self.stop()
|
435 |
return result
|
436 |
|
437 |
def monitor( self, hosts=None, timeoutms=-1 ): |
438 |
"""Monitor a set of hosts (or all hosts by default),
|
439 |
and return their output, a line at a time.
|
440 |
hosts: (optional) set of hosts to monitor
|
441 |
timeoutms: (optional) timeout value in ms
|
442 |
returns: iterator which returns host, line"""
|
443 |
if hosts is None: |
444 |
hosts = self.hosts
|
445 |
poller = select.poll() |
446 |
Node = hosts[ 0 ] # so we can call class method fdToNode |
447 |
for host in hosts: |
448 |
poller.register( host.stdout ) |
449 |
while True: |
450 |
ready = poller.poll( timeoutms ) |
451 |
for fd, event in ready: |
452 |
host = Node.fdToNode( fd ) |
453 |
if event & select.POLLIN:
|
454 |
line = host.readline() |
455 |
if line is not None: |
456 |
yield host, line
|
457 |
# Return if non-blocking
|
458 |
if not ready and timeoutms >= 0: |
459 |
yield None, None |
460 |
|
461 |
# XXX These test methods should be moved out of this class.
|
462 |
# Probably we should create a tests.py for them
|
463 |
|
464 |
@staticmethod
|
465 |
def _parsePing( pingOutput ): |
466 |
"Parse ping output and return packets sent, received."
|
467 |
# Check for downed link
|
468 |
if 'connect: Network is unreachable' in pingOutput: |
469 |
return 1, 0 |
470 |
r = r'(\d+) packets transmitted, (\d+) received'
|
471 |
m = re.search( r, pingOutput ) |
472 |
if m is None: |
473 |
error( '*** Error: could not parse ping output: %s\n' %
|
474 |
pingOutput ) |
475 |
return 1, 0 |
476 |
sent, received = int( m.group( 1 ) ), int( m.group( 2 ) ) |
477 |
return sent, received
|
478 |
|
479 |
    def ping( self, hosts=None, timeout=None ):
        """Ping between all specified hosts.
           hosts: list of hosts (default: all hosts in the net)
           timeout: time to wait for a response, as string (ping -W)
           returns: ploss packet loss percentage"""
        # should we check if running?
        packets = 0
        lost = 0
        ploss = None
        if not hosts:
            hosts = self.hosts
        output( '*** Ping: testing ping reachability\n' )
        for node in hosts:
            output( '%s -> ' % node.name )
            for dest in hosts:
                if node != dest:
                    opts = ''
                    if timeout:
                        opts = '-W %s' % timeout
                    result = node.cmd( 'ping -c1 %s %s' % (opts, dest.IP()) )
                    sent, received = self._parsePing( result )
                    packets += sent
                    if received > sent:
                        # More replies than requests indicates something is
                        # badly wrong (e.g. duplicate addresses): bail out
                        error( '*** Error: received too many packets' )
                        error( '%s' % result )
                        node.cmdPrint( 'route' )
                        exit( 1 )
                    lost += sent - received
                    output( ( '%s ' % dest.name ) if received else 'X ' )
            output( '\n' )
        if packets > 0:
            ploss = 100.0 * lost / packets
            received = packets - lost
            output( "*** Results: %i%% dropped (%d/%d received)\n" %
                    ( ploss, received, packets ) )
        else:
            ploss = 0
            output( "*** Warning: No packets sent\n" )
        return ploss
|
518 |
|
519 |
    @staticmethod
    def _parsePingFull( pingOutput ):
        """Parse ping output and return all data:
           (sent, received, rttmin, rttavg, rttmax, rttdev).
           Any parse failure or unreachable host yields the error tuple
           (1, 0, 0, 0, 0, 0)."""
        errorTuple = (1, 0, 0, 0, 0, 0)
        # Check for downed link
        r = r'[uU]nreachable'
        m = re.search( r, pingOutput )
        if m is not None:
            return errorTuple
        r = r'(\d+) packets transmitted, (\d+) received'
        m = re.search( r, pingOutput )
        if m is None:
            error( '*** Error: could not parse ping output: %s\n' %
                   pingOutput )
            return errorTuple
        sent, received = int( m.group( 1 ) ), int( m.group( 2 ) )
        # RTT statistics line printed by ping on success
        r = r'rtt min/avg/max/mdev = '
        r += r'(\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+) ms'
        m = re.search( r, pingOutput )
        if m is None:
            error( '*** Error: could not parse ping output: %s\n' %
                   pingOutput )
            return errorTuple
        rttmin = float( m.group( 1 ) )
        rttavg = float( m.group( 2 ) )
        rttmax = float( m.group( 3 ) )
        rttdev = float( m.group( 4 ) )
        return sent, received, rttmin, rttavg, rttmax, rttdev
|
547 |
|
548 |
    def pingFull( self, hosts=None, timeout=None ):
        """Ping between all specified hosts and return all data.
           hosts: list of hosts (default: all hosts in the net)
           timeout: time to wait for a response, as string (ping -W)
           returns: list of (src, dest, ping outputs) tuples, where
                    ping outputs is (sent, received, rttmin, rttavg,
                    rttmax, rttdev)"""
        # should we check if running?
        # Each value is a tuple: (src, dsd, [all ping outputs])
        all_outputs = []
        if not hosts:
            hosts = self.hosts
        output( '*** Ping: testing ping reachability\n' )
        for node in hosts:
            output( '%s -> ' % node.name )
            for dest in hosts:
                if node != dest:
                    opts = ''
                    if timeout:
                        opts = '-W %s' % timeout
                    result = node.cmd( 'ping -c1 %s %s' % (opts, dest.IP()) )
                    outputs = self._parsePingFull( result )
                    sent, received, rttmin, rttavg, rttmax, rttdev = outputs
                    all_outputs.append( (node, dest, outputs) )
                    output( ( '%s ' % dest.name ) if received else 'X ' )
            output( '\n' )
        # Summarize per-pair results after the matrix display
        output( "*** Results: \n" )
        for outputs in all_outputs:
            src, dest, ping_outputs = outputs
            sent, received, rttmin, rttavg, rttmax, rttdev = ping_outputs
            output( " %s->%s: %s/%s, " % (src, dest, sent, received ) )
            output( "rtt min/avg/max/mdev %0.3f/%0.3f/%0.3f/%0.3f ms\n" %
                    (rttmin, rttavg, rttmax, rttdev) )
        return all_outputs
|
580 |
|
581 |
def pingAll( self ): |
582 |
"""Ping between all hosts.
|
583 |
returns: ploss packet loss percentage"""
|
584 |
return self.ping() |
585 |
|
586 |
def pingPair( self ): |
587 |
"""Ping between first two hosts, useful for testing.
|
588 |
returns: ploss packet loss percentage"""
|
589 |
hosts = [ self.hosts[ 0 ], self.hosts[ 1 ] ] |
590 |
return self.ping( hosts=hosts ) |
591 |
|
592 |
def pingAllFull( self ): |
593 |
"""Ping between all hosts.
|
594 |
returns: ploss packet loss percentage"""
|
595 |
return self.pingFull() |
596 |
|
597 |
def pingPairFull( self ): |
598 |
"""Ping between first two hosts, useful for testing.
|
599 |
returns: ploss packet loss percentage"""
|
600 |
hosts = [ self.hosts[ 0 ], self.hosts[ 1 ] ] |
601 |
return self.pingFull( hosts=hosts ) |
602 |
|
603 |
@staticmethod
|
604 |
def _parseIperf( iperfOutput ): |
605 |
"""Parse iperf output and return bandwidth.
|
606 |
iperfOutput: string
|
607 |
returns: result string"""
|
608 |
r = r'([\d\.]+ \w+/sec)'
|
609 |
m = re.findall( r, iperfOutput ) |
610 |
if m:
|
611 |
return m[-1] |
612 |
else:
|
613 |
# was: raise Exception(...)
|
614 |
error( 'could not parse iperf output: ' + iperfOutput )
|
615 |
return '' |
616 |
|
617 |
# XXX This should be cleaned up
|
618 |
|
619 |
    def iperf( self, hosts=None, l4Type='TCP', udpBw='10M' ):
        """Run iperf between two hosts.
           hosts: list of hosts; if None, uses opposite hosts
           l4Type: string, one of [ TCP, UDP ]
           udpBw: bandwidth target for the UDP test (iperf -b format)
           returns: results two-element array of server and client speeds
           (UDP results are prefixed with the requested bandwidth)"""
        # telnet is used below to detect when the TCP server is ready
        if not quietRun( 'which telnet' ):
            error( 'Cannot find telnet in $PATH - required for iperf test' )
            return
        if not hosts:
            hosts = [ self.hosts[ 0 ], self.hosts[ -1 ] ]
        else:
            assert len( hosts ) == 2
        client, server = hosts
        output( '*** Iperf: testing ' + l4Type + ' bandwidth between ' )
        output( "%s and %s\n" % ( client.name, server.name ) )
        # Kill any stale iperf server left over from a previous run
        server.cmd( 'killall -9 iperf' )
        iperfArgs = 'iperf '
        bwArgs = ''
        if l4Type == 'UDP':
            iperfArgs += '-u '
            bwArgs = '-b ' + udpBw + ' '
        elif l4Type != 'TCP':
            raise Exception( 'Unexpected l4 type: %s' % l4Type )
        server.sendCmd( iperfArgs + '-s', printPid=True )
        servout = ''
        # Wait until the server process has actually been spawned
        while server.lastPid is None:
            servout += server.monitor()
        if l4Type == 'TCP':
            # Probe port 5001 with telnet until the server accepts
            # connections; NOTE(review): this loop has no upper bound
            while 'Connected' not in client.cmd(
                    'sh -c "echo A | telnet -e A %s 5001"' % server.IP()):
                output('waiting for iperf to start up...')
                sleep(.5)
        cliout = client.cmd( iperfArgs + '-t 5 -c ' + server.IP() + ' ' +
                             bwArgs )
        debug( 'Client output: %s\n' % cliout )
        server.sendInt()
        servout += server.waitOutput()
        debug( 'Server output: %s\n' % servout )
        result = [ self._parseIperf( servout ), self._parseIperf( cliout ) ]
        if l4Type == 'UDP':
            result.insert( 0, udpBw )
        output( '*** Results: %s\n' % result )
        return result
|
662 |
|
663 |
    def runCpuLimitTest( self, cpu, duration=5 ):
        """run CPU limit test with 'while true' processes.
           cpu: desired CPU fraction of each host
           duration: test duration in seconds
           returns a single list of measured CPU fractions as floats
           (or None if ps output could not be parsed).
        """
        pct = cpu * 100
        info('*** Testing CPU %.0f%% bandwidth limit\n' % pct)
        hosts = self.hosts
        # Spin up a busy loop on every host...
        for h in hosts:
            h.cmd( 'while true; do a=1; done &' )
        # ...and collect each loop's shell PID for ps
        pids = [h.cmd( 'echo $!' ).strip() for h in hosts]
        pids_str = ",".join(["%s" % pid for pid in pids])
        cmd = 'ps -p %s -o pid,%%cpu,args' % pids_str
        # It's a shame that this is what pylint prefers
        outputs = []
        # Sample CPU usage once per second for the test duration
        for _ in range( duration ):
            sleep( 1 )
            outputs.append( quietRun( cmd ).strip() )
        # Tear down the busy loops
        for h in hosts:
            h.cmd( 'kill %1' )
        cpu_fractions = []
        for test_output in outputs:
            # Split by line.  Ignore first line, which looks like this:
            # PID %CPU COMMAND\n
            for line in test_output.split('\n')[1:]:
                r = r'\d+\s*(\d+\.\d+)'
                m = re.search( r, line )
                if m is None:
                    error( '*** Error: could not extract CPU fraction: %s\n' %
                           line )
                    return None
                cpu_fractions.append( float( m.group( 1 ) ) )
        output( '*** Results: %s\n' % cpu_fractions )
        return cpu_fractions
|
698 |
|
699 |
# BL: I think this can be rewritten now that we have
|
700 |
# a real link class.
|
701 |
    def configLinkStatus( self, src, dst, status ):
        """Change status of src <-> dst links.
           src: node name
           dst: node name
           status: string {up, down}
           Both endpoints of every src<->dst connection are flipped."""
        if src not in self.nameToNode:
            error( 'src not in network: %s\n' % src )
        elif dst not in self.nameToNode:
            error( 'dst not in network: %s\n' % dst )
        else:
            # Resolve names to node objects
            if type( src ) is str:
                src = self.nameToNode[ src ]
            if type( dst ) is str:
                dst = self.nameToNode[ dst ]
            connections = src.connectionsTo( dst )
            if len( connections ) == 0:
                error( 'src and dst not connected: %s %s\n' % ( src, dst) )
            # Bring each interface pair up/down via ifconfig
            for srcIntf, dstIntf in connections:
                result = srcIntf.ifconfig( status )
                if result:
                    error( 'link src status change failed: %s\n' % result )
                result = dstIntf.ifconfig( status )
                if result:
                    error( 'link dst status change failed: %s\n' % result )
|
725 |
|
726 |
def interact( self ): |
727 |
"Start network and run our simple CLI."
|
728 |
self.start()
|
729 |
result = CLI( self )
|
730 |
self.stop()
|
731 |
return result
|
732 |
|
733 |
inited = False
|
734 |
|
735 |
@classmethod
|
736 |
def init( cls ): |
737 |
"Initialize Mininet"
|
738 |
if cls.inited:
|
739 |
return
|
740 |
ensureRoot() |
741 |
fixLimits() |
742 |
cls.inited = True
|
743 |
|
744 |
|
745 |
class MininetWithControlNet( Mininet ):

    """Control network support:

       Create an explicit control network. Currently this is only
       used/usable with the user datapath.

       Notes:

       1. If the controller and switches are in the same (e.g. root)
          namespace, they can just use the loopback connection.

       2. If we can get unix domain sockets to work, we can use them
          instead of an explicit control network.

       3. Instead of routing, we could bridge or use 'in-band' control.

       4. Even if we dispense with this in general, it could still be
          useful for people who wish to simulate a separate control
          network (since real networks may need one!)

       5. Basically nobody ever used this code, so it has been moved
          into its own class.

       6. Ultimately we may wish to extend this to allow us to create a
          control network which every node's control interface is
          attached to."""

    def configureControlNetwork( self ):
        "Configure control network."
        self.configureRoutedControlNetwork()

    # We still need to figure out the right way to pass
    # in the control network location.

    def configureRoutedControlNetwork( self, ip='192.168.123.1',
                                       prefixLen=16 ):
        """Configure a routed control network on controller and switches.
           For use with the user datapath only right now.
           ip: controller-side address of the control network
           prefixLen: prefix length for all control interfaces"""
        controller = self.controllers[ 0 ]
        info( controller.name + ' <->' )
        cip = ip
        snum = ipParse( ip )
        for switch in self.switches:
            info( ' ' + switch.name )
            link = self.link( switch, controller, port1=0 )
            sintf, cintf = link.intf1, link.intf2
            switch.controlIntf = sintf
            snum += 1
            # Skip network and broadcast style addresses (.0 and .255)
            while snum & 0xff in [ 0, 255 ]:
                snum += 1
            sip = ipStr( snum )
            cintf.setIP( cip, prefixLen )
            sintf.setIP( sip, prefixLen )
            controller.setHostRoute( sip, cintf )
            switch.setHostRoute( cip, sintf )
        info( '\n' )
        info( '*** Testing control network\n' )
        # NOTE(review): with zero switches, cintf is unbound here;
        # this method presumes at least one switch exists
        while not cintf.isUp():
            info( '*** Waiting for', cintf, 'to come up\n' )
            sleep( 1 )
        for switch in self.switches:
            # Fix: wait on each switch's *own* control interface;
            # previously this re-tested 'sintf', which was left pointing
            # at the last switch configured in the loop above
            while not switch.controlIntf.isUp():
                info( '*** Waiting for', switch.controlIntf,
                      'to come up\n' )
                sleep( 1 )
            if self.ping( hosts=[ switch, controller ] ) != 0:
                error( '*** Error: control network test failed\n' )
                exit( 1 )
        info( '\n' )
|