mininet/net.py @ 21366afc
"""

Mininet: A simple networking testbed for OpenFlow/SDN!

author: Bob Lantz (rlantz@cs.stanford.edu)
author: Brandon Heller (brandonh@stanford.edu)

Mininet creates scalable OpenFlow test networks by using
process-based virtualization and network namespaces.

Simulated hosts are created as processes in separate network
namespaces. This allows a complete OpenFlow network to be simulated on
top of a single Linux kernel.

Each host has:

A virtual console (pipes to a shell)
A virtual interface (half of a veth pair)
A parent shell (and possibly some child processes) in a namespace

Hosts have a network interface which is configured via ifconfig/ip
link/etc.

This version supports both the kernel and user space datapaths
from the OpenFlow reference implementation (openflowswitch.org)
as well as Open vSwitch (openvswitch.org).

In kernel datapath mode, the controller and switches are simply
processes in the root namespace.

Kernel OpenFlow datapaths are instantiated using dpctl(8), and are
attached to one side of a veth pair; the other side resides in the
host namespace. In this mode, switch processes can simply connect to the
controller via the loopback interface.

In user datapath mode, the controller and switches can be full-service
nodes that live in their own network namespaces and have management
interfaces and IP addresses on a control network (e.g. 192.168.123.1,
currently routed although it could be bridged.)

In addition to a management interface, user mode switches also have
several switch interfaces, halves of veth pairs whose other halves
reside in the host nodes that the switches are connected to.

Consistent, straightforward naming is important in order to easily
identify hosts, switches and controllers, both from the CLI and
from program code. Interfaces are named to make it easy to identify
which interfaces belong to which node.

The basic naming scheme is as follows:

Host nodes are named h1-hN
Switch nodes are named s1-sN
Controller nodes are named c0-cN
Interfaces are named {nodename}-eth0 .. {nodename}-ethN

Note: If the network topology is created using mininet.topo, then
node numbers are unique among hosts and switches (e.g. we have
h1..hN and sN..sN+M) and also correspond to their default IP addresses
of 10.x.y.z/8 where x.y.z is the base-256 representation of N for
hN. This mapping allows easy determination of a node's IP
address from its name, e.g. h1 -> 10.0.0.1, h257 -> 10.0.1.1.

Note also that 10.0.0.1 can often be written as 10.1 for short, e.g.
"ping 10.1" is equivalent to "ping 10.0.0.1".

Currently we wrap the entire network in a 'mininet' object, which
constructs a simulated network based on a network topology created
using a topology object (e.g. LinearTopo) from mininet.topo or
mininet.topolib, and a Controller which the switches will connect
to. Several configuration options are provided for functions such as
automatically setting MAC addresses, populating the ARP table, or
even running a set of terminals to allow direct interaction with nodes.

After the network is created, it can be started using start(), and a
variety of useful tasks may be performed, including basic connectivity
and bandwidth tests and running the mininet CLI.

Once the network is up and running, test code can easily get access
to host and switch objects which can then be used for arbitrary
experiments, typically involving running a series of commands on the
hosts.

After all desired tests or activities have been completed, the stop()
method may be called to shut down the network.
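
A quick usage sketch follows; it assumes LinearTopo from mininet.topo,
which (in this version) builds k switches with one singly-attached host
each, named h1..hk and s1..sk:

    from mininet.net import Mininet
    from mininet.topo import LinearTopo

    net = Mininet( topo=LinearTopo( 2 ) )
    net.start()
    net.pingAll()                        # all-pairs connectivity test
    h1, h2 = net.get( 'h1', 'h2' )       # look up host objects by name
    print h1.cmd( 'ping -c1', h2.IP() )  # run an arbitrary command on h1
    net.stop()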
"""

import os
import re
import select
import signal
from time import sleep

from mininet.cli import CLI
from mininet.log import info, error, debug, output
from mininet.node import Host, OVSKernelSwitch, Controller
from mininet.link import Link, Intf
from mininet.util import quietRun, fixLimits, numCores, ensureRoot
from mininet.util import macColonHex, ipStr, ipParse, netParse, ipAdd
from mininet.term import cleanUpScreens, makeTerms

# Mininet version: should be consistent with README and LICENSE
VERSION = "2.0.0"

class Mininet( object ):
    "Network emulation with hosts spawned in network namespaces."

    def __init__( self, topo=None, switch=OVSKernelSwitch, host=Host,
                  controller=Controller, link=Link, intf=Intf,
                  build=True, xterms=False, cleanup=False, ipBase='10.0.0.0/8',
                  inNamespace=False,
                  autoSetMacs=False, autoStaticArp=False, autoPinCpus=False,
                  listenPort=None ):
        """Create Mininet object.
           topo: Topo (topology) object or None
           switch: default Switch class
           host: default Host class/constructor
           controller: default Controller class/constructor
           link: default Link class/constructor
           intf: default Intf class/constructor
           ipBase: base IP address for hosts
           build: build now from topo?
           xterms: if build now, spawn xterms?
           cleanup: if build now, cleanup before creating?
           inNamespace: spawn switches and controller in net namespaces?
           autoSetMacs: set MAC addrs automatically like IP addresses?
           autoStaticArp: set all-pairs static MAC addrs?
           autoPinCpus: pin hosts to (real) cores (requires CPULimitedHost)?
           listenPort: base listening port to open; will be incremented for
               each additional switch in the net if inNamespace=False"""
        self.topo = topo
        self.switch = switch
        self.host = host
        self.controller = controller
        self.link = link
        self.intf = intf
        self.ipBase = ipBase
        self.ipBaseNum, self.prefixLen = netParse( self.ipBase )
        self.nextIP = 1  # start for address allocation
        self.inNamespace = inNamespace
        self.xterms = xterms
        self.cleanup = cleanup
        self.autoSetMacs = autoSetMacs
        self.autoStaticArp = autoStaticArp
        self.autoPinCpus = autoPinCpus
        self.numCores = numCores()
        self.nextCore = 0  # next core for pinning hosts to CPUs
        self.listenPort = listenPort

        self.hosts = []
        self.switches = []
        self.controllers = []

        self.nameToNode = {}  # name to Node (Host/Switch) objects

        self.terms = []  # list of spawned xterm processes

        Mininet.init()  # Initialize Mininet if necessary

        self.built = False
        if topo and build:
            self.build()

    def addHost( self, name, cls=None, **params ):
        """Add host.
           name: name of host to add
           cls: custom host class/constructor (optional)
           params: parameters for host
           returns: added host"""
        # Default IP and MAC addresses
        defaults = { 'ip': ipAdd( self.nextIP,
                                  ipBaseNum=self.ipBaseNum,
                                  prefixLen=self.prefixLen ) +
                                  '/%s' % self.prefixLen }
        if self.autoSetMacs:
            defaults[ 'mac' ] = macColonHex( self.nextIP )
        if self.autoPinCpus:
            defaults[ 'cores' ] = self.nextCore
            self.nextCore = ( self.nextCore + 1 ) % self.numCores
        self.nextIP += 1
        defaults.update( params )
        if not cls:
            cls = self.host
        h = cls( name, **defaults )
        self.hosts.append( h )
        self.nameToNode[ name ] = h
        return h

    def addSwitch( self, name, cls=None, **params ):
        """Add switch.
           name: name of switch to add
           cls: custom switch class/constructor (optional)
           returns: added switch
           side effect: increments listenPort ivar."""
        defaults = { 'listenPort': self.listenPort,
                     'inNamespace': self.inNamespace }
        defaults.update( params )
        if not cls:
            cls = self.switch
        sw = cls( name, **defaults )
        if not self.inNamespace and self.listenPort:
            self.listenPort += 1
        self.switches.append( sw )
        self.nameToNode[ name ] = sw
        return sw

    def addController( self, name='c0', controller=None, **params ):
        """Add controller.
           controller: Controller class"""
        if not controller:
            controller = self.controller
        controller_new = controller( name, **params )
        if controller_new:  # allow controller-less setups
            self.controllers.append( controller_new )
            self.nameToNode[ name ] = controller_new
        return controller_new

    # BL: is this better than just using nameToNode[] ?
    # Should it have a better name?
    def getNodeByName( self, *args ):
        "Return node(s) with given name(s)"
        if len( args ) == 1:
            return self.nameToNode[ args[ 0 ] ]
        return [ self.nameToNode[ n ] for n in args ]

    def get( self, *args ):
        "Convenience alias for getNodeByName"
        return self.getNodeByName( *args )

    def addLink( self, node1, node2, port1=None, port2=None,
                 cls=None, **params ):
        """Add a link from node1 to node2.
           node1: source node
           node2: dest node
           port1: source port
           port2: dest port
           returns: link object"""
        defaults = { 'port1': port1,
                     'port2': port2,
                     'intf': self.intf }
        defaults.update( params )
        if not cls:
            cls = self.link
        return cls( node1, node2, **defaults )
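
    # The add* methods above can also be used to assemble a network by hand,
    # without a Topo object.  A minimal sketch (names are illustrative only):
    #
    #   net = Mininet()
    #   net.addController( 'c0' )
    #   h1, h2 = net.addHost( 'h1' ), net.addHost( 'h2' )
    #   s1 = net.addSwitch( 's1' )
    #   net.addLink( h1, s1 )
    #   net.addLink( h2, s1 )
    #   net.start()
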
    def configHosts( self ):
        "Configure a set of hosts."
        for host in self.hosts:
            info( host.name + ' ' )
            intf = host.defaultIntf()
            if intf:
                host.configDefault( defaultRoute=intf )
            else:
                # Don't configure nonexistent intf
                host.configDefault( ip=None, mac=None )
            # You're low priority, dude!
            # BL: do we want to do this here or not?
            # May not make sense if we have CPU limiting...
            # quietRun( 'renice +18 -p ' + repr( host.pid ) )
            # This may not be the right place to do this, but
            # it needs to be done somewhere.
            host.cmd( 'ifconfig lo up' )
        info( '\n' )

    def buildFromTopo( self, topo=None ):
        """Build mininet from a topology object
           At the end of this function, everything should be connected
           and up."""

        # Possibly we should clean up here and/or validate
        # the topo
        if self.cleanup:
            pass

        info( '*** Creating network\n' )

        if not self.controllers:
            # Add a default controller
            info( '*** Adding controller\n' )
            classes = self.controller
            if type( classes ) is not list:
                classes = [ classes ]
            for i, cls in enumerate( classes ):
                self.addController( 'c%d' % i, cls )

        info( '*** Adding hosts:\n' )
        for hostName in topo.hosts():
            self.addHost( hostName, **topo.nodeInfo( hostName ) )
            info( hostName + ' ' )

        info( '\n*** Adding switches:\n' )
        for switchName in topo.switches():
            self.addSwitch( switchName, **topo.nodeInfo( switchName ) )
            info( switchName + ' ' )

        info( '\n*** Adding links:\n' )
        for srcName, dstName in topo.links( sort=True ):
            src, dst = self.nameToNode[ srcName ], self.nameToNode[ dstName ]
            params = topo.linkInfo( srcName, dstName )
            srcPort, dstPort = topo.port( srcName, dstName )
            self.addLink( src, dst, srcPort, dstPort, **params )
            info( '(%s, %s) ' % ( src.name, dst.name ) )

        info( '\n' )

    def configureControlNetwork( self ):
        "Control net config hook: override in subclass"
        raise Exception( 'configureControlNetwork: '
                         'should be overridden in subclass', self )

    def build( self ):
        "Build mininet."
        if self.topo:
            self.buildFromTopo( self.topo )
        if self.inNamespace:
            self.configureControlNetwork()
        info( '*** Configuring hosts\n' )
        self.configHosts()
        if self.xterms:
            self.startTerms()
        if self.autoStaticArp:
            self.staticArp()
        self.built = True

    def startTerms( self ):
        "Start a terminal for each node."
        info( "*** Running terms on %s\n" % os.environ[ 'DISPLAY' ] )
        cleanUpScreens()
        self.terms += makeTerms( self.controllers, 'controller' )
        self.terms += makeTerms( self.switches, 'switch' )
        self.terms += makeTerms( self.hosts, 'host' )

    def stopXterms( self ):
        "Kill each xterm."
        for term in self.terms:
            os.kill( term.pid, signal.SIGKILL )
        cleanUpScreens()

    def staticArp( self ):
        "Add all-pairs ARP entries to remove the need to handle broadcast."
        for src in self.hosts:
            for dst in self.hosts:
                if src != dst:
                    src.setARP( ip=dst.IP(), mac=dst.MAC() )

    def start( self ):
        "Start controller and switches."
        if not self.built:
            self.build()
        info( '*** Starting controller\n' )
        for controller in self.controllers:
            controller.start()
        info( '*** Starting %s switches\n' % len( self.switches ) )
        for switch in self.switches:
            info( switch.name + ' ' )
            switch.start( self.controllers )
        info( '\n' )

    def stop( self ):
        "Stop the controller(s), switches and hosts"
        if self.terms:
            info( '*** Stopping %i terms\n' % len( self.terms ) )
            self.stopXterms()
        info( '*** Stopping %i hosts\n' % len( self.hosts ) )
        for host in self.hosts:
            info( host.name + ' ' )
            host.terminate()
        info( '\n' )
        info( '*** Stopping %i switches\n' % len( self.switches ) )
        for switch in self.switches:
            info( switch.name + ' ' )
            switch.stop()
        info( '\n' )
        info( '*** Stopping %i controllers\n' % len( self.controllers ) )
        for controller in self.controllers:
            info( controller.name + ' ' )
            controller.stop()
        info( '\n*** Done\n' )

    def run( self, test, *args, **kwargs ):
        "Perform a complete start/test/stop cycle."
        self.start()
        info( '*** Running test\n' )
        result = test( *args, **kwargs )
        self.stop()
        return result
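
    # run() wraps a test callable in a full start/test/stop cycle.
    # A quick sketch (LinearTopo comes from mininet.topo; pingAll is
    # defined further below in this class):
    #
    #   net = Mininet( topo=LinearTopo( 2 ) )
    #   loss = net.run( net.pingAll )   # start, ping all pairs, stop
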
    def monitor( self, hosts=None, timeoutms=-1 ):
        """Monitor a set of hosts (or all hosts by default),
           and return their output, a line at a time.
           hosts: (optional) set of hosts to monitor
           timeoutms: (optional) timeout value in ms
           returns: iterator which returns host, line"""
        if hosts is None:
            hosts = self.hosts
        poller = select.poll()
        Node = hosts[ 0 ]  # so we can call class method fdToNode
        for host in hosts:
            poller.register( host.stdout )
        while True:
            ready = poller.poll( timeoutms )
            for fd, event in ready:
                host = Node.fdToNode( fd )
                if event & select.POLLIN:
                    line = host.readline()
                    if line is not None:
                        yield host, line
            # Return if non-blocking
            if not ready and timeoutms >= 0:
                yield None, None
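
    # monitor() is a generator; a typical consumer starts long-running
    # commands with sendCmd() and then loops over the generator.  A sketch
    # (the command and timeout are illustrative only):
    #
    #   for h in net.hosts:
    #       h.sendCmd( 'ping -c5', net.hosts[ 0 ].IP() )
    #   for host, line in net.monitor( timeoutms=500 ):
    #       if host is None:
    #           break                     # poll timed out: no output ready
    #       print host.name, line.strip()
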
    # XXX These test methods should be moved out of this class.
    # Probably we should create a tests.py for them

    @staticmethod
    def _parsePing( pingOutput ):
        "Parse ping output and return packets sent, received."
        # Check for downed link
        if 'connect: Network is unreachable' in pingOutput:
            return (1, 0)
        r = r'(\d+) packets transmitted, (\d+) received'
        m = re.search( r, pingOutput )
        if m is None:
            error( '*** Error: could not parse ping output: %s\n' %
                   pingOutput )
            return (1, 0)
        sent, received = int( m.group( 1 ) ), int( m.group( 2 ) )
        return sent, received

    def ping( self, hosts=None, timeout=None ):
        """Ping between all specified hosts.
           hosts: list of hosts
           timeout: time to wait for a response, as string
           returns: ploss packet loss percentage"""
        # should we check if running?
        packets = 0
        lost = 0
        ploss = None
        if not hosts:
            hosts = self.hosts
        output( '*** Ping: testing ping reachability\n' )
        for node in hosts:
            output( '%s -> ' % node.name )
            for dest in hosts:
                if node != dest:
                    opts = ''
                    if timeout:
                        opts = '-W %s' % timeout
                    result = node.cmd( 'ping -c1 %s %s' % (opts, dest.IP()) )
                    sent, received = self._parsePing( result )
                    packets += sent
                    if received > sent:
                        error( '*** Error: received too many packets' )
                        error( '%s' % result )
                        node.cmdPrint( 'route' )
                        exit( 1 )
                    lost += sent - received
                    output( ( '%s ' % dest.name ) if received else 'X ' )
            output( '\n' )
        ploss = 100 * lost / packets
        output( "*** Results: %i%% dropped (%d/%d lost)\n" %
                ( ploss, lost, packets ) )
        return ploss

    @staticmethod
    def _parsePingFull( pingOutput ):
        "Parse ping output and return all data."
        # Check for downed link
        if 'connect: Network is unreachable' in pingOutput:
            return (1, 0, 0, 0, 0, 0)
        r = r'(\d+) packets transmitted, (\d+) received'
        m = re.search( r, pingOutput )
        if m is None:
            error( '*** Error: could not parse ping output: %s\n' %
                   pingOutput )
            return (1, 0, 0, 0, 0, 0)
        sent, received = int( m.group( 1 ) ), int( m.group( 2 ) )
        r = r'rtt min/avg/max/mdev = '
        r += r'(\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+) ms'
        m = re.search( r, pingOutput )
        if m is None:
            # No rtt line (e.g. all packets were lost)
            return sent, received, 0, 0, 0, 0
        rttmin = float( m.group( 1 ) )
        rttavg = float( m.group( 2 ) )
        rttmax = float( m.group( 3 ) )
        rttdev = float( m.group( 4 ) )
        return sent, received, rttmin, rttavg, rttmax, rttdev

    def pingFull( self, hosts=None, timeout=None ):
        """Ping between all specified hosts and return all data.
           hosts: list of hosts
           timeout: time to wait for a response, as string
           returns: all ping data; see function body."""
        # should we check if running?
        # Each value is a tuple: (src, dst, [all ping outputs])
        all_outputs = []
        if not hosts:
            hosts = self.hosts
        output( '*** Ping: testing ping reachability\n' )
        for node in hosts:
            output( '%s -> ' % node.name )
            for dest in hosts:
                if node != dest:
                    opts = ''
                    if timeout:
                        opts = '-W %s' % timeout
                    result = node.cmd( 'ping -c1 %s %s' % (opts, dest.IP()) )
                    outputs = self._parsePingFull( result )
                    sent, received, rttmin, rttavg, rttmax, rttdev = outputs
                    all_outputs.append( (node, dest, outputs) )
                    output( ( '%s ' % dest.name ) if received else 'X ' )
            output( '\n' )
        output( "*** Results: \n" )
        for outputs in all_outputs:
            src, dest, ping_outputs = outputs
            sent, received, rttmin, rttavg, rttmax, rttdev = ping_outputs
            output( " %s->%s: %s/%s, " % (src, dest, sent, received ) )
            output( "rtt min/avg/max/mdev %0.3f/%0.3f/%0.3f/%0.3f ms\n" %
                    (rttmin, rttavg, rttmax, rttdev) )
        return all_outputs

    def pingAll( self ):
        """Ping between all hosts.
           returns: ploss packet loss percentage"""
        return self.ping()

    def pingPair( self ):
        """Ping between first two hosts, useful for testing.
           returns: ploss packet loss percentage"""
        hosts = [ self.hosts[ 0 ], self.hosts[ 1 ] ]
        return self.ping( hosts=hosts )

    def pingAllFull( self ):
        """Ping between all hosts and return all data.
           returns: list of (src, dest, ping outputs); see pingFull()"""
        return self.pingFull()

    def pingPairFull( self ):
        """Ping between first two hosts, useful for testing.
           returns: list of (src, dest, ping outputs); see pingFull()"""
        hosts = [ self.hosts[ 0 ], self.hosts[ 1 ] ]
        return self.pingFull( hosts=hosts )

    @staticmethod
    def _parseIperf( iperfOutput ):
        """Parse iperf output and return bandwidth.
           iperfOutput: string
           returns: result string"""
        r = r'([\d\.]+ \w+/sec)'
        m = re.findall( r, iperfOutput )
        if m:
            return m[ -1 ]
        else:
            # was: raise Exception(...)
            error( 'could not parse iperf output: ' + iperfOutput )
            return ''

    # XXX This should be cleaned up

    def iperf( self, hosts=None, l4Type='TCP', udpBw='10M' ):
        """Run iperf between two hosts.
           hosts: list of two hosts; if None, uses first and last hosts
           l4Type: string, one of [ TCP, UDP ]
           udpBw: bandwidth target for the UDP test, as an iperf -b string
           returns: two-element list of [ server, client ] measured speeds
               (UDP tests prepend udpBw, giving three elements)"""
        if not quietRun( 'which telnet' ):
            error( 'Cannot find telnet in $PATH - required for iperf test' )
            return
        if not hosts:
            hosts = [ self.hosts[ 0 ], self.hosts[ -1 ] ]
        else:
            assert len( hosts ) == 2
        client, server = hosts
        output( '*** Iperf: testing ' + l4Type + ' bandwidth between ' )
        output( "%s and %s\n" % ( client.name, server.name ) )
        server.cmd( 'killall -9 iperf' )
        iperfArgs = 'iperf '
        bwArgs = ''
        if l4Type == 'UDP':
            iperfArgs += '-u '
            bwArgs = '-b ' + udpBw + ' '
        elif l4Type != 'TCP':
            raise Exception( 'Unexpected l4 type: %s' % l4Type )
        server.sendCmd( iperfArgs + '-s', printPid=True )
        servout = ''
        while server.lastPid is None:
            servout += server.monitor()
        if l4Type == 'TCP':
            while 'Connected' not in client.cmd(
                    'sh -c "echo A | telnet -e A %s 5001"' % server.IP() ):
                output( 'waiting for iperf to start up...' )
                sleep( .5 )
        cliout = client.cmd( iperfArgs + '-t 5 -c ' + server.IP() + ' ' +
                             bwArgs )
        debug( 'Client output: %s\n' % cliout )
        server.sendInt()
        servout += server.waitOutput()
        debug( 'Server output: %s\n' % servout )
        result = [ self._parseIperf( servout ), self._parseIperf( cliout ) ]
        if l4Type == 'UDP':
            result.insert( 0, udpBw )
        output( '*** Results: %s\n' % result )
        return result
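
    # A quick sketch of both iperf() variants; the parsed speeds come back
    # as strings such as '94.3 Mbits/sec' (h1 and h2 are illustrative host
    # objects):
    #
    #   serverBw, clientBw = net.iperf()          # TCP, first vs. last host
    #   udpBw, serverBw, clientBw = net.iperf( hosts=[ h1, h2 ],
    #                                          l4Type='UDP', udpBw='5M' )
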
    def runCpuLimitTest( self, cpu, duration=5 ):
        """run CPU limit test with 'while true' processes.
           cpu: desired CPU fraction of each host
           duration: test duration in seconds
           returns a single list of measured CPU fractions as floats.
           """
        pct = cpu * 100
        info( '*** Testing CPU %.0f%% bandwidth limit\n' % pct )
        hosts = self.hosts
        for h in hosts:
            h.cmd( 'while true; do a=1; done &' )
        pids = [ h.cmd( 'echo $!' ).strip() for h in hosts ]
        pids_str = ",".join( [ "%s" % pid for pid in pids ] )
        cmd = 'ps -p %s -o pid,%%cpu,args' % pids_str
        # It's a shame that this is what pylint prefers
        outputs = []
        for _ in range( duration ):
            sleep( 1 )
            outputs.append( quietRun( cmd ).strip() )
        for h in hosts:
            h.cmd( 'kill %1' )
        cpu_fractions = []
        for test_output in outputs:
            # Split by line.  Ignore first line, which looks like this:
            # PID %CPU COMMAND\n
            for line in test_output.split( '\n' )[ 1: ]:
                r = r'\d+\s*(\d+\.\d+)'
                m = re.search( r, line )
                if m is None:
                    error( '*** Error: could not extract CPU fraction: %s\n' %
                           line )
                    return None
                cpu_fractions.append( float( m.group( 1 ) ) )
        output( '*** Results: %s\n' % cpu_fractions )
        return cpu_fractions

    # BL: I think this can be rewritten now that we have
    # a real link class.
    def configLinkStatus( self, src, dst, status ):
        """Change status of src <-> dst links.
           src: node name
           dst: node name
           status: string {up, down}"""
        if src not in self.nameToNode:
            error( 'src not in network: %s\n' % src )
        elif dst not in self.nameToNode:
            error( 'dst not in network: %s\n' % dst )
        else:
            if type( src ) is str:
                src = self.nameToNode[ src ]
            if type( dst ) is str:
                dst = self.nameToNode[ dst ]
            connections = src.connectionsTo( dst )
            if len( connections ) == 0:
                error( 'src and dst not connected: %s %s\n' % ( src, dst ) )
            for srcIntf, dstIntf in connections:
                result = srcIntf.ifconfig( status )
                if result:
                    error( 'link src status change failed: %s\n' % result )
                result = dstIntf.ifconfig( status )
                if result:
                    error( 'link dst status change failed: %s\n' % result )
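
    # Sketch: take every interface pair on the s1 <-> h1 link down and then
    # back up again (node names are illustrative and must exist in the net):
    #
    #   net.configLinkStatus( 's1', 'h1', 'down' )
    #   net.configLinkStatus( 's1', 'h1', 'up' )
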
    def interact( self ):
        "Start network and run our simple CLI."
        self.start()
        result = CLI( self )
        self.stop()
        return result

    inited = False

    @classmethod
    def init( cls ):
        "Initialize Mininet"
        if cls.inited:
            return
        ensureRoot()
        fixLimits()
        cls.inited = True


class MininetWithControlNet( Mininet ):

    """Control network support:

       Create an explicit control network. Currently this is only
       used/usable with the user datapath.

       Notes:

       1. If the controller and switches are in the same (e.g. root)
          namespace, they can just use the loopback connection.

       2. If we can get unix domain sockets to work, we can use them
          instead of an explicit control network.

       3. Instead of routing, we could bridge or use 'in-band' control.

       4. Even if we dispense with this in general, it could still be
          useful for people who wish to simulate a separate control
          network (since real networks may need one!)

       5. Basically nobody ever used this code, so it has been moved
          into its own class.

       6. Ultimately we may wish to extend this to allow us to create a
          control network to which every node's control interface is
          attached."""
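
    # A minimal usage sketch; it assumes a user-space switch class such as
    # mininet.node.UserSwitch (not imported in this module) and LinearTopo
    # from mininet.topo:
    #
    #   net = MininetWithControlNet( topo=LinearTopo( 2 ),
    #                                switch=UserSwitch, inNamespace=True )
    #   net.start()   # build() will call configureControlNetwork()
    #   net.pingAll()
    #   net.stop()
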
    def configureControlNetwork( self ):
        "Configure control network."
        self.configureRoutedControlNetwork()

    # We still need to figure out the right way to pass
    # in the control network location.

    def configureRoutedControlNetwork( self, ip='192.168.123.1',
                                       prefixLen=16 ):
        """Configure a routed control network on controller and switches.
           For use with the user datapath only right now."""
        controller = self.controllers[ 0 ]
        info( controller.name + ' <->' )
        cip = ip
        snum = ipParse( ip )
        for switch in self.switches:
            info( ' ' + switch.name )
            link = self.link( switch, controller, port1=0 )
            sintf, cintf = link.intf1, link.intf2
            switch.controlIntf = sintf
            snum += 1
            while snum & 0xff in [ 0, 255 ]:
                snum += 1
            sip = ipStr( snum )
            cintf.setIP( cip, prefixLen )
            sintf.setIP( sip, prefixLen )
            controller.setHostRoute( sip, cintf )
            switch.setHostRoute( cip, sintf )
        info( '\n' )
        info( '*** Testing control network\n' )
        while not cintf.isUp():
            info( '*** Waiting for', cintf, 'to come up\n' )
            sleep( 1 )
        for switch in self.switches:
            while not sintf.isUp():
                info( '*** Waiting for', sintf, 'to come up\n' )
                sleep( 1 )
            if self.ping( hosts=[ switch, controller ] ) != 0:
                error( '*** Error: control network test failed\n' )
                exit( 1 )
        info( '\n' )