mininet / mininet / net.py @ bd558875
History | View | Annotate | Download (28.7 KB)
1 |
"""
|
---|---|
2 |
|
3 |
Mininet: A simple networking testbed for OpenFlow/SDN!
|
4 |
|
5 |
author: Bob Lantz (rlantz@cs.stanford.edu)
|
6 |
author: Brandon Heller (brandonh@stanford.edu)
|
7 |
|
8 |
Mininet creates scalable OpenFlow test networks by using
|
9 |
process-based virtualization and network namespaces.
|
10 |
|
11 |
Simulated hosts are created as processes in separate network
|
12 |
namespaces. This allows a complete OpenFlow network to be simulated on
|
13 |
top of a single Linux kernel.
|
14 |
|
15 |
Each host has:
|
16 |
|
17 |
A virtual console (pipes to a shell)
|
18 |
A virtual interface (half of a veth pair)
|
19 |
A parent shell (and possibly some child processes) in a namespace
|
20 |
|
21 |
Hosts have a network interface which is configured via ifconfig/ip
|
22 |
link/etc.
|
23 |
|
24 |
This version supports both the kernel and user space datapaths
|
25 |
from the OpenFlow reference implementation (openflowswitch.org)
|
26 |
as well as OpenVSwitch (openvswitch.org.)
|
27 |
|
28 |
In kernel datapath mode, the controller and switches are simply
|
29 |
processes in the root namespace.
|
30 |
|
31 |
Kernel OpenFlow datapaths are instantiated using dpctl(8), and are
|
32 |
attached to the one side of a veth pair; the other side resides in the
|
33 |
host namespace. In this mode, switch processes can simply connect to the
|
34 |
controller via the loopback interface.
|
35 |
|
36 |
In user datapath mode, the controller and switches can be full-service
|
37 |
nodes that live in their own network namespaces and have management
|
38 |
interfaces and IP addresses on a control network (e.g. 192.168.123.1,
|
39 |
currently routed although it could be bridged.)
|
40 |
|
41 |
In addition to a management interface, user mode switches also have
|
42 |
several switch interfaces, halves of veth pairs whose other halves
|
43 |
reside in the host nodes that the switches are connected to.
|
44 |
|
45 |
Consistent, straightforward naming is important in order to easily
|
46 |
identify hosts, switches and controllers, both from the CLI and
|
47 |
from program code. Interfaces are named to make it easy to identify
|
48 |
which interfaces belong to which node.
|
49 |
|
50 |
The basic naming scheme is as follows:
|
51 |
|
52 |
Host nodes are named h1-hN
|
53 |
Switch nodes are named s1-sN
|
54 |
Controller nodes are named c0-cN
|
55 |
Interfaces are named {nodename}-eth0 .. {nodename}-ethN
|
56 |
|
57 |
Note: If the network topology is created using mininet.topo, then
|
58 |
node numbers are unique among hosts and switches (e.g. we have
|
59 |
h1..hN and SN..SN+M) and also correspond to their default IP addresses
|
60 |
of 10.x.y.z/8 where x.y.z is the base-256 representation of N for
|
61 |
hN. This mapping allows easy determination of a node's IP
|
62 |
address from its name, e.g. h1 -> 10.0.0.1, h257 -> 10.0.1.1.
|
63 |
|
64 |
Note also that 10.0.0.1 can often be written as 10.1 for short, e.g.
|
65 |
"ping 10.1" is equivalent to "ping 10.0.0.1".
|
66 |
|
67 |
Currently we wrap the entire network in a 'mininet' object, which
|
68 |
constructs a simulated network based on a network topology created
|
69 |
using a topology object (e.g. LinearTopo) from mininet.topo or
|
70 |
mininet.topolib, and a Controller which the switches will connect
|
71 |
to. Several configuration options are provided for functions such as
|
72 |
automatically setting MAC addresses, populating the ARP table, or
|
73 |
even running a set of terminals to allow direct interaction with nodes.
|
74 |
|
75 |
After the network is created, it can be started using start(), and a
|
76 |
variety of useful tasks may be performed, including basic connectivity
|
77 |
and bandwidth tests and running the mininet CLI.
|
78 |
|
79 |
Once the network is up and running, test code can easily get access
|
80 |
to host and switch objects which can then be used for arbitrary
|
81 |
experiments, typically involving running a series of commands on the
|
82 |
hosts.
|
83 |
|
84 |
After all desired tests or activities have been completed, the stop()
|
85 |
method may be called to shut down the network.
|
86 |
|
87 |
"""
|
88 |
|
89 |
import os |
90 |
import re |
91 |
import select |
92 |
import signal |
93 |
from time import sleep |
94 |
from itertools import chain |
95 |
|
96 |
from mininet.cli import CLI |
97 |
from mininet.log import info, error, debug, output |
98 |
from mininet.node import Host, OVSKernelSwitch, Controller |
99 |
from mininet.link import Link, Intf |
100 |
from mininet.util import quietRun, fixLimits, numCores, ensureRoot |
101 |
from mininet.util import macColonHex, ipStr, ipParse, netParse, ipAdd |
102 |
from mininet.term import cleanUpScreens, makeTerms |
103 |
|
104 |
# Mininet version: should be consistent with README and LICENSE
|
105 |
VERSION = "2.0.0"
|
106 |
|
107 |
class Mininet( object ): |
108 |
"Network emulation with hosts spawned in network namespaces."
|
109 |
|
110 |
    def __init__( self, topo=None, switch=OVSKernelSwitch, host=Host,
                  controller=Controller, link=Link, intf=Intf,
                  build=True, xterms=False, cleanup=False, ipBase='10.0.0.0/8',
                  inNamespace=False,
                  autoSetMacs=False, autoStaticArp=False, autoPinCpus=False,
                  listenPort=None ):
        """Create Mininet object.
           topo: Topo (topology) object or None
           switch: default Switch class
           host: default Host class/constructor
           controller: default Controller class/constructor
           link: default Link class/constructor
           intf: default Intf class/constructor
           ipBase: base IP address for hosts,
           build: build now from topo?
           xterms: if build now, spawn xterms?
           cleanup: if build now, cleanup before creating?
           inNamespace: spawn switches and controller in net namespaces?
           autoSetMacs: set MAC addrs automatically like IP addresses?
           autoStaticArp: set all-pairs static MAC addrs?
           autoPinCpus: pin hosts to (real) cores (requires CPULimitedHost)?
           listenPort: base listening port to open; will be incremented for
               each additional switch in the net if inNamespace=False"""
        # Default classes used by addHost/addSwitch/addController/addLink
        self.topo = topo
        self.switch = switch
        self.host = host
        self.controller = controller
        self.link = link
        self.intf = intf
        # Address allocation state: hosts get sequential IPs within ipBase
        self.ipBase = ipBase
        self.ipBaseNum, self.prefixLen = netParse( self.ipBase )
        self.nextIP = 1  # start for address allocation
        self.inNamespace = inNamespace
        self.xterms = xterms
        self.cleanup = cleanup
        self.autoSetMacs = autoSetMacs
        self.autoStaticArp = autoStaticArp
        self.autoPinCpus = autoPinCpus
        self.numCores = numCores()
        self.nextCore = 0  # next core for pinning hosts to CPUs
        self.listenPort = listenPort

        # Node collections, populated by addHost/addSwitch/addController
        self.hosts = []
        self.switches = []
        self.controllers = []

        self.nameToNode = {}  # name to Node (Host/Switch) objects

        self.terms = []  # list of spawned xterm processes

        Mininet.init()  # Initialize Mininet if necessary (root check, limits)

        self.built = False
        if topo and build:
            self.build()
|
165 |
|
166 |
def addHost( self, name, cls=None, **params ): |
167 |
"""Add host.
|
168 |
name: name of host to add
|
169 |
cls: custom host class/constructor (optional)
|
170 |
params: parameters for host
|
171 |
returns: added host"""
|
172 |
# Default IP and MAC addresses
|
173 |
defaults = { 'ip': ipAdd( self.nextIP, |
174 |
ipBaseNum=self.ipBaseNum,
|
175 |
prefixLen=self.prefixLen ) +
|
176 |
'/%s' % self.prefixLen } |
177 |
if self.autoSetMacs: |
178 |
defaults[ 'mac'] = macColonHex( self.nextIP ) |
179 |
if self.autoPinCpus: |
180 |
defaults[ 'cores' ] = self.nextCore |
181 |
self.nextCore = ( self.nextCore + 1 ) % self.numCores |
182 |
self.nextIP += 1 |
183 |
defaults.update( params ) |
184 |
if not cls: |
185 |
cls = self.host
|
186 |
h = cls( name, **defaults ) |
187 |
self.hosts.append( h )
|
188 |
self.nameToNode[ name ] = h
|
189 |
return h
|
190 |
|
191 |
def addSwitch( self, name, cls=None, **params ): |
192 |
"""Add switch.
|
193 |
name: name of switch to add
|
194 |
cls: custom switch class/constructor (optional)
|
195 |
returns: added switch
|
196 |
side effect: increments listenPort ivar ."""
|
197 |
defaults = { 'listenPort': self.listenPort, |
198 |
'inNamespace': self.inNamespace } |
199 |
defaults.update( params ) |
200 |
if not cls: |
201 |
cls = self.switch
|
202 |
sw = cls( name, **defaults ) |
203 |
if not self.inNamespace and self.listenPort: |
204 |
self.listenPort += 1 |
205 |
self.switches.append( sw )
|
206 |
self.nameToNode[ name ] = sw
|
207 |
return sw
|
208 |
|
209 |
def addController( self, name='c0', controller=None, **params ): |
210 |
"""Add controller.
|
211 |
controller: Controller class"""
|
212 |
if not controller: |
213 |
controller = self.controller
|
214 |
controller_new = controller( name, **params ) |
215 |
if controller_new: # allow controller-less setups |
216 |
self.controllers.append( controller_new )
|
217 |
self.nameToNode[ name ] = controller_new
|
218 |
return controller_new
|
219 |
|
220 |
# BL: We now have four ways to look up nodes
|
221 |
# This may (should?) be cleaned up in the future.
|
222 |
def getNodeByName( self, *args ): |
223 |
"Return node(s) with given name(s)"
|
224 |
if len( args ) == 1: |
225 |
return self.nameToNode[ args[ 0 ] ] |
226 |
return [ self.nameToNode[ n ] for n in args ] |
227 |
|
228 |
    def get( self, *args ):
        # Thin delegate: identical semantics to getNodeByName
        "Convenience alias for getNodeByName"
        return self.getNodeByName( *args )
231 |
|
232 |
# Even more convenient syntax for node lookup and iteration
|
233 |
    def __getitem__( self, *args ):
        """net [ name ] operator: Return node(s) with given name(s)"""
        # Enables dict-style lookup on the network object itself
        return self.getNodeByName( *args )
236 |
|
237 |
def __iter__( self ): |
238 |
"return iterator over nodes"
|
239 |
return chain( self.hosts, self.switches, self.controllers ) |
240 |
|
241 |
def addLink( self, node1, node2, port1=None, port2=None, |
242 |
cls=None, **params ):
|
243 |
""""Add a link from node1 to node2
|
244 |
node1: source node
|
245 |
node2: dest node
|
246 |
port1: source port
|
247 |
port2: dest port
|
248 |
returns: link object"""
|
249 |
defaults = { 'port1': port1,
|
250 |
'port2': port2,
|
251 |
'intf': self.intf } |
252 |
defaults.update( params ) |
253 |
if not cls: |
254 |
cls = self.link
|
255 |
return cls( node1, node2, **defaults )
|
256 |
|
257 |
    def configHosts( self ):
        "Configure a set of hosts: IP/MAC/default route plus loopback."
        for host in self.hosts:
            info( host.name + ' ' )
            intf = host.defaultIntf()
            if intf:
                host.configDefault( defaultRoute=intf )
            else:
                # Don't configure nonexistent intf
                host.configDefault( ip=None, mac=None )
            # You're low priority, dude!
            # BL: do we want to do this here or not?
            # May not make sense if we have CPU limiting...
            # quietRun( 'renice +18 -p ' + repr( host.pid ) )
            # This may not be the right place to do this, but
            # it needs to be done somewhere.
            # Bring up loopback inside the host's namespace
            host.cmd( 'ifconfig lo up' )
        info( '\n' )
|
275 |
|
276 |
    def buildFromTopo( self, topo=None ):
        """Build mininet from a topology object
           At the end of this function, everything should be connected
           and up."""

        # Possibly we should clean up here and/or validate
        # the topo
        if self.cleanup:
            pass

        info( '*** Creating network\n' )

        if not self.controllers:
            # Add a default controller (or one per class in a list)
            info( '*** Adding controller\n' )
            classes = self.controller
            if type( classes ) is not list:
                classes = [ classes ]
            for i, cls in enumerate( classes ):
                self.addController( 'c%d' % i, cls )

        # Hosts and switches are created from the topo's node info
        info( '*** Adding hosts:\n' )
        for hostName in topo.hosts():
            self.addHost( hostName, **topo.nodeInfo( hostName ) )
            info( hostName + ' ' )

        info( '\n*** Adding switches:\n' )
        for switchName in topo.switches():
            self.addSwitch( switchName, **topo.nodeInfo( switchName) )
            info( switchName + ' ' )

        # Links are added last, once both endpoints exist in nameToNode
        info( '\n*** Adding links:\n' )
        for srcName, dstName in topo.links(sort=True):
            src, dst = self.nameToNode[ srcName ], self.nameToNode[ dstName ]
            params = topo.linkInfo( srcName, dstName )
            srcPort, dstPort = topo.port( srcName, dstName )
            self.addLink( src, dst, srcPort, dstPort, **params )
            info( '(%s, %s) ' % ( src.name, dst.name ) )

        info( '\n' )
|
316 |
|
317 |
    def configureControlNetwork( self ):
        "Control net config hook: override in subclass"
        # Base class has no control network; see MininetWithControlNet
        raise Exception( 'configureControlNetwork: '
                         'should be overriden in subclass', self )
321 |
|
322 |
def build( self ): |
323 |
"Build mininet."
|
324 |
if self.topo: |
325 |
self.buildFromTopo( self.topo ) |
326 |
if ( self.inNamespace ): |
327 |
self.configureControlNetwork()
|
328 |
info( '*** Configuring hosts\n' )
|
329 |
self.configHosts()
|
330 |
if self.xterms: |
331 |
self.startTerms()
|
332 |
if self.autoStaticArp: |
333 |
self.staticArp()
|
334 |
self.built = True |
335 |
|
336 |
def startTerms( self ): |
337 |
"Start a terminal for each node."
|
338 |
info( "*** Running terms on %s\n" % os.environ[ 'DISPLAY' ] ) |
339 |
cleanUpScreens() |
340 |
self.terms += makeTerms( self.controllers, 'controller' ) |
341 |
self.terms += makeTerms( self.switches, 'switch' ) |
342 |
self.terms += makeTerms( self.hosts, 'host' ) |
343 |
|
344 |
    def stopXterms( self ):
        "Kill each xterm."
        for term in self.terms:
            # SIGKILL: terminals hold no state worth a graceful shutdown
            os.kill( term.pid, signal.SIGKILL )
        cleanUpScreens()
349 |
|
350 |
def staticArp( self ): |
351 |
"Add all-pairs ARP entries to remove the need to handle broadcast."
|
352 |
for src in self.hosts: |
353 |
for dst in self.hosts: |
354 |
if src != dst:
|
355 |
src.setARP( ip=dst.IP(), mac=dst.MAC() ) |
356 |
|
357 |
    def start( self ):
        "Start controller and switches."
        # Lazily build if the caller skipped build()
        if not self.built:
            self.build()
        info( '*** Starting controller\n' )
        for controller in self.controllers:
            controller.start()
        info( '*** Starting %s switches\n' % len( self.switches ) )
        for switch in self.switches:
            info( switch.name + ' ')
            # Each switch is told about all controllers
            switch.start( self.controllers )
        info( '\n' )
|
369 |
|
370 |
    def stop( self ):
        "Stop the controller(s), switches and hosts"
        # Terminals first, then hosts, switches, controllers (reverse of start)
        if self.terms:
            info( '*** Stopping %i terms\n' % len( self.terms ) )
            self.stopXterms()
        info( '*** Stopping %i hosts\n' % len( self.hosts ) )
        for host in self.hosts:
            info( host.name + ' ' )
            host.terminate()
        info( '\n' )
        info( '*** Stopping %i switches\n' % len( self.switches ) )
        for switch in self.switches:
            info( switch.name + ' ' )
            switch.stop()
        info( '\n' )
        info( '*** Stopping %i controllers\n' % len( self.controllers ) )
        for controller in self.controllers:
            info( controller.name + ' ' )
            controller.stop()
        info( '\n*** Done\n' )
|
390 |
|
391 |
    def run( self, test, *args, **kwargs ):
        "Perform a complete start/test/stop cycle."
        self.start()
        info( '*** Running test\n' )
        # The test callable receives the caller's args unchanged
        result = test( *args, **kwargs )
        self.stop()
        return result
|
398 |
|
399 |
    def monitor( self, hosts=None, timeoutms=-1 ):
        """Monitor a set of hosts (or all hosts by default),
           and return their output, a line at a time.
           hosts: (optional) set of hosts to monitor
           timeoutms: (optional) timeout value in ms; -1 blocks forever
           returns: iterator which returns host, line"""
        if hosts is None:
            hosts = self.hosts
        poller = select.poll()
        Node = hosts[ 0 ]  # so we can call class method fdToNode
        for host in hosts:
            poller.register( host.stdout )
        # NOTE: infinite generator — the caller decides when to stop
        # consuming it
        while True:
            ready = poller.poll( timeoutms )
            for fd, event in ready:
                host = Node.fdToNode( fd )
                if event & select.POLLIN:
                    line = host.readline()
                    if line is not None:
                        yield host, line
            # Return if non-blocking: a (None, None) pair signals that
            # the poll timed out with no output available
            if not ready and timeoutms >= 0:
                yield None, None
422 |
|
423 |
# XXX These test methods should be moved out of this class.
|
424 |
# Probably we should create a tests.py for them
|
425 |
|
426 |
@staticmethod
|
427 |
def _parsePing( pingOutput ): |
428 |
"Parse ping output and return packets sent, received."
|
429 |
# Check for downed link
|
430 |
if 'connect: Network is unreachable' in pingOutput: |
431 |
return (1, 0) |
432 |
r = r'(\d+) packets transmitted, (\d+) received'
|
433 |
m = re.search( r, pingOutput ) |
434 |
if m is None: |
435 |
error( '*** Error: could not parse ping output: %s\n' %
|
436 |
pingOutput ) |
437 |
return (1, 0) |
438 |
sent, received = int( m.group( 1 ) ), int( m.group( 2 ) ) |
439 |
return sent, received
|
440 |
|
441 |
def ping( self, hosts=None, timeout=None ): |
442 |
"""Ping between all specified hosts.
|
443 |
hosts: list of hosts
|
444 |
timeout: time to wait for a response, as string
|
445 |
returns: ploss packet loss percentage"""
|
446 |
# should we check if running?
|
447 |
packets = 0
|
448 |
lost = 0
|
449 |
ploss = None
|
450 |
if not hosts: |
451 |
hosts = self.hosts
|
452 |
output( '*** Ping: testing ping reachability\n' )
|
453 |
for node in hosts: |
454 |
output( '%s -> ' % node.name )
|
455 |
for dest in hosts: |
456 |
if node != dest:
|
457 |
opts = ''
|
458 |
if timeout:
|
459 |
opts = '-W %s' % timeout
|
460 |
result = node.cmd( 'ping -c1 %s %s' % (opts, dest.IP()) )
|
461 |
sent, received = self._parsePing( result )
|
462 |
packets += sent |
463 |
if received > sent:
|
464 |
error( '*** Error: received too many packets' )
|
465 |
error( '%s' % result )
|
466 |
node.cmdPrint( 'route' )
|
467 |
exit( 1 ) |
468 |
lost += sent - received |
469 |
output( ( '%s ' % dest.name ) if received else 'X ' ) |
470 |
output( '\n' )
|
471 |
ploss = 100 * lost / packets
|
472 |
output( "*** Results: %i%% dropped (%d/%d lost)\n" %
|
473 |
( ploss, lost, packets ) ) |
474 |
return ploss
|
475 |
|
476 |
@staticmethod
|
477 |
def _parsePingFull( pingOutput ): |
478 |
"Parse ping output and return all data."
|
479 |
# Check for downed link
|
480 |
if 'connect: Network is unreachable' in pingOutput: |
481 |
return (1, 0) |
482 |
r = r'(\d+) packets transmitted, (\d+) received'
|
483 |
m = re.search( r, pingOutput ) |
484 |
if m is None: |
485 |
error( '*** Error: could not parse ping output: %s\n' %
|
486 |
pingOutput ) |
487 |
return (1, 0, 0, 0, 0, 0) |
488 |
sent, received = int( m.group( 1 ) ), int( m.group( 2 ) ) |
489 |
r = r'rtt min/avg/max/mdev = '
|
490 |
r += r'(\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+) ms'
|
491 |
m = re.search( r, pingOutput ) |
492 |
rttmin = float( m.group( 1 ) ) |
493 |
rttavg = float( m.group( 2 ) ) |
494 |
rttmax = float( m.group( 3 ) ) |
495 |
rttdev = float( m.group( 4 ) ) |
496 |
return sent, received, rttmin, rttavg, rttmax, rttdev
|
497 |
|
498 |
    def pingFull( self, hosts=None, timeout=None ):
        """Ping between all specified hosts and return all data.
           hosts: list of hosts (default: all hosts)
           timeout: time to wait for a response, as string
           returns: all ping data; see function body."""
        # should we check if running?
        # Each value is a tuple: (src, dsd, [all ping outputs])
        all_outputs = []
        if not hosts:
            hosts = self.hosts
        output( '*** Ping: testing ping reachability\n' )
        for node in hosts:
            output( '%s -> ' % node.name )
            for dest in hosts:
                if node != dest:
                    opts = ''
                    if timeout:
                        opts = '-W %s' % timeout
                    result = node.cmd( 'ping -c1 %s %s' % (opts, dest.IP()) )
                    outputs = self._parsePingFull( result )
                    sent, received, rttmin, rttavg, rttmax, rttdev = outputs
                    all_outputs.append( (node, dest, outputs) )
                    output( ( '%s ' % dest.name ) if received else 'X ' )
            output( '\n' )
        output( "*** Results: \n" )
        for outputs in all_outputs:
            src, dest, ping_outputs = outputs
            sent, received, rttmin, rttavg, rttmax, rttdev = ping_outputs
            # %s on the nodes prints their repr
            output( " %s->%s: %s/%s, " % (src, dest, sent, received ) )
            output( "rtt min/avg/max/mdev %0.3f/%0.3f/%0.3f/%0.3f ms\n" %
                    (rttmin, rttavg, rttmax, rttdev) )
        return all_outputs
|
530 |
|
531 |
    def pingAll( self ):
        """Ping between all hosts.
           returns: ploss packet loss percentage"""
        # Delegates to ping() with its default (all hosts)
        return self.ping()
535 |
|
536 |
def pingPair( self ): |
537 |
"""Ping between first two hosts, useful for testing.
|
538 |
returns: ploss packet loss percentage"""
|
539 |
hosts = [ self.hosts[ 0 ], self.hosts[ 1 ] ] |
540 |
return self.ping( hosts=hosts ) |
541 |
|
542 |
    def pingAllFull( self ):
        """Ping between all hosts.
           returns: ploss packet loss percentage"""
        # Delegates to pingFull() with its default (all hosts)
        return self.pingFull()
546 |
|
547 |
def pingPairFull( self ): |
548 |
"""Ping between first two hosts, useful for testing.
|
549 |
returns: ploss packet loss percentage"""
|
550 |
hosts = [ self.hosts[ 0 ], self.hosts[ 1 ] ] |
551 |
return self.pingFull( hosts=hosts ) |
552 |
|
553 |
@staticmethod
|
554 |
def _parseIperf( iperfOutput ): |
555 |
"""Parse iperf output and return bandwidth.
|
556 |
iperfOutput: string
|
557 |
returns: result string"""
|
558 |
r = r'([\d\.]+ \w+/sec)'
|
559 |
m = re.findall( r, iperfOutput ) |
560 |
if m:
|
561 |
return m[-1] |
562 |
else:
|
563 |
# was: raise Exception(...)
|
564 |
error( 'could not parse iperf output: ' + iperfOutput )
|
565 |
return '' |
566 |
|
567 |
# XXX This should be cleaned up
|
568 |
|
569 |
    def iperf( self, hosts=None, l4Type='TCP', udpBw='10M' ):
        """Run iperf between two hosts.
           hosts: list of hosts; if None, uses opposite hosts
           l4Type: string, one of [ TCP, UDP ]
           udpBw: requested UDP bandwidth (used only when l4Type='UDP')
           returns: results two-element array of server and client speeds"""
        # telnet is used below to probe that the iperf server is accepting
        # TCP connections before starting the client
        if not quietRun( 'which telnet' ):
            error( 'Cannot find telnet in $PATH - required for iperf test' )
            return
        if not hosts:
            hosts = [ self.hosts[ 0 ], self.hosts[ -1 ] ]
        else:
            assert len( hosts ) == 2
        client, server = hosts
        output( '*** Iperf: testing ' + l4Type + ' bandwidth between ' )
        output( "%s and %s\n" % ( client.name, server.name ) )
        # Kill any iperf left over from a previous run
        server.cmd( 'killall -9 iperf' )
        iperfArgs = 'iperf '
        bwArgs = ''
        if l4Type == 'UDP':
            iperfArgs += '-u '
            bwArgs = '-b ' + udpBw + ' '
        elif l4Type != 'TCP':
            raise Exception( 'Unexpected l4 type: %s' % l4Type )
        # Start the server in the background and wait for its pid
        server.sendCmd( iperfArgs + '-s', printPid=True )
        servout = ''
        while server.lastPid is None:
            servout += server.monitor()
        if l4Type == 'TCP':
            # Busy-wait until the server's port 5001 accepts connections
            while 'Connected' not in client.cmd(
                    'sh -c "echo A | telnet -e A %s 5001"' % server.IP()):
                output('waiting for iperf to start up...')
                sleep(.5)
        cliout = client.cmd( iperfArgs + '-t 5 -c ' + server.IP() + ' ' +
                             bwArgs )
        debug( 'Client output: %s\n' % cliout )
        # Interrupt the server and collect its remaining output
        server.sendInt()
        servout += server.waitOutput()
        debug( 'Server output: %s\n' % servout )
        result = [ self._parseIperf( servout ), self._parseIperf( cliout ) ]
        if l4Type == 'UDP':
            result.insert( 0, udpBw )
        output( '*** Results: %s\n' % result )
        return result
|
612 |
|
613 |
    def runCpuLimitTest( self, cpu, duration=5 ):
        """run CPU limit test with 'while true' processes.
           cpu: desired CPU fraction of each host
           duration: test duration in seconds
           returns a single list of measured CPU fractions as floats.
        """
        pct = cpu * 100
        info('*** Testing CPU %.0f%% bandwidth limit\n' % pct)
        hosts = self.hosts
        # Start one CPU-bound busy loop per host
        for h in hosts:
            h.cmd( 'while true; do a=1; done &' )
        pids = [h.cmd( 'echo $!' ).strip() for h in hosts]
        pids_str = ",".join(["%s" % pid for pid in pids])
        cmd = 'ps -p %s -o pid,%%cpu,args' % pids_str
        # It's a shame that this is what pylint prefers
        # Sample ps once per second for the duration of the test
        outputs = []
        for _ in range( duration ):
            sleep( 1 )
            outputs.append( quietRun( cmd ).strip() )
        # Kill the background busy loops (job %1 in each host's shell)
        for h in hosts:
            h.cmd( 'kill %1' )
        cpu_fractions = []
        for test_output in outputs:
            # Split by line.  Ignore first line, which looks like this:
            # PID %CPU COMMAND\n
            for line in test_output.split('\n')[1:]:
                r = r'\d+\s*(\d+\.\d+)'
                m = re.search( r, line )
                if m is None:
                    # NOTE(review): returning here skips the cleanup above
                    # only if parsing fails before the kill loop ran; the
                    # kill has already happened at this point, but any
                    # partially-built cpu_fractions list is discarded
                    error( '*** Error: could not extract CPU fraction: %s\n' %
                           line )
                    return None
                cpu_fractions.append( float( m.group( 1 ) ) )
        output( '*** Results: %s\n' % cpu_fractions )
        return cpu_fractions
|
648 |
|
649 |
# BL: I think this can be rewritten now that we have
|
650 |
# a real link class.
|
651 |
    def configLinkStatus( self, src, dst, status ):
        """Change status of src <-> dst links.
           src: node name
           dst: node name
           status: string {up, down}"""
        # NOTE(review): the membership tests below compare against name
        # keys, so src/dst must be passed as name strings — a Node object
        # will be reported as 'not in network' before the str conversion
        # branches can run
        if src not in self.nameToNode:
            error( 'src not in network: %s\n' % src )
        elif dst not in self.nameToNode:
            error( 'dst not in network: %s\n' % dst )
        else:
            if type( src ) is str:
                src = self.nameToNode[ src ]
            if type( dst ) is str:
                dst = self.nameToNode[ dst ]
            connections = src.connectionsTo( dst )
            if len( connections ) == 0:
                error( 'src and dst not connected: %s %s\n' % ( src, dst) )
            # Bring both ends of every connecting link up or down
            for srcIntf, dstIntf in connections:
                result = srcIntf.ifconfig( status )
                if result:
                    error( 'link src status change failed: %s\n' % result )
                result = dstIntf.ifconfig( status )
                if result:
                    error( 'link dst status change failed: %s\n' % result )
|
675 |
|
676 |
    def interact( self ):
        "Start network and run our simple CLI."
        self.start()
        # CLI blocks until the user exits; the network is then torn down
        result = CLI( self )
        self.stop()
        return result
|
682 |
|
683 |
    # Class-level flag: has one-time initialization run?
    inited = False

    @classmethod
    def init( cls ):
        "Initialize Mininet"
        # Idempotent: only the first call does any work
        if cls.inited:
            return
        ensureRoot()
        fixLimits()
        cls.inited = True
|
693 |
|
694 |
|
695 |
class MininetWithControlNet( Mininet ):

    """Control network support:

       Create an explicit control network. Currently this is only
       used/usable with the user datapath.

       Notes:

       1. If the controller and switches are in the same (e.g. root)
          namespace, they can just use the loopback connection.

       2. If we can get unix domain sockets to work, we can use them
          instead of an explicit control network.

       3. Instead of routing, we could bridge or use 'in-band' control.

       4. Even if we dispense with this in general, it could still be
          useful for people who wish to simulate a separate control
          network (since real networks may need one!)

       5. Basically nobody ever used this code, so it has been moved
          into its own class.

       6. Ultimately we may wish to extend this to allow us to create a
          control network which every node's control interface is
          attached to."""

    def configureControlNetwork( self ):
        "Configure control network."
        self.configureRoutedControlNetwork()

    # We still need to figure out the right way to pass
    # in the control network location.

    def configureRoutedControlNetwork( self, ip='192.168.123.1',
                                       prefixLen=16 ):
        """Configure a routed control network on controller and switches.
           For use with the user datapath only right now.
           ip: controller-side IP address
           prefixLen: prefix length for both sides of each control link"""
        controller = self.controllers[ 0 ]
        info( controller.name + ' <->' )
        cip = ip
        snum = ipParse( ip )
        for switch in self.switches:
            info( ' ' + switch.name )
            # Port 0 on the switch is reserved for the control interface
            link = self.link( switch, controller, port1=0 )
            sintf, cintf = link.intf1, link.intf2
            switch.controlIntf = sintf
            snum += 1
            # Skip network (.0) and broadcast (.255) addresses
            while snum & 0xff in [ 0, 255 ]:
                snum += 1
            sip = ipStr( snum )
            cintf.setIP( cip, prefixLen )
            sintf.setIP( sip, prefixLen )
            # Host routes in both directions, since this net is routed
            controller.setHostRoute( sip, cintf )
            switch.setHostRoute( cip, sintf )
        info( '\n' )
        info( '*** Testing control network\n' )
        while not cintf.isUp():
            info( '*** Waiting for', cintf, 'to come up\n' )
            sleep( 1 )
        # NOTE(review): sintf below is the interface from the LAST
        # iteration of the loop above, not the current switch's — each
        # switch's own control interface is not re-checked here; confirm
        # whether that is intentional
        for switch in self.switches:
            while not sintf.isUp():
                info( '*** Waiting for', sintf, 'to come up\n' )
                sleep( 1 )
            if self.ping( hosts=[ switch, controller ] ) != 0:
                error( '*** Error: control network test failed\n' )
                exit( 1 )
        info( '\n' )