Revision d5886525
mininet/net.py

```diff
--- a/mininet/net.py
+++ b/mininet/net.py
@@ -55,7 +55,7 @@
 from time import sleep
 
 from mininet.cli import CLI
-from mininet.log import lg
+from mininet.log import info, error
 from mininet.node import KernelSwitch, OVSKernelSwitch
 from mininet.util import quietRun, fixLimits
 from mininet.util import makeIntfPair, moveIntf
```
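The rest of the revision is a mechanical follow-up to this import change: call sites stop going through the shared `lg` logger object and use the module-level helpers instead. A minimal sketch of the two styles, assuming a Mininet installation so that `mininet.log` is importable:

```python
# Before this revision: logging goes through the shared logger object.
from mininet.log import lg
lg.info( '*** Starting controller\n' )

# After this revision: the module-level helpers are imported directly,
# which keeps call sites shorter and drops the lg. prefix.
from mininet.log import info, error
info( '*** Starting controller\n' )
error( '*** Error: control network test failed\n' )
```

Whether anything is actually printed still depends on the configured log level; only the call syntax changes.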
```diff
@@ -121,7 +121,7 @@
         # for now, assume one interface per host.
         host.intfs.append( 'h_' + self.topo.name( dpid ) + '-eth0' )
         self.nodes[ dpid ] = host
-        #lg.info( '%s ' % host.name )
+        #info( '%s ' % host.name )
 
     def _addSwitch( self, dpid ):
         """Add switch.
@@ -152,14 +152,14 @@
         dstNode.intfs.append( dstIntf )
         srcNode.ports[ srcPort ] = srcIntf
         dstNode.ports[ dstPort ] = dstIntf
-        #lg.info( '\n' )
-        #lg.info( 'added intf %s to src node %x\n' % ( srcIntf, src ) )
-        #lg.info( 'added intf %s to dst node %x\n' % ( dstIntf, dst ) )
+        #info( '\n' )
+        #info( 'added intf %s to src node %x\n' % ( srcIntf, src ) )
+        #info( 'added intf %s to dst node %x\n' % ( dstIntf, dst ) )
         if srcNode.inNamespace:
-            #lg.info( 'moving src w/inNamespace set\n' )
+            #info( 'moving src w/inNamespace set\n' )
             moveIntf( srcIntf, srcNode )
         if dstNode.inNamespace:
-            #lg.info( 'moving dst w/inNamespace set\n' )
+            #info( 'moving dst w/inNamespace set\n' )
             moveIntf( dstIntf, dstNode )
         srcNode.connection[ srcIntf ] = ( dstNode, dstIntf )
         dstNode.connection[ dstIntf ] = ( srcNode, srcIntf )
@@ -201,19 +201,18 @@
         For use with the user datapath only right now.
         TODO( brandonh ) test this code!
         """
-
         # params were: controller, switches, ips
 
         controller = self.controllers[ 'c0' ]
-        lg.info( '%s <-> ' % controller.name )
+        info( '%s <-> ' % controller.name )
         for switchDpid in self.topo.switches():
             switch = self.nodes[ switchDpid ]
-            lg.info( '%s ' % switch.name )
+            info( '%s ' % switch.name )
             sip = self.topo.ip( switchDpid )#ips.next()
             sintf = switch.intfs[ 0 ]
             node, cintf = switch.connection[ sintf ]
             if node != controller:
-                lg.error( '*** Error: switch %s not connected to correct'
+                error( '*** Error: switch %s not connected to correct'
                        'controller' %
                        switch.name )
                 exit( 1 )
@@ -222,22 +221,22 @@
             switch.setIP( sintf, sip, '/' + self.cparams.subnetSize )
             controller.setHostRoute( sip, cintf )
             switch.setHostRoute( self.cparams.ip, sintf )
-        lg.info( '\n' )
-        lg.info( '*** Testing control network\n' )
+        info( '\n' )
+        info( '*** Testing control network\n' )
         while not controller.intfIsUp( controller.intfs[ 0 ] ):
-            lg.info( '*** Waiting for %s to come up\n',
+            info( '*** Waiting for %s to come up\n',
                      controller.intfs[ 0 ] )
             sleep( 1 )
         for switchDpid in self.topo.switches():
             switch = self.nodes[ switchDpid ]
             while not switch.intfIsUp( switch.intfs[ 0 ] ):
-                lg.info( '*** Waiting for %s to come up\n' %
+                info( '*** Waiting for %s to come up\n' %
                          switch.intfs[ 0 ] )
                 sleep( 1 )
             if self.ping( hosts=[ switch, controller ] ) != 0:
-                lg.error( '*** Error: control network test failed\n' )
+                error( '*** Error: control network test failed\n' )
                 exit( 1 )
-        lg.info( '\n' )
+        info( '\n' )
 
     def _configHosts( self ):
         "Configure a set of hosts."
```
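The control-network setup above uses a simple wait-and-verify pattern: poll an interface until it reports up, then ping across the link. A hedged, standalone sketch of that polling step, assuming a Mininet install and a node object exposing `intfIsUp()` as the controller and switches above do; the `waitForIntf` helper and its `retries` bound are illustrative additions, since the code above simply loops until the interface comes up:

```python
from time import sleep

from mininet.log import info


def waitForIntf( node, intf, retries=10 ):
    "Poll until intf on node reports up; give up after retries attempts."
    for _ in range( retries ):
        if node.intfIsUp( intf ):
            return True
        info( '*** Waiting for %s to come up\n' % intf )
        sleep( 1 )
    return False
```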
```diff
@@ -250,8 +249,8 @@
             host.setDefaultRoute( hintf )
             # You're low priority, dude!
             quietRun( 'renice +18 -p ' + repr( host.pid ) )
-            lg.info( '%s ', host.name )
-        lg.info( '\n' )
+            info( '%s ', host.name )
+        info( '\n' )
 
     def build( self ):
         """Build mininet.
@@ -260,28 +259,28 @@
         if self.cleanup:
             pass # cleanup
         # validate topo?
-        lg.info( '*** Adding controller\n' )
+        info( '*** Adding controller\n' )
         self._addController( self.controller )
-        lg.info( '*** Creating network\n' )
-        lg.info( '*** Adding hosts:\n' )
+        info( '*** Creating network\n' )
+        info( '*** Adding hosts:\n' )
         for host in sorted( self.topo.hosts() ):
             self._addHost( host )
-            lg.info( '0x%x ' % host )
-        lg.info( '\n*** Adding switches:\n' )
+            info( '0x%x ' % host )
+        info( '\n*** Adding switches:\n' )
         for switch in sorted( self.topo.switches() ):
             self._addSwitch( switch )
-            lg.info( '0x%x ' % switch )
-        lg.info( '\n*** Adding edges:\n' )
+            info( '0x%x ' % switch )
+        info( '\n*** Adding edges:\n' )
         for src, dst in sorted( self.topo.edges() ):
             self._addLink( src, dst )
-            lg.info( '(0x%x, 0x%x) ' % ( src, dst ) )
-        lg.info( '\n' )
+            info( '(0x%x, 0x%x) ' % ( src, dst ) )
+        info( '\n' )
 
         if self.inNamespace:
-            lg.info( '*** Configuring control network\n' )
+            info( '*** Configuring control network\n' )
             self._configureControlNetwork()
 
-        lg.info( '*** Configuring hosts\n' )
+        info( '*** Configuring hosts\n' )
         self._configHosts()
 
         if self.xterms:
@@ -301,7 +300,7 @@
 
     def startXterms( self ):
         "Start an xterm for each node in the topo."
-        lg.info( "*** Running xterms on %s\n" % os.environ[ 'DISPLAY' ] )
+        info( "*** Running xterms on %s\n" % os.environ[ 'DISPLAY' ] )
         cleanUpScreens()
         self.terms += makeXterms( self.controllers.values(), 'controller' )
         self.terms += makeXterms( self.switchNodes(), 'switch' )
```
```diff
@@ -330,44 +329,44 @@
                     srcNode.setARP( dst, dst )
 
     def start( self ):
-        "Start controller and switches\n"
-        lg.info( '*** Starting controller\n' )
+        "Start controller and switches"
+        info( '*** Starting controller\n' )
         for cnode in self.controllers.values():
             cnode.start()
-        lg.info( '*** Starting %s switches\n' % len( self.topo.switches() ) )
+        info( '*** Starting %s switches\n' % len( self.topo.switches() ) )
         for switchDpid in self.topo.switches():
             switch = self.nodes[ switchDpid ]
-            #lg.info( 'switch = %s' % switch )
-            lg.info( '0x%x ' % switchDpid )
+            #info( 'switch = %s' % switch )
+            info( '0x%x ' % switchDpid )
             switch.start( self.controllers )
-        lg.info( '\n' )
+        info( '\n' )
 
     def stop( self ):
-        "Stop the controller(s), switches and hosts\n"
+        "Stop the controller(s), switches and hosts"
         if self.terms:
-            lg.info( '*** Stopping %i terms\n' % len( self.terms ) )
+            info( '*** Stopping %i terms\n' % len( self.terms ) )
             self.stopXterms()
-        lg.info( '*** Stopping %i hosts\n' % len( self.topo.hosts() ) )
+        info( '*** Stopping %i hosts\n' % len( self.topo.hosts() ) )
         for hostDpid in self.topo.hosts():
             host = self.nodes[ hostDpid ]
-            lg.info( '%s ' % host.name )
+            info( '%s ' % host.name )
             host.terminate()
-        lg.info( '\n' )
-        lg.info( '*** Stopping %i switches\n' % len( self.topo.switches() ) )
+        info( '\n' )
+        info( '*** Stopping %i switches\n' % len( self.topo.switches() ) )
         for switchDpid in self.topo.switches():
             switch = self.nodes[ switchDpid ]
-            lg.info( '%s' % switch.name )
+            info( '%s' % switch.name )
             switch.stop()
-        lg.info( '\n' )
-        lg.info( '*** Stopping controller\n' )
+        info( '\n' )
+        info( '*** Stopping controller\n' )
         for cnode in self.controllers.values():
             cnode.stop()
-        lg.info( '*** Test complete\n' )
+        info( '*** Test complete\n' )
 
     def run( self, test, **params ):
         "Perform a complete start/test/stop cycle."
         self.start()
-        lg.info( '*** Running test\n' )
+        info( '*** Running test\n' )
         result = getattr( self, test )( **params )
         self.stop()
         return result
```
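`run()` above performs a start/test/stop cycle and selects the test by name with `getattr`, so any method of the class can be invoked from a string. A self-contained toy illustration of that dispatch pattern (the `Demo` class and its `ping` method are hypothetical, for demonstration only):

```python
class Demo:
    "Toy stand-in to show the getattr-based dispatch used by run() above."

    def ping( self, count=1 ):
        return 'pinged %d time(s)' % count

    def run( self, test, **params ):
        "Look up the method named by test and call it with params."
        return getattr( self, test )( **params )

print( Demo().run( 'ping', count=3 ) )  # -> pinged 3 time(s)
```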
```diff
@@ -378,7 +377,7 @@
         r = r'(\d+) packets transmitted, (\d+) received'
         m = re.search( r, pingOutput )
         if m == None:
-            lg.error( '*** Error: could not parse ping output: %s\n' %
+            error( '*** Error: could not parse ping output: %s\n' %
                    pingOutput )
             exit( 1 )
         sent, received = int( m.group( 1 ) ), int( m.group( 2 ) )
```
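The regular expression above extracts the transmitted and received counts from ping's summary line. A self-contained example of the same parsing step, with a made-up sample of ping output:

```python
import re

# Sample summary line of the kind ping prints; the wording is what the
# regex keys on.
pingOutput = '4 packets transmitted, 4 received, 0% packet loss, time 3004ms'

m = re.search( r'(\d+) packets transmitted, (\d+) received', pingOutput )
if m is None:
    raise ValueError( 'could not parse ping output: %s' % pingOutput )
sent, received = int( m.group( 1 ) ), int( m.group( 2 ) )
print( sent, received )  # -> 4 4
```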
```diff
@@ -395,10 +394,10 @@
         ploss = None
         if not hosts:
             hosts = self.topo.hosts()
-        lg.info( '*** Ping: testing ping reachability\n' )
+        info( '*** Ping: testing ping reachability\n' )
         for nodeDpid in hosts:
             node = self.nodes[ nodeDpid ]
-            lg.info( '%s -> ' % node.name )
+            info( '%s -> ' % node.name )
             for destDpid in hosts:
                 dest = self.nodes[ destDpid ]
                 if node != dest:
@@ -406,15 +405,15 @@
                     sent, received = self._parsePing( result )
                     packets += sent
                     if received > sent:
-                        lg.error( '*** Error: received too many packets' )
-                        lg.error( '%s' % result )
+                        error( '*** Error: received too many packets' )
+                        error( '%s' % result )
                         node.cmdPrint( 'route' )
                         exit( 1 )
                     lost += sent - received
-                    lg.info( ( '%s ' % dest.name ) if received else 'X ' )
-            lg.info( '\n' )
+                    info( ( '%s ' % dest.name ) if received else 'X ' )
+            info( '\n' )
         ploss = 100 * lost / packets
-        lg.info( "*** Results: %i%% dropped (%d/%d lost)\n" %
+        info( "*** Results: %i%% dropped (%d/%d lost)\n" %
                ( ploss, lost, packets ) )
         return ploss
 
@@ -456,8 +455,8 @@
         assert len( hosts ) == 2
         host0 = self.nodes[ hosts[ 0 ] ]
         host1 = self.nodes[ hosts[ 1 ] ]
-        lg.info( '*** Iperf: testing ' + l4Type + ' bandwidth between ' )
-        lg.info( "%s and %s\n" % ( host0.name, host1.name ) )
+        info( '*** Iperf: testing ' + l4Type + ' bandwidth between ' )
+        info( "%s and %s\n" % ( host0.name, host1.name ) )
         host0.cmd( 'killall -9 iperf' )
         iperfArgs = 'iperf '
         bwArgs = ''
@@ -468,18 +467,18 @@
             raise Exception( 'Unexpected l4 type: %s' % l4Type )
         server = host0.cmd( iperfArgs + '-s &' )
         if verbose:
-            lg.info( '%s\n' % server )
+            info( '%s\n' % server )
         client = host1.cmd( iperfArgs + '-t 5 -c ' + host0.IP() + ' ' +
                             bwArgs )
         if verbose:
-            lg.info( '%s\n' % client )
+            info( '%s\n' % client )
         server = host0.cmd( 'killall -9 iperf' )
         if verbose:
-            lg.info( '%s\n' % server )
+            info( '%s\n' % server )
         result = [ self._parseIperf( server ), self._parseIperf( client ) ]
         if l4Type == 'UDP':
             result.insert( 0, udpBw )
-        lg.info( '*** Results: %s\n' % result )
+        info( '*** Results: %s\n' % result )
         return result
 
     def iperfUdp( self, udpBw='10M' ):
```