mininet / mininet / net.py @ eaf5888a
History | View | Annotate | Download (21.2 KB)
1 |
"""
|
---|---|
2 |
|
3 |
Mininet: A simple networking testbed for OpenFlow!
|
4 |
|
5 |
author: Bob Lantz (rlantz@cs.stanford.edu)
|
6 |
author: Brandon Heller (brandonh@stanford.edu)
|
7 |
|
8 |
Mininet creates scalable OpenFlow test networks by using
|
9 |
process-based virtualization and network namespaces.
|
10 |
|
11 |
Simulated hosts are created as processes in separate network
|
12 |
namespaces. This allows a complete OpenFlow network to be simulated on
|
13 |
top of a single Linux kernel.
|
14 |
|
15 |
Each host has:
|
16 |
|
17 |
A virtual console (pipes to a shell)
|
18 |
A virtual interface (half of a veth pair)
|
19 |
A parent shell (and possibly some child processes) in a namespace
|
20 |
|
21 |
Hosts have a network interface which is configured via ifconfig/ip
|
22 |
link/etc.
|
23 |
|
24 |
This version supports both the kernel and user space datapaths
|
25 |
from the OpenFlow reference implementation (openflowswitch.org)
|
26 |
as well as OpenVSwitch (openvswitch.org.)
|
27 |
|
28 |
In kernel datapath mode, the controller and switches are simply
|
29 |
processes in the root namespace.
|
30 |
|
31 |
Kernel OpenFlow datapaths are instantiated using dpctl(8), and are
|
32 |
attached to the one side of a veth pair; the other side resides in the
|
33 |
host namespace. In this mode, switch processes can simply connect to the
|
34 |
controller via the loopback interface.
|
35 |
|
36 |
In user datapath mode, the controller and switches can be full-service
|
37 |
nodes that live in their own network namespaces and have management
|
38 |
interfaces and IP addresses on a control network (e.g. 192.168.123.1,
|
39 |
currently routed although it could be bridged.)
|
40 |
|
41 |
In addition to a management interface, user mode switches also have
|
42 |
several switch interfaces, halves of veth pairs whose other halves
|
43 |
reside in the host nodes that the switches are connected to.
|
44 |
|
45 |
Consistent, straightforward naming is important in order to easily
|
46 |
identify hosts, switches and controllers, both from the CLI and
|
47 |
from program code. Interfaces are named to make it easy to identify
|
48 |
which interfaces belong to which node.
|
49 |
|
50 |
The basic naming scheme is as follows:
|
51 |
|
52 |
Host nodes are named h1-hN
|
53 |
Switch nodes are named s1-sN
|
54 |
Controller nodes are named c0-cN
|
55 |
Interfaces are named {nodename}-eth0 .. {nodename}-ethN
|
56 |
|
57 |
Note: If the network topology is created using mininet.topo, then
|
58 |
node numbers are unique among hosts and switches (e.g. we have
|
59 |
h1..hN and sN..sN+M) and also correspond to their default IP addresses
|
60 |
of 10.x.y.z/8 where x.y.z is the base-256 representation of N for
|
61 |
hN. This mapping allows easy determination of a node's IP
|
62 |
address from its name, e.g. h1 -> 10.0.0.1, h257 -> 10.0.1.1.
|
63 |
|
64 |
Note also that 10.0.0.1 can often be written as 10.1 for short, e.g.
|
65 |
"ping 10.1" is equivalent to "ping 10.0.0.1".
|
66 |
|
67 |
Currently we wrap the entire network in a 'mininet' object, which
|
68 |
constructs a simulated network based on a network topology created
|
69 |
using a topology object (e.g. LinearTopo) from mininet.topo or
|
70 |
mininet.topolib, and a Controller which the switches will connect
|
71 |
to. Several configuration options are provided for functions such as
|
72 |
automatically setting MAC addresses, populating the ARP table, or
|
73 |
even running a set of terminals to allow direct interaction with nodes.
|
74 |
|
75 |
After the network is created, it can be started using start(), and a
|
76 |
variety of useful tasks may be performed, including basic connectivity
|
77 |
and bandwidth tests and running the mininet CLI.
|
78 |
|
79 |
Once the network is up and running, test code can easily get access
|
80 |
to host and switch objects which can then be used for arbitrary
|
81 |
experiments, typically involving running a series of commands on the
|
82 |
hosts.
|
83 |
|
84 |
After all desired tests or activities have been completed, the stop()
|
85 |
method may be called to shut down the network.
|
86 |
|
87 |
"""
|
88 |
|
89 |
import os |
90 |
import re |
91 |
import select |
92 |
import signal |
93 |
from time import sleep |
94 |
|
95 |
from mininet.cli import CLI |
96 |
from mininet.log import info, error, debug, output |
97 |
from mininet.node import Host, UserSwitch, OVSKernelSwitch, Controller |
98 |
from mininet.node import ControllerParams |
99 |
from mininet.util import quietRun, fixLimits |
100 |
from mininet.util import createLink, macColonHex, ipStr, ipParse |
101 |
from mininet.term import cleanUpScreens, makeTerms |
102 |
|
103 |
class Mininet( object ): |
104 |
"Network emulation with hosts spawned in network namespaces."
|
105 |
|
106 |
    def __init__( self, topo=None, switch=OVSKernelSwitch, host=Host,
                 controller=Controller,
                 cparams=ControllerParams( '10.0.0.0', 8 ),
                 build=True, xterms=False, cleanup=False,
                 inNamespace=False,
                 autoSetMacs=False, autoStaticArp=False, listenPort=None ):
        """Create Mininet object.
           topo: Topo (topology) object or None
           switch: Switch class
           host: Host class
           controller: Controller class
           cparams: ControllerParams object
           build: build now from topo?
           xterms: if build now, spawn xterms?
           cleanup: if build now, cleanup before creating?
           inNamespace: spawn switches and controller in net namespaces?
           autoSetMacs: set MAC addrs from topo?
           autoStaticArp: set all-pairs static MAC addrs?
           listenPort: base listening port to open; will be incremented for
           each additional switch in the net if inNamespace=False"""
        # NOTE(review): the cparams default is a shared mutable default
        # argument (one ControllerParams instance for every Mininet that
        # doesn't pass its own). Only prefixLen is read here (configHosts),
        # but confirm nothing mutates it before relying on that.
        self.switch = switch
        self.host = host
        self.controller = controller
        self.cparams = cparams
        self.topo = topo
        self.inNamespace = inNamespace
        self.xterms = xterms
        self.cleanup = cleanup
        self.autoSetMacs = autoSetMacs
        self.autoStaticArp = autoStaticArp
        self.listenPort = listenPort

        self.hosts = []
        self.switches = []
        self.controllers = []
        self.nameToNode = {}  # name to Node (Host/Switch) objects
        self.idToNode = {}  # dpid to Node (Host/Switch) objects
        self.dps = 0  # number of created kernel datapaths
        self.terms = []  # list of spawned xterm processes

        # Check root/mnexec/resource limits, then let the switch class
        # do its one-time setup
        init()
        switch.setup()

        self.built = False
        if topo and build:
            self.build()
|
152 |
|
153 |
def addHost( self, name, mac=None, ip=None ): |
154 |
"""Add host.
|
155 |
name: name of host to add
|
156 |
mac: default MAC address for intf 0
|
157 |
ip: default IP address for intf 0
|
158 |
returns: added host"""
|
159 |
host = self.host( name, defaultMAC=mac, defaultIP=ip )
|
160 |
self.hosts.append( host )
|
161 |
self.nameToNode[ name ] = host
|
162 |
return host
|
163 |
|
164 |
    def addSwitch( self, name, mac=None, ip=None ):
        """Add switch.
           name: name of switch to add
           mac: default MAC address for kernel/OVS switch intf 0
           ip: default IP address for kernel/OVS switch intf 0
           returns: added switch
           side effects: increments the listenPort member variable
           (when out of namespace) and the dps datapath counter."""
        # The user-space switch takes no datapath (dp) argument;
        # kernel/OVS switches are numbered by self.dps
        if self.switch == UserSwitch:
            sw = self.switch( name, listenPort=self.listenPort,
                defaultMAC=mac, defaultIP=ip, inNamespace=self.inNamespace )
        else:
            sw = self.switch( name, listenPort=self.listenPort,
                defaultMAC=mac, defaultIP=ip, dp=self.dps,
                inNamespace=self.inNamespace )
        # Each out-of-namespace switch gets its own listening port
        if not self.inNamespace and self.listenPort:
            self.listenPort += 1
        self.dps += 1
        self.switches.append( sw )
        self.nameToNode[ name ] = sw
        return sw
|
183 |
|
184 |
def addController( self, name='c0', Controller=None, **kwargs ): |
185 |
"""Add controller.
|
186 |
Controller: Controller class"""
|
187 |
if not Controller: |
188 |
Controller = self.controller
|
189 |
controller_new = Controller( name, **kwargs ) |
190 |
if controller_new: # allow controller-less setups |
191 |
self.controllers.append( controller_new )
|
192 |
self.nameToNode[ name ] = controller_new
|
193 |
return controller_new
|
194 |
|
195 |
# Control network support:
|
196 |
#
|
197 |
# Create an explicit control network. Currently this is only
|
198 |
# used by the user datapath configuration.
|
199 |
#
|
200 |
# Notes:
|
201 |
#
|
202 |
# 1. If the controller and switches are in the same (e.g. root)
|
203 |
# namespace, they can just use the loopback connection.
|
204 |
#
|
205 |
# 2. If we can get unix domain sockets to work, we can use them
|
206 |
# instead of an explicit control network.
|
207 |
#
|
208 |
# 3. Instead of routing, we could bridge or use 'in-band' control.
|
209 |
#
|
210 |
# 4. Even if we dispense with this in general, it could still be
|
211 |
# useful for people who wish to simulate a separate control
|
212 |
# network (since real networks may need one!)
|
213 |
|
214 |
    def configureControlNetwork( self ):
        "Configure control network."
        # Only a routed control network is implemented at the moment;
        # see the notes above for possible alternatives (loopback,
        # unix sockets, bridging, in-band control).
        self.configureRoutedControlNetwork()
|
217 |
|
218 |
# We still need to figure out the right way to pass
|
219 |
# in the control network location.
|
220 |
|
221 |
def configureRoutedControlNetwork( self, ip='192.168.123.1', |
222 |
prefixLen=16 ):
|
223 |
"""Configure a routed control network on controller and switches.
|
224 |
For use with the user datapath only right now.
|
225 |
"""
|
226 |
controller = self.controllers[ 0 ] |
227 |
info( controller.name + ' <->' )
|
228 |
cip = ip |
229 |
snum = ipParse( ip ) |
230 |
for switch in self.switches: |
231 |
info( ' ' + switch.name )
|
232 |
sintf, cintf = createLink( switch, controller ) |
233 |
snum += 1
|
234 |
while snum & 0xff in [ 0, 255 ]: |
235 |
snum += 1
|
236 |
sip = ipStr( snum ) |
237 |
controller.setIP( cintf, cip, prefixLen ) |
238 |
switch.setIP( sintf, sip, prefixLen ) |
239 |
controller.setHostRoute( sip, cintf ) |
240 |
switch.setHostRoute( cip, sintf ) |
241 |
info( '\n' )
|
242 |
info( '*** Testing control network\n' )
|
243 |
while not controller.intfIsUp( cintf ): |
244 |
info( '*** Waiting for', cintf, 'to come up\n' ) |
245 |
sleep( 1 )
|
246 |
for switch in self.switches: |
247 |
while not switch.intfIsUp( sintf ): |
248 |
info( '*** Waiting for', sintf, 'to come up\n' ) |
249 |
sleep( 1 )
|
250 |
if self.ping( hosts=[ switch, controller ] ) != 0: |
251 |
error( '*** Error: control network test failed\n' )
|
252 |
exit( 1 ) |
253 |
info( '\n' )
|
254 |
|
255 |
def configHosts( self ): |
256 |
"Configure a set of hosts."
|
257 |
# params were: hosts, ips
|
258 |
for host in self.hosts: |
259 |
hintf = host.intfs[ 0 ]
|
260 |
host.setIP( hintf, host.defaultIP, self.cparams.prefixLen )
|
261 |
host.setDefaultRoute( hintf ) |
262 |
# You're low priority, dude!
|
263 |
quietRun( 'renice +18 -p ' + repr( host.pid ) ) |
264 |
info( host.name + ' ' )
|
265 |
info( '\n' )
|
266 |
|
267 |
def buildFromTopo( self, topo ): |
268 |
"""Build mininet from a topology object
|
269 |
At the end of this function, everything should be connected
|
270 |
and up."""
|
271 |
|
272 |
def addNode( prefix, addMethod, nodeId ): |
273 |
"Add a host or a switch."
|
274 |
name = prefix + topo.name( nodeId ) |
275 |
mac = macColonHex( nodeId ) if self.setMacs else None |
276 |
ip = topo.ip( nodeId ) |
277 |
node = addMethod( name, mac=mac, ip=ip ) |
278 |
self.idToNode[ nodeId ] = node
|
279 |
info( name + ' ' )
|
280 |
|
281 |
# Possibly we should clean up here and/or validate
|
282 |
# the topo
|
283 |
if self.cleanup: |
284 |
pass
|
285 |
|
286 |
info( '*** Adding controller\n' )
|
287 |
self.addController( 'c0' ) |
288 |
info( '*** Creating network\n' )
|
289 |
info( '*** Adding hosts:\n' )
|
290 |
for hostId in sorted( topo.hosts() ): |
291 |
addNode( 'h', self.addHost, hostId ) |
292 |
info( '\n*** Adding switches:\n' )
|
293 |
for switchId in sorted( topo.switches() ): |
294 |
addNode( 's', self.addSwitch, switchId ) |
295 |
info( '\n*** Adding links:\n' )
|
296 |
for srcId, dstId in sorted( topo.edges() ): |
297 |
src, dst = self.idToNode[ srcId ], self.idToNode[ dstId ] |
298 |
srcPort, dstPort = topo.port( srcId, dstId ) |
299 |
createLink( src, dst, srcPort, dstPort ) |
300 |
info( '(%s, %s) ' % ( src.name, dst.name ) )
|
301 |
info( '\n' )
|
302 |
|
303 |
    def build( self ):
        "Build mininet."
        # Create nodes and links from the topology, if we have one
        if self.topo:
            self.buildFromTopo( self.topo )
        # An explicit control network is only needed when switches and
        # controller live in their own namespaces
        if self.inNamespace:
            info( '*** Configuring control network\n' )
            self.configureControlNetwork()
        info( '*** Configuring hosts\n' )
        self.configHosts()
        # Optional conveniences requested at construction time
        if self.xterms:
            self.startTerms()
        if self.autoSetMacs:
            self.setMacs()
        if self.autoStaticArp:
            self.staticArp()
        self.built = True
319 |
|
320 |
def startTerms( self ): |
321 |
"Start a terminal for each node."
|
322 |
info( "*** Running terms on %s\n" % os.environ[ 'DISPLAY' ] ) |
323 |
cleanUpScreens() |
324 |
self.terms += makeTerms( self.controllers, 'controller' ) |
325 |
self.terms += makeTerms( self.switches, 'switch' ) |
326 |
self.terms += makeTerms( self.hosts, 'host' ) |
327 |
|
328 |
def stopXterms( self ): |
329 |
"Kill each xterm."
|
330 |
# Kill xterms
|
331 |
for term in self.terms: |
332 |
os.kill( term.pid, signal.SIGKILL ) |
333 |
cleanUpScreens() |
334 |
|
335 |
def setMacs( self ): |
336 |
"""Set MAC addrs to correspond to default MACs on hosts.
|
337 |
Assume that the host only has one interface."""
|
338 |
for host in self.hosts: |
339 |
host.setMAC( host.intfs[ 0 ], host.defaultMAC )
|
340 |
|
341 |
def staticArp( self ): |
342 |
"Add all-pairs ARP entries to remove the need to handle broadcast."
|
343 |
for src in self.hosts: |
344 |
for dst in self.hosts: |
345 |
if src != dst:
|
346 |
src.setARP( ip=dst.IP(), mac=dst.MAC() ) |
347 |
|
348 |
def start( self ): |
349 |
"Start controller and switches."
|
350 |
if not self.built: |
351 |
self.build()
|
352 |
info( '*** Starting controller\n' )
|
353 |
for controller in self.controllers: |
354 |
controller.start() |
355 |
info( '*** Starting %s switches\n' % len( self.switches ) ) |
356 |
for switch in self.switches: |
357 |
info( switch.name + ' ')
|
358 |
switch.start( self.controllers )
|
359 |
info( '\n' )
|
360 |
|
361 |
def stop( self ): |
362 |
"Stop the controller(s), switches and hosts"
|
363 |
if self.terms: |
364 |
info( '*** Stopping %i terms\n' % len( self.terms ) ) |
365 |
self.stopXterms()
|
366 |
info( '*** Stopping %i hosts\n' % len( self.hosts ) ) |
367 |
for host in self.hosts: |
368 |
info( '%s ' % host.name )
|
369 |
host.terminate() |
370 |
info( '\n' )
|
371 |
info( '*** Stopping %i switches\n' % len( self.switches ) ) |
372 |
for switch in self.switches: |
373 |
info( switch.name ) |
374 |
switch.stop() |
375 |
info( '\n' )
|
376 |
info( '*** Stopping %i controllers\n' % len( self.controllers ) ) |
377 |
for controller in self.controllers: |
378 |
controller.stop() |
379 |
info( '*** Done\n' )
|
380 |
|
381 |
def run( self, test, *args, **kwargs ): |
382 |
"Perform a complete start/test/stop cycle."
|
383 |
self.start()
|
384 |
info( '*** Running test\n' )
|
385 |
result = test( *args, **kwargs ) |
386 |
self.stop()
|
387 |
return result
|
388 |
|
389 |
    def monitor( self, hosts=None, timeoutms=-1 ):
        """Monitor a set of hosts (or all hosts by default),
           and return their output, a line at a time.
           hosts: (optional) set of hosts to monitor
           timeoutms: (optional) timeout value in ms; -1 blocks forever
           returns: iterator which returns host, line"""
        if hosts is None:
            hosts = self.hosts
        poller = select.poll()
        Node = hosts[ 0 ]  # so we can call class method fdToNode
        # NOTE(review): assumes hosts is non-empty -- an empty list would
        # raise IndexError on the line above; confirm callers guarantee it.
        for host in hosts:
            poller.register( host.stdout )
        # Infinite generator: poll, map each readable fd back to its
        # host, and yield whatever lines arrived
        while True:
            ready = poller.poll( timeoutms )
            for fd, event in ready:
                host = Node.fdToNode( fd )
                if event & select.POLLIN:
                    line = host.readline()
                    if line is not None:
                        yield host, line
            # Return if non-blocking: with a timeout set and nothing
            # ready, yield a (None, None) sentinel instead of spinning
            if not ready and timeoutms >= 0:
                yield None, None
412 |
|
413 |
@staticmethod
|
414 |
def _parsePing( pingOutput ): |
415 |
"Parse ping output and return packets sent, received."
|
416 |
# Check for downed link
|
417 |
if 'connect: Network is unreachable' in pingOutput: |
418 |
return (1, 0) |
419 |
r = r'(\d+) packets transmitted, (\d+) received'
|
420 |
m = re.search( r, pingOutput ) |
421 |
if m == None: |
422 |
error( '*** Error: could not parse ping output: %s\n' %
|
423 |
pingOutput ) |
424 |
return (1, 0) |
425 |
sent, received = int( m.group( 1 ) ), int( m.group( 2 ) ) |
426 |
return sent, received
|
427 |
|
428 |
def ping( self, hosts=None ): |
429 |
"""Ping between all specified hosts.
|
430 |
hosts: list of hosts
|
431 |
returns: ploss packet loss percentage"""
|
432 |
# should we check if running?
|
433 |
packets = 0
|
434 |
lost = 0
|
435 |
ploss = None
|
436 |
if not hosts: |
437 |
hosts = self.hosts
|
438 |
output( '*** Ping: testing ping reachability\n' )
|
439 |
for node in hosts: |
440 |
output( '%s -> ' % node.name )
|
441 |
for dest in hosts: |
442 |
if node != dest:
|
443 |
result = node.cmd( 'ping -c1 ' + dest.IP() )
|
444 |
sent, received = self._parsePing( result )
|
445 |
packets += sent |
446 |
if received > sent:
|
447 |
error( '*** Error: received too many packets' )
|
448 |
error( '%s' % result )
|
449 |
node.cmdPrint( 'route' )
|
450 |
exit( 1 ) |
451 |
lost += sent - received |
452 |
output( ( '%s ' % dest.name ) if received else 'X ' ) |
453 |
output( '\n' )
|
454 |
ploss = 100 * lost / packets
|
455 |
output( "*** Results: %i%% dropped (%d/%d lost)\n" %
|
456 |
( ploss, lost, packets ) ) |
457 |
return ploss
|
458 |
|
459 |
    def pingAll( self ):
        """Ping between all hosts.
           returns: ploss packet loss percentage"""
        # Convenience wrapper: ping() with no host list tests all pairs
        return self.ping()
463 |
|
464 |
def pingPair( self ): |
465 |
"""Ping between first two hosts, useful for testing.
|
466 |
returns: ploss packet loss percentage"""
|
467 |
hosts = [ self.hosts[ 0 ], self.hosts[ 1 ] ] |
468 |
return self.ping( hosts=hosts ) |
469 |
|
470 |
@staticmethod
|
471 |
def _parseIperf( iperfOutput ): |
472 |
"""Parse iperf output and return bandwidth.
|
473 |
iperfOutput: string
|
474 |
returns: result string"""
|
475 |
r = r'([\d\.]+ \w+/sec)'
|
476 |
m = re.search( r, iperfOutput ) |
477 |
if m:
|
478 |
return m.group( 1 ) |
479 |
else:
|
480 |
raise Exception( 'could not parse iperf output: ' + iperfOutput ) |
481 |
|
482 |
    def iperf( self, hosts=None, l4Type='TCP', udpBw='10M' ):
        """Run iperf between two hosts.
           hosts: list of exactly two hosts; if None, uses the first and
           last hosts in the net
           l4Type: string, one of [ TCP, UDP ]
           udpBw: UDP bandwidth target (iperf -b format)
           returns: results two-element array of server and client speeds
           (prefixed with the requested bandwidth for UDP tests)"""
        if not hosts:
            hosts = [ self.hosts[ 0 ], self.hosts[ -1 ] ]
        else:
            # NOTE(review): assert is stripped under -O; an explicit check
            # with a raised exception would be more robust here.
            assert len( hosts ) == 2
        client, server = hosts
        output( '*** Iperf: testing ' + l4Type + ' bandwidth between ' )
        output( "%s and %s\n" % ( client.name, server.name ) )
        # Make sure no stale iperf server is still running on the host
        server.cmd( 'killall -9 iperf' )
        iperfArgs = 'iperf '
        bwArgs = ''
        if l4Type == 'UDP':
            iperfArgs += '-u '
            bwArgs = '-b ' + udpBw + ' '
        elif l4Type != 'TCP':
            raise Exception( 'Unexpected l4 type: %s' % l4Type )
        # Start the server in the background; spin until its pid is known
        # so we can be sure it is up before the client connects
        server.sendCmd( iperfArgs + '-s', printPid=True )
        servout = ''
        while server.lastPid is None:
            servout += server.monitor()
        cliout = client.cmd( iperfArgs + '-t 5 -c ' + server.IP() + ' ' +
                           bwArgs )
        debug( 'Client output: %s\n' % cliout )
        # Interrupt the server and collect the remainder of its output
        server.sendInt()
        servout += server.waitOutput()
        debug( 'Server output: %s\n' % servout )
        result = [ self._parseIperf( servout ), self._parseIperf( cliout ) ]
        if l4Type == 'UDP':
            result.insert( 0, udpBw )
        output( '*** Results: %s\n' % result )
        return result
|
517 |
|
518 |
def configLinkStatus( self, src, dst, status ): |
519 |
"""Change status of src <-> dst links.
|
520 |
src: node name
|
521 |
dst: node name
|
522 |
status: string {up, down}"""
|
523 |
if src not in self.nameToNode: |
524 |
error( 'src not in network: %s\n' % src )
|
525 |
elif dst not in self.nameToNode: |
526 |
error( 'dst not in network: %s\n' % dst )
|
527 |
else:
|
528 |
srcNode, dstNode = self.nameToNode[ src ], self.nameToNode[ dst ] |
529 |
connections = srcNode.connectionsTo( dstNode ) |
530 |
if len( connections ) == 0: |
531 |
error( 'src and dst not connected: %s %s\n' % ( src, dst) )
|
532 |
for srcIntf, dstIntf in connections: |
533 |
result = srcNode.cmd( 'ifconfig', srcIntf, status )
|
534 |
if result:
|
535 |
error( 'link src status change failed: %s\n' % result )
|
536 |
result = dstNode.cmd( 'ifconfig', dstIntf, status )
|
537 |
if result:
|
538 |
error( 'link dst status change failed: %s\n' % result )
|
539 |
|
540 |
def interact( self ): |
541 |
"Start network and run our simple CLI."
|
542 |
self.start()
|
543 |
result = CLI( self )
|
544 |
self.stop()
|
545 |
return result
|
546 |
|
547 |
|
548 |
# pylint thinks inited is unused
|
549 |
# pylint: disable-msg=W0612
|
550 |
|
551 |
def init():
    """Initialize Mininet: check for root privileges and mnexec,
       and fix resource limits. Idempotent via init.inited."""
    if init.inited:
        return
    if os.getuid() != 0:
        # Note: this script must be run as root
        # Perhaps we should do so automatically!
        # Fix: parenthesized print works on both Python 2 and 3;
        # the bare print statement is a syntax error on Python 3
        print( "*** Mininet must run as root." )
        exit( 1 )
    # If which produces no output, then mnexec is not in the path.
    # May want to loosen this to handle mnexec in the current dir.
    if not quietRun( 'which mnexec' ):
        raise Exception( "Could not find mnexec - check $PATH" )
    fixLimits()
    init.inited = True

# Flag noting whether one-time initialization has run
init.inited = False
|
568 |
|
569 |
# pylint: enable-msg=W0612
|