Revision 63e61e90

View differences:

conf/dummyrouting.ini
[dummyRoutingTest]

testModule = dummyrouting
testClass = dummyRoutingRandomTest
duration = 20
#graphDefinition = data/fullmesh.edges
graphDefinition = data/toy.edges

[dummyRoutingSmallTest]

testModule = dummyrouting
testClass = dummyRoutingRandomTest
duration = 20
#graphDefinition = data/fullmesh.edges
graphDefinition = data/small.edges

[dummyRoutingLargeTest]

testModule = dummyrouting
testClass = dummyRoutingRandomTest
duration = 60
#graphDefinition = data/fullmesh.edges
graphDefinition = data/large.edges

[dummyRoutingTwoNodes]

testModule = dummyrouting
testClass = dummyRoutingRandomTest
duration = 15
#graphDefinition = data/fullmesh.edges
graphDefinition = data/twonodes.edges
startLog = 5s
stopLog = 10s
logInterval = 0.1s
verbose = ""

[dummyRoutingDebugTest]

testModule = dummyrouting
testClass = dummyRoutingRandomTest
duration = 10s
#graphDefinition = data/fullmesh.edges
graphDefinition = data/twonodes.edges
startLog = 5s
stopLog = 7s
logInterval = 0.1s
verbose = ""
stopNode = 6s

[dummyRoutingCrashTest]

testModule = dummyrouting
testClass = dummyRoutingRandomTest
duration = 40
graphDefinition = data/toy.edges
stopNode = 15s
startLog = 14s
stopLog = 35s
logInterval = 0.1s
verbose = ""
centralityTuning = ""

[dummyRoutingCircleTest]

testModule = dummyrouting
testClass = dummyRoutingRandomTest
duration = 30
graphDefinition = data/circle.edges
verbose = ""
centralityTuning = ""

[dummyRoutingLineTest]

testModule = dummyrouting
testClass = dummyRoutingRandomTest
duration = 30
graphDefinition = data/line.edges
verbose = ""
centralityTuning = ""

[dummyRoutingPLTest]

testModule = dummyrouting
testClass = dummyRoutingRandomTest
duration = 41
graphDefinition = data/random/PL/_30_PL_0.edges
verbose = ""
stopNode = 25s
nodeCrashed = 5
startLog = 5s
stopLog = 40s
logInterval = 0.1s
#centralityTuning = ""

[dummyRoutingSerialTest]

testModule = dummyrouting
testClass = dummyRoutingRandomTest
duration = 35
graphDefinition = data/toy.edges
verbose = ""
stopNode = 21s
#stopCentralNode = 3
startLog = 20s
stopLog = 35s
logInterval = 0.1s
stopAllNodes = ""
#centralityTuning = ""

[dummyRoutingSerialTestOpt]

testModule = dummyrouting
testClass = dummyRoutingRandomTest
duration = 35
graphDefinition = data/toy.edges
verbose = ""
stopNode = 21s
#stopCentralNode = 3
startLog = 20s
stopLog = 35s
logInterval = 0.1s
stopAllNodes = ""
centralityTuning = ""

[dummyRoutingSerialPLTestTiny]

testModule = dummyrouting
testClass = dummyRoutingRandomTest
duration = 20
verbose = ""
stopNode = 5s
#stopCentralNode = 3
startLog = 4s
stopLog = 19s
logInterval = 0.1s
stopAllNodes = ""
#centralityTuning = ""

[dummyRoutingSerialPLTest]

testModule = dummyrouting
testClass = dummyRoutingRandomTest
duration = 60
graphDefinition = data/random/PL/_30_PL_0.edges
stopNode = 21s
#stopCentralNode = 3
startLog = 20s
stopLog = 55s
logInterval = 0.1s
stopAllNodes = 15

[dummyRoutingSerialPLTestOpt:dummyRoutingSerialPLTest]
centralityTuning = ""

[dummyRoutingSerialPLTestAll:dummyRoutingSerialPLTest]
stopAllNodes = ""

[testStanza:dummyRoutingSerialPLTest]
stopAllNodes = 2

[testStanzaOpt:dummyRoutingSerialPLTest]
stopAllNodes = 2
centralityTuning = ""
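
A note on the stanza names: a header like [dummyRoutingSerialPLTestOpt:dummyRoutingSerialPLTest] declares inheritance, resolved by the InheritConfigParser used in scripts/run_batch_simulations.py below; the child stanza inherits every key of its parent and only overrides what it redefines. A minimal sketch of reading one inherited value, using only calls that appear later in this revision:

    # sketch: resolving a key from an inheriting stanza; assumes
    # inherit_config_parser is importable, as in run_batch_simulations.py
    from inherit_config_parser import InheritConfigParser

    parser = InheritConfigParser()
    parser.optionxform = str    # keep option names case-sensitive
    parser.read("conf/dummyrouting.ini")
    # "duration" is not redefined in the Opt stanza, so this should come
    # back from [dummyRoutingSerialPLTest]
    print parser.get("dummyRoutingSerialPLTestOpt", "duration")
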
conf/olsr.ini

testModule = olsr
testClass = OLSRTest
times = 1
olsrPath = "../olsrd/olsrd"
duration = 120

[OLSRToy:OLSRTest]

NumPing = 3
duration = 30
graphDefinition = data/toy.edges
HelloInterval = 1
TcInterval = 2
startLog = 10s
stopLog = 29s
stopAllNodes = 1
stopNode = 20s

[OLSRCircle:OLSRTest]

NumPing = 3
duration = 40
graphDefinition = data/circle.edges
HelloInterval = 1
TcInterval = 2
startLog = 20s
stopLog = 39s
stopAllNodes = 1
stopNode = 21s

[OLSRTestSmall:OLSRTest]

graphDefinition = ../../data_sets/random_graphs/CN/30/_30_CN_1.edges
duration = 60
HelloInterval = 2
TcInterval = 5
stopNode = 40s
startLog = 38s
stopLog = 58s
NumPing = 3
stopAllNodes = 1

[OLSRTestLarge:OLSRTest]

graphDefinition = ../../data_sets/random_graphs/CN/100/_100_CN_0.edges
duration = 200
HelloInterval = 2
TcInterval = 5
stopNode = 175s
startLog = 120s
stopLog = 198s
stopAllNodes = 1

[OLSRTestVeryLarge:OLSRTest]

# DON'T RUN THIS ON THE LAPTOP!
graphDefinition = ../../data_sets/random_graphs/CN/150/_150_CN_0.edges
duration = 260
#graphDefinition = data/fullmesh.edges
HelloInterval = 2
TcInterval = 5
stopNode = 220s
startLog = 215s
stopLog = 240s

[OLSRSerialTest]

testModule = olsr
testClass = OLSRTest
duration = 60
graphDefinition = ../../data_sets/random_graphs/CN/30/_30_CN_0.edges
stopNode = 21s
#stopCentralNode = 3
startLog = 20s
stopLog = 55s
logInterval = 0.1s
stopAllNodes = 10

[OLSRSerialTestOpt:OLSRSerialTest]
centralityTuning = ""

## To be removed, just testing code
[OLSRSignal:OLSRTest]

#NumPing = 3
duration = 50
graphDefinition = data/triangle.edges
HelloInterval = 1
TcInterval = 2
startLog = 30s
stopLog = 49s
stopAllNodes = [1]
stopNode = 31s

[OLSRSignalCrash:OLSRTest]

duration = 60
graphDefinition = data/triangle.edges
HelloInterval = 1
TcInterval = 2
startLog = 40s
stopLog = 49s
stopList = 0
stopTime = 41

[OLSRCPU:OLSRTest]

graphDefinition = data/small.edges
duration = 120
graphDefinition = data/toy.edges
HelloInterval = 1
TcInterval = 2
startLog = 100s
stopLog = 119s
stopAllNodes = 1
stopTime = 101
popRouting = True

[OLSRCrash:OLSRTest]

duration = 60
graphDefinition = data/small.edges
HelloInterval = 1
TcInterval = 3
startLog = 40s
stopLog = 49s
stopAllNodes = 10
stopTime = 41
popRouting = True

[OLSRScript:OLSRTest]

duration = 90
graphDefinition = data/large.edges
HelloInterval = 1
TcInterval = 3
startLog = 80s
stopLog = 89s
stopAllNodes = 5
stopTime = 81
popRouting = True

[OLSRScriptPop:OLSRTest]

duration = 90
graphDefinition = data/large.edges
HelloInterval = 1
TcInterval = 3
startLog = 80s
stopLog = 89s
stopAllNodes = 20
stopTime = 81
popRouting = True

[OLSRFFWienNoPop:OLSRTest]

duration = 90
graphDefinition = data/FFWien0.edges
HelloInterval = 2
TcInterval = 5
startLog = 70s
stopLog = 89s
stopAllNodes = 100
stopTime = 71
popRouting = False

[OLSRFFWienPop:OLSRTest]

duration = 90
graphDefinition = data/FFWien0.edges
HelloInterval = 2
TcInterval = 5
startLog = 70s
stopLog = 89s
stopAllNodes = 100
stopTime = 71
popRouting = True

[OLSRFFGrazNoPop:OLSRTest]

duration = 90
graphDefinition = data/FFGraz0.edges
HelloInterval = 2
TcInterval = 5
startLog = 70s
stopLog = 89s
stopAllNodes = 100
stopTime = 71
popRouting = False

[OLSRFFGrazPop:OLSRTest]

duration = 90
graphDefinition = data/FFGraz0.edges
HelloInterval = 2
TcInterval = 5
startLog = 70s
stopLog = 89s
stopAllNodes = 100
stopTime = 71
popRouting = True

[OLSRNinuxPop:OLSRTest]

duration = 90
graphDefinition = data/ninux0.edges
HelloInterval = 2
TcInterval = 5
startLog = 70s
stopLog = 89s
stopAllNodes = 100
stopTime = 71
popRouting = True

[OLSRNinuxNoPop:OLSRTest]

duration = 90
graphDefinition = data/ninux0.edges
HelloInterval = 2
TcInterval = 3
startLog = 70s
stopLog = 89s
stopAllNodes = 100
stopTime = 71
popRouting = False

[OLSREraseme:OLSRTest]

duration = 120
graphDefinition = data/ninux0.edges
HelloInterval = 2
TcInterval = 5
startLog = 100s
stopLog = 119s
stopAllNodes = 1
stopTime = 101
popRouting = True
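
Each stanza above is launched by name: run_batch_simulations.py (later in this revision) assembles the command as ["./wcn_simulator.py", "-f", <config file>, "-t", <stanza>, ...]. A hedged sketch of launching one of these configurations directly:

    # sketch mirroring the command assembled in run_and_parse();
    # the stanza name is one of the sections above
    from subprocess import call
    call(["./wcn_simulator.py", "-f", "conf/olsr.ini", "-t", "OLSRToy"])
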
conf/ping.ini
[pingTest]

testModule = ping
testClass = pingRandomTest
duration = 10
graphDefinition = data/toy.edges

[pingTestNinuxSmall]

testModule = ping
testClass = pingFullRandomTest
duration = 20
graphDefinition = data/small.edges
randomPing = True

scripts/compare_results_files.py
#!/usr/bin/env python
import json
import time
import argparse
import pprint
import matplotlib.pyplot as plt
from collections import defaultdict
import numpy as np


class ResultsComparer():

    def print_error(self, error):
        print "ERROR:", error

    def __init__(self, file_name):
        try:
            f = open(file_name, 'r')
            self.json = json.load(f)
        except IOError:
            self.print_error("could not open file " + file_name)
            exit()
        except ValueError:
            self.print_error(file_name + " contains no valid JSON")
            exit()
        self.data = defaultdict(dict)

    def parse_results(self):
        """ FIXME describe the format of the JSON files here """
        for topo_file in self.json:
            if topo_file in ['command', 'time']:
                continue
            # we want just numerical run ids, not other fields
            run_ids = sorted([x for x in self.json[topo_file].keys() \
                    if x.isdigit()])
            for runId in run_ids:
                g_type = self.json[topo_file][runId]["topology_type"]
                size = self.json[topo_file][runId]["network_size"]
                if not self.json[topo_file][runId]["failed_nodes"]:
                    continue
                failed_node = \
                    self.json[topo_file][runId]["failed_nodes"].keys()[0]
                failures = \
                    self.json[topo_file][runId]["failures"]
                results = self.json[topo_file][runId]["results"]
                s = sorted(results.keys(), key=lambda x: float(x))
                # skip runs with no logged samples before indexing s[-1]
                if not s:
                    continue

                self.optimized = self.json[topo_file][runId]["optimized"]

                if sum(results[s[-1]][1:]):
                    print "Graph:", topo_file, "run id:", runId, \
                            "optimized:", self.optimized, \
                            "has unrepaired routes, skipping."
                    continue

                min_time = float(s[0])
                max_time = float(s[-1])

                frequency = (max_time - min_time)/len(results.keys())
                if size not in self.data[g_type]:
                    self.data[g_type][size] = {'x':defaultdict(list), \
                            'y':defaultdict(list), 'topo_file':defaultdict(list)}
                self.data[g_type][size]['x'][runId].append(failed_node)
                self.data[g_type][size]['y'][runId].append(failures*frequency)
                self.data[g_type][size]['topo_file'][runId].append(topo_file)
                self.simulation_time = self.json["time"]

    def print_raw_data(self):
        for g_type in self.data:
            for size in self.data[g_type]:
                print "Type:", g_type, "Size: ", size
                # iterate over the run ids; the original indexed the 'y'
                # dict with the literal key 'topo_file' and printed nothing
                for runId in self.data[g_type][size]['topo_file']:
                    print self.data[g_type][size]['x'][runId]
                    print self.data[g_type][size]['y'][runId]

    def average_data(self):
        self.average_data = defaultdict(dict)
        for g_type in self.data:
            for size in self.data[g_type]:
                crashes = [x for x in self.data[g_type][size]['y'].keys() \
                        if x.isdigit()]
                graphs = \
                    min([len(self.data[g_type][size]['y'][c]) for c in crashes])
                avg_y = []
                for i in sorted([int(x) for x in crashes]):
                    avg_v = 0
                    for run in range(graphs):
                        avg_v += self.data[g_type][size]['y'][str(i)][run]
                    avg_y.append(1.0*avg_v/graphs)
                self.average_data[size][g_type] = {'y':avg_y, 'runs':graphs}

    def format_data_for_plot(self, desc=""):
        self.formatted_data = {}
        for size in self.average_data:
            data = {}
            data["plot"] = {}
            data["desc"] = desc
            title = "nodes: " + str(size)
            runs = self.average_data[size].items()[0][1]['runs']
            title += ", runs:" + str(runs)
            data["title"] = title
            data["time"] = time.ctime(int(self.simulation_time))
            data["optimized"] = self.optimized
            for g_type in self.average_data[size]:
                data["plot"][g_type] = self.average_data[size][g_type]['y']
            self.formatted_data[size] = data


class DataMerger():

    def __init__(self):
        self.data = {}

    def merge_data(self, r_c):
        results_comparer = r_c.data
        for g_type in results_comparer:
            for size in results_comparer[g_type]:
                if size not in self.data:
                    self.data[size] = {}
                if g_type not in self.data[size]:
                    self.data[size][g_type] = {}
                opt = r_c.optimized
                if opt not in self.data[size][g_type]:
                    self.data[size][g_type][opt] = {}
                for run_id, run_id_vec in results_comparer[g_type][size]['y'].items():
                    # run_id follows the order in the sequence of failures
                    # so it mirrors the order by centrality of failed nodes
                    if run_id not in self.data[size][g_type][opt]:
                        self.data[size][g_type][opt][run_id] = {}
                        for idx, failed_routes in enumerate(run_id_vec):
                            graph = \
                                results_comparer[g_type][size]['topo_file'][run_id][idx]
                            if graph not in self.data[size][g_type][opt][run_id]:
                                self.data[size][g_type][opt][run_id][graph] = \
                                    failed_routes

    def compare_data(self, rem_extremes=False):
        data_c = defaultdict(dict)
        for size in self.data:
            for g_type in self.data[size]:
                data_c[size][g_type] = {'y':{}}
                if len(self.data[size][g_type]) == 2:
                    y = []
                    for run_id in sorted(self.data[size][g_type][True],
                            key=lambda x: int(x)):
                        for graph in self.data[size][g_type][False][run_id]:
                            if graph in self.data[size][g_type][True][run_id]:
                                if self.data[size][g_type][True][run_id][graph]:
                                    try:
                                        y.append(self.data[size][g_type][True][run_id][graph] \
                                                / self.data[size][g_type][False][run_id][graph])
                                    except ZeroDivisionError:
                                        pass
                        data_c[size][g_type]['y'][int(run_id)] = \
                                np.average(y)
        return data_c

    def plot_data(self, data):
        # TODO add info to the graph
        for size in data:
            f = plt.figure()
            plt.title("Size:" + str(size))
            for g_type in data[size]:
                plt.plot(data[size][g_type]['y'].keys(),
                        data[size][g_type]['y'].values(), label=g_type)
            plt.legend()
            plt.show()

    def print_data(self):
        pp = pprint.PrettyPrinter()
        pp.pprint(self.data)


def parse_args():
    parser = argparse.ArgumentParser(description = \
            "parser of results file generated by run_batch_simulations.py")
    parser.add_argument('-d', dest='description', help="a description to "+\
            "be added to the results", default="", type=str)
    parser.add_argument('-f', dest='res_file',
            help="the file with the results", action="append",
            required=True, type=str)
    parser.add_argument('-s', dest='show_graph',
            help="show the plot", action="store_true",
            required=False, default=False)
    #parser.add_argument('-o', dest='res_file_opt',
    #        help="the file with the results for the optimized run",
    #        required=False, type=str)
    args = parser.parse_args()
    return args


args = parse_args()
m = DataMerger()
for res_file in args.res_file:
    r = ResultsComparer(res_file)
    r.parse_results()
    r.average_data()
    r.format_data_for_plot()
    m.merge_data(r)
c = m.compare_data()
if args.show_graph:
    m.plot_data(c)
m.print_data()
#if args.res_file_opt:
#    ro = ResultsComparer(args.res_file_opt)
#    ro.parse_results()
#    ro.average_data()
#    ro.format_data_for_plot()
#    m = generate_relative_dataset(r.formatted_data, ro.formatted_data)
#    print_data(m)

#
#def generate_relative_dataset(non_opt, opt):
#
#    # let's check the size of the result set
#    if opt.keys() == non_opt.keys():
#        for size in opt:
#            if opt[size]['plot'].keys() == non_opt[size]['plot'].keys():
#                for g_type in opt[size]['plot']:
#                    if len(opt[size]['plot'][g_type]) == \
#                            len(non_opt[size]['plot'][g_type]):
#                        continue
#                    else:
#                        print opt[size]['plot'][g_type], "is not as long as", \
#                                non_opt[size]['plot'][g_type]
#                    return {}
#            else:
#                print opt[size]['plot'], " differs from ", non_opt[size]['plot']
#                return {}
#    else:
#        print opt.keys(), "differs from", non_opt.keys()
#        return {}
#
#    merged_data = opt.copy()
#    for size in opt:
#        for g_type in opt[size]['plot']:
#            merged_data[size]['plot'][g_type] = np.array(
#                    opt[size]['plot'][g_type])/np.array(
#                    non_opt[size]['plot'][g_type])
#            merged_data[size]['title'] += "size:" + str(size)
#    return merged_data
#
#def print_data(merged_data):
#    for size in merged_data:
#        f = plt.figure()
#        f.title = merged_data[size]['title']
#        for g_type in merged_data[size]['plot']:
#            y = merged_data[size]['plot'][g_type]
#            plt.plot(range(len(y)), y, 'o', label=g_type)
#        plt.xlabel("Crashed node, ordered by betweenness")
#        plt.ylabel("Failed routes")
#        plt.ylim([0,1])
#        plt.legend()
#        plt.show()
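
parse_results() above leaves the input format as a FIXME; from the keys it reads, one .results file plausibly has this shape (all values below are illustrative, not taken from a real run):

    # illustrative shape of one .results file, inferred from parse_results();
    # every value here is made up
    example = {
        "command": "...",            # skipped by the parser
        "time": "1400000000",        # unix timestamp of the simulation
        "data/random/PL/_30_PL_0.edges": {
            "0": {                               # numeric run id
                "topology_type": "PL",
                "network_size": 30,
                "failed_nodes": {"10.0.5.1": 21.0},
                "failures": 12,
                "optimized": False,
                # per-sample [ok, broken, loop] counters keyed by log time
                # (inferred); the tail of the last sample is summed to
                # detect unrepaired routes
                "results": {"20.1": [870, 0, 0], "21.3": [850, 15, 5]},
            },
        },
    }
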
scripts/compare_topologies_recursively.py
#!/usr/bin/env python

import sys
sys.path.append('../community_networks_analysis')

from misclibs import navigateRoutingTables
from gengraphs import loadGraph
import glob
import json
import networkx as nx

if len(sys.argv) < 3:
    print "This script compares the output of the emulator with a real topology, and"
    print "checks that the routing tables computed by mininet are the same computed"
    print "with networkX using the shortest path method. In case it finds failed nodes"
    print "it checks the routing tables coherently."
    print "usage: ./compare_topologies_recursively.py ",\
            "path_prefix full_topology"
    print "path_prefix is the prefix of the routing table files generated by dummyrouting"
    print "full_topology is the edge file describing the original network"
    sys.exit(1)

failedNodes = []

pathPrefix = sys.argv[1]
topoFile = sys.argv[2]
g = loadGraph(topoFile, remap=True)
print g.nodes()

jsonRt = {}
nodeList = set()

# read topology files, populate the global routing table structure
for topoFile in glob.glob(pathPrefix+"*.rt"):
    try:
        f = open(topoFile, "r")
        j = json.load(f)
    except Exception as e:
        print "NOK", str(e)
        sys.exit(1)
    nodeIP = ".".join(j["node"].split(":")[0].split(".")[:3])
    if j["failed"] == True:
        failedNodes.append(nodeIP)
        continue
    rt = j["rt"]
    jsonRt[nodeIP] = {}
    for dest, nh in rt.items():
        shortDest = ".".join(dest.split(".")[:3])
        shortNh = ".".join(nh[0].split(".")[:3])
        jsonRt[nodeIP][shortDest] = [shortNh] + nh[1:]

    nodeList.add(str(nodeIP))

shortedFailedNodes = [int(n.split(".")[-1]) for n in failedNodes]

print "failed nodes", failedNodes, shortedFailedNodes

nl = list(nodeList)
if len(nl) == 0:
    print "NOK: can not read routing tables"
    sys.exit(1)

failedNodesErrors = 0

# check that no one has a route to a failed node:
if failedNodes != []:
    for node in jsonRt:
        if set(jsonRt[node]).intersection(failedNodes):
            failedNodesErrors += 1
            print "node", node, "has a route to a failed node:", failedNodes

print "failed nodes errors:", failedNodesErrors

#for node, rt in jsonRt.items():
#    if len(rt) != len(nl) - 1:
#        print "node ", node, "misses some routes"
#        print json.dumps(jsonRt, indent=1)
#        sys.exit(1)

#print json.dumps(jsonRt, indent=1)

errors = 0

for fnode in failedNodes:
    f = int(fnode.split(".")[2])
    g.remove_node(f)

# navigate the global routing table and compare the routes with
# the ones computed on the original graph
for i in range(len(nl)):
    for j in range(i+1, len(nl)):
        sIP = nl[i]
        dIP = nl[j]
        s = int(nl[i].split(".")[2])
        d = int(nl[j].split(".")[2])
        print "== rt ", s, d
        try:
            route = navigateRoutingTables(jsonRt, sIP,
                dIP, [], 0)
        except KeyError as e:
            errors += 1
            print str(e)
            print sIP, jsonRt[sIP]
            print dIP, jsonRt[dIP]
            print "NOK!: there is no route from ", s, "to", d
            continue
        except Exception as e:
            print "Error in navigating the RT"
            print str(e)
            continue
        allRoutes = [p for p in nx.all_shortest_paths(g, s, d)]

        shortedRoute = [int(r.split(".")[2]) for r in route[0]]

        if shortedRoute not in allRoutes:
            for computedRoute in allRoutes:
                # if the route computed on the full graph includes a failed node
                # we don't raise an error, else we do
                if set(computedRoute).intersection(shortedFailedNodes) != set():
                    continue
                else:
                    print "NOK!: route", shortedRoute, "not in ", allRoutes
                    errors += 1
                    break
        print "OK!: route", shortedRoute, "found in", allRoutes

print "Found ", errors, "errors"
scripts/k_core.py
#!/usr/bin/env python
import networkx as nx
import sys
from matplotlib import pyplot as plt


g = nx.read_weighted_edgelist(sys.argv[1])

g.remove_edges_from(g.selfloop_edges())
c = nx.k_core(g, 2)

a = [n for n in nx.articulation_points(c)]

fail_candidates = [n for n in c.nodes() if n not in a]

for n in fail_candidates:
    gg = g.copy()
    gg.remove_node(n)
    comp = nx.connected_components(gg)
    print n, comp
    isolated_nodes = [x for component in comp[1:] for x in component]
    print "XX", isolated_nodes

nx.draw(g)
plt.show()
nx.draw(c)
plt.show()
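
One portability note: this assumes the pre-1.9 networkx API, where connected_components() returned a list of components sorted by size, which is what makes the comp[1:] slice meaningful. On networkx 1.9 and later it returns an unsorted generator, so the equivalent would be something like:

    # equivalent on networkx >= 1.9, where connected_components()
    # yields unsorted node sets
    comp = sorted(nx.connected_components(gg), key=len, reverse=True)
    isolated_nodes = [x for component in comp[1:] for x in component]
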
scripts/killall.sh
#!/bin/bash

for i in `ps aux | grep dummy | grep python | awk '{print $2}'`; do
    sudo kill $i
done
for i in `ps aux | grep dummy | grep python | awk '{print $2}'`; do
    sudo kill -9 $i
done
scripts/measure_breakage_time.py
#!/usr/bin/env python

import sys
sys.path.append('../community_networks_analysis')

from misclibs import navigateRoutingTables, LoopError
from collections import defaultdict, Counter
import glob
import simplejson as json
import copy

class resultParser():

    def readTopology(self, pathPrefix, matchPath=""):
        """ load all the .json files with the logged routing tables,
        return the global time-varying routing table """
        jsonRt = {}
        nodeSet = set()
        failedNodes = {}
        signallingSent = 0
        timeBasedRoute = {}
        logFrequency = 0
        helloTimers = []
        tcTimers = []
        # FIXME add read from zip files here
        files = glob.glob(pathPrefix + "*" + matchPath + ".json")
        print pathPrefix + "*" + matchPath + ".json"
        for topoFile in files:
            try:
                f = open(topoFile, "r")
                j = json.load(f)
                f.close()
            except Exception as e:
                print "NOK", str(e)
                print topoFile
                try:
                    f.close()
                except:
                    pass
                # return empty values with the same arity as the normal return
                return {}, set(), {}, 0, 0
            #nodeIP = ".".join(j["node"].split(":")[0].split(".")[:3])
            nodeIP = j["node"].split(":")[0]
            rt = j["log"]
            # interval between two samples in seconds
            logFrequency = float(j["logFrequency"])/1000
            helloTimers.append(float(j["hello_timer"]))
            tcTimers.append(float(j["tc_timer"]))
            # number of losses in a second
            runId = j["failureId"]
            if runId not in timeBasedRoute:
                timeBasedRoute[runId] = defaultdict(dict)
            if runId not in failedNodes:
                failedNodes[runId] = {}
            if j["fail"] == True:
                failedNodes[runId][nodeIP] = float(j["failtime"])
            if runId not in jsonRt:
                jsonRt[runId] = defaultdict(dict)
            try:
                for logId, logDump in rt.items():
                    jsonRt[runId][logId][nodeIP] = logDump["RT"]
                    if "time" not in jsonRt[runId][logId] or \
                            jsonRt[runId][logId]["time"] > float(logDump["time"]):
                        jsonRt[runId][logId]["time"] = float(logDump["time"])
                    timeBasedRoute[runId][logId][nodeIP] = [float(logDump["time"]), logId]
            except KeyError:
                print "ERROR: topo file", topoFile, "on run_id", runId,\
                      "contains wrong keys"
                del jsonRt[runId]
            nodeSet.add(str(nodeIP))
        # alignedJsonRt = self.reorderLogs(timeBasedRoute, jsonRt, failedNodes, nodeSet)
        return jsonRt, nodeSet, failedNodes, signallingSent, \
                logFrequency

    def reorderLogs(self, timeBasedRoute, jsonRt, failedNodes, nodeSet):

        logWindow = {}
        orderedLogSequence = []
        alignedJsonRt = {}

        for runId in timeBasedRoute:
            # just a big time
            earliestFailure = (2050-1970)*365*24*60*60
            for node, failTime in failedNodes[runId].items():
                if failTime < earliestFailure:
                    earliestFailure = failTime
            # for each time, a list of [IP, logId] that logged at that time
            logSequence = defaultdict(list)
            for logId in timeBasedRoute[runId]:
                for nodeIP in timeBasedRoute[runId][logId]:
                    try:
                        nodeLogId = timeBasedRoute[runId][logId][nodeIP][1]
                        nodeLogTime = timeBasedRoute[runId][logId][nodeIP][0]
                        logSequence[nodeLogTime].append([nodeIP, nodeLogId])
                    except KeyError:
                        print "WARNING, key", nodeIP, "not present in logId", \
                              logId
            orderedLogSequence = sorted(logSequence.items(),
                                        key=lambda x: x[0])
            newJsonRt = defaultdict(dict)
            newJsonRtCheck = defaultdict(dict)

            currLogId = 1
            includeFailed = True
            logWindow = dict.fromkeys(nodeSet, 0)
            for (t, data) in orderedLogSequence:
                for [ip, nodeRunId] in data:
                    # the right end of the time window reached the failure
                    # time: we have to remove from the current time window all
                    # the failed nodes that have no log currently assigned, as
                    # they will not save any more logs
                    if t > earliestFailure:
                        if includeFailed:  # this bool is needed to run the
                                           # next routine only once
                            for ip in list(logWindow.keys()):
                                if ip in failedNodes[runId].keys() and\
                                   logWindow[ip] == 0:
                                        # we have a failed node that did not
                                        # save a log in this time window, so
                                        # we will remove all failed nodes from
                                        # the current window
                                        print "removing failed nodes",
                                        for ip in failedNodes[runId].keys():
                                            del logWindow[ip]
                                            print ip,
                                        print "failtime", earliestFailure
                                        includeFailed = False
                                        # we're looping on a modified structure
                                        break
                                else:
                                    logWindow[ip] = [t, nodeRunId]
                        elif ip not in failedNodes[runId]:
                            logWindow[ip] = [t, nodeRunId]
                    else:
                        logWindow[ip] = [t, nodeRunId]
                if 0 not in logWindow.values():
                    for ip, [tt, lid] in logWindow.items():
                        if ip not in jsonRt[runId][lid]:
                            print "WARNING: removing logId", lid
                            continue
                        newJsonRt[currLogId][ip] = jsonRt[runId][lid][ip]
                        if "time" not in newJsonRt[currLogId] or\
                            tt < newJsonRt[currLogId]["time"]:
                            newJsonRt[currLogId]["time"] = tt
                        newJsonRtCheck[currLogId][ip] = tt
                    currLogId += 1
                    # reset the logWindow
                    logWindow = dict.fromkeys(logWindow, 0)
            alignedJsonRt[runId] = copy.deepcopy(newJsonRt)
        return alignedJsonRt

    def checkRoutingTables(self, jsonRt, nodeSet, failedNodes, silent=True):
        errors = 0
        loops = 0
        # remove any non IP-like string from keys
        ipTest = lambda x: len(x.split(".")) == 4 and [int(b) for b in x.split(".")]
        nl = [k for k in jsonRt.keys() if ipTest(k)]
        routesOk = 0
        for i in range(len(nl)):
            sIP = nl[i]
            for j in range(len(nl)):
                if i == j:
                    continue
                dIP = nl[j]
                try:
                    route = navigateRoutingTables(jsonRt, sIP,
                        dIP, [], 0, silent, use_base_ip=True)
                except KeyError:
                    errors += 1
                    if not silent:
                        print "NOK!: there is no route from ", sIP, "to", dIP
                    continue
                except LoopError:
                    if not silent:
                        print "NOK: there is a loop from", sIP, "to", dIP
                    loops += 1
                    continue
                if not silent:
                    print "OK!: route", route
                routesOk += 1
        return [routesOk, errors, loops]

    def gnuplotOutput(self, results, outFile="/tmp/res.gnuplot"):

        failTime = 0
        totFailures = 0
        ff = open(outFile, "w")
        for runId in results:
            for time in sorted(results[runId]):
                if sum(results[runId][time][1:3]):
                    failTime = time
                    break
            print >> ff, "time,", "broken,", "loop,", "total"
            for time in sorted(results[runId]):
                [ok, broken, loop, logId] = results[runId][time]
                print >> ff, time - failTime, ",", broken,\
                    ",", loop, ",", broken+loop
                totFailures += broken + loop
        ff.close()
        print "totFailures", totFailures

    def parseAllRuns(self, jsonRt, nodeSet, failedNodes, silent=True):

        retDict = {}
        # first we realign the logs, which can be misaligned at the start or
        # the end, since the daemons start and stop at different times

        # we also ensure that the failure is simultaneous for all failed
        # nodes, that is, from a certain logId on, the jsonRt entries do not
        # include the rt of the failed nodes, and we reset the failure time
        # to the last available log time

        minFailTime = min(failedNodes.values())
        idToPurge = []
        for logId, rt in sorted(jsonRt.items(),
                key=lambda x: int(x[0])):
            for node in nodeSet:
                if node not in rt.keys():
                    # this node is not in the rt
                    if node not in failedNodes:
                        # this node should be in the rt,
                        # something did not work in this run
                        idToPurge.append(logId)
                        break
        for idx in idToPurge:
            print "WARNING: Purged run", idx
            del jsonRt[idx]

        for logId, rt in sorted(jsonRt.items(),
                key=lambda x: int(x[0])):
            print "===========", logId, "=========="
            ret = self.checkRoutingTables(
                    jsonRt[logId], nodeSet, failedNodes, silent=silent)
            ret.append(logId)
            retDict[jsonRt[logId]["time"]] = ret
        return retDict


if __name__ == "__main__":

    if len(sys.argv) < 2:
        print "This script parses dumps of routing tables, recomputes all the shortest paths"
        print "and finds the number and time of breakage of the network"
        print "usage: ./measure_breakage_time.py ",\
                "path_prefix"
        print "path_prefix is the prefix of the routing table files generated by dummyrouting"
        sys.exit(1)

    pathPrefix = sys.argv[1]

    p = resultParser()
    jsonRt, nodeSet, failedNodes, signallingSent,\
        logFrequency = p.readTopology(pathPrefix)

    if not nodeSet:
        print "NOK: can not read routing tables"
        sys.exit(1)

    results = {}
    for runId in jsonRt:
        results[runId] = p.parseAllRuns(jsonRt[runId], nodeSet,
                failedNodes[runId], silent=True)
        print results

    p.gnuplotOutput(results)
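
reorderLogs() above realigns per-node logs into global snapshots: it scans the samples ordered by timestamp and emits a new global logId every time each live node has contributed one sample. A toy illustration of that windowing idea, with two nodes and hypothetical timestamps:

    # toy illustration of the log-window alignment in reorderLogs();
    # nodes and timestamps are hypothetical
    samples = [(1.0, "A", 1), (1.1, "B", 1), (1.9, "A", 2), (2.2, "B", 2)]
    window, snapshots = {}, []
    for t, node, logId in sorted(samples):
        window[node] = (t, logId)
        if len(window) == 2:        # every live node reported once
            snapshots.append(dict(window))
            window.clear()
    print snapshots
    # [{'A': (1.0, 1), 'B': (1.1, 1)}, {'A': (1.9, 2), 'B': (2.2, 2)}]
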
scripts/plot_R.py
#!/usr/bin/env python
import json
import sys

try:
    pop_file = sys.argv[1]
    nonpop_file = sys.argv[2]
except:
    print "usage: ./plot_R.py popfile.results nonpopfile.results"
    exit(1)

try:
    f = open(pop_file, "r")
    p = json.load(f)
    f.close()
    f = open(nonpop_file, "r")
    np = json.load(f)
    f.close()
except:
    print "could not load json files!"
    exit(1)

pop_failures = {}
nonpop_failures = {}

for failure_id, data in p.items():
    try:
        idx = int(failure_id)
        pop_failures[idx] = float(data["failures"])
    except ValueError:
        pass

for failure_id, data in np.items():
    try:
        idx = int(failure_id)
        nonpop_failures[idx] = float(data["failures"])
    except ValueError:
        pass

data_file_name = "/tmp/data_file.txt"
data_file = open(data_file_name, "w")
gnuplot_file = open("/tmp/gnuplot_file.txt", "w")

print >> data_file, "h,", "R"
sorted_values = []
relative_values = []
avg = 0.0
global_loss_r_num = 0
global_loss_r_den = 0
for failure_id in sorted(pop_failures.keys()):
    #print >> data_file, failure_id, ",",\
    # nonpop_failures[failure_id]-pop_failures[failure_id]
    try:
        if int(pop_failures[failure_id]) == 0 or int(nonpop_failures[failure_id]) == 0:
            relative_values.append(0)
            sorted_values.append(0)
        else:
            sorted_values.append(nonpop_failures[failure_id]-pop_failures[failure_id])
            relative_values.append(1 - pop_failures[failure_id]/nonpop_failures[failure_id])
            global_loss_r_num += pop_failures[failure_id]
            global_loss_r_den += nonpop_failures[failure_id]
            avg += nonpop_failures[failure_id]-pop_failures[failure_id]
            print failure_id, pop_failures[failure_id], nonpop_failures[failure_id], pop_failures[failure_id]-nonpop_failures[failure_id]
    except:
        print "XXX", failure_id
        pass

avg /= len(pop_failures)
print "global loss reduction", 1 - global_loss_r_num/global_loss_r_den

for (ext_idx, (idx, data)) in enumerate(sorted(enumerate(sorted_values), key=lambda x: x[1], reverse=True)):
    print >> data_file, ext_idx, ",",\
         data, ",", relative_values[idx]

print >> gnuplot_file,\
"""
set term eps enhanced
set output "/tmp/absolute_failures.eps"
set xlabel "Failed Node"
set ylabel "L_r"
set datafile separator ','
plot "%s" using 1:2 pt 7 ps 0.5 title columnhead, 0. w l lc 0 title "", %f w l lc 0 lt 5 title ""

set output "/tmp/relative_failures.eps"
set xlabel "Failed Node"
set ylabel "L_r"
set datafile separator ','
plot "%s" using 1:3 w lp title columnhead, 0. w l lc 0 title ""  """ % (data_file_name, avg, data_file_name)

data_file.close()
gnuplot_file.close()
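
For every failed node the script emits two series: the absolute difference nonpop - pop (column 2) and the relative reduction 1 - pop/nonpop (column 3). With a hypothetical pair of counts:

    # hypothetical single data point, to make the two plotted series concrete
    pop, nonpop = 30.0, 40.0
    print nonpop - pop       # absolute series: 10.0
    print 1 - pop / nonpop   # relative series: 0.25
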
scripts/random_graph_generator.py
#!/usr/bin/env python

import networkx as nx
import sys
import argparse

class GraphGenerator():

    graph_types = {"RA":"graph_generator_RA",
            "PL":"graph_generator_PL",
            "RE":"graph_generator_RE",
            "SW":"graph_generator_SW"}

    def __init__(self):
        self.args = None
        self.graph_generator = None

    def parse_args(self):
        parser = argparse.ArgumentParser(description="graph generator")
        parser.add_argument("-t", dest="type", help="type of graph (RAndom, "+\
                "Power Law, REgular, Small World)",
                choices=self.graph_types.keys(), required=True)
        parser.add_argument("-n", dest="num_nodes", help="number of nodes",
            required=True, type=int)
        parser.add_argument("-g", dest="num_graphs",
                help="number of graphs to generate",
                required=True, type=int)
        parser.add_argument("-p", dest="prefix", required=True,
                help="prefix to add to output files", type=str)
        self.args = parser.parse_args()

    def graph_generator_SW(self):
        return nx.connected_watts_strogatz_graph(self.args.num_nodes,
                self.args.num_nodes/5, 0.1)

    def graph_generator_RA(self):
        return nx.fast_gnp_random_graph(self.args.num_nodes, 0.1)

    def graph_generator_RE(self):
        return nx.random_regular_graph(4, self.args.num_nodes)

    def graph_generator_PL(self):
        return nx.barabasi_albert_graph(self.args.num_nodes, 2)

    def generate_graphs(self):
        generate_graph_function = getattr(self, self.graph_types[self.args.type])
        ret_graphs = []
        for i in range(self.args.num_graphs):
            g = generate_graph_function()
            #if nx.is_connected(g):
            #    for e in g.edges(data=True):
            #        e[2]["weight"] = 1
            #nx.write_edgelist(g, self.prefix+"-"+str(i)+".edges",
            #        data=["weight"])
            ret_graphs.append(g)
        return ret_graphs

    def add_weight(self, g):
        for e in g.edges(data=True):
            e[2]["weight"] = 1

if __name__ == "__main__":
    gn = GraphGenerator()
    gn.parse_args()
    for i, g in enumerate(gn.generate_graphs()):
        graph_name = gn.args.prefix + "_" + str(gn.args.num_nodes) + \
            "_" + gn.args.type + "_" + str(i) + ".edges"
        gn.add_weight(g)
        nx.write_edgelist(g, graph_name, data=["weight"])
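
The output naming (prefix, node count, type, index) matches the graphDefinition paths used in conf/dummyrouting.ini: with a prefix of data/random/PL/, ten 30-node power-law graphs would come out as data/random/PL/_30_PL_0.edges and so on. What "-t PL -n 30" boils down to, as a standalone sketch:

    # sketch: one 30-node power-law graph with unit edge weights, written
    # in the same format the script produces (the file name is hypothetical)
    import networkx as nx

    g = nx.barabasi_albert_graph(30, 2)
    for e in g.edges(data=True):     # same as add_weight() above
        e[2]["weight"] = 1
    nx.write_edgelist(g, "_30_PL_0.edges", data=["weight"])
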
scripts/run_batch_simulations.py
#! /usr/bin/env python

import sys
sys.path.append("../")
sys.path.append("../test_code/")
sys.path.append('../community_networks_analysis/')
import matplotlib
matplotlib.use("Agg")

import os
import time
import argparse
import glob
import tarfile
import matplotlib.pyplot as plt
from subprocess import check_output, CalledProcessError, call
from collections import defaultdict
import json

from inherit_config_parser import InheritConfigParser
from measure_breakage_time import resultParser
from dummyrouting import OptimizeGraphChoice
from gengraphs import loadGraph

topology_override_string = "no_topology_override_see_config_file"

class EmulationRunner():

    def __init__(self):
        self.path_prefix = ""
        self.args = None
        self.run_dict = []
        self.defaultFailures = 10

    def parse_args(self):
        parser = argparse.ArgumentParser(description="batch simulation launcher" + \
            " and analyser")
        parser.add_argument("-r", dest="runs", help="number of runs",
            default=1, type=int)
        parser.add_argument("-f", dest="confile", help="configuration file",
            default="conf/dummyrouting.ini", type=str)
        parser.add_argument("-t", dest="stanza", required=True,
                help="name of the configuration to run", type=str)
        parser.add_argument("-p", dest="parseonly", action="store_true",
                help="do not run the simulation, only parse results")
        parser.add_argument("-c", dest="check_connectivity", action="store_true",
                help="select graphs in order to have a minimum number " +\
                        "of repetitions per run_id (see code)")
        parser.add_argument("-g", dest="graphfolder", action="append",
                help="a folder with .adj files from which to "\
                        + "extract topologies (multiple folders are supported)")
        self.args = parser.parse_args()

        if self.args.check_connectivity:
            try:
                f = self.extract_simulation_parameter_from_conf(
                            "stopAllNodes",
                        self.args.confile, self.args.stanza)
                if f.isdigit():
                    self.failures = int(f)
                else:
                    self.failures = self.defaultFailures
            except:
                self.failures = self.defaultFailures

    def extract_simulation_parameter_from_conf(self, conf,
            file_name, stanza):
        parser = InheritConfigParser()
        parser.optionxform = str
        file_name = "../" + file_name
        parser.read(file_name)

        if stanza not in parser.sections():
            print file_name, stanza
            print "ERROR: I can't find the configuration specified! This run will fail"
        r = parser.get(stanza, conf)
        return r

    def run_and_parse(self, size, type, res=None,
            topo_files=[topology_override_string],
            run_args=[], auto_clean=False):
        if not self.args.parseonly and os.getuid() != 0:
            print "You should run this script as root"
            sys.exit(1)
        p = resultParser()
        self.path_prefix = "/tmp/dummyrouting-log"
        if res == None:
            res = defaultdict(dict)
        try:
            self.extract_simulation_parameter_from_conf("centralityTuning",
                str(self.args.confile), str(self.args.stanza))
            optimized = True
        except:
            optimized = False

        prev_run_id = ""
        for idx, topo in enumerate(topo_files):
            if prev_run_id:
                self.save_environment(prev_run_id)
                prev_run_id = ""
            if run_args:
                overrideConf = run_args[idx]
            else:
                overrideConf = ""
            command = ["./wcn_simulator.py", "-f", str(self.args.confile), \
                    "-t", str(self.args.stanza), "-o", overrideConf]
            #TODO: yes this sucks a bit...
            if topo != topology_override_string:
                command += ["-g", os.path.abspath(topo)]
                prev_run_id = os.path.splitext(os.path.basename(topo))[0]
            prev_run_id += str(self.args.stanza)

            if not self.args.parseonly and not auto_clean:
                self.clean_environment()
                auto_clean = True
            elif auto_clean:
                self.clean_environment(auto=True)

            self.command = command
            if not self.args.parseonly:
                self.execute_run(command)
            jsonRt, nodeSet, failedNodes, signallingSent, sigPerSec,\
                logFrequency = p.readTopology(self.path_prefix)
            for runId in jsonRt:
                total_fail_samples = 0
                results = p.parseAllRuns(jsonRt[runId], nodeSet,
                        failedNodes[runId], silent=True)
                failures = 0
                log_time_array = sorted(results)
                for tt in log_time_array:
                    failures += sum(results[tt][1:])
                    total_fail_samples += 1
                res[topo][runId] = {}
                res[topo][runId]["signalling"] = signallingSent
                res[topo][runId]["failures"] = failures
                res[topo][runId]["failed_nodes"] = failedNodes[runId]
... This diff was truncated because it exceeds the maximum size that can be displayed.
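
Although the listing is truncated, parse_args() above already shows the launch interface; a hedged invocation sketch (run_and_parse() checks os.getuid() and refuses to simulate as non-root):

    # sketch: launching one batch run, mirroring the flags in parse_args();
    # must run as root (see the os.getuid() check in run_and_parse())
    from subprocess import call
    call(["sudo", "./run_batch_simulations.py",
          "-f", "conf/dummyrouting.ini", "-t", "dummyRoutingSerialPLTest"])
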
