-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathgraphbuilder.py
More file actions
632 lines (506 loc) · 24.5 KB
/
graphbuilder.py
File metadata and controls
632 lines (506 loc) · 24.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
import torch
import os
import sys
import subprocess
import glob
import pickle
from typing import Union, List
from collections import Counter, defaultdict
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from torch_geometric.loader import DataLoader
from torch_geometric.datasets import TUDataset
from torch_geometric.utils.convert import from_networkx
from function_manager import function_manager
from pprof import Pprof
def generate_graphs(data_dir='data/profile_data/', debug=False):
    """Build a call graph for every .pb profile in *data_dir* and pickle it.

    Each profile file is parsed, folded into a networkx call graph via
    build_call_graph, and written to ``<data_dir>/nx/<profile name>.gpickle``.

    Args:
        data_dir: data directory, resolved relative to this source file.
        debug: when True, warn about profiles that produced an empty graph.
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    data_dir = os.path.join(base_dir, data_dir)
    graph_dir = os.path.join(data_dir, 'nx')
    os.makedirs(graph_dir, exist_ok=True)
    for filename in os.listdir(data_dir):
        if filename.endswith('.pb'):
            file_path = os.path.join(data_dir, filename)
            profile = Pprof.parse_profile(file_path)
            graph = build_call_graph([profile], filename=filename)
            if debug:
                if len(graph) == 0:
                    print(f"Empty graph for {filename}")
            # BUG FIX: the output name previously did not vary per profile,
            # so every graph was written to the same .gpickle file; derive
            # the graph file name from the profile file name instead.
            graph_file_path = os.path.join(
                graph_dir, f'{os.path.splitext(filename)[0]}.gpickle')
            graph.filename = file_path
            # Context manager ensures the file handle is closed promptly.
            with open(graph_file_path, 'wb') as f:
                pickle.dump(graph, f)
def load_graph_from_glob_directory(data_dir_glob, label, generate=True, debug=False):
    """Load pickled graphs from the ``nx`` subdirectory of a glob pattern.

    Args:
        data_dir_glob: glob pattern of data directories; ``/nx`` is appended
            to locate the pickled graphs (wildcards still expand there).
        label: label assigned to every loaded graph.
        generate: when True, (re)generate the graphs first.
        debug: when True, print progress information.
    Returns:
        (graph_list, label_list): the loaded graphs and a parallel label list.
    """
    graph_dir = os.path.join(data_dir_glob, 'nx')
    if generate:
        for data_dir in glob.glob(data_dir_glob):
            if debug:
                print(f"Generating graphs for {data_dir}")
            Pprof.check_data(data_dir, debug=debug)
            generate_graphs(data_dir)
    graph_list = []
    for filename in glob.glob(os.path.join(graph_dir, '*.gpickle')):
        # BUG FIX: removed the dead nx.read_gpickle() call whose result was
        # immediately overwritten — the API was also removed in networkx 3.0,
        # so that line crashes on modern networkx. pickle.load is the one
        # that was actually used; wrap it so the handle is closed.
        with open(filename, 'rb') as f:
            graph_list.append(pickle.load(f))
    assert len(graph_list) > 0, f"No graph in {data_dir_glob}"
    return graph_list, [label] * len(graph_list)
def build_call_graph(profiles: List, duration=5, build_inline=True, use_level=False, filename=None) -> nx.DiGraph:
    """Generic call-graph construction.

    Folds the stack samples of one or more pprof profiles into a single
    directed call graph whose node/edge weights are accumulated sample
    counts, then normalises those counters into rates in a post-processing
    pass.

    Args:
        profiles: list of Profile objects.
        duration: sampling duration in minutes (not referenced in this body).
        build_inline: whether inlined functions get their own chain entries.
        use_level: whether same-named functions at different stack depths
            become distinct nodes.
        filename: optional name recorded on the resulting graph.
    Returns:
        nx.DiGraph: the call graph; node 0 is the root node.
    """
    graph = nx.DiGraph()
    graph.filename = filename
    node_map = {}  # node key -> node id (ids are assigned by function_manager)
    # next_node_id = 0
    min_count = sys.maxsize  # smallest single sample count seen; normalisation base
    total_count = 0          # sum of all sample counts across all profiles
    num_profiles = len(profiles)
    # allowed_binaries = None
    # Only frames from these binaries are kept; None would disable filtering.
    allowed_binaries = ["[kernel.kallsyms]"]
    # Helper: create a node on first sight, then accumulate its counters.
    def add_node(func_name, count, flat_count, filename, line_number, binary, level, profile_idx):
        # nonlocal next_node_id
        # The node key optionally includes the stack level (use_level).
        node_key = (func_name, level) if use_level else func_name
        node_id = function_manager[func_name]['id']
        if node_key not in node_map:
            # Initial node attributes.
            node_attrs = {
                'x': 0,        # cumulative (inclusive) sample count
                'flat_x': 0,   # flat (leaf/self) sample count
                'name': func_name,
                'node_id': node_id,
                'filename': filename,
                'line_number': line_number,
                'binary': binary,
                'time_list': np.zeros(num_profiles, dtype=np.float64)  # per-profile counts
            }
            if use_level:
                node_attrs['level'] = level
            graph.add_node(node_id, **node_attrs)
            node_map[node_key] = node_id
            # next_node_id += 1
        # Accumulate this node's counters.
        graph.nodes[node_map[node_key]]['x'] += count
        graph.nodes[node_map[node_key]]['flat_x'] += flat_count
        graph.nodes[node_map[node_key]]['time_list'][profile_idx] += count
        return node_key
    # Helper: create an edge on first sight, then accumulate its counters.
    def add_edge(caller_id, callee_id, count, profile_idx):
        if not graph.has_edge(caller_id, callee_id):
            graph.add_edge(caller_id, callee_id,
                           edge_attr=0,
                           time_list=np.zeros(num_profiles, dtype=np.float64))
        graph.edges[caller_id, callee_id]['edge_attr'] += count
        graph.edges[caller_id, callee_id]['time_list'][profile_idx] += count
    # Add the root node.
    add_node(func_name='root', count=0, flat_count=0, filename='', line_number='', binary='', level=0, profile_idx=0)
    # Accumulate per-node counts for each profile.
    for profile_idx, profile in enumerate(profiles):
        # Walk every stack sample of this profile.
        for sample in profile.sample:
            count = sample.value[0]
            min_count = min(min_count, count)
            total_count += count
            # Build this sample's call chain.
            call_chain = []  # list of per-frame function tuples
            # Iterate from the leaf frame outwards.
            for location_id in sample.location_id:
                # pprof ids are 1-based; the asserts below check that the
                # tables are dense so id-1 indexing is valid.
                location = profile.location[location_id - 1]
                mapping = profile.mapping[location.mapping_id - 1]
                assert location.id == location_id
                assert mapping.id == location.mapping_id
                binary = profile.string_table[mapping.filename]
                # Optional filtering by binary name.
                if allowed_binaries is not None and binary not in allowed_binaries:
                    # print(f"binary {binary} not in allowed_binaries: {allowed_binaries}")
                    continue
                lines = location.line
                if len(lines) == 0:  # skip locations without line info
                    continue
                # Inlined functions: every line entry except the last one.
                if build_inline and len(lines) > 1:
                    for inline_line in lines[:-1]:
                        inline_function = profile.function[inline_line.function_id - 1]
                        assert inline_function.id == inline_line.function_id
                        inline_func_name = profile.string_table[inline_function.name]
                        inline_filename = profile.string_table[inline_function.filename]
                        inline_line_number = inline_line.line
                        # Record the inlined frame.
                        call_chain.append((inline_func_name, count, inline_filename, inline_line_number, binary))
                # The main (outermost) function of this location.
                line = lines[-1]
                function = profile.function[line.function_id - 1]
                assert function.id == line.function_id
                func_name = profile.string_table[function.name]
                filename = profile.string_table[function.filename]
                line_number = line.line
                # Record the main frame.
                call_chain.append((func_name, count, filename, line_number, binary))
            if not call_chain:
                continue
            # Append the root sentinel, then reverse so the chain runs
            # caller -> callee starting at 'root'.
            call_chain.append(('root', 0, '', '', ''))  # root node entry
            call_chain.reverse()
            # Add the chain's nodes and edges.
            node_keys = []
            for level, (func_name, count, filename, line_number, binary) in enumerate(call_chain[:-1]):
                node_key = add_node(func_name=func_name, count=count, flat_count=0,
                                    filename=filename, line_number=line_number, binary=binary, level=level, profile_idx=profile_idx)
                node_keys.append(node_key)
            # The last (leaf) node also accumulates flat time.
            level, (func_name, count, filename, line_number, binary) = len(call_chain) - 1, call_chain[-1]
            node_key = add_node(func_name=func_name, count=count, flat_count=count,
                                filename=filename, line_number=line_number, binary=binary, level=level, profile_idx=profile_idx)
            node_keys.append(node_key)
            # Add caller -> callee edges along the chain; the edge weight is
            # the callee's count.
            for i in range(len(call_chain) - 1):
                caller_key = node_keys[i]
                callee_key = node_keys[i + 1]
                caller_id = node_map[caller_key]
                callee_id = node_map[callee_key]
                add_edge(caller_id=caller_id, callee_id=callee_id, count=call_chain[i + 1][1], profile_idx=profile_idx)
    if len(graph.nodes) < 5:
        print(f"[graphbuilder] Warning: graph has less than 5 nodes: (unknown)")
    ################## Post-processing: turn counters into rates ##################
    # Node statistics.
    for node in graph.nodes:
        node_data = graph.nodes[node]
        node_count = node_data['x']
        # Preserve the raw counts in count_list (deep copy), keeping the
        # original data intact.
        node_data['count_list'] = node_data['time_list'].copy()
        node_data['count_list'] /= min_count
        node_data['count'] = float(np.mean(node_data['count_list']))
        node_data['time_list'] /= total_count / num_profiles
        node_data['x'] = node_count / total_count  # x becomes the call rate
        node_flat_count = node_data['flat_x']
        node_data['flat_count'] = node_flat_count // min_count
        node_data['flat_x'] = node_flat_count / total_count
        node_data['rate'] = node_data['x']
        node_data['flat_rate'] = node_data['flat_x']
        # Summary statistics over the per-profile lists.
        node_data['mu'] = float(np.mean(node_data['time_list']))
        node_data['count_mu'] = float(np.mean(node_data['count_list']))
        node_data['sigma'] = float(np.std(node_data['time_list'])) if len(node_data['time_list']) > 1 else 0.0
        node_data['count_sigma'] = float(np.std(node_data['count_list'])) if len(node_data['count_list']) > 1 else 0.0
    # Edge statistics (same normalisation as nodes).
    for edge in graph.edges:
        edge_data = graph.edges[edge]
        edge_count = edge_data['edge_attr']
        # Per-profile statistics for the edge's time_list.
        edge_data['count_list'] = edge_data['time_list'].copy()  # deep copy keeps raw data
        edge_data['count_list'] /= min_count
        edge_data['time_list'] /= total_count / num_profiles
        edge_data['count'] = float(np.mean(edge_data['count_list']))
        edge_data['edge_attr'] = edge_count / total_count
        edge_data['mu'] = float(np.mean(edge_data['time_list']))
        edge_data['count_mu'] = float(np.mean(edge_data['count_list']))
        edge_data['sigma'] = float(np.std(edge_data['time_list'])) if len(edge_data['time_list']) > 1 else 0.0
        edge_data['count_sigma'] = float(np.std(edge_data['count_list'])) if len(edge_data['count_list']) > 1 else 0.0
    assert function_manager['root']['id'] == 0
    # Pin the root node's rates.
    graph.nodes[0]['x'] = graph.nodes[0]['rate'] = 1.0
    graph.nodes[0]['flat_x'] = 0
    graph.nodes[0]['flat_rate'] = 0.0
    # NOTE(review): dividing by 1e9 presumes sample counts are nanoseconds — confirm.
    graph.time = total_count / 1e9 / num_profiles  # seconds
    graph.profile = profiles[0]  # keep the first profile for reference
    return graph
def build_call_graph_with_level(profile, duration=5, build_inline=True) -> nx.DiGraph:
    """Build a call graph that distinguishes same-named functions by level.

    Args:
        profile: a single Profile object.
        duration: sampling duration in minutes.
        build_inline: whether to account for inlined functions.
    Returns:
        nx.DiGraph: the call graph; node 0 is the root node.
    """
    # BUG FIX: build_call_graph expects a *list* of profiles (it calls
    # len()/enumerate() on its first argument); wrap the single profile,
    # mirroring the call site in generate_graphs().
    return build_call_graph([profile], duration, build_inline, use_level=True)
def load_common_graph(service: str, generate=True) -> nx.DiGraph:
    """Read every profile under a service's data directory and build one
    "common" graph aggregating their statistics.

    Args:
        service: service name (selects ``./data_normal/5m/<service>``).
        generate: when True, rebuild the common graph from the profiles and
            cache it; when False, load the previously pickled graph.
    Returns:
        nx.DiGraph: common graph carrying per-profile statistics.
    Raises:
        FileNotFoundError: the data directory or profile files are missing.
        ValueError: no profile file could be parsed.
    """
    # Output location for the cached common graph.
    output_dir = './data_common'
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, f'{service}_common.gpickle')
    if not generate:
        # FIX: use a context manager so the pickle file handle is closed
        # (the original open() call leaked it).
        with open(output_path, 'rb') as f:
            return pickle.load(f)
    # Build the data directory path.
    data_dir = f'./data_normal/5m/{service}'
    if not os.path.exists(data_dir):
        raise FileNotFoundError(f"目录不存在: {data_dir}")
    Pprof.check_data(data_dir, debug=False)
    # Collect all profile files.
    profile_files = glob.glob(os.path.join(data_dir, '*.pb'))
    if not profile_files:
        raise FileNotFoundError(f"在 {data_dir} 中未找到profile文件")
    print(f"找到 {len(profile_files)} 个profile文件")
    # Parse every profile; unparsable files are skipped (best-effort) but
    # reported so data problems stay visible.
    profiles = []
    for file_path in profile_files:
        try:
            profiles.append(Pprof.parse_profile(file_path))
        except Exception as e:
            print(f"警告: 无法解析文件 {file_path}: {str(e)}")
            continue
    if not profiles:
        raise ValueError("没有成功解析任何profile文件")
    print(f"成功解析 {len(profiles)} 个profile")
    # Build and cache the common graph.
    common_graph = build_call_graph(profiles)
    with open(output_path, 'wb') as f:
        pickle.dump(common_graph, f)
    print(f"Common图已保存到: {output_path}")
    return common_graph
def piecewise_linear_encoding(value, num_bins=768, min_val=0, max_val=1):
    """Piecewise-linear ("thermometer") encoding of a scalar feature.

    Every bin strictly left of *value* is set to 1, the bin containing
    *value* holds the fractional position of *value* inside that bin, and
    all bins to the right stay 0.

    Args:
        value: scalar to encode; clamped into [min_val, max_val].
        num_bins: number of equal-width bins (output dimensionality).
        min_val: lower bound of the encoded range.
        max_val: upper bound of the encoded range.
    Returns:
        torch.Tensor: encoding of shape [num_bins].
    Raises:
        ValueError: if num_bins < 1 or max_val <= min_val.
    """
    # FIX: guard degenerate parameters that previously surfaced as a
    # confusing ZeroDivisionError inside the arithmetic below.
    if num_bins < 1:
        raise ValueError(f"num_bins must be positive, got {num_bins}")
    if max_val <= min_val:
        raise ValueError(f"max_val ({max_val}) must exceed min_val ({min_val})")
    # Clamp the value into the supported range.
    value = max(min(value, max_val), min_val)
    # Width of each bin.
    bin_width = (max_val - min_val) / num_bins
    # Index of the bin containing value.
    bin_index = int((value - min_val) / bin_width)
    # value == max_val lands exactly on the right edge; fold it into the
    # last bin so indexing stays valid.
    if bin_index == num_bins:
        bin_index = num_bins - 1
    encoding = torch.zeros(num_bins)
    encoding[:bin_index] = 1  # bins fully left of value are saturated
    # Left edge of value's own bin.
    left_value = min_val + bin_index * bin_width
    # Fractional fill of value's bin: (value - left) / (right - left).
    encoding[bin_index] = (value - left_value) / bin_width
    return encoding
def graph_to_data(graph: nx.DiGraph, label: int, embed=True, num_bins=768):
    """Convert an nx.DiGraph into a Torch Geometric Data object.

    Args:
        graph (nx.DiGraph): input directed graph.
        label (int): graph-level label.
        embed (bool): when True, node features become the function-name
            embedding concatenated with a num_bins-dim rate encoding, and
            edge features become a num_bins-dim encoding of edge_attr.
        num_bins (int): bin count for the piecewise-linear encodings.
    Returns:
        data (torch_geometric.data.Data): the converted data object.
    """
    # Convert the nx.DiGraph, carrying node and edge attributes across.
    data = from_networkx(graph)
    # data.x must be a column vector.
    if len(data.x.shape) == 1:
        data.x = data.x.view(-1, 1)
    # Likewise for edge_attr when present.
    if 'edge_attr' in data and len(data.edge_attr.shape) == 1:
        data.edge_attr = data.edge_attr.view(-1, 1)
    # Number of nodes.
    num_nodes = data.x.size(0) if hasattr(data, 'x') else len(graph.nodes)
    if embed:
        # Piecewise-linear encodings of each node's call rate ('x').
        rate_encodings = []
        for i, node_idx in enumerate(graph.nodes()):
            node_data = graph.nodes[node_idx]
            # Encode the node's rate feature.
            rate_encoding = piecewise_linear_encoding(node_data['x'], num_bins=num_bins)
            rate_encodings.append(rate_encoding)
        # Function-name embeddings looked up via function_manager.
        # NOTE(review): torch.cat along dim=0 assumes each stored embedding
        # is 2-D, e.g. (1, 768) — confirm against function_manager.
        embeddings = torch.cat([function_manager[name]['embedding'] for name in data.name], dim=0)
        rate_encodings = torch.stack(rate_encodings, dim=0)
        data.x = torch.cat([embeddings, rate_encodings], dim=1)
        # data.x = rate_encodings
        edge_encodings = []
        for i, (src, dst) in enumerate(graph.edges()):
            edge_data = graph.edges[src, dst]
            # Encode the edge's rate feature (edge_attr).
            edge_encoding = piecewise_linear_encoding(edge_data['edge_attr'], num_bins=num_bins)
            edge_encodings.append(edge_encoding)
        # Stack all edge encodings into one tensor.
        edge_encodings = torch.stack(edge_encodings, dim=0)
        data.edge_attr = edge_encodings
    # Graph-level label and total time.
    data.y = torch.tensor([label])
    data.time = torch.tensor([graph.time])
    # Drop attributes the downstream model does not consume.
    for attr in ['node_id', 'name', 'filename', 'line_number']:
        if hasattr(data, attr):
            delattr(data, attr)
    return data
def compare_call_graph(new_graph: nx.DiGraph, normal_graph: nx.DiGraph,
                       n_sigma=2, n_call_chain=10, rate_threshold=0.001, non_zero_threshold=5) -> str:
    """Diff two call graphs and report anomalous nodes and call chains.

    Args:
        new_graph: the graph under analysis.
        normal_graph: the baseline "common" graph built from many profiles.
        n_sigma: base sigma multiplier; dynamically shrunk as mu grows.
        n_call_chain: print the call chains of the top-n differing nodes.
        rate_threshold: drop nodes/edges whose call rate is below this.
        non_zero_threshold: skip nodes whose baseline time_list has this
            many or fewer non-zero entries.
    Returns:
        str: the analysis report as plain text.
    """
    # Build a filtered copy of the new graph keeping only nodes and edges
    # whose call rate exceeds the threshold.
    filtered_new_graph = nx.DiGraph()
    # Keep high-rate nodes; the root node (0) is always retained.
    for node in new_graph.nodes():
        node_data = new_graph.nodes[node]
        node_rate = node_data.get('x', 0)
        if node == 0 or node_rate >= rate_threshold:
            filtered_new_graph.add_node(node, **node_data)
    # Keep high-rate edges whose endpoints survived the node filter.
    for u, v in new_graph.edges():
        if u in filtered_new_graph.nodes() and v in filtered_new_graph.nodes():
            edge_data = new_graph.edges[u, v]
            edge_rate = edge_data.get('edge_attr', 0)
            if edge_rate >= rate_threshold:
                filtered_new_graph.add_edge(u, v, **edge_data)
    # IDs of all nodes whose names should appear in the report.
    node_ids_to_show = set()
    # 1. Find differing nodes (beyond the dynamic sigma threshold).
    diff_nodes = []
    for node in filtered_new_graph.nodes():
        if node == 0:
            continue
        if node not in normal_graph.nodes():
            node_ids_to_show.add(node)
            continue
        new_data = filtered_new_graph.nodes[node]
        normal_data = normal_graph.nodes[node]
        # Skip nodes whose baseline data is too sparse.
        if 'time_list' in normal_data:
            # Count the non-zero baseline entries.
            non_zero_count = np.count_nonzero(normal_data['time_list'])
            if non_zero_count <= non_zero_threshold:  # too few non-zero values: skip
                continue
        # NOTE(review): if 'time_list' is absent, non_zero_count below is
        # unbound (or stale from a previous iteration) — verify baseline
        # graphs always carry time_list.
        new_time = new_data.get('x', 0)  # use x (call rate) as the time feature
        normal_mu = normal_data['mu']
        normal_sigma = normal_data['sigma']
        # Dynamically adjust the sigma threshold with mu: keep the original
        # n_sigma when mu is tiny and shrink linearly as mu grows.
        # E.g. mu=0 gives n_sigma; mu=0.1 reduces the threshold to 1.0.
        mu_threshold = 0.1  # mu at which the sigma threshold bottoms out
        min_sigma = 1.0     # minimum sigma multiplier
        # Linear interpolation of the effective sigma threshold.
        # actual_sigma_threshold = max(min_sigma, n_sigma - (n_sigma - min_sigma) * min(1.0, normal_mu / mu_threshold))
        actual_sigma_threshold = - (n_sigma - 1) / mu_threshold * normal_mu + n_sigma
        # Flag the node when its time feature deviates beyond the dynamic threshold.
        # NOTE(review): normal_sigma == 0 makes the 'diff' score below divide
        # by zero — confirm baselines always have sigma > 0 at this point.
        if abs(new_time - normal_mu) > actual_sigma_threshold * normal_sigma:
            diff_nodes.append({
                'node_id': node,
                'name': new_data['name'],
                'binary': new_data['binary'],
                'new_time': new_time,
                'normal_mu': normal_mu,
                'normal_sigma': normal_sigma,
                'diff': (new_time - normal_mu) / normal_sigma * 2 * np.log10(normal_mu * 10000 + 1),  # boost priority for large-mu baselines; the log makes a 10x absolute diff roughly double the priority
                'actual_threshold': actual_sigma_threshold,  # threshold actually applied
                'non_zero_count': non_zero_count
            })
    # Rank by the composite diff score.
    diff_nodes.sort(key=lambda x: x['diff'], reverse=True)
    for node_data in diff_nodes:
        node_ids_to_show.add(node_data['node_id'])
    result = []
    result.append(f"Total Time: new: {new_graph.time:.5f}s, normal: {normal_graph.time:.5f}s\n")
    # 2. Print the node-id -> name mapping grouped by binary.
    result.append("Node ID and Name Mapping by Binary (non means not in normal graph):")
    # Group the nodes to show by binary.
    binary_nodes = defaultdict(list)
    for node_id in sorted(node_ids_to_show):
        if node_id not in filtered_new_graph.nodes():
            continue
        node_data = filtered_new_graph.nodes[node_id]
        binary_name = node_data['binary']
        binary_nodes[binary_name].append((node_id, node_data))
    # Emit one section per binary.
    for binary, nodes in sorted(binary_nodes.items()):
        if binary == '':
            continue
        result.append(f"\nBinary: {binary}")
        for node_id, node_data in sorted(nodes, key=lambda x: x[0]):
            normal_value = normal_graph.nodes.get(node_id, {}).get('x', 'non')
            normal_display = f"{normal_value:.5f}" if isinstance(normal_value, (int, float)) else normal_value
            result.append(f" N{node_id}({node_data['x']:.5f}/{normal_display}): {node_data['name']}")
    result.append("\n")
    # Print edges whose both endpoints are among the shown nodes.
    result.append("Edges between important nodes:")
    edges_to_show = []
    for u, v in filtered_new_graph.edges():
        if u in node_ids_to_show and v in node_ids_to_show:
            edge_data = filtered_new_graph.edges[u, v]
            edge_rate = edge_data.get('edge_attr', 0)
            edges_to_show.append((u, v, edge_rate))
    # Sort edges by weight.
    edges_to_show.sort(key=lambda x: x[2], reverse=True)
    for u, v, rate in edges_to_show:
        result.append(f"N{u} -> N{v} ({rate:.5f})")
    result.append("\n")
    # 3. Print the differing nodes.
    result.append(f"Diff Nodes over dynamic sigma threshold:")
    for node_data in diff_nodes:
        result.append(
            f"N{node_data['node_id']}({node_data['new_time']:.5f}/{node_data['normal_mu']:.5f}), diff score: {node_data['diff']:.2f}"
        )
    result.append("\n")
    # 4. Print call chains.
    result.append(f"Top{n_call_chain} Diff Nodes Call Chain:")
    # Collect the call chains of the top differing nodes, recording their
    # node ids as we go.
    call_chains = []
    for i, node_data in enumerate(diff_nodes):
        try:
            # Search in the filtered graph so only high-rate paths appear.
            path = nx.shortest_path(filtered_new_graph, source=0, target=node_data['node_id'])
            path_node_strs = []
            for path_node in path:
                path_node_data = filtered_new_graph.nodes[path_node]
                node_rate = path_node_data.get('x', 0)
                normal_value = normal_graph.nodes.get(path_node, {}).get('x', 'non')
                normal_display = f"{normal_value:.5f}" if isinstance(normal_value, (int, float)) else normal_value
                path_node_strs.append(f"N{path_node} ({node_rate:.5f}/{normal_display})")
            path_str = ' -> '.join(path_node_strs)
            if path_str in call_chains:
                continue
            call_chains.append(path_str)
            result.append(f"{path_str}")
            node_ids_to_show.update(path)  # include every node on the chain
        except nx.NetworkXNoPath:
            pass
        if len(call_chains) >= n_call_chain:
            break
    return "\n".join(result)