xref: /aosp_15_r20/external/bcc/src/python/bcc/table.py (revision 387f9dfdfa2baef462e92476d413c7bc2470293e)
1# Copyright 2015 PLUMgrid
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15from __future__ import print_function
16try:
17    from collections.abc import MutableMapping
18except ImportError:
19    from collections import MutableMapping
20from time import strftime
21import ctypes as ct
22from functools import reduce
23import os
24import errno
25import re
26import sys
27
28from .libbcc import lib, _RAW_CB_TYPE, _LOST_CB_TYPE, _RINGBUF_CB_TYPE, bcc_perf_buffer_opts
29from .utils import get_online_cpus
30from .utils import get_possible_cpus
31
# BPF map type ids, mirroring enum bpf_map_type in the Linux kernel UAPI
# (include/uapi/linux/bpf.h). Used by Table() to pick a wrapper class.
BPF_MAP_TYPE_HASH = 1
BPF_MAP_TYPE_ARRAY = 2
BPF_MAP_TYPE_PROG_ARRAY = 3
BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4
BPF_MAP_TYPE_PERCPU_HASH = 5
BPF_MAP_TYPE_PERCPU_ARRAY = 6
BPF_MAP_TYPE_STACK_TRACE = 7
BPF_MAP_TYPE_CGROUP_ARRAY = 8
BPF_MAP_TYPE_LRU_HASH = 9
BPF_MAP_TYPE_LRU_PERCPU_HASH = 10
BPF_MAP_TYPE_LPM_TRIE = 11
BPF_MAP_TYPE_ARRAY_OF_MAPS = 12
BPF_MAP_TYPE_HASH_OF_MAPS = 13
BPF_MAP_TYPE_DEVMAP = 14
BPF_MAP_TYPE_SOCKMAP = 15
BPF_MAP_TYPE_CPUMAP = 16
BPF_MAP_TYPE_XSKMAP = 17
BPF_MAP_TYPE_SOCKHASH = 18
BPF_MAP_TYPE_CGROUP_STORAGE = 19
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21
BPF_MAP_TYPE_QUEUE = 22
BPF_MAP_TYPE_STACK = 23
BPF_MAP_TYPE_SK_STORAGE = 24
BPF_MAP_TYPE_DEVMAP_HASH = 25
BPF_MAP_TYPE_STRUCT_OPS = 26
BPF_MAP_TYPE_RINGBUF = 27
BPF_MAP_TYPE_INODE_STORAGE = 28
BPF_MAP_TYPE_TASK_STORAGE = 29

# Human-readable names for the map type ids above; consumed by
# get_table_type_name().
map_type_name = {
    BPF_MAP_TYPE_HASH: "HASH",
    BPF_MAP_TYPE_ARRAY: "ARRAY",
    BPF_MAP_TYPE_PROG_ARRAY: "PROG_ARRAY",
    BPF_MAP_TYPE_PERF_EVENT_ARRAY: "PERF_EVENT_ARRAY",
    BPF_MAP_TYPE_PERCPU_HASH: "PERCPU_HASH",
    BPF_MAP_TYPE_PERCPU_ARRAY: "PERCPU_ARRAY",
    BPF_MAP_TYPE_STACK_TRACE: "STACK_TRACE",
    BPF_MAP_TYPE_CGROUP_ARRAY: "CGROUP_ARRAY",
    BPF_MAP_TYPE_LRU_HASH: "LRU_HASH",
    BPF_MAP_TYPE_LRU_PERCPU_HASH: "LRU_PERCPU_HASH",
    BPF_MAP_TYPE_LPM_TRIE: "LPM_TRIE",
    BPF_MAP_TYPE_ARRAY_OF_MAPS: "ARRAY_OF_MAPS",
    BPF_MAP_TYPE_HASH_OF_MAPS: "HASH_OF_MAPS",
    BPF_MAP_TYPE_DEVMAP: "DEVMAP",
    BPF_MAP_TYPE_SOCKMAP: "SOCKMAP",
    BPF_MAP_TYPE_CPUMAP: "CPUMAP",
    BPF_MAP_TYPE_XSKMAP: "XSKMAP",
    BPF_MAP_TYPE_SOCKHASH: "SOCKHASH",
    BPF_MAP_TYPE_CGROUP_STORAGE: "CGROUP_STORAGE",
    BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: "REUSEPORT_SOCKARRAY",
    BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: "PERCPU_CGROUP_STORAGE",
    BPF_MAP_TYPE_QUEUE: "QUEUE",
    BPF_MAP_TYPE_STACK: "STACK",
    BPF_MAP_TYPE_SK_STORAGE: "SK_STORAGE",
    BPF_MAP_TYPE_DEVMAP_HASH: "DEVMAP_HASH",
    BPF_MAP_TYPE_STRUCT_OPS: "STRUCT_OPS",
    BPF_MAP_TYPE_RINGBUF: "RINGBUF",
    BPF_MAP_TYPE_INODE_STORAGE: "INODE_STORAGE",
    BPF_MAP_TYPE_TASK_STORAGE: "TASK_STORAGE",
}

# Widest histogram bar produced by _stars() / the _print_*_hist() helpers.
stars_max = 40
# Slot counts for histogram tables: 65 log2 slots accommodate any 64-bit
# value; 1025 linear slots cover indexes 0..1024.
log2_index_max = 65
linear_index_max = 1025
97
98# helper functions, consider moving these to a utils module
99def _stars(val, val_max, width):
100    i = 0
101    text = ""
102    while (1):
103        if (i > (width * val / val_max) - 1) or (i > width - 1):
104            break
105        text += "*"
106        i += 1
107    if val > val_max:
108        text = text[:-1] + "+"
109    return text
110
def get_json_hist(vals, val_type, section_bucket=None):
    """Public wrapper around _get_json_hist().

    Bug fix: the caller's ``section_bucket`` used to be discarded (the
    wrapper hard-coded ``section_bucket=None``); it is now forwarded so the
    secondary-key entry actually appears in the returned histogram dict.
    """
    return _get_json_hist(vals, val_type, section_bucket=section_bucket)
113
114def _get_json_hist(vals, val_type, section_bucket=None):
115    hist_list = []
116    max_nonzero_idx = 0
117    for i in range(len(vals)):
118        if vals[i] != 0:
119            max_nonzero_idx = i
120    index = 1
121    prev = 0
122    for i in range(len(vals)):
123        if i != 0 and i <= max_nonzero_idx:
124            index = index * 2
125
126            list_obj = {}
127            list_obj['interval-start'] = prev
128            list_obj['interval-end'] = int(index) - 1
129            list_obj['count'] = int(vals[i])
130
131            hist_list.append(list_obj)
132
133            prev = index
134    histogram = {"ts": strftime("%Y-%m-%d %H:%M:%S"), "val_type": val_type, "data": hist_list}
135    if section_bucket:
136        histogram[section_bucket[0]] = section_bucket[1]
137    return histogram
138
def _print_log2_hist(vals, val_type, strip_leading_zero):
    """Print ``vals`` as an ASCII log2 histogram.

    ``vals[i]`` counts values in [2**(i-1), 2**i - 1]. Column widths widen
    (and bars shrink to half of stars_max) once buckets beyond index 32 are
    populated. When ``strip_leading_zero`` is truthy, rows before the first
    non-zero count are suppressed.
    """
    idx_max = -1
    val_max = 0
    for i, v in enumerate(vals):
        if v > 0:
            idx_max = i
        if v > val_max:
            val_max = v

    if idx_max <= 32:
        header = "     %-19s : count     distribution"
        body = "%10d -> %-10d : %-8d |%-*s|"
        stars = stars_max
    else:
        # wide layout for 64-bit ranges
        header = "               %-29s : count     distribution"
        body = "%20d -> %-20d : %-8d |%-*s|"
        stars = int(stars_max / 2)

    if idx_max > 0:
        print(header % val_type)

    skipping = bool(strip_leading_zero)
    for i in range(1, idx_max + 1):
        low = (1 << i) >> 1
        high = (1 << i) - 1
        if low == high:
            low -= 1
        val = vals[i]
        if skipping and not val:
            continue
        skipping = False
        print(body % (low, high, val, stars, _stars(val, val_max, stars)))
176
def _print_linear_hist(vals, val_type, strip_leading_zero):
    """Print ``vals`` as an ASCII linear histogram.

    ``vals[i]`` is the count for integer bucket ``i``. When
    ``strip_leading_zero`` is truthy, rows before the first non-zero count
    are suppressed.
    """
    idx_max = -1
    val_max = 0
    for i, v in enumerate(vals):
        if v > 0:
            idx_max = i
        if v > val_max:
            val_max = v

    header = "     %-13s : count     distribution"
    body = "        %-10d : %-8d |%-*s|"
    stars = stars_max

    if idx_max >= 0:
        print(header % val_type)

    skipping = bool(strip_leading_zero)
    for i in range(idx_max + 1):
        val = vals[i]
        if skipping and not val:
            continue
        skipping = False
        print(body % (i, val, stars, _stars(val, val_max, stars)))
204
205
def get_table_type_name(ttype):
    """Return the human-readable name for BPF map type id ``ttype``,
    or "<unknown>" if the id is not in map_type_name."""
    return map_type_name.get(ttype, "<unknown>")
211
212
def _get_event_class(event_map):
    """Build an anonymous ctypes Structure class for a perf event map.

    Queries libbcc for the event's field list ("name#type" strings) and
    maps each C type name to a ctypes type, handling fixed-size arrays and
    collapsing any ``enum ...`` type to a plain int. Exits the process with
    an error message if a type is not recognized (exceptions would be
    swallowed by the caller).
    """
    ct_mapping = {
        'char'              : ct.c_char,
        's8'                : ct.c_char,
        'unsigned char'     : ct.c_ubyte,
        'u8'                : ct.c_ubyte,
        'u8 *'              : ct.c_char_p,
        'char *'            : ct.c_char_p,
        'short'             : ct.c_short,
        's16'               : ct.c_short,
        'unsigned short'    : ct.c_ushort,
        'u16'               : ct.c_ushort,
        'int'               : ct.c_int,
        's32'               : ct.c_int,
        'enum'              : ct.c_int,
        'unsigned int'      : ct.c_uint,
        'u32'               : ct.c_uint,
        'long'              : ct.c_long,
        'unsigned long'     : ct.c_ulong,
        'long long'         : ct.c_longlong,
        's64'               : ct.c_longlong,
        'unsigned long long': ct.c_ulonglong,
        'u64'               : ct.c_ulonglong,
        '__int128'          : (ct.c_longlong * 2),
        'unsigned __int128' : (ct.c_ulonglong * 2),
        'void *'            : ct.c_void_p,
    }

    # handle array types e.g. "int [16]", "char[16]" or "unsigned char[16]"
    array_type = re.compile(r"(\S+(?: \S+)*) ?\[([0-9]+)\]$")

    fields = []
    num_fields = lib.bpf_perf_event_fields(event_map.bpf.module, event_map._name)
    for idx in range(num_fields):
        field = lib.bpf_perf_event_field(event_map.bpf.module, event_map._name, idx).decode()
        name_and_type = re.match(r"(.*)#(.*)", field)
        field_name = name_and_type.group(1)
        field_type = name_and_type.group(2)

        # any enum is represented as a plain int
        if re.match(r"enum .*", field_type):
            field_type = "enum"

        arr = array_type.match(field_type)
        try:
            if arr:
                fields.append((field_name, ct_mapping[arr.group(1)] * int(arr.group(2))))
            else:
                fields.append((field_name, ct_mapping[field_type]))
        except KeyError:
            # Using print+sys.exit instead of raising exceptions,
            # because exceptions are caught by the caller.
            print("Type: '%s' not recognized. Please define the data with ctypes manually."
                  % field_type, file=sys.stderr)
            sys.exit(1)
    return type('', (ct.Structure,), {'_fields_': fields})
270
271
def Table(bpf, map_id, map_fd, keytype, leaftype, name, **kwargs):
    """Table(bpf, map_id, map_fd, keytype, leaftype, name, **kwargs)

    Create a python object out of a reference to a bpf table handle.

    The concrete wrapper class is selected from the map type that libbcc
    reports for map_id. ``name`` is forwarded only to the types that need
    it (perf event arrays and ring buffers); ``**kwargs`` only to the
    per-cpu variants.

    Raises:
        Exception: if the reported map type has no wrapper class.
    """
    ttype = lib.bpf_table_type_id(bpf.module, map_id)
    t = None
    if ttype == BPF_MAP_TYPE_HASH:
        t = HashTable(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_ARRAY:
        t = Array(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_PROG_ARRAY:
        t = ProgArray(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_PERF_EVENT_ARRAY:
        t = PerfEventArray(bpf, map_id, map_fd, keytype, leaftype, name)
    elif ttype == BPF_MAP_TYPE_PERCPU_HASH:
        t = PerCpuHash(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
    elif ttype == BPF_MAP_TYPE_PERCPU_ARRAY:
        t = PerCpuArray(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
    elif ttype == BPF_MAP_TYPE_LPM_TRIE:
        t = LpmTrie(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_STACK_TRACE:
        t = StackTrace(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_LRU_HASH:
        t = LruHash(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_LRU_PERCPU_HASH:
        t = LruPerCpuHash(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_CGROUP_ARRAY:
        t = CgroupArray(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_DEVMAP:
        t = DevMap(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_CPUMAP:
        t = CpuMap(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_XSKMAP:
        t = XskMap(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_ARRAY_OF_MAPS:
        t = MapInMapArray(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_HASH_OF_MAPS:
        t = MapInMapHash(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype in (BPF_MAP_TYPE_QUEUE, BPF_MAP_TYPE_STACK):
        # queue/stack maps are keyless; only the leaf type is meaningful
        t = QueueStack(bpf, map_id, map_fd, leaftype)
    elif ttype == BPF_MAP_TYPE_RINGBUF:
        t = RingBuf(bpf, map_id, map_fd, keytype, leaftype, name)
    if t is None:
        raise Exception("Unknown table type %d" % ttype)
    return t
318
319
class TableBase(MutableMapping):
    """Base class exposing one BPF map as a Python mutable mapping.

    Keys and values are ctypes instances of ``self.Key`` / ``self.Leaf``.
    Single-element access goes through libbcc's bpf_lookup_elem /
    bpf_update_elem / bpf_delete_elem wrappers; the items_*_batch() methods
    use the kernel's batched map syscalls. The print_*_hist() helpers
    interpret the map contents as log2/linear histograms.
    """

    def __init__(self, bpf, map_id, map_fd, keytype, leaftype, name=None):
        self.bpf = bpf
        self.map_id = map_id
        self.map_fd = map_fd
        self.Key = keytype
        self.Leaf = leaftype
        self.ttype = lib.bpf_table_type_id(self.bpf.module, self.map_id)
        self.flags = lib.bpf_table_flags_id(self.bpf.module, self.map_id)
        # Callback registry; not referenced in this base class (presumably
        # populated by event-oriented subclasses -- confirm against them).
        self._cbs = {}
        self._name = name
        self.max_entries = int(lib.bpf_table_max_entries_id(self.bpf.module,
                self.map_id))

    def get_fd(self):
        """Return the raw file descriptor of the underlying BPF map."""
        return self.map_fd

    def key_sprintf(self, key):
        """Format a Key instance as bytes using libbcc's key printer."""
        # 8 text bytes of headroom per key byte for the formatted output
        buf = ct.create_string_buffer(ct.sizeof(self.Key) * 8)
        res = lib.bpf_table_key_snprintf(self.bpf.module, self.map_id, buf,
                                         len(buf), ct.byref(key))
        if res < 0:
            raise Exception("Could not printf key")
        return buf.value

    def leaf_sprintf(self, leaf):
        """Format a Leaf instance as bytes using libbcc's leaf printer."""
        buf = ct.create_string_buffer(ct.sizeof(self.Leaf) * 8)
        res = lib.bpf_table_leaf_snprintf(self.bpf.module, self.map_id, buf,
                                          len(buf), ct.byref(leaf))
        if res < 0:
            raise Exception("Could not printf leaf")
        return buf.value

    def key_scanf(self, key_str):
        """Parse ``key_str`` (as produced by key_sprintf) into a new Key."""
        key = self.Key()
        res = lib.bpf_table_key_sscanf(self.bpf.module, self.map_id, key_str,
                                       ct.byref(key))
        if res < 0:
            raise Exception("Could not scanf key")
        return key

    def leaf_scanf(self, leaf_str):
        """Parse ``leaf_str`` (as produced by leaf_sprintf) into a new Leaf."""
        leaf = self.Leaf()
        res = lib.bpf_table_leaf_sscanf(self.bpf.module, self.map_id, leaf_str,
                                        ct.byref(leaf))
        if res < 0:
            raise Exception("Could not scanf leaf")
        return leaf

    def __getitem__(self, key):
        """Look up ``key`` in the map; raises KeyError if absent."""
        leaf = self.Leaf()
        res = lib.bpf_lookup_elem(self.map_fd, ct.byref(key), ct.byref(leaf))
        if res < 0:
            raise KeyError
        return leaf

    def __setitem__(self, key, leaf):
        """Create or update the map entry for ``key`` with ``leaf``."""
        res = lib.bpf_update_elem(self.map_fd, ct.byref(key), ct.byref(leaf), 0)
        if res < 0:
            errstr = os.strerror(ct.get_errno())
            raise Exception("Could not update table: %s" % errstr)

    def __delitem__(self, key):
        """Delete the map entry for ``key``; raises KeyError if absent."""
        res = lib.bpf_delete_elem(self.map_fd, ct.byref(key))
        if res < 0:
            raise KeyError

    # override the MutableMapping's implementation of these since they
    # don't handle KeyError nicely
    def itervalues(self):
        for key in self:
            # a map entry may be deleted in between discovering the key and
            # fetching the value, suppress such errors
            try:
                yield self[key]
            except KeyError:
                pass

    def iteritems(self):
        # same deletion race as itervalues(): skip keys that vanished
        for key in self:
            try:
                yield (key, self[key])
            except KeyError:
                pass

    def items(self):
        """Return a list snapshot of (key, value) pairs."""
        return [item for item in self.iteritems()]

    def values(self):
        """Return a list snapshot of the map's values."""
        return [value for value in self.itervalues()]

    def clear(self):
        # default clear uses popitem, which can race with the bpf prog
        for k in self.keys():
            self.__delitem__(k)

    def _alloc_keys_values(self, alloc_k=False, alloc_v=False, count=None):
        """Allocate keys and/or values arrays. Useful for in items_*_batch.

        Args:
            alloc_k (bool): True to allocate keys array, False otherwise.
            Default is False.
            alloc_v (bool): True to allocate values array, False otherwise.
            Default is False.
            count (int): number of elements in the array(s) to allocate. If
            count is None then it allocates the maximum number of elements i.e
            self.max_entries.

        Returns:
            tuple: (count, keys, values). Where count is ct.c_uint32,
            and keys and values an instance of ct.Array
        Raises:
            ValueError: If count is less than 1 or greater than
            self.max_entries.
        """
        keys = values = None
        if not alloc_k and not alloc_v:
            return (ct.c_uint32(0), None, None)

        if not count:  # means alloc maximum size
            count = self.max_entries
        elif count < 1 or count > self.max_entries:
            raise ValueError("Wrong count")

        if alloc_k:
            keys = (self.Key * count)()
        if alloc_v:
            values = (self.Leaf * count)()

        return (ct.c_uint32(count), keys, values)

    def _sanity_check_keys_values(self, keys=None, values=None):
        """Check if the given keys or values have the right type and size.

        Args:
            keys (ct.Array): keys array to check
            values (ct.Array): values array to check
        Returns:
            ct.c_uint32 : the size of the array(s)
        Raises:
            ValueError: If length of arrays is less than 1 or greater than
            self.max_entries, or when both arrays length are different.
            TypeError: If the keys and values are not an instance of ct.Array
        """
        arr_len = 0
        for elem in [keys, values]:
            if elem:
                if not isinstance(elem, ct.Array):
                    raise TypeError

                arr_len = len(elem)
                if arr_len < 1 or arr_len > self.max_entries:
                    raise ValueError("Array's length is wrong")

        if keys and values:
            # check both length are equal
            if len(keys) != len(values):
                raise ValueError("keys array length != values array length")

        return ct.c_uint32(arr_len)

    def items_lookup_batch(self):
        """Look up all the key-value pairs in the map.

        Args:
            None
        Yields:
            tuple: The tuple of (key,value) for every entries that have
            been looked up.
        Notes: lookup batch on a keys subset is not supported by the kernel.
        """
        for k, v in self._items_lookup_and_optionally_delete_batch(delete=False):
            yield(k, v)
        return

    def items_delete_batch(self, ct_keys=None):
        """Delete the key-value pairs related to the keys given as parameters.
        Note that if no key are given, it is faster to call
        lib.bpf_lookup_and_delete_batch than create keys array and then call
        lib.bpf_delete_batch on these keys.

        Args:
            ct_keys (ct.Array): keys array to delete. If an array of keys is
            given then it deletes all the related keys-values.
            If keys is None (default) then it deletes all entries.
        Yields:
            tuple: The tuple of (key,value) for every entries that have
            been deleted.
        Raises:
            Exception: If bpf syscall return value indicates an error.
        """
        if ct_keys is not None:
            ct_cnt = self._sanity_check_keys_values(keys=ct_keys)
            res = lib.bpf_delete_batch(self.map_fd,
                                       ct.byref(ct_keys),
                                       ct.byref(ct_cnt)
                                       )
            if (res != 0):
                raise Exception("BPF_MAP_DELETE_BATCH has failed: %s"
                                % os.strerror(ct.get_errno()))

        else:
            # A single next() on the generator runs the whole batched
            # lookup-and-delete loop (items are only yielded after that loop
            # completes), so we can return immediately.
            for _ in self.items_lookup_and_delete_batch():
                return

    def items_update_batch(self, ct_keys, ct_values):
        """Update all the key-value pairs in the map provided.
        The arrays must be the same length, between 1 and the maximum number
        of entries.

        Args:
            ct_keys (ct.Array): keys array to update
            ct_values (ct.Array): values array to update
        Raises:
            Exception: If bpf syscall return value indicates an error.
        """
        ct_cnt = self._sanity_check_keys_values(keys=ct_keys, values=ct_values)
        res = lib.bpf_update_batch(self.map_fd,
                                   ct.byref(ct_keys),
                                   ct.byref(ct_values),
                                   ct.byref(ct_cnt)
                                   )
        if (res != 0):
            raise Exception("BPF_MAP_UPDATE_BATCH has failed: %s"
                            % os.strerror(ct.get_errno()))

    def items_lookup_and_delete_batch(self):
        """Look up and delete all the key-value pairs in the map.

        Args:
            None
        Yields:
            tuple: The tuple of (key,value) for every entries that have
            been looked up and deleted.
        Notes: lookup and delete batch on a keys subset is not supported by
        the kernel.
        """
        for k, v in self._items_lookup_and_optionally_delete_batch(delete=True):
            yield(k, v)
        return

    def _items_lookup_and_optionally_delete_batch(self, delete=True):
        """Look up and optionally delete all the key-value pairs in the map.

        Args:
            delete (bool) : look up and delete the key-value pairs when True,
            else just look up.
        Yields:
            tuple: The tuple of (key,value) for every entries that have
            been looked up and deleted.
        Raises:
            Exception: If bpf syscall return value indicates an error.
        Notes: lookup and delete batch on a keys subset is not supported by
        the kernel.
        """
        if delete is True:
            bpf_batch = lib.bpf_lookup_and_delete_batch
            bpf_cmd = "BPF_MAP_LOOKUP_AND_DELETE_BATCH"
        else:
            bpf_batch = lib.bpf_lookup_batch
            bpf_cmd = "BPF_MAP_LOOKUP_BATCH"

        # alloc keys and values to the max size
        ct_buf_size, ct_keys, ct_values = self._alloc_keys_values(alloc_k=True,
                                                                  alloc_v=True)
        # NOTE(review): ct_out_batch and ct_cnt are bound to the SAME
        # c_uint32 object here, so the kernel's "next batch" cookie and the
        # in/out element count share one buffer. Confirm this aliasing is
        # intentional before changing it.
        ct_out_batch = ct_cnt = ct.c_uint32(0)
        total = 0
        while True:
            # remaining room in the preallocated buffers
            ct_cnt.value = ct_buf_size.value - total
            # in_batch is NULL on the first call so the kernel starts from
            # the beginning of the map; later calls resume from out_batch.
            res = bpf_batch(self.map_fd,
                            ct.byref(ct_out_batch) if total else None,
                            ct.byref(ct_out_batch),
                            ct.byref(ct_keys, ct.sizeof(self.Key) * total),
                            ct.byref(ct_values, ct.sizeof(self.Leaf) * total),
                            ct.byref(ct_cnt)
                            )
            errcode = ct.get_errno()
            total += ct_cnt.value
            # ENOENT marks the end of the map, not a failure
            if (res != 0 and errcode != errno.ENOENT):
                raise Exception("%s has failed: %s" % (bpf_cmd,
                                                       os.strerror(errcode)))

            if res != 0:
                break  # success

            if total == ct_buf_size.value:  # buffer full, we can't progress
                break

            if ct_cnt.value == 0:
                # no progress, probably because concurrent update
                # puts too many elements in one bucket.
                break

        for i in range(0, total):
            k = ct_keys[i]
            v = ct_values[i]
            # scalar keys/values come back as plain Python values; wrap them
            # so callers always receive ctypes instances
            if not isinstance(k, ct.Structure):
                k = self.Key(k)
            if not isinstance(v, ct.Structure):
                v = self.Leaf(v)
            yield (k, v)

    def zero(self):
        """Reset every entry's value to a zeroed Leaf."""
        # Even though this is not very efficient, we grab the entire list of
        # keys before enumerating it. This helps avoid a potential race where
        # the leaf assignment changes a hash table bucket that is being
        # enumerated by the same loop, and may lead to a hang.
        for k in list(self.keys()):
            self[k] = self.Leaf()

    def __iter__(self):
        return TableBase.Iter(self)

    # dict-style aliases: iterating a table yields its keys
    def iter(self): return self.__iter__()
    def keys(self): return self.__iter__()

    class Iter(object):
        """Iterator over map keys, driven by TableBase.next()."""
        def __init__(self, table):
            self.table = table
            self.key = None  # None means "start from the first key"
        def __iter__(self):
            return self
        def __next__(self):
            return self.next()
        def next(self):
            self.key = self.table.next(self.key)
            return self.key

    def next(self, key):
        """Return the key following ``key`` (the first key when ``key`` is
        None); raises StopIteration at the end of the map."""
        next_key = self.Key()

        if key is None:
            res = lib.bpf_get_first_key(self.map_fd, ct.byref(next_key),
                                        ct.sizeof(self.Key))
        else:
            res = lib.bpf_get_next_key(self.map_fd, ct.byref(key),
                                       ct.byref(next_key))

        if res < 0:
            raise StopIteration()
        return next_key

    def decode_c_struct(self, tmp, buckets, bucket_fn, bucket_sort_fn, index_max=log2_index_max):
        """Decode struct-typed histogram keys into per-bucket slot lists.

        Treats the Key struct's first field as the bucket and its second
        (or third, when padding intervenes) as the slot index. Fills
        ``tmp`` (bucket -> list of index_max slot counts) and appends the
        optionally-sorted bucket list to ``buckets``.
        """
        f1 = self.Key._fields_[0][0]
        f2 = self.Key._fields_[1][0]
        # The above code assumes that self.Key._fields_[1][0] holds the
        # slot. But a padding member may have been inserted here, which
        # breaks the assumption and leads to chaos.
        # TODO: this is a quick fix. Fixing/working around in the BCC
        # internal library is the right thing to do.
        if f2 == '__pad_1' and len(self.Key._fields_) == 3:
            f2 = self.Key._fields_[2][0]
        for k, v in self.items():
            bucket = getattr(k, f1)
            if bucket_fn:
                bucket = bucket_fn(bucket)
            vals = tmp[bucket] = tmp.get(bucket, [0] * index_max)
            slot = getattr(k, f2)
            vals[slot] = v.value
        buckets_lst = list(tmp.keys())
        if bucket_sort_fn:
            buckets_lst = bucket_sort_fn(buckets_lst)
        for bucket in buckets_lst:
            buckets.append(bucket)

    def print_json_hist(self, val_type="value", section_header="Bucket ptr",
                        section_print_fn=None, bucket_fn=None, bucket_sort_fn=None):
        """print_json_hist(val_type="value", section_header="Bucket ptr",
                                   section_print_fn=None, bucket_fn=None,
                                   bucket_sort_fn=None):

                Prints a table as a json histogram. The table must be stored as
                log2. The val_type argument is optional, and is a column header.
                If the histogram has a secondary key, the dictionary will be split by secondary key
                If section_print_fn is not None, it will be passed the bucket value
                to format into a string as it sees fit. If bucket_fn is not None,
                it will be used to produce a bucket value for the histogram keys.
                If bucket_sort_fn is not None, it will be used to sort the buckets
                before iterating them, and it is useful when there are multiple fields
                in the secondary key.
                The maximum index allowed is log2_index_max (65), which will
                accommodate any 64-bit integer in the histogram.
                """
        if isinstance(self.Key(), ct.Structure):
            # struct key: one histogram dict per secondary-key bucket
            tmp = {}
            buckets = []
            self.decode_c_struct(tmp, buckets, bucket_fn, bucket_sort_fn)
            for bucket in buckets:
                vals = tmp[bucket]
                if section_print_fn:
                    section_bucket = (section_header, section_print_fn(bucket))
                else:
                    section_bucket = (section_header, bucket)
                print(_get_json_hist(vals, val_type, section_bucket))

        else:
            # scalar key: the key itself is the log2 slot index
            vals = [0] * log2_index_max
            for k, v in self.items():
                vals[k.value] = v.value
            print(_get_json_hist(vals, val_type))

    def print_log2_hist(self, val_type="value", section_header="Bucket ptr",
            section_print_fn=None, bucket_fn=None, strip_leading_zero=None,
            bucket_sort_fn=None):
        """print_log2_hist(val_type="value", section_header="Bucket ptr",
                           section_print_fn=None, bucket_fn=None,
                           strip_leading_zero=None, bucket_sort_fn=None):

        Prints a table as a log2 histogram. The table must be stored as
        log2. The val_type argument is optional, and is a column header.
        If the histogram has a secondary key, multiple tables will print
        and section_header can be used as a header description for each.
        If section_print_fn is not None, it will be passed the bucket value
        to format into a string as it sees fit. If bucket_fn is not None,
        it will be used to produce a bucket value for the histogram keys.
        If the value of strip_leading_zero is not False, prints a histogram
        that is omitted leading zeros from the beginning.
        If bucket_sort_fn is not None, it will be used to sort the buckets
        before iterating them, and it is useful when there are multiple fields
        in the secondary key.
        The maximum index allowed is log2_index_max (65), which will
        accommodate any 64-bit integer in the histogram.
        """
        if isinstance(self.Key(), ct.Structure):
            # struct key: one printed histogram per secondary-key bucket
            tmp = {}
            buckets = []
            self.decode_c_struct(tmp, buckets, bucket_fn, bucket_sort_fn)
            for bucket in buckets:
                vals = tmp[bucket]
                if section_print_fn:
                    print("\n%s = %s" % (section_header,
                        section_print_fn(bucket)))
                else:
                    print("\n%s = %r" % (section_header, bucket))
                _print_log2_hist(vals, val_type, strip_leading_zero)
        else:
            vals = [0] * log2_index_max
            for k, v in self.items():
                vals[k.value] = v.value
            _print_log2_hist(vals, val_type, strip_leading_zero)

    def print_linear_hist(self, val_type="value", section_header="Bucket ptr",
            section_print_fn=None, bucket_fn=None, strip_leading_zero=None,
            bucket_sort_fn=None):
        """print_linear_hist(val_type="value", section_header="Bucket ptr",
                           section_print_fn=None, bucket_fn=None,
                           strip_leading_zero=None, bucket_sort_fn=None)

        Prints a table as a linear histogram. This is intended to span integer
        ranges, eg, from 0 to 100. The val_type argument is optional, and is a
        column header.  If the histogram has a secondary key, multiple tables
        will print and section_header can be used as a header description for
        each.  If section_print_fn is not None, it will be passed the bucket
        value to format into a string as it sees fit. If bucket_fn is not None,
        it will be used to produce a bucket value for the histogram keys.
        If the value of strip_leading_zero is not False, prints a histogram
        that is omitted leading zeros from the beginning.
        If bucket_sort_fn is not None, it will be used to sort the buckets
        before iterating them, and it is useful when there are multiple fields
        in the secondary key.
        The maximum index allowed is linear_index_max (1025), which is hoped
        to be sufficient for integer ranges spanned.
        """
        if isinstance(self.Key(), ct.Structure):
            # struct key: one printed histogram per secondary-key bucket
            tmp = {}
            buckets = []
            self.decode_c_struct(tmp, buckets, bucket_fn, bucket_sort_fn, linear_index_max)

            for bucket in buckets:
                vals = tmp[bucket]
                if section_print_fn:
                    print("\n%s = %s" % (section_header,
                        section_print_fn(bucket)))
                else:
                    print("\n%s = %r" % (section_header, bucket))
                _print_linear_hist(vals, val_type, strip_leading_zero)
        else:
            vals = [0] * linear_index_max
            for k, v in self.items():
                try:
                    vals[k.value] = v.value
                except IndexError:
                    # Improve error text. If the limit proves a nuisance, this
                    # function can be rewritten to avoid having one.
                    raise IndexError(("Index in print_linear_hist() of %d " +
                        "exceeds max of %d.") % (k.value, linear_index_max))
            _print_linear_hist(vals, val_type, strip_leading_zero)
808
809
class HashTable(TableBase):
    """BPF_MAP_TYPE_HASH map exposed with the standard table interface."""

    def __init__(self, *args, **kwargs):
        super(HashTable, self).__init__(*args, **kwargs)

    def __len__(self):
        # Hash maps have no stored element count; count keys by iterating.
        return sum(1 for _ in self)
818
class LruHash(HashTable):
    """BPF_MAP_TYPE_LRU_HASH map; same Python interface as HashTable."""
    def __init__(self, *args, **kwargs):
        super(LruHash, self).__init__(*args, **kwargs)
822
class ArrayBase(TableBase):
    """Base class for array-type maps, indexed by a dense integer key in
    the range [0, max_entries)."""

    def __init__(self, *args, **kwargs):
        super(ArrayBase, self).__init__(*args, **kwargs)

    def _normalize_key(self, key):
        """Validate a key and convert plain ints (including Python-style
        negative indices) into the map's ctypes key type.

        Raises:
            IndexError: if the key is not an integer type or out of range.
        """
        if isinstance(key, int):
            if key < 0:
                # Support negative indexing from the end, like Python lists.
                key = len(self) + key
            if key < 0:
                # Still negative after adjustment (index underflow). Reject
                # here: a signed ctypes key type would otherwise slip past
                # the upper-bound check below.
                raise IndexError("Array index out of range")
            key = self.Key(key)
        if not isinstance(key, ct._SimpleCData):
            raise IndexError("Array index must be an integer type")
        if key.value >= len(self):
            raise IndexError("Array index out of range")
        return key

    def __len__(self):
        # Array maps have a fixed size known at creation time.
        return self.max_entries

    def __getitem__(self, key):
        key = self._normalize_key(key)
        return super(ArrayBase, self).__getitem__(key)

    def __setitem__(self, key, leaf):
        key = self._normalize_key(key)
        super(ArrayBase, self).__setitem__(key, leaf)

    def __delitem__(self, key):
        key = self._normalize_key(key)
        super(ArrayBase, self).__delitem__(key)

    def clearitem(self, key):
        """Overwrite the entry at key with a zero-initialized leaf."""
        key = self._normalize_key(key)
        leaf = self.Leaf()
        res = lib.bpf_update_elem(self.map_fd, ct.byref(key), ct.byref(leaf), 0)
        if res < 0:
            raise Exception("Could not clear item")

    def __iter__(self):
        return ArrayBase.Iter(self, self.Key)

    class Iter(object):
        """Iterator yielding every key from 0 up to len(table) - 1."""

        def __init__(self, table, keytype):
            self.Key = keytype
            self.table = table
            self.i = -1

        def __iter__(self):
            return self

        def __next__(self):
            return self.next()

        def next(self):
            self.i += 1
            if self.i == len(self.table):
                raise StopIteration()
            return self.Key(self.i)
878
class Array(ArrayBase):
    """BPF_MAP_TYPE_ARRAY map. Slots are fixed, so "deleting" an entry
    resets it to a zeroed leaf instead of removing it."""

    def __init__(self, *args, **kwargs):
        super(Array, self).__init__(*args, **kwargs)

    def __delitem__(self, key):
        # Array entries cannot actually be removed; emulate deletion by
        # zero-filling the slot.
        self.clearitem(key)
886
class ProgArray(ArrayBase):
    """BPF_MAP_TYPE_PROG_ARRAY map; leaves are BPF program descriptors."""

    def __init__(self, *args, **kwargs):
        super(ProgArray, self).__init__(*args, **kwargs)

    def __setitem__(self, key, leaf):
        # Accept either a raw program fd (int) or a loaded Function object
        # and coerce it into the map's leaf type before storing.
        value = leaf
        if isinstance(value, int):
            value = self.Leaf(value)
        if isinstance(value, self.bpf.Function):
            value = self.Leaf(value.fd)
        super(ProgArray, self).__setitem__(key, value)
897
class FileDesc:
    """Small RAII-style holder for an OS file descriptor.

    Closes the descriptor at most once — explicitly via clean_up(), when
    leaving a `with` block, or on garbage collection.
    """

    def __init__(self, fd):
        if fd is None or fd < 0:
            raise Exception("Invalid file descriptor")
        self.fd = fd

    def clean_up(self):
        """Close the descriptor if still open; safe to call repeatedly."""
        if self.fd is None or self.fd < 0:
            return
        os.close(self.fd)
        self.fd = None

    def __del__(self):
        self.clean_up()

    def __enter__(self, *args, **kwargs):
        return self

    def __exit__(self, *args, **kwargs):
        self.clean_up()
917
class CgroupArray(ArrayBase):
    """BPF_MAP_TYPE_CGROUP_ARRAY map. A value may be assigned either from
    an already-open cgroup file descriptor (int) or from a cgroup
    filesystem path (str)."""

    def __init__(self, *args, **kwargs):
        super(CgroupArray, self).__init__(*args, **kwargs)

    def __setitem__(self, key, leaf):
        if isinstance(leaf, int):
            # Caller passed an already-open cgroup fd.
            super(CgroupArray, self).__setitem__(key, self.Leaf(leaf))
        elif isinstance(leaf, str):
            # Open the path only long enough to store its fd in the map.
            # NOTE(review): the fd is closed right after the update —
            # presumably the kernel holds its own reference; confirm.
            # TODO: Add os.O_CLOEXEC once we move to Python version >3.3
            with FileDesc(os.open(leaf, os.O_RDONLY)) as f:
                super(CgroupArray, self).__setitem__(key, self.Leaf(f.fd))
        else:
            # The constraint is on the value being stored, not the key.
            raise Exception("Cgroup array leaf must be either FD or cgroup path")
931
class PerfEventArray(ArrayBase):
    """BPF_MAP_TYPE_PERF_EVENT_ARRAY map: one entry per CPU, each holding
    either a perf ring-buffer reader fd or a perf event fd."""

    def __init__(self, *args, **kwargs):
        super(PerfEventArray, self).__init__(*args, **kwargs)
        # cpu -> fd for keys opened via _open_perf_event; -1 when the fd is
        # owned by a perf reader instead (see _open_perf_buffer).
        self._open_key_fds = {}
        # Lazily-built ctypes class for the event struct (see event()).
        self._event_class = None

    def __del__(self):
        # Snapshot the keys: __delitem__ mutates _open_key_fds while we loop.
        keys = list(self._open_key_fds.keys())
        for key in keys:
            del self[key]

    def __delitem__(self, key):
        if key not in self._open_key_fds:
            return
        # Delete entry from the array
        super(PerfEventArray, self).__delitem__(key)
        key_id = (id(self), key)
        if key_id in self.bpf.perf_buffers:
            # The key is opened for perf ring buffer
            lib.perf_reader_free(self.bpf.perf_buffers[key_id])
            del self.bpf.perf_buffers[key_id]
            del self._cbs[key]
        else:
            # The key is opened for perf event read
            lib.bpf_close_perf_event_fd(self._open_key_fds[key])
        del self._open_key_fds[key]

    def event(self, data):
        """event(data)

        When perf buffers are opened to receive custom perf event,
        the underlying event data struct which is defined in C in
        the BPF program can be deduced via this function. This avoids
        redundant definitions in Python.
        """
        if self._event_class is None:
            self._event_class = _get_event_class(self)
        return ct.cast(data, ct.POINTER(self._event_class)).contents

    def open_perf_buffer(self, callback, page_cnt=8, lost_cb=None, wakeup_events=1):
        """open_perf_buffer(callback)

        Opens a set of per-cpu ring buffer to receive custom perf event
        data from the bpf program. The callback will be invoked for each
        event submitted from the kernel, up to millions per second. Use
        page_cnt to change the size of the per-cpu ring buffer. The value
        must be a positive power of two and defaults to 8.
        """

        # The bitwise test alone would accept 0 (0 & -1 == 0), so also
        # reject non-positive values explicitly.
        if page_cnt <= 0 or page_cnt & (page_cnt - 1) != 0:
            raise Exception("Perf buffer page_cnt must be a power of two")

        for i in get_online_cpus():
            self._open_perf_buffer(i, callback, page_cnt, lost_cb, wakeup_events)

    def _open_perf_buffer(self, cpu, callback, page_cnt, lost_cb, wakeup_events):
        """Open the per-cpu reader for `cpu` and wire up the callbacks."""
        def raw_cb_(_, data, size):
            try:
                callback(cpu, data, size)
            except IOError as e:
                if e.errno == errno.EPIPE:
                    # Downstream consumer went away (e.g. broken pipe);
                    # exit quietly instead of spewing a traceback.
                    exit()
                else:
                    raise e
        def lost_cb_(_, lost):
            try:
                lost_cb(lost)
            except IOError as e:
                if e.errno == errno.EPIPE:
                    exit()
                else:
                    raise e
        fn = _RAW_CB_TYPE(raw_cb_)
        lost_fn = _LOST_CB_TYPE(lost_cb_) if lost_cb else ct.cast(None, _LOST_CB_TYPE)
        opts = bcc_perf_buffer_opts()
        opts.pid = -1
        opts.cpu = cpu
        opts.wakeup_events = wakeup_events
        reader = lib.bpf_open_perf_buffer_opts(fn, lost_fn, None, page_cnt, ct.byref(opts))
        if not reader:
            raise Exception("Could not open perf buffer")
        fd = lib.perf_reader_fd(reader)
        self[self.Key(cpu)] = self.Leaf(fd)
        self.bpf.perf_buffers[(id(self), cpu)] = reader
        # keep a refcnt so the ctypes callbacks aren't garbage collected
        self._cbs[cpu] = (fn, lost_fn)
        # The actual fd is held by the perf reader, add to track opened keys
        self._open_key_fds[cpu] = -1

    def _open_perf_event(self, cpu, typ, config, pid=-1):
        """Open one hardware/software perf counter on `cpu` and store it."""
        fd = lib.bpf_open_perf_event(typ, config, pid, cpu)
        if fd < 0:
            raise Exception("bpf_open_perf_event failed")
        self[self.Key(cpu)] = self.Leaf(fd)
        self._open_key_fds[cpu] = fd

    def open_perf_event(self, typ, config, pid=-1):
        """open_perf_event(typ, config)

        Configures the table such that calls from the bpf program to
        table.perf_read(CUR_CPU_IDENTIFIER) will return the hardware
        counter denoted by event ev on the local cpu.
        """
        for i in get_online_cpus():
            self._open_perf_event(i, typ, config, pid)
1038
1039
class PerCpuHash(HashTable):
    """Per-CPU hash map: each key maps to one value per possible CPU.

    An optional "reducer" keyword argument (a binary function) makes
    __getitem__ fold the per-CPU values into a single result.
    """

    def __init__(self, *args, **kwargs):
        self.reducer = kwargs.pop("reducer", None)
        super(PerCpuHash, self).__init__(*args, **kwargs)
        self.sLeaf = self.Leaf  # scalar (single-CPU) leaf type
        self.total_cpu = len(get_possible_cpus())
        # This needs to be 8 as hard coded into the linux kernel.
        self.alignment = ct.sizeof(self.sLeaf) % 8
        if self.alignment == 0:
            self.Leaf = self.sLeaf * self.total_cpu
        else:
            # Currently Float, Char, un-aligned structs are not supported
            if self.sLeaf == ct.c_uint:
                self.Leaf = ct.c_uint64 * self.total_cpu
            elif self.sLeaf == ct.c_int:
                self.Leaf = ct.c_int64 * self.total_cpu
            else:
                raise IndexError("Leaf must be aligned to 8 bytes")

    def getvalue(self, key):
        """Return the per-CPU values for key as an array of sLeaf."""
        result = super(PerCpuHash, self).__getitem__(key)
        if self.alignment == 0:
            ret = result
        else:
            # Narrow each widened 8-byte kernel slot back to the scalar type.
            ret = (self.sLeaf * self.total_cpu)()
            for i in range(self.total_cpu):
                ret[i] = result[i]
        return ret

    def __getitem__(self, key):
        if self.reducer:
            return reduce(self.reducer, self.getvalue(key))
        else:
            return self.getvalue(key)

    def __setitem__(self, key, leaf):
        super(PerCpuHash, self).__setitem__(key, leaf)

    def sum(self, key):
        """Return the sum of the per-CPU values for key, as an sLeaf."""
        # Check the scalar type: self.Leaf is always a ctypes array type
        # here, so testing self.Leaf() against ct.Structure never fires.
        if isinstance(self.sLeaf(), ct.Structure):
            raise IndexError("Leaf must be an integer type for default sum functions")
        return self.sLeaf(sum(self.getvalue(key)))

    def max(self, key):
        """Return the maximum of the per-CPU values for key, as an sLeaf."""
        if isinstance(self.sLeaf(), ct.Structure):
            raise IndexError("Leaf must be an integer type for default max functions")
        return self.sLeaf(max(self.getvalue(key)))

    def average(self, key):
        """Return the average of the per-CPU values across possible CPUs."""
        result = self.sum(key)
        return result.value / self.total_cpu
1091
class LruPerCpuHash(PerCpuHash):
    """BPF_MAP_TYPE_LRU_PERCPU_HASH map; same interface as PerCpuHash."""
    def __init__(self, *args, **kwargs):
        super(LruPerCpuHash, self).__init__(*args, **kwargs)
1095
class PerCpuArray(ArrayBase):
    """Per-CPU array map: each index holds one value per possible CPU.

    An optional "reducer" keyword argument (a binary function) makes
    __getitem__ fold the per-CPU values into a single result.
    """

    def __init__(self, *args, **kwargs):
        self.reducer = kwargs.pop("reducer", None)
        super(PerCpuArray, self).__init__(*args, **kwargs)
        self.sLeaf = self.Leaf  # scalar (single-CPU) leaf type
        self.total_cpu = len(get_possible_cpus())
        # This needs to be 8 as hard coded into the linux kernel.
        self.alignment = ct.sizeof(self.sLeaf) % 8
        if self.alignment == 0:
            self.Leaf = self.sLeaf * self.total_cpu
        else:
            # Currently Float, Char, un-aligned structs are not supported
            if self.sLeaf == ct.c_uint:
                self.Leaf = ct.c_uint64 * self.total_cpu
            elif self.sLeaf == ct.c_int:
                self.Leaf = ct.c_int64 * self.total_cpu
            else:
                raise IndexError("Leaf must be aligned to 8 bytes")

    def getvalue(self, key):
        """Return the per-CPU values for key as an array of sLeaf."""
        result = super(PerCpuArray, self).__getitem__(key)
        if self.alignment == 0:
            ret = result
        else:
            # Narrow each widened 8-byte kernel slot back to the scalar type.
            ret = (self.sLeaf * self.total_cpu)()
            for i in range(self.total_cpu):
                ret[i] = result[i]
        return ret

    def __getitem__(self, key):
        if (self.reducer):
            return reduce(self.reducer, self.getvalue(key))
        else:
            return self.getvalue(key)

    def __setitem__(self, key, leaf):
        super(PerCpuArray, self).__setitem__(key, leaf)

    def __delitem__(self, key):
        # Delete in this type does not have an effect, so zero out instead
        self.clearitem(key)

    def sum(self, key):
        """Return the sum of the per-CPU values for key, as an sLeaf."""
        # Check the scalar type: self.Leaf is always a ctypes array type
        # here, so testing self.Leaf() against ct.Structure never fires.
        if isinstance(self.sLeaf(), ct.Structure):
            raise IndexError("Leaf must be an integer type for default sum functions")
        return self.sLeaf(sum(self.getvalue(key)))

    def max(self, key):
        """Return the maximum of the per-CPU values for key, as an sLeaf."""
        if isinstance(self.sLeaf(), ct.Structure):
            raise IndexError("Leaf must be an integer type for default max functions")
        return self.sLeaf(max(self.getvalue(key)))

    def average(self, key):
        """Return the average of the per-CPU values across possible CPUs."""
        result = self.sum(key)
        return result.value / self.total_cpu
1151
class LpmTrie(TableBase):
    """BPF_MAP_TYPE_LPM_TRIE (longest-prefix-match trie) map."""
    def __init__(self, *args, **kwargs):
        super(LpmTrie, self).__init__(*args, **kwargs)

    def __len__(self):
        # Entry counting is not supported for LPM tries.
        raise NotImplementedError
1158
1159
class StackTrace(TableBase):
    """BPF_MAP_TYPE_STACK_TRACE map: stack ids mapped to arrays of frame
    addresses (or build-id entries when created with BPF_F_STACK_BUILD_ID)."""

    MAX_DEPTH = 127
    BPF_F_STACK_BUILD_ID = (1 << 5)
    BPF_STACK_BUILD_ID_EMPTY = 0  # can't get stacktrace
    BPF_STACK_BUILD_ID_VALID = 1  # valid build-id,ip
    BPF_STACK_BUILD_ID_IP = 2  # fallback to ip

    def __init__(self, *args, **kwargs):
        super(StackTrace, self).__init__(*args, **kwargs)

    class StackWalker(object):
        """Iterator over the frames of one stack, optionally resolving
        each address through a caller-supplied function."""

        def __init__(self, stack, flags, resolve=None):
            self.stack = stack
            self.n = -1
            self.resolve = resolve
            self.flags = flags

        def __iter__(self):
            return self

        def __next__(self):
            return self.next()

        def next(self):
            self.n += 1
            if self.n == StackTrace.MAX_DEPTH:
                raise StopIteration()

            if self.flags & StackTrace.BPF_F_STACK_BUILD_ID:
                addr = self.stack.trace[self.n]
                # An entry with no build-id (EMPTY) or one that fell back
                # to a raw instruction pointer (IP) ends the walk.
                if addr.status == StackTrace.BPF_STACK_BUILD_ID_IP or \
                   addr.status == StackTrace.BPF_STACK_BUILD_ID_EMPTY:
                    raise StopIteration()
            else:
                addr = self.stack.ip[self.n]

            # A zero address marks the end of a stack shorter than MAX_DEPTH.
            if addr == 0:
                raise StopIteration()

            return self.resolve(addr) if self.resolve else addr

    def walk(self, stack_id, resolve=None):
        """Return a StackWalker over the stack stored under stack_id."""
        return StackTrace.StackWalker(self[self.Key(stack_id)], self.flags, resolve)

    def __len__(self):
        # Stack-trace maps have no stored count; count keys by iterating.
        return sum(1 for _ in self)

    def clear(self):
        # Deliberately a no-op for stack trace maps.
        pass
1211
class DevMap(ArrayBase):
    """BPF_MAP_TYPE_DEVMAP map; same Python interface as ArrayBase."""
    def __init__(self, *args, **kwargs):
        super(DevMap, self).__init__(*args, **kwargs)
1215
class CpuMap(ArrayBase):
    """BPF_MAP_TYPE_CPUMAP map; same Python interface as ArrayBase."""
    def __init__(self, *args, **kwargs):
        super(CpuMap, self).__init__(*args, **kwargs)
1219
class XskMap(ArrayBase):
    """BPF_MAP_TYPE_XSKMAP map; same Python interface as ArrayBase."""
    def __init__(self, *args, **kwargs):
        super(XskMap, self).__init__(*args, **kwargs)
1223
class MapInMapArray(ArrayBase):
    """BPF_MAP_TYPE_ARRAY_OF_MAPS map; same Python interface as ArrayBase."""
    def __init__(self, *args, **kwargs):
        super(MapInMapArray, self).__init__(*args, **kwargs)
1227
class MapInMapHash(HashTable):
    """BPF_MAP_TYPE_HASH_OF_MAPS map; same Python interface as HashTable."""
    def __init__(self, *args, **kwargs):
        super(MapInMapHash, self).__init__(*args, **kwargs)
1231
class RingBuf(TableBase):
    """BPF_MAP_TYPE_RINGBUF map: a single shared ring buffer for events
    submitted by the BPF program."""

    def __init__(self, *args, **kwargs):
        super(RingBuf, self).__init__(*args, **kwargs)
        self._ringbuf = None
        # Lazily-built ctypes class for the event struct (see event()).
        self._event_class = None

    def __delitem__(self, key):
        # Ring buffer entries cannot be deleted individually; deliberate
        # no-op. (Was previously misspelled "__delitem", which left the
        # mapping protocol method unimplemented.)
        pass

    def __del__(self):
        pass

    def __len__(self):
        # A ring buffer has no addressable entries to count.
        return 0

    def event(self, data):
        """event(data)

        When ring buffers are opened to receive custom event,
        the underlying event data struct which is defined in C in
        the BPF program can be deduced via this function. This avoids
        redundant definitions in Python.
        """
        if self._event_class is None:
            self._event_class = _get_event_class(self)
        return ct.cast(data, ct.POINTER(self._event_class)).contents

    def open_ring_buffer(self, callback, ctx=None):
        """open_ring_buffer(callback)

        Opens a ring buffer to receive custom event data from the bpf program.
        The callback will be invoked for each event submitted from the kernel,
        up to millions per second.
        """

        def ringbuf_cb_(ctx, data, size):
            try:
                ret = callback(ctx, data, size)
                # Callback for ringbufs should _always_ return an integer.
                # If the function the user registers does not,
                # simply fall back to returning 0.
                try:
                    ret = int(ret)
                except (TypeError, ValueError):
                    ret = 0
            except IOError as e:
                if e.errno == errno.EPIPE:
                    # Downstream consumer went away; exit quietly.
                    exit()
                else:
                    raise e
            return ret

        fn = _RINGBUF_CB_TYPE(ringbuf_cb_)
        self.bpf._open_ring_buffer(self.map_fd, fn, ctx)
        # keep a refcnt so the ctypes callback isn't garbage collected
        self._cbs[0] = fn
1288
class QueueStack:
    """Interface for BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK maps, which
    store leaves only (no keys) and are accessed via push/pop/peek."""

    # Flag for map.push
    BPF_EXIST = 2

    def __init__(self, bpf, map_id, map_fd, leaftype):
        self.bpf = bpf
        self.map_id = map_id
        self.map_fd = map_fd
        self.Leaf = leaftype
        self.ttype = lib.bpf_table_type_id(self.bpf.module, self.map_id)
        self.flags = lib.bpf_table_flags_id(self.bpf.module, self.map_id)
        self.max_entries = int(lib.bpf_table_max_entries_id(self.bpf.module,
                self.map_id))

    def leaf_sprintf(self, leaf):
        """Return the printed representation of a leaf, as bytes."""
        buf = ct.create_string_buffer(ct.sizeof(self.Leaf) * 8)
        res = lib.bpf_table_leaf_snprintf(self.bpf.module, self.map_id, buf,
                                          len(buf), ct.byref(leaf))
        if res < 0:
            raise Exception("Could not printf leaf")
        return buf.value

    def leaf_scanf(self, leaf_str):
        """Parse a printed leaf representation back into a Leaf instance."""
        leaf = self.Leaf()
        res = lib.bpf_table_leaf_sscanf(self.bpf.module, self.map_id, leaf_str,
                                        ct.byref(leaf))
        if res < 0:
            raise Exception("Could not scanf leaf")
        return leaf

    def push(self, leaf, flags=0):
        """Push a leaf onto the queue/stack; raises on failure (e.g. full
        map), including the OS error string in the message."""
        res = lib.bpf_update_elem(self.map_fd, None, ct.byref(leaf), flags)
        if res < 0:
            errstr = os.strerror(ct.get_errno())
            raise Exception("Could not push to table: %s" % errstr)

    def pop(self):
        """Remove and return the next leaf; raises KeyError when empty."""
        leaf = self.Leaf()
        res = lib.bpf_lookup_and_delete(self.map_fd, None, ct.byref(leaf))
        if res < 0:
            raise KeyError("Could not pop from table")
        return leaf

    def peek(self):
        """Return the next leaf without removing it; raises KeyError when
        empty."""
        leaf = self.Leaf()
        res = lib.bpf_lookup_elem(self.map_fd, None, ct.byref(leaf))
        if res < 0:
            raise KeyError("Could not peek table")
        return leaf

    def itervalues(self):
        """Yield (and consume) leaves until the map is empty."""
        # to avoid infinite loop, set maximum pops to max_entries
        cnt = self.max_entries
        while cnt:
            try:
                yield self.pop()
                cnt -= 1
            except KeyError:
                return

    def values(self):
        """Drain the map and return all leaves as a list."""
        return list(self.itervalues())
1351