/*
 * Copyright (c) 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <linux/perf_event.h>
#include <linux/version.h>
#include <unistd.h>
#include <string>

#include "BPF.h"
#include "catch.hpp"

TEST_CASE("test read perf event", "[bpf_perf_event]") {
// bpf_perf_event_read has been supported since kernel 4.3, but that version
// only supported HARDWARE and RAW events. Our Jenkins tests run on VMs, which
// do not expose HARDWARE counters, and support for other event types such as
// SOFTWARE was only added in kernel 4.13, so this test requires 4.13 or later.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
  const std::string BPF_PROGRAM = R"(
    BPF_PERF_ARRAY(cnt, NUM_CPUS);
    BPF_HASH(val, int, u64, 1);
    BPF_HASH(ret, int, int, 1);
    BPF_HASH(counter, int, struct bpf_perf_event_value, 1);

    int on_sys_getuid(void *ctx) {
      int zero = 0;

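      // perf_read() returns the counter value on success; on failure it
      // returns a negative errno cast to u64, which falls in (-256, 0) when
      // viewed as s64, so bail out rather than record an error code.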
      u64 v = cnt.perf_read(CUR_CPU_IDENTIFIER);
      if (((s64)v < 0) && ((s64)v > -256))
        return 0;
      val.update(&zero, &v);
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
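      // On 4.15+ kernels, perf_counter_value also reports the event's enabled
      // and running times, which callers can use to scale the counter when
      // events are multiplexed.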
      u32 cpu = bpf_get_smp_processor_id();
      struct bpf_perf_event_value c = {0};
      int r = cnt.perf_counter_value(cpu, &c, sizeof(c));
      ret.update(&zero, &r);
      counter.update(&zero, &c);
    #endif
      return 0;
    }
  )";

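  // NUM_CPUS is passed as a cflag so the BPF_PERF_ARRAY above is sized to the
  // number of online CPUs.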
  ebpf::BPF bpf;
  ebpf::StatusTuple res(0);
  res = bpf.init(
      BPF_PROGRAM,
      {"-DNUM_CPUS=" + std::to_string(sysconf(_SC_NPROCESSORS_ONLN))}, {});
  REQUIRE(res.ok());
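  // Open a software CPU-clock counter on each CPU, scoped to this process,
  // and store the resulting fds in the "cnt" perf array.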
  int pid = getpid();
  res =
      bpf.open_perf_event("cnt", PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK, pid);
  REQUIRE(res.ok());
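  // Resolve the kernel's symbol name for the getuid syscall handler, which
  // varies across architectures and kernel versions, before attaching.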
  std::string getuid_fnname = bpf.get_syscall_fnname("getuid");
  res = bpf.attach_kprobe(getuid_fnname, "on_sys_getuid");
  REQUIRE(res.ok());
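  // Calling getuid() fires the kprobe, which reads the counter into "val".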
  REQUIRE(getuid() >= 0);
  res = bpf.detach_kprobe(getuid_fnname);
  REQUIRE(res.ok());
  res = bpf.close_perf_event("cnt");
  REQUIRE(res.ok());

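  // The probe stored the most recent counter reading under key 0.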
  auto val = bpf.get_hash_table<int, uint64_t>("val");
  REQUIRE(val[0] >= 0);
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
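  // Read back the helper's return code and the counter snapshot recorded by
  // the probe; running time can never exceed enabled time.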
  auto counter_table =
      bpf.get_hash_table<int, struct bpf_perf_event_value>("counter");
  auto counter = counter_table[0];
  auto ret = bpf.get_hash_table<int, int>("ret");
  REQUIRE(ret[0] == 0);
  REQUIRE(counter.counter >= 0);
  REQUIRE(counter.enabled > 0);
  REQUIRE(counter.running >= 0);
  REQUIRE(counter.running <= counter.enabled);
#endif
}

TEST_CASE("test attach perf event", "[bpf_perf_event]") {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
  const std::string BPF_PROGRAM = R"(
    BPF_HASH(pid, int, u64, 1);
    BPF_HASH(ret, int, int, 1);
    BPF_HASH(counter, int, struct bpf_perf_event_value, 1);

    int on_event(void *ctx) {
      int zero = 0;

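      // bpf_get_current_pid_tgid() packs the tgid (process id) into the upper
      // 32 bits and the thread id into the lower 32 bits.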
      u64 p = bpf_get_current_pid_tgid();
      pid.update(&zero, &p);
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
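      // bpf_perf_prog_read_value (4.15+) reads the counter and its enabled/
      // running times for the perf event that triggered this program.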
      struct bpf_perf_event_value c = {0};
      int r = bpf_perf_prog_read_value(ctx, &c, sizeof(c));
      ret.update(&zero, &r);
      counter.update(&zero, &c);
    #endif
      return 0;
    }
  )";

  ebpf::BPF bpf;
  ebpf::StatusTuple res(0);
  res = bpf.init(BPF_PROGRAM);
  REQUIRE(res.ok());
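  // Attach on_event to a software CPU-clock event on every CPU, sampling at
  // 1000 Hz (sample_period = 0, sample_freq = 1000).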
  res = bpf.attach_perf_event(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK,
                              "on_event", 0, 1000);
  REQUIRE(res.ok());
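  // Let the sampling event fire for a second before detaching.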
  sleep(1);
  res = bpf.detach_perf_event(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK);
  REQUIRE(res.ok());

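  // The sampled program should have recorded this process's pid/tgid.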
  auto pid = bpf.get_hash_table<int, uint64_t>("pid");
  REQUIRE(pid[0] >= 0);
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
  auto counter_table =
      bpf.get_hash_table<int, struct bpf_perf_event_value>("counter");
  auto counter = counter_table[0];
  auto ret = bpf.get_hash_table<int, int>("ret");
  REQUIRE(ret[0] == 0);
  REQUIRE(counter.counter >= 0);
  // The test slept for one second between perf event attachment and
  // detachment above, so the enabled time should be roughly 1000000000ns.
  // In practice most observed values are 9xxxxxxxx and some as low as
  // 7xxxxxxxx, so be a little conservative and require at least 200000000
  // to avoid test flakiness.
  REQUIRE(counter.enabled >= 200000000);
  REQUIRE(counter.running >= 0);
  REQUIRE(counter.running <= counter.enabled);
#endif
}